Merge branch 'slab/rcu' into slab/next
[cascardo/linux.git] / mm / slub.c
index d3d1767..e841d89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,6 +281,30 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
        return (p - addr) / s->size;
 }
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+       /*
+        * Debugging requires use of the padding between the object
+        * and whatever may come after it.
+        */
+       if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+               return s->objsize;
+
+#endif
+       /*
+        * If we need to store the freelist pointer back there
+        * or track user information, then we can only use the
+        * space before that information.
+        */
+       if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+               return s->inuse;
+       /*
+        * Else we can use all the padding etc. for the allocation.
+        */
+       return s->size;
+}
+
 static inline int order_objects(int order, unsigned long size, int reserved)
 {
        return ((PAGE_SIZE << order) - reserved) / size;
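
[Annotation: the new slab_ksize() helper centralizes the decision that ksize() previously made inline (removed in the last hunk): how many bytes of an object's slot the caller may actually use. A minimal userspace sketch of the same three-way decision follows; the struct, the flag values, and the sizes are illustrative stand-ins, not the kernel's, and the CONFIG_SLUB_DEBUG #ifdef is dropped for brevity.]

#include <stdio.h>
#include <stddef.h>

/* mock flag values -- the real SLAB_* constants differ */
#define SLAB_RED_ZONE        0x001
#define SLAB_POISON          0x002
#define SLAB_STORE_USER      0x004
#define SLAB_DESTROY_BY_RCU  0x008

struct mock_cache {
	unsigned long flags;
	size_t objsize;		/* size the caller asked for */
	size_t inuse;		/* offset of freelist ptr / tracking data */
	size_t size;		/* full per-object slot size */
};

static size_t mock_slab_ksize(const struct mock_cache *s)
{
	/* red zones and poison own the padding: only objsize is usable */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->objsize;
	/* metadata sits at inuse: usable space stops there */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/* otherwise the whole slot, padding included, is usable */
	return s->size;
}

int main(void)
{
	struct mock_cache c = { .objsize = 100, .inuse = 104, .size = 112 };

	printf("plain:    %zu\n", mock_slab_ksize(&c));	/* 112 */
	c.flags = SLAB_DESTROY_BY_RCU;
	printf("rcu:      %zu\n", mock_slab_ksize(&c));	/* 104 */
	c.flags = SLAB_POISON;
	printf("poisoned: %zu\n", mock_slab_ksize(&c));	/* 100 */
	return 0;
}

[The order of the checks matters: debugging flags take priority because red zoning and poisoning claim the padding that would otherwise be handed back to the caller.]
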
@@ -805,7 +829,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
        flags &= gfp_allowed_mask;
-       kmemcheck_slab_alloc(s, flags, object, s->objsize);
+       kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
        kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
 }
 
@@ -1254,21 +1278,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        __free_pages(page, order);
 }
 
+#define need_reserve_slab_rcu                                          \
+       (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
        struct page *page;
 
-       page = container_of((struct list_head *)h, struct page, lru);
+       if (need_reserve_slab_rcu)
+               page = virt_to_head_page(h);
+       else
+               page = container_of((struct list_head *)h, struct page, lru);
+
        __free_slab(page->slab, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
        if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-               /*
-                * RCU free overloads the RCU head over the LRU
-                */
-               struct rcu_head *head = (void *)&page->lru;
+               struct rcu_head *head;
+
+               if (need_reserve_slab_rcu) {
+                       int order = compound_order(page);
+                       int offset = (PAGE_SIZE << order) - s->reserved;
+
+                       VM_BUG_ON(s->reserved != sizeof(*head));
+                       head = page_address(page) + offset;
+               } else {
+                       /*
+                        * RCU free overloads the RCU head over the LRU
+                        */
+                       head = (void *)&page->lru;
+               }
 
                call_rcu(head, rcu_free_slab);
        } else
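
[Annotation: need_reserve_slab_rcu is a compile-time constant guarding against configurations, or future changes, where struct rcu_head outgrows the page->lru field it traditionally overloads. When it is true, the head instead lives in the last s->reserved bytes of the slab itself, at offset (PAGE_SIZE << order) - s->reserved, which is why rcu_free_slab() can recover the page with virt_to_head_page(h). A userspace sketch with mock stand-in types follows; on a typical 64-bit build both mock structs are 16 bytes, so the constant folds to 0.]

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages */

struct mock_list_head { void *next, *prev; };
struct mock_rcu_head  { void *next; void (*func)(struct mock_rcu_head *); };

/* mirrors need_reserve_slab_rcu: a constant the compiler folds away */
#define need_reserve_slab_rcu \
	(sizeof(struct mock_list_head) < sizeof(struct mock_rcu_head))

int main(void)
{
	int order = 1;				/* an order-1 (8 KiB) slab */
	size_t reserved = sizeof(struct mock_rcu_head);
	size_t offset = (PAGE_SIZE << order) - reserved;

	printf("need reserve: %d\n", (int)need_reserve_slab_rcu);
	printf("rcu_head would sit at byte %zu of %lu\n",
	       offset, PAGE_SIZE << order);
	return 0;
}

[Because the expression involves only sizeof, the branch in free_slab() is decided at compile time and the dead arm is discarded.]
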
@@ -2356,6 +2397,9 @@ static int kmem_cache_open(struct kmem_cache *s,
        s->flags = kmem_cache_flags(size, flags, name, ctor);
        s->reserved = 0;
 
+       if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+               s->reserved = sizeof(struct rcu_head);
+
        if (!calculate_sizes(s, -1))
                goto error;
        if (disable_higher_order_debug) {
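
[Annotation: setting s->reserved shrinks the space that order_objects(), visible as context in the first hunk, can divide into objects. A small illustrative calculation, assuming 4 KiB pages:]

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed 4 KiB pages */

/* mirrors order_objects() from the first hunk */
static int mock_order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

int main(void)
{
	/* 256-byte objects in an order-0 slab */
	printf("no reservation:   %d objects\n",
	       mock_order_objects(0, 256, 0));	/* 4096/256 = 16 */
	printf("16-byte rcu tail: %d objects\n",
	       mock_order_objects(0, 256, 16));	/* 4080/256 = 15 */
	return 0;
}
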
@@ -2405,12 +2449,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(struct kmem_cache *s)
-{
-       return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
                                                        const char *text)
 {
@@ -2702,7 +2740,6 @@ EXPORT_SYMBOL(__kmalloc_node);
 size_t ksize(const void *object)
 {
        struct page *page;
-       struct kmem_cache *s;
 
        if (unlikely(object == ZERO_SIZE_PTR))
                return 0;
@@ -2713,28 +2750,8 @@ size_t ksize(const void *object)
                WARN_ON(!PageCompound(page));
                return PAGE_SIZE << compound_order(page);
        }
-       s = page->slab;
 
-#ifdef CONFIG_SLUB_DEBUG
-       /*
-        * Debugging requires use of the padding between object
-        * and whatever may come after it.
-        */
-       if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-               return s->objsize;
-
-#endif
-       /*
-        * If we have the need to store the freelist pointer
-        * back there or track user information then we can
-        * only use the space before that information.
-        */
-       if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-               return s->inuse;
-       /*
-        * Else we can use all the padding etc for the allocation
-        */
-       return s->size;
+       return slab_ksize(page->slab);
 }
 EXPORT_SYMBOL(ksize);
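
[Annotation: after the refactor, ksize() forwards slab-backed objects to slab_ksize(). The contract is that the allocator may return more usable space than was requested, and the caller may legitimately use all ksize() bytes. Glibc's malloc_usable_size() expresses the same idea in userspace; the sketch below is offered purely as an analogy, not as kernel code.]

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>	/* glibc-specific: malloc_usable_size() */

int main(void)
{
	size_t want = 100;
	char *p = malloc(want);

	if (!p)
		return 1;
	/* usable >= want; writing up to usable bytes is legal */
	printf("asked for %zu, may use %zu\n",
	       want, malloc_usable_size(p));
	free(p);
	return 0;
}
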