mm/slab: activate debug_pagealloc in SLAB when it is actually enabled
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Tue, 15 Mar 2016 21:54:15 +0000 (14:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
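
With CONFIG_DEBUG_PAGEALLOC compiled in but debug_pagealloc not enabled at boot,
SLAB still took the page-poisoning paths and dropped SLAB_RED_ZONE and
SLAB_STORE_USER for page-sized objects, even though kernel_map_pages() does
nothing in that case.  The hunks below make each of those paths check
debug_pagealloc_enabled() at runtime, so the debug_pagealloc handling only
applies when it is actually active.

For reference, a rough sketch of the runtime check as it appears in
include/linux/mm.h around this kernel version (simplified, not the exact
upstream text):

	#ifdef CONFIG_DEBUG_PAGEALLOC
	extern bool _debug_pagealloc_enabled;
	extern void __kernel_map_pages(struct page *page, int numpages, int enable);

	/* true only when debug_pagealloc was switched on at boot */
	static inline bool debug_pagealloc_enabled(void)
	{
		return _debug_pagealloc_enabled;
	}

	/* becomes a no-op when debug_pagealloc is not enabled */
	static inline void
	kernel_map_pages(struct page *page, int numpages, int enable)
	{
		if (!debug_pagealloc_enabled())
			return;

		__kernel_map_pages(page, numpages, enable);
	}
	#endif

debug_pagealloc is enabled at runtime with the debug_pagealloc=on boot parameter.
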
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slab.c

index 14c3f9c..4807cf4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1838,7 +1838,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 
                if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-                       if (cachep->size % PAGE_SIZE == 0 &&
+                       if (debug_pagealloc_enabled() &&
+                               cachep->size % PAGE_SIZE == 0 &&
                                        OFF_SLAB(cachep))
                                kernel_map_pages(virt_to_page(objp),
                                        cachep->size / PAGE_SIZE, 1);
@@ -2176,7 +2177,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
         * to check size >= 256. It guarantees that all necessary small
         * sized slab is initialized in current slab initialization sequence.
         */
-       if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+       if (debug_pagealloc_enabled() &&
+               !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
                size >= 256 && cachep->object_size > cache_line_size() &&
                ALIGN(size, cachep->align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
@@ -2232,7 +2234,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                 * poisoning, then it's going to smash the contents of
                 * the redzone and userword anyhow, so switch them off.
                 */
-               if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+               if (debug_pagealloc_enabled() &&
+                       size % PAGE_SIZE == 0 && flags & SLAB_POISON)
                        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 #endif
        }
@@ -2716,7 +2719,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
        set_obj_status(page, objnr, OBJECT_FREE);
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+               if (debug_pagealloc_enabled() &&
+                       (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
                        store_stackinfo(cachep, objp, caller);
                        kernel_map_pages(virt_to_page(objp),
                                         cachep->size / PAGE_SIZE, 0);
@@ -2861,7 +2865,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                return objp;
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-               if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+               if (debug_pagealloc_enabled() &&
+                       (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
                        kernel_map_pages(virt_to_page(objp),
                                         cachep->size / PAGE_SIZE, 1);
                else