 mm/slab.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 14c3f9c59bf1..4807cf4fe1b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1838,7 +1838,8 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if (cachep->size % PAGE_SIZE == 0 &&
+			if (debug_pagealloc_enabled() &&
+			    cachep->size % PAGE_SIZE == 0 &&
 					OFF_SLAB(cachep))
 				kernel_map_pages(virt_to_page(objp),
 					cachep->size / PAGE_SIZE, 1);
@@ -2176,7 +2177,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * to check size >= 256. It guarantees that all necessary small
 	 * sized slab is initialized in current slab initialization sequence.
 	 */
-	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+	if (debug_pagealloc_enabled() &&
+	    !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
 	    size >= 256 && cachep->object_size > cache_line_size() &&
 	    ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
@@ -2232,7 +2234,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * poisoning, then it's going to smash the contents of
 	 * the redzone and userword anyhow, so switch them off.
 	 */
-	if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+	if (debug_pagealloc_enabled() &&
+	    size % PAGE_SIZE == 0 && flags & SLAB_POISON)
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 #endif
 	}
@@ -2716,7 +2719,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	set_obj_status(page, objnr, OBJECT_FREE);
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+		if (debug_pagealloc_enabled() &&
+		    (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
 			store_stackinfo(cachep, objp, caller);
 			kernel_map_pages(virt_to_page(objp),
 					cachep->size / PAGE_SIZE, 0);
@@ -2861,7 +2865,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if (debug_pagealloc_enabled() &&
+		    (cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
 			kernel_map_pages(virt_to_page(objp),
 					cachep->size / PAGE_SIZE, 1);
 		else
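
The pattern is the same at every site: the CONFIG_DEBUG_PAGEALLOC path only pays off when debug_pagealloc was actually switched on at boot, so each check now short-circuits on debug_pagealloc_enabled() first and falls back to ordinary poisoning otherwise. Below is a minimal standalone sketch of that gating; debug_pagealloc_enabled() and kernel_map_pages() are stubbed out (the real helpers live in <linux/mm.h>), and the simulated boot flag and sizes are invented for illustration.

/*
 * Standalone sketch (not kernel code) of the gating this patch applies.
 * debug_pagealloc_enabled() and kernel_map_pages() are stubs standing in
 * for the real helpers declared in <linux/mm.h>; the boot flag below is
 * simulated, and all sizes are invented for illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stands in for the state set by the debug_pagealloc=on boot parameter. */
static bool debug_pagealloc;

static bool debug_pagealloc_enabled(void)
{
	return debug_pagealloc;
}

/* Stub: in the kernel this maps or unmaps pages in the linear mapping. */
static void kernel_map_pages(void *addr, int numpages, int enable)
{
	printf("kernel_map_pages(%p, %d pages, %s)\n",
	       addr, numpages, enable ? "map" : "unmap");
}

/*
 * Shape of the free-side check after the patch: the expensive
 * unmap-on-free path runs only when debug_pagealloc is live; otherwise
 * the object gets ordinary byte-pattern poisoning.
 */
static void poison_obj_on_free(void *objp, size_t obj_size)
{
	if (debug_pagealloc_enabled() && obj_size % PAGE_SIZE == 0)
		kernel_map_pages(objp, (int)(obj_size / PAGE_SIZE), 0);
	else
		printf("poisoning %p with a byte pattern\n", objp);
}

int main(void)
{
	static char obj[2 * 4096];

	poison_obj_on_free(obj, sizeof(obj));	/* flag off: poison */
	debug_pagealloc = true;
	poison_obj_on_free(obj, sizeof(obj));	/* flag on: unmap */
	return 0;
}

With the flag off, the page-multiple object falls through to byte-pattern poisoning, which is exactly what a CONFIG_DEBUG_PAGEALLOC kernel now does when booted without debug_pagealloc=on; before this patch it would have taken the kernel_map_pages() path (and dropped SLAB_RED_ZONE and SLAB_STORE_USER) unconditionally.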