From 4581ced379736fd76432c754f999d26deb83fbb7 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Wed, 19 May 2010 12:02:14 +0100
Subject: mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>

Acked-by: Herbert Xu
Signed-off-by: David Woodhouse
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe71ea..c874c3efac2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,14 +157,6 @@
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
-- 
cgit v1.2.3

From bbd7d57bfe852d9788bae5fb171c7edb4021d8ac Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 24 Mar 2010 22:25:47 +0100
Subject: slub: Potential stack overflow

I discovered that we can overflow the stack if CONFIG_SLUB_DEBUG=y and
we use slabs with many objects, since list_slab_objects() and
process_slab() use DECLARE_BITMAP(map, page->objects).

With 65535 bits, we use 8192 bytes of stack ...

Switch these allocations to dynamic allocations.

Signed-off-by: Eric Dumazet
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe71ea..78f1a202ca3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2429,9 +2429,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	DECLARE_BITMAP(map, page->objects);
+	long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
+			    GFP_ATOMIC);
 
-	bitmap_zero(map, page->objects);
+	if (!map)
+		return;
 	slab_err(s, page, "%s", text);
 	slab_lock(page);
 	for_each_free_object(p, s, page->freelist)
@@ -2446,6 +2448,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	slab_unlock(page);
+	kfree(map);
 #endif
 }
 
@@ -3651,10 +3654,10 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-		struct page *page, enum track_item alloc)
+		struct page *page, enum track_item alloc,
+		long *map)
 {
 	void *addr = page_address(page);
-	DECLARE_BITMAP(map, page->objects);
 	void *p;
 
 	bitmap_zero(map, page->objects);
@@ -3673,11 +3676,14 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
+	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
+			     sizeof(unsigned long), GFP_KERNEL);
 
-	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-			GFP_TEMPORARY))
+	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+			GFP_TEMPORARY)) {
+		kfree(map);
 		return sprintf(buf, "Out of memory\n");
-
+	}
 	/* Push back cpu slabs */
 	flush_all(s);
 
@@ -3691,9 +3697,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
-			process_slab(&t, s, page, alloc);
+			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
-			process_slab(&t, s, page, alloc);
+			process_slab(&t, s, page, alloc, map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
@@ -3744,6 +3750,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	}
 
 	free_loc_track(&t);
+	kfree(map);
 	if (!t.count)
 		len += sprintf(buf, "No data\n");
 	return len;
-- 
cgit v1.2.3
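The arithmetic behind the stack-overflow fix above is worth spelling out:
DECLARE_BITMAP(map, n) expands to an on-stack array of BITS_TO_LONGS(n)
longs, so with page->objects capped at 65535 (it is a u16) each of the two
debug helpers reserved 8 KB of stack. The standalone userspace sketch below
re-derives that figure; BITS_PER_LONG and BITS_TO_LONGS are redefined here
to keep it self-contained, but they match the kernel's semantics.

/*
 * Sketch only, not kernel code: reproduce the 8192-byte stack cost of
 * DECLARE_BITMAP(map, 65535) quoted in the commit message above.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	  (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long objects = 65535;	/* MAX_OBJS_PER_PAGE: page.objects is a u16 */
	size_t bytes = BITS_TO_LONGS(objects) * sizeof(long);

	/* Prints 8192 on LP64 (1024 longs * 8) and on ILP32 (2048 longs * 4). */
	printf("DECLARE_BITMAP(map, %lu) costs %zu bytes of stack\n",
	       objects, bytes);
	return 0;
}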
From d3e14aa336b37df76ae875fa051dfdb0e765ddf9 Mon Sep 17 00:00:00 2001
From: Xiaotian Feng
Date: Thu, 8 Apr 2010 17:26:44 +0800
Subject: slub: __kmalloc_node_track_caller should trace kmalloc_large_node case

commit 94b528d (kmemtrace: SLUB hooks for caller-tracking functions)
missed tracing kmalloc_large_node in __kmalloc_node_track_caller. We
should trace it the same way __kmalloc_node does.

Acked-by: David Rientjes
Cc: Matt Mackall
Cc: Ingo Molnar
Cc: Vegard Nossum
Signed-off-by: Xiaotian Feng
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 78f1a202ca3..52ae5a53818 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3341,8 +3341,15 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, gfpflags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, gfpflags, node);
+
+		trace_kmalloc_node(caller, ret,
+				   size, PAGE_SIZE << get_order(size),
+				   gfpflags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, gfpflags);
 
-- 
cgit v1.2.3

From 6b65aaf3027c4e02b42aaefd900aa79136a30681 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Wed, 14 Apr 2010 23:58:36 +0900
Subject: slub: Use alloc_pages_exact_node() for page allocation

The alloc_slab_page() in SLUB uses alloc_pages() if node is '-1'. This
means that the node validity check in alloc_pages_node() is unnecessary,
so we can use alloc_pages_exact_node() instead to avoid the comparison
and branch, as commit 6484eb3e2a81807722 ("page allocator: do not check
NUMA node ID when the caller knows the node is valid") did for the page
allocator.

Cc: Christoph Lameter
Reviewed-by: KAMEZAWA Hiroyuki
Reviewed-by: Mel Gorman
Signed-off-by: Minchan Kim
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slub.c b/mm/slub.c
index 52ae5a53818..2cdd235cb80 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1084,7 +1084,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 	if (node == -1)
 		return alloc_pages(flags, order);
 	else
-		return alloc_pages_node(node, flags, order);
+		return alloc_pages_exact_node(node, flags, order);
 }
 
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
-- 
cgit v1.2.3
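To see why the last patch helps, compare the two entry points it swaps:
alloc_pages_node() tolerates node == -1 by falling back to the local node,
so a caller like alloc_slab_page(), which has already dispatched the -1
case to plain alloc_pages(), pays for a comparison and branch that can
never fire; alloc_pages_exact_node() only asserts that the node is valid.
A minimal userspace analogy of that split follows (the helper names are
illustrative stand-ins, not the kernel API):

/*
 * Analogy only: pick_node() re-checks what the caller already knows,
 * pick_node_exact() trusts the caller, as alloc_pages_exact_node() does.
 */
#include <assert.h>
#include <stdio.h>

#define NO_NODE (-1)

static int local_node(void) { return 0; }	/* stand-in for numa_node_id() */

/* Checking wrapper: tolerates node == NO_NODE by using the local node. */
static int pick_node(int node)
{
	if (node < 0)		/* dead test once the caller has checked */
		node = local_node();
	return node;
}

/* Exact variant: the caller guarantees a valid node (cf. VM_BUG_ON). */
static int pick_node_exact(int node)
{
	assert(node >= 0);
	return node;
}

int main(void)
{
	int node = 1;

	/* Same shape as alloc_slab_page(): -1 takes the local path, so
	 * only known-valid nodes ever reach the exact variant. */
	if (node == NO_NODE)
		printf("local node %d\n", pick_node(node));
	else
		printf("node %d\n", pick_node_exact(node));
	return 0;
}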