author		Pekka Enberg <penberg@cs.helsinki.fi>	2009-06-10 18:50:32 +0300
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2009-06-11 18:14:18 +0300
commit		781b2ba6eb5f22440afac9c79a89ebd6e3674a60
tree		89f3665c52e68cde9aab3eaf249d33e90db5fc20 /mm/slub.c
parent		59a3759d0fe8d969888c741bb33f4946e4d3750d
SLUB: Out-of-memory diagnostics
As suggested by Mel Gorman, add out-of-memory diagnostics to the SLUB allocator
to make debugging OOM conditions easier. This patch helped hunt down a nasty
OOM issue that popped up every now and then and was caused by SLUB debugging code
which forced 4096-byte allocations to use order-1 pages even in the fallback case.
An example printout looks like this:
<snip page allocator out-of-memory message>
SLUB: Unable to allocate memory on node -1 (gfp=20)
cache: kmalloc-4096, object size: 4096, buffer size: 4168, default order: 3, min order: 1
node 0: slabs: 95, objs: 665, free: 0
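For context on the numbers above: the cache line reports a 4096-byte object with a 4168-byte buffer and a min order of 1, which is the failure mode the commit message describes (debug metadata pushing a nominally 4 KiB allocation onto order-1 pages). A minimal user-space sketch of that arithmetic, assuming 4 KiB pages and a simplified stand-in for the kernel's get_order():

/* Hedged illustration only, not part of the patch: why a 4096-byte
 * object with SLUB debug metadata no longer fits an order-0 page. */
#include <stdio.h>

/* Simplified stand-in for the kernel's get_order(): smallest order
 * whose span (4096 << order bytes) holds one buffer. */
static int min_order(unsigned long buffer_size)
{
	int order = 0;

	while ((4096UL << order) < buffer_size)
		order++;
	return order;
}

int main(void)
{
	printf("plain 4096-byte buffer -> order %d\n", min_order(4096));
	/* 4168 bytes = 4096-byte object plus debug metadata,
	 * as in the example report above. */
	printf("debug 4168-byte buffer -> order %d\n", min_order(4168));
	return 0;
}

With min order 1, even the fallback path needs contiguous order-1 pages, which helps explain why these allocations could fail under memory pressure.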
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	70
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 65ffda5934b0..a5a4ecf7e393 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1484,6 +1484,56 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_slabs = atomic_long_read(&n->nr_slabs);
+		nr_objs = atomic_long_read(&n->total_objects);
+		nr_free = count_partial(n, count_free);
+
+		printk(KERN_WARNING
+			" node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1565,6 +1615,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -3318,20 +3369,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3342,11 +3379,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
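A side note on the refactoring: count_free() and count_partial() move out of the CONFIG_SLUB_DEBUG-only section so slab_out_of_memory() can reuse them, and the counting is structured as one walk of the partial list parameterized by a per-page counter callback. A small user-space analogue of that callback pattern (hypothetical names, no locking, not from the patch):

#include <stdio.h>

/* Toy stand-in for struct page's slab bookkeeping fields. */
struct page { int objects; int inuse; };

static int count_free(const struct page *p)  { return p->objects - p->inuse; }
static int count_inuse(const struct page *p) { return p->inuse; }
static int count_total(const struct page *p) { return p->objects; }

/* One walker over a "partial list"; the caller picks the statistic. */
static unsigned long count_partial(const struct page *pages, int n,
				   int (*get_count)(const struct page *))
{
	unsigned long x = 0;

	for (int i = 0; i < n; i++)
		x += get_count(&pages[i]);
	return x;
}

int main(void)
{
	struct page partial[] = { { 7, 7 }, { 7, 5 }, { 7, 7 } };
	int n = (int)(sizeof(partial) / sizeof(partial[0]));

	printf("objs: %lu, inuse: %lu, free: %lu\n",
	       count_partial(partial, n, count_total),
	       count_partial(partial, n, count_inuse),
	       count_partial(partial, n, count_free));
	return 0;
}

In the kernel version the walk happens under the node's list_lock with interrupts disabled, so callers only choose which per-page statistic to sum.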