author     Christoph Lameter <clameter@sgi.com>    2008-04-14 19:11:41 +0300
committer  Pekka Enberg <penberg@cs.helsinki.fi>   2008-04-27 18:28:18 +0300
commit     319d1e240683d37924ea8977c91730c3393fd453 (patch)
tree       8c59f466123d9628ee441b5d3b65564ff8b997e3 /mm
parent     65c3376aaca96c66aa76014aaf430398964b68cb (diff)
slub: Drop fallback to page allocator method
There is now a generic method of falling back to a slab page of minimal order, so the fallback to kmalloc_large() is no longer needed.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 43
1 file changed, 2 insertions(+), 41 deletions(-)
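The generic fallback this commit relies on was added by the parent commit (65c3376aaca9, "slub: Fallback to minimal order during slab page allocation"): allocate_slab() first tries the cache's preferred order and, on failure, retries with the smallest order that still fits one object. A rough sketch of that mechanism (paraphrased from memory, not quoted from the tree):

/*
 * Sketch of the generic minimal-order fallback in allocate_slab().
 * s->oo is the preferred order/objects pair, s->min the smallest
 * order that holds at least one object.
 */
page = alloc_pages_node(node, flags | __GFP_NOWARN | __GFP_NORETRY,
			oo_order(s->oo));
if (unlikely(!page))
	/* A minimal-order page is far more likely to be available. */
	page = alloc_pages_node(node, flags, oo_order(s->min));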
diff --git a/mm/slub.c b/mm/slub.c
index 35c22d940ba..de6f38761d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -204,8 +204,6 @@ static inline void ClearSlabDebug(struct page *page)
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
-#define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
-#define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
/* Not all arches define cache_line_size */
#ifndef cache_line_size
@@ -1623,27 +1621,6 @@ new_slab:
c->page = new;
goto load_freelist;
}
-
- /*
- * No memory available.
- *
- * If the slab uses higher order allocs but the object is
- * smaller than a page size then we can fallback in emergencies
- * to the page allocator via kmalloc_large. The page allocator may
- * have failed to obtain a higher order page and we can try to
- * allocate a single page if the object fits into a single page.
- * That is only possible if certain conditions are met that are being
- * checked when a slab is created.
- */
- if (!(gfpflags & __GFP_NORETRY) &&
- (s->flags & __PAGE_ALLOC_FALLBACK)) {
- if (gfpflags & __GFP_WAIT)
- local_irq_enable();
- object = kmalloc_large(s->objsize, gfpflags);
- if (gfpflags & __GFP_WAIT)
- local_irq_disable();
- return object;
- }
return NULL;
debug:
if (!alloc_debug_processing(s, c->page, object, addr))
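The deleted branch above was the emergency path: __slab_alloc() runs with interrupts disabled, so it had to re-enable them around kmalloc_large(), which may sleep when __GFP_WAIT is set. The path can be dropped without touching the free side because kmalloc_large() objects bypass SLUB entirely and the free path already tells the two cases apart by page flags; roughly (a from-memory sketch of kfree() in this era, not part of this patch):

void kfree(const void *x)
{
	struct page *page;

	if (unlikely(ZERO_OR_NULL_PTR(x)))
		return;

	page = virt_to_head_page(x);
	if (unlikely(!PageSlab(page))) {
		/* Straight from the page allocator: no slab metadata. */
		put_page(page);
		return;
	}
	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
}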
@@ -2330,20 +2307,7 @@ static int calculate_sizes(struct kmem_cache *s)
*/
size = ALIGN(size, align);
s->size = size;
-
- if ((flags & __KMALLOC_CACHE) &&
- PAGE_SIZE / size < slub_min_objects) {
- /*
- * Kmalloc cache that would not have enough objects in
- * an order 0 page. Kmalloc slabs can fallback to
- * page allocator order 0 allocs so take a reasonably large
- * order that will allows us a good number of objects.
- */
- order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
- s->flags |= __PAGE_ALLOC_FALLBACK;
- s->allocflags |= __GFP_NOWARN;
- } else
- order = calculate_order(size);
+ order = calculate_order(size);
if (order < 0)
return 0;
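With the __KMALLOC_CACHE special case gone, every cache's page order comes from calculate_order(). The core of that search (a simplified sketch loosely following slab_order() in mm/slub.c; the helper name here is made up) finds the smallest order whose trailing waste is an acceptable fraction of the slab:

/*
 * Smallest order <= max_order that wastes no more than
 * 1/fract_leftover of the slab on the trailing remainder.
 */
static int slab_order_sketch(int size, int min_objects,
			     int max_order, int fract_leftover)
{
	int order;

	for (order = get_order(min_objects * size);
	     order <= max_order; order++) {
		unsigned long slab_size = PAGE_SIZE << order;
		unsigned long rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			return order;
	}
	return -1;
}

calculate_order() retries this search with progressively looser waste limits and smaller object counts before giving up, which is why a hand-picked large order for kmalloc caches is no longer needed.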
@@ -2589,7 +2553,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags | __KMALLOC_CACHE, NULL))
+ flags, NULL))
goto panic;
list_add(&s->list, &slab_caches);
@@ -3105,9 +3069,6 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
- if ((s->flags & __PAGE_ALLOC_FALLBACK))
- return 1;
-
if (s->ctor)
return 1;
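Net effect of the last hunk: caches that used the page-allocator fallback no longer have to opt out of slab merging. Assembled from the surviving context lines, slab_unmergeable() now begins:

static int slab_unmergeable(struct kmem_cache *s)
{
	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	/* ... remaining checks are unchanged by this patch ... */

	return 0;
}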