From d6269543ef24aa012aa228c27af3adb074f7b36b Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Sat, 21 Jul 2007 04:37:40 -0700
Subject: slob: reduce list scanning

The version of SLOB in -mm always scans its free list from the beginning,
which results in small allocations and free segments clustering at the
beginning of the list over time. This causes the average search to scan
over a large stretch at the beginning on each allocation.

By starting each page search where the last one left off, we evenly
distribute the allocations and greatly shorten the average search.

Without this patch, kernel compiles on a 1.5G machine take a large amount
of system time for list scanning. With this patch, compiles are within a
few seconds of performance of a SLAB kernel with no notable change in
system time.

Signed-off-by: Matt Mackall
Cc: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slob.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

(limited to 'mm')

diff --git a/mm/slob.c b/mm/slob.c
index d50920ecc02..ec33fcdc852 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -293,6 +293,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
+	struct list_head *prev;
 	slob_t *b = NULL;
 	unsigned long flags;
 
@@ -307,12 +308,22 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		if (node != -1 && page_to_nid(&sp->page) != node)
 			continue;
 #endif
+		/* Enough room on this page? */
+		if (sp->units < SLOB_UNITS(size))
+			continue;
 
-		if (sp->units >= SLOB_UNITS(size)) {
-			b = slob_page_alloc(sp, size, align);
-			if (b)
-				break;
-		}
+		/* Attempt to alloc */
+		prev = sp->list.prev;
+		b = slob_page_alloc(sp, size, align);
+		if (!b)
+			continue;
+
+		/* Improve fragment distribution and reduce our average
+		 * search time by starting our next search here. (see
+		 * Knuth vol 1, sec 2.5, pg 449) */
+		if (free_slob_pages.next != prev->next)
+			list_move_tail(&free_slob_pages, prev->next);
+		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
 
--
cgit v1.2.3
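
[Editor's note] The list_move_tail() call is the heart of the change: it turns
SLOB's first-fit scan into a next-fit scan by rotating the list head so the next
allocation resumes where this one stopped. The kernel version records
sp->list.prev before calling slob_page_alloc() since that call can unlink a page
that becomes full, so prev->next remains a valid restart point either way. Below
is a minimal standalone sketch of the next-fit idea in plain C. It is NOT kernel
code: page_t, free_pages, add_page(), alloc_units() and restart_at() are invented
names for illustration only; the real patch does the same rotation with the
kernel's struct list_head and list_move_tail() on free_slob_pages.

/*
 * Standalone next-fit sketch -- not kernel code; all names are invented.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct page {
	struct page *prev, *next;	/* circular doubly-linked list */
	int units;			/* free units left on this page */
	int id;
} page_t;

/* List head; an empty list points at itself. */
static page_t free_pages = { &free_pages, &free_pages, 0, -1 };

static void add_page(page_t *p)
{
	p->prev = free_pages.prev;
	p->next = &free_pages;
	free_pages.prev->next = p;
	free_pages.prev = p;
}

/* Rotate the list head so the next scan starts at @p (the next-fit step). */
static void restart_at(page_t *p)
{
	if (free_pages.next == p)
		return;			/* already first; nothing to do */
	/* unlink the head node ... */
	free_pages.prev->next = free_pages.next;
	free_pages.next->prev = free_pages.prev;
	/* ... and splice it back in just before @p */
	free_pages.next = p;
	free_pages.prev = p->prev;
	p->prev->next = &free_pages;
	p->prev = &free_pages;
}

/* First page with enough room wins; remember where we stopped. */
static page_t *alloc_units(int want)
{
	page_t *p;

	for (p = free_pages.next; p != &free_pages; p = p->next) {
		if (p->units < want)
			continue;
		p->units -= want;
		restart_at(p);		/* analogue of the patch's list_move_tail() */
		return p;
	}
	return NULL;			/* caller would grab a fresh page here */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		page_t *p = malloc(sizeof(*p));
		p->units = 64;
		p->id = i;
		add_page(p);
	}
	for (i = 0; i < 8; i++) {
		page_t *p = alloc_units(20);
		printf("allocation %d served by page %d\n", i, p ? p->id : -1);
	}
	return 0;
}

Without restart_at() every allocation would rescan the nearly-full pages at the
front of the list first; with it, successive searches walk around the ring, which
is the fragment-distribution effect the Knuth reference in the patch describes.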