author     Joonsoo Kim <iamjoonsoo.kim@lge.com>            2016-03-15 14:54:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 16:55:16 -0700
commit     158e319bba59e890c3920ce6d827c188287bae84 (patch)
tree       3380225c70f1621a86fd469b23687f4758d8559a /mm/slab.c
parent     832a15d209cd260180407bde1af18965b21623f3 (diff)
mm/slab: clean up cache type determination
The current cache type determination code is open-coded and hard to follow. A
following patch will introduce one more cache type, which would make this code
even more complex. So, before that happens, abstract the determination into
helper functions, set_off_slab_cache() and set_on_slab_cache() (a simplified
sketch of the resulting decision follows the tags below).
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
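The decision the new helpers encapsulate is whether a slab's freelist (the
index array used for slab management) lives inside the slab page (on-slab) or
in a separate allocation (off-slab). Below is a standalone userspace sketch of
that decision, not kernel code: TOY_PAGE_SIZE, the OFF_SLAB_MIN_SIZE value,
freelist_idx_t, and the packing arithmetic are simplified stand-ins for the
kernel's calculate_slab_order(); only the order of the attempts and the
"fall back to on-slab if the leftover fits the freelist" check mirror the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE_SIZE		4096u
#define OFF_SLAB_MIN_SIZE	128u	/* assumed threshold, not the kernel's value */

typedef unsigned char freelist_idx_t;	/* stand-in for the kernel typedef */

struct toy_cache {
	size_t size;		/* object size */
	size_t num;		/* objects per slab */
	bool off_slab;		/* freelist kept outside the slab page? */
};

/* Mirrors set_on_slab_cache(): pack objects plus an in-page freelist. */
static bool toy_on_slab(struct toy_cache *c)
{
	c->num = TOY_PAGE_SIZE / (c->size + sizeof(freelist_idx_t));
	c->off_slab = false;
	return c->num != 0;
}

/*
 * Mirrors set_off_slab_cache(): give the whole page to objects and keep the
 * freelist elsewhere, but bail out when the leftover space could have held
 * the freelist anyway (the same check the patch performs).
 */
static bool toy_off_slab(struct toy_cache *c)
{
	size_t left;

	if (c->size < OFF_SLAB_MIN_SIZE)
		return false;

	c->num = TOY_PAGE_SIZE / c->size;
	if (!c->num)
		return false;

	left = TOY_PAGE_SIZE - c->num * c->size;
	if (left >= c->num * sizeof(freelist_idx_t))
		return false;	/* on-slab costs nothing extra here */

	c->off_slab = true;
	return true;
}

int main(void)
{
	size_t sizes[] = { 32, 192, 1024 };
	size_t i;

	/* Same order as the patched __kmem_cache_create(): try off-slab first. */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		struct toy_cache c = { .size = sizes[i] };

		if (!toy_off_slab(&c) && !toy_on_slab(&c)) {
			printf("size %4zu: -E2BIG\n", c.size);
			continue;
		}
		printf("size %4zu: %3zu objs per page, %s-slab freelist\n",
		       c.size, c.num, c.off_slab ? "off" : "on");
	}
	return 0;
}

With these toy numbers, 32-byte objects stay on-slab, 192-byte objects fall
back to on-slab because the 64 bytes of leftover already fit the 21-entry
freelist, and 1024-byte objects go off-slab.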
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	105
1 file changed, 71 insertions(+), 34 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index d5dffc806f82..9b56685fb79b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2023,6 +2023,64 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 	return cachep;
 }
 
+static bool set_off_slab_cache(struct kmem_cache *cachep,
+			size_t size, unsigned long flags)
+{
+	size_t left;
+
+	cachep->num = 0;
+
+	/*
+	 * Determine if the slab management is 'on' or 'off' slab.
+	 * (bootstrapping cannot cope with offslab caches so don't do
+	 * it too early on. Always use on-slab management when
+	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
+	 */
+	if (size < OFF_SLAB_MIN_SIZE)
+		return false;
+
+	if (slab_early_init)
+		return false;
+
+	if (flags & SLAB_NOLEAKTRACE)
+		return false;
+
+	/*
+	 * Size is large, assume best to place the slab management obj
+	 * off-slab (should allow better packing of objs).
+	 */
+	left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
+	if (!cachep->num)
+		return false;
+
+	/*
+	 * If the slab has been placed off-slab, and we have enough space then
+	 * move it on-slab. This is at the expense of any extra colouring.
+	 */
+	if (left >= cachep->num * sizeof(freelist_idx_t))
+		return false;
+
+	cachep->colour = left / cachep->colour_off;
+
+	return true;
+}
+
+static bool set_on_slab_cache(struct kmem_cache *cachep,
+			size_t size, unsigned long flags)
+{
+	size_t left;
+
+	cachep->num = 0;
+
+	left = calculate_slab_order(cachep, size, flags);
+	if (!cachep->num)
+		return false;
+
+	cachep->colour = left / cachep->colour_off;
+
+	return true;
+}
+
 /**
  * __kmem_cache_create - Create a cache.
  * @cachep: cache management descriptor
@@ -2047,7 +2105,6 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 int
 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
-	size_t left_over, freelist_size;
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
@@ -2098,6 +2155,10 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	 * 4) Store it.
 	 */
 	cachep->align = ralign;
+	cachep->colour_off = cache_line_size();
+	/* Offset must be a multiple of the alignment. */
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
@@ -2152,43 +2213,18 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	}
 #endif
 
-	/*
-	 * Determine if the slab management is 'on' or 'off' slab.
-	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on. Always use on-slab management when
-	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
-	 */
-	if (size >= OFF_SLAB_MIN_SIZE && !slab_early_init &&
-	    !(flags & SLAB_NOLEAKTRACE)) {
-		/*
-		 * Size is large, assume best to place the slab management obj
-		 * off-slab (should allow better packing of objs).
-		 */
+	if (set_off_slab_cache(cachep, size, flags)) {
 		flags |= CFLGS_OFF_SLAB;
+		goto done;
 	}
 
-	left_over = calculate_slab_order(cachep, size, flags);
-
-	if (!cachep->num)
-		return -E2BIG;
-
-	freelist_size = cachep->num * sizeof(freelist_idx_t);
+	if (set_on_slab_cache(cachep, size, flags))
+		goto done;
 
-	/*
-	 * If the slab has been placed off-slab, and we have enough space then
-	 * move it on-slab. This is at the expense of any extra colouring.
-	 */
-	if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) {
-		flags &= ~CFLGS_OFF_SLAB;
-		left_over -= freelist_size;
-	}
+	return -E2BIG;
 
-	cachep->colour_off = cache_line_size();
-	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < cachep->align)
-		cachep->colour_off = cachep->align;
-	cachep->colour = left_over / cachep->colour_off;
-	cachep->freelist_size = freelist_size;
+done:
+	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
 	cachep->flags = flags;
 	cachep->allocflags = __GFP_COMP;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
@@ -2209,7 +2245,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 #endif
 
 	if (OFF_SLAB(cachep)) {
-		cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
+		cachep->freelist_cache =
+			kmalloc_slab(cachep->freelist_size, 0u);
 		/*
 		 * This is a possibility for one of the kmalloc_{dma,}_caches.
 		 * But since we go off slab only for object size greater than
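A note on the colouring math the two helpers now share: cachep->colour_off is
fixed up front (the hunk that moves it before the calculate_slab_order() calls),
and each helper derives cachep->colour from its own leftover space as
left / colour_off. With hypothetical numbers, left = 200 bytes of slack and
colour_off = cache_line_size() = 64 give colour = 200 / 64 = 3, so successive
slabs begin at offsets 0, 64, and 128 bytes, spreading the objects of different
slabs across cache lines.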