author    Christoph Lameter <cl@linux.com>    2013-01-10 19:14:19 +0000
committer Pekka Enberg <penberg@kernel.org>   2013-02-01 12:32:08 +0200
commit    2c59dd6544212faa5ce761920d2251f4152f408d (patch)
tree      c2547eb50205b72368e0b4758fc7c9a0111238a5 /mm/slab.c
parent    9e5e8deca74603357626471a9b44f05dea9e32b1 (diff)
slab: Common Kmalloc cache determination
Extract the optimized lookup functions from slub and put them into slab_common.c. Then make slab use these functions as well.

Joonsoo notes that this fixes some issues with constant folding which also reduces the code size for slub.

https://lkml.org/lkml/2012/10/20/82

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
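For context, the shared lookup now lives as kmalloc_slab() in mm/slab_common.c. Below is a minimal, compilable userspace sketch of its core size-to-index step, adapted from the __find_general_cachep() logic removed in this patch; fls_long() and kmalloc_index_sketch() are illustrative names, not kernel API, and the real function additionally consults a size_index[] table for sizes <= 192 and the GFP_DMA cache array:

    /* Userspace sketch of the power-of-two kmalloc index computation. */
    #include <stdio.h>
    #include <stddef.h>

    /* fls(): position of the highest set bit; 0 for x == 0 (kernel semantics). */
    static int fls_long(unsigned long x)
    {
    	return x ? (int)(8 * sizeof(long) - __builtin_clzl(x)) : 0;
    }

    /* Index of the smallest power-of-two cache that fits `size`. */
    static int kmalloc_index_sketch(size_t size)
    {
    	if (size == 0)
    		return -1;		/* the kernel returns ZERO_SIZE_PTR here */
    	return fls_long(size - 1);	/* e.g. 100 -> 7, the 128-byte cache */
    }

    int main(void)
    {
    	size_t sizes[] = { 8, 100, 128, 129, 4096 };

    	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
    		printf("kmalloc(%4zu) -> kmalloc_caches[%d]\n",
    		       sizes[i], kmalloc_index_sketch(sizes[i]));
    	return 0;
    }

Running this prints index 7 for a 100-byte request (the 128-byte cache) and index 8 for 129 bytes, matching the round-up-to-power-of-two behavior of the caches being looked up.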
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  40
1 file changed, 3 insertions(+), 37 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 08ba44f81a28..62629b11df38 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -656,40 +656,6 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
return cachep->array[smp_processor_id()];
}
-static inline struct kmem_cache *__find_general_cachep(size_t size,
- gfp_t gfpflags)
-{
- int i;
-
-#if DEBUG
- /* This happens if someone tries to call
- * kmem_cache_create(), or __kmalloc(), before
- * the generic caches are initialized.
- */
- BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
-#endif
- if (!size)
- return ZERO_SIZE_PTR;
-
- i = kmalloc_index(size);
-
- /*
- * Really subtle: The last entry with cs->cs_size==ULONG_MAX
- * has cs_{dma,}cachep==NULL. Thus no special case
- * for large kmalloc calls required.
- */
-#ifdef CONFIG_ZONE_DMA
- if (unlikely(gfpflags & GFP_DMA))
- return kmalloc_dma_caches[i];
-#endif
- return kmalloc_caches[i];
-}
-
-static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
-{
- return __find_general_cachep(size, gfpflags);
-}
-
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
@@ -2426,7 +2392,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
- cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+ cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
/*
* This is a possibility for one of the malloc_sizes caches.
* But since we go off slab only for object size greater than
@@ -3729,7 +3695,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
- cachep = kmem_find_general_cachep(size, flags);
+ cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
return kmem_cache_alloc_node_trace(cachep, flags, node, size);
@@ -3774,7 +3740,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
* Then kmalloc uses the uninlined functions instead of the inline
* functions.
*/
- cachep = __find_general_cachep(size, flags);
+ cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = slab_alloc(cachep, flags, caller);
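On the constant-folding point from the commit message: when the requested size is a compile-time constant, the index computation folds to a single integer and the compiler can load the cache pointer directly instead of emitting a runtime lookup, which is where the slub code-size reduction comes from. A hedged, compilable sketch of that pattern (all names here are illustrative, not the kernel's actual inlines):

    /* Sketch of why constant kmalloc sizes shrink the generated code. */
    #include <stddef.h>
    #include <stdio.h>

    struct kmem_cache { const char *name; };

    static struct kmem_cache fake_caches[16] = {
    	[3] = { "kmalloc-8" },  [4] = { "kmalloc-16" },
    	[5] = { "kmalloc-32" }, [6] = { "kmalloc-64" },
    	[7] = { "kmalloc-128" },
    };

    /* With a constant `size`, this chain folds to one integer at -O2. */
    static inline int const_index(size_t size)
    {
    	if (size <= 8)   return 3;
    	if (size <= 16)  return 4;
    	if (size <= 32)  return 5;
    	if (size <= 64)  return 6;
    	return 7;	/* the kernel continues up to KMALLOC_SHIFT_HIGH */
    }

    int main(void)
    {
    	/* Constant argument: the compiler emits a direct fake_caches[7] load. */
    	struct kmem_cache *s = &fake_caches[const_index(100)];

    	printf("size 100 -> %s\n", s->name);
    	return 0;
    }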