author		Pekka Enberg <penberg@cs.helsinki.fi>	2008-02-11 22:47:46 +0200
committer	Christoph Lameter <christoph@stapp.engr.sgi.com>	2008-02-14 15:30:01 -0800
commit		eada35efcb2773cf49aa26277e056122e1a3405c (patch)
tree		69803b03b6f6106722d4cc293678f2e3183bec2e
parent		e51bfd0ad10600a9fe4c8ede5ac2272e80075008 (diff)
download	linux-3.10-eada35efcb2773cf49aa26277e056122e1a3405c.tar.gz
		linux-3.10-eada35efcb2773cf49aa26277e056122e1a3405c.tar.bz2
		linux-3.10-eada35efcb2773cf49aa26277e056122e1a3405c.zip
slub: kmalloc page allocator pass-through cleanup
This adds a proper function for kmalloc page allocator pass-through. While it simplifies any code that does slab tracing a lot, I think it's a worthwhile cleanup in itself.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
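For orientation, a worked example of what the inlined kmalloc() fast path reduces to for a constant size above PAGE_SIZE / 2, assuming 4 KiB pages (illustration only, not code from this patch):

	void *buf = kmalloc(6000, GFP_KERNEL);
	/*
	 * With __builtin_constant_p(6000) true and 6000 > PAGE_SIZE / 2,
	 * this compiles down to kmalloc_large(6000, GFP_KERNEL), i.e.
	 *
	 *   (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, get_order(6000));
	 *
	 * get_order(6000) == 1, so an order-1 (8 KiB) compound allocation
	 * comes straight from the page allocator, bypassing the slab.
	 */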
-rw-r--r--	include/linux/slub_def.h	 8	++++++--
-rw-r--r--	mm/slub.c	14	++++++--------
2 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5e6d3d634d5..a849c472b84 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -188,12 +188,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b..7870ef9d863 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2671,8 +2671,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2689,8 +2688,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -3219,8 +3217,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3235,8 +3233,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 
 	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
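For context, the free side (untouched by this patch) is why __GFP_COMP matters: kfree() must recognize memory that never came from a slab and hand it back to the page allocator instead. A minimal sketch of that counterpart, assuming SLUB internals of this era (demo_kfree is a hypothetical name; page->slab and slab_free() are internal details recalled from this kernel generation, not part of this diff):

	void demo_kfree(const void *x)
	{
		struct page *page = virt_to_head_page(x);

		/*
		 * Not a slab page: this was a kmalloc_large() allocation.
		 * Dropping the compound page's reference frees all of its
		 * constituent pages at once; this is what __GFP_COMP
		 * bought us at allocation time.
		 */
		if (unlikely(!PageSlab(page))) {
			put_page(page);
			return;
		}
		slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
	}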