author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 07:15:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 07:15:06 -0400
commit     bff157b3ad4b9f6be0af6987fcd62deaf0f2b799
tree       02ae68620a40fefd9ffc2de739a8bb362baa3f08 /mm
parent     8bf5e36d0429e9b8fc2c84966577f10386bd7195
parent     23774a2f6fee0848503bfb8004eeeb5adef94f5c
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB update from Pekka Enberg:
"Nothing terribly exciting here apart from Christoph's kmalloc
unification patches that bring sl[aou]b implementations closer to
each other"
* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
slab: Use correct GFP_DMA constant
slub: remove verify_mem_not_deleted()
mm/sl[aou]b: Move kmallocXXX functions to common code
mm, slab_common: add 'unlikely' to size check of kmalloc_slab()
mm/slub.c: beautify code for removing redundancy 'break' statement.
slub: Remove unnecessary page NULL check
slub: don't use cpu partial pages on UP
mm/slub: beautify code for 80 column limitation and tab alignment
mm/slub: remove 'per_cpu' which is useless variable
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab_common.c |  12
-rw-r--r--  mm/slob.c        |  28
-rw-r--r--  mm/slub.c        | 142
3 files changed, 91 insertions(+), 91 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 538bade6df7d..a3443278ce3a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -19,6 +19,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <trace/events/kmem.h>
 
 #include "slab.h"
 
@@ -373,7 +374,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
 	int index;
 
-	if (size > KMALLOC_MAX_SIZE) {
+	if (unlikely(size > KMALLOC_MAX_SIZE)) {
 		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
 		return NULL;
 	}
@@ -495,6 +496,15 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
+#ifdef CONFIG_TRACING
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
+#endif
 
 #ifdef CONFIG_SLABINFO
diff --git a/mm/slob.c b/mm/slob.c
index 91bd3f2dd2f0..4bf8809dfcce 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
 {
-	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_TRACING
 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 	return 0;
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+	return slob_alloc_node(cachep, gfp, node);
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 51df8272cfaf..c3eb3d3ca835 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 				object_err(s, page, object,
 					"Freechain corrupt");
 				set_freepointer(s, object, NULL);
-				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
 				page->freelist = NULL;
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1999,7 +2007,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2169,8 +2178,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
 *
 * The page is still frozen if the return value is not NULL.
 *
@@ -2314,7 +2323,8 @@ new_slab:
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2372,7 +2382,7 @@ redo:
 
 	object = c->freelist;
 	page = c->page;
-	if (unlikely(!object || !page || !node_match(page, node)))
+	if (unlikely(!object || !node_match(page, node)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
@@ -2382,13 +2392,15 @@ redo:
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2420,7 +2432,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2434,14 +2447,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
-
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
-	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -2512,8 +2517,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
-				 * Slab was on no list before and will be partially empty
-				 * We can defer the list move and instead freeze it.
+				 * Slab was on no list before and will be
+				 * partially empty
+				 * We can defer the list move and instead
+				 * freeze it.
 				 */
 				new.frozen = 1;
 
@@ -3071,8 +3078,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
	 * A) The number of objects from per cpu partial slabs dumped to the
	 *    per node list when we reach the limit.
	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3099,8 +3106,8 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3316,42 +3323,6 @@ size_t ksize(const void *object)
 }
 EXPORT_SYMBOL(ksize);
 
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
-	struct page *page;
-	void *object = (void *)x;
-	unsigned long flags;
-	bool rv;
-
-	if (unlikely(ZERO_OR_NULL_PTR(x)))
-		return false;
-
-	local_irq_save(flags);
-
-	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
-		/* maybe it was from stack? */
-		rv = true;
-		goto out_unlock;
-	}
-
-	slab_lock(page);
-	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
-		rv = false;
-	} else {
-		rv = true;
-	}
-	slab_unlock(page);
-
-out_unlock:
-	local_irq_restore(flags);
-	return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
 void kfree(const void *x)
 {
 	struct page *page;
@@ -4162,15 +4133,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4268,18 +4241,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	int node;
 	int x;
 	unsigned long *nodes;
-	unsigned long *per_cpu;
 
-	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
 	if (!nodes)
 		return -ENOMEM;
-	per_cpu = nodes + nr_node_ids;
 
 	if (flags & SO_CPU) {
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4304,8 +4276,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 				total += x;
 				nodes[node] += x;
 			}
-
-			per_cpu[node]++;
 		}
 	}
 
@@ -4315,12 +4285,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5136,7 +5105,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);
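
Among the smaller patches merged above, "mm, slab_common: add 'unlikely' to size check of kmalloc_slab()" (the kmalloc_slab() hunk in mm/slab_common.c) only adds a branch-prediction hint. As a hedged, standalone illustration of what that annotation amounts to — the macros below mirror the kernel's likely()/unlikely() but are redefined for a userspace build, and toy_lookup() with TOY_KMALLOC_MAX_SIZE is an invented stand-in for kmalloc_slab():

```c
#include <stdio.h>
#include <stddef.h>

/* userspace stand-ins for the kernel's likely()/unlikely() macros */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

#define TOY_KMALLOC_MAX_SIZE (1UL << 22)	/* illustrative limit */

/* mirrors the shape of the kmalloc_slab() check: reject oversized requests */
static const char *toy_lookup(size_t size)
{
	if (unlikely(size > TOY_KMALLOC_MAX_SIZE)) {
		/* cold path: the compiler lays this out away from the hot code */
		return NULL;
	}
	return "some-size-class";	/* hot path: the normal lookup */
}

int main(void)
{
	printf("%s\n", toy_lookup(128) ? "cached" : "rejected");
	printf("%s\n", toy_lookup(1UL << 30) ? "cached" : "rejected");
	return 0;
}
```

The hint changes only code layout, not behaviour: the oversize branch is placed off the expected path, which is worth doing for a helper that sits on the allocation fast path.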