author     Linus Torvalds <torvalds@linux-foundation.org>  2012-06-01 16:50:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-06-01 16:50:23 -0700
commit     af4f8ba31a4e328677bec493ceeaf112ca193b65 (patch)
tree       d97a6dc3a8ddcd2bcebe124a4716e565a7868cdc
parent     efff0471b0dd8b08ca3830b06a9083f6d6cef44e (diff)
parent     c03f94ccbd67fbcf546e5a9fcfeb99ef0aca4ada (diff)
Merge branch 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab updates from Pekka Enberg:
 "Mainly a bunch of SLUB fixes from Joonsoo Kim"

* 'slab/for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slub: use __SetPageSlab function to set PG_slab flag
  slub: fix a memory leak in get_partial_node()
  slub: remove unused argument of init_kmem_cache_node()
  slub: fix a possible memory leak
  Documentations: Fix slabinfo.c directory in vm/slub.txt
  slub: fix incorrect return type of get_any_partial()
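The acquire_slab() change below hinges on publishing the new freelist and the packed counters in one atomic step, so a concurrent free cannot put objects onto a list the acquiring CPU already believes it owns. A minimal userspace sketch of that double-word compare-and-exchange pattern, assuming GCC's __atomic builtins (the struct and helper names are illustrative, not the kernel's __cmpxchg_double_slab()):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Illustrative analogue of the "lock and freeze" step in acquire_slab();
     * the real code operates on struct page's freelist/counters fields.
     */
    struct slab_words {
            void *freelist;          /* first free object, NULL once taken for a cpu */
            unsigned long counters;  /* stands in for the packed inuse/objects/frozen word */
    };

    static bool cmpxchg_double(struct slab_words *slab,
                               struct slab_words expected,
                               struct slab_words desired)
    {
            /* Both words change together or not at all. */
            return __atomic_compare_exchange(slab, &expected, &desired, false,
                                             __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
    }

    int main(void)
    {
            int object;                               /* pretend object storage */
            struct slab_words slab = { &object, 0 };  /* pretend: 0 of 3 objects in use */
            struct slab_words old, new;

            do {
                    old = slab;             /* snapshot freelist + counters */
                    new.freelist = NULL;    /* "mode" path: take every object, as in the patch */
                    new.counters = 3;       /* pretend all 3 objects are now in use */
            } while (!cmpxchg_double(&slab, old, new));

            printf("frozen: freelist=%p counters=%lu\n", slab.freelist, slab.counters);
            return 0;
    }

Depending on the toolchain, the 16-byte exchange may need -mcx16 and/or -latomic when built for x86-64; this is only a sketch of the update pattern, not kernel code.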
-rw-r--r--  Documentation/vm/slub.txt |  2
-rw-r--r--  mm/slub.c                 | 23
2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 6752870c497..b0c6d1bbb43 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
slabs that have data in them. See "slabinfo -h" for more options when
running the command. slabinfo can be compiled with
-gcc -o slabinfo tools/slub/slabinfo.c
+gcc -o slabinfo tools/vm/slabinfo.c
Some of the modes of operation of slabinfo require that slub debugging
be enabled on the command line. F.e. no tracking information will be
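With the corrected path, building and probing the tool follows directly from the documentation's own wording (the gcc line above plus the "slabinfo -h" invocation it refers to):

    gcc -o slabinfo tools/vm/slabinfo.c
    ./slabinfo -h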
diff --git a/mm/slub.c b/mm/slub.c
index 80848cd3901..8c691fa1cf3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab = s;
- page->flags |= 1 << PG_slab;
+ __SetPageSlab(page);
start = page_address(page);
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
freelist = page->freelist;
counters = page->counters;
new.counters = counters;
- if (mode)
+ if (mode) {
new.inuse = page->objects;
+ new.freelist = NULL;
+ } else {
+ new.freelist = freelist;
+ }
VM_BUG_ON(new.frozen);
new.frozen = 1;
} while (!__cmpxchg_double_slab(s, page,
freelist, counters,
- NULL, new.counters,
+ new.freelist, new.counters,
"lock and freeze"));
remove_partial(n, page);
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
object = t;
available = page->objects - page->inuse;
} else {
- page->freelist = t;
available = put_cpu_partial(s, page, 0);
stat(s, CPU_PARTIAL_NODE);
}
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
/*
* Get a page from somewhere. Search in increasing NUMA distances.
*/
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
struct kmem_cache_cpu *c)
{
#ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
}
static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
- init_kmem_cache_node(n, kmem_cache_node);
+ init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
}
s->node[node] = n;
- init_kmem_cache_node(n, s);
+ init_kmem_cache_node(n);
}
return 1;
}
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
ret = -ENOMEM;
goto out;
}
- init_kmem_cache_node(n, s);
+ init_kmem_cache_node(n);
s->node[nid] = n;
}
out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
}
return s;
}
- kfree(n);
kfree(s);
}
+ kfree(n);
err:
up_write(&slub_lock);