author    Pekka Enberg <penberg@kernel.org>  2011-03-11 18:10:45 +0200
committer Pekka Enberg <penberg@kernel.org>  2011-03-11 18:10:45 +0200
commit    c9149556756d56c68451a4a8735c37e7062fd3d7 (patch)
tree      a2dae56b22adaa9a23c8f92f30c3b3ad3b610850 /mm/slub.c
parent    d71f606f687ef9d0cdddfd3619ca7cb9a0b3fb63 (diff)
parent    5bfe53a77e8a3ffce4a10003c75f464a138e272d (diff)
Merge branch 'slab/rcu' into slab/next
Conflicts:
	mm/slub.c
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 77
1 file changed, 55 insertions(+), 22 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ea6f0390996..e841d8921c2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -305,11 +305,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
return s->size;
}
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+ return ((PAGE_SIZE << order) - reserved) / size;
+}
+
static inline struct kmem_cache_order_objects oo_make(int order,
- unsigned long size)
+ unsigned long size, int reserved)
{
struct kmem_cache_order_objects x = {
- (order << OO_SHIFT) + (PAGE_SIZE << order) / size
+ (order << OO_SHIFT) + order_objects(order, size, reserved)
};
return x;
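
The helper above is the pivot of the series: every objects-per-slab
computation now subtracts a per-cache reserve from the usable slab size
before dividing, and oo_make() packs the resulting count together with
the order exactly as before. A minimal userspace sketch of the
arithmetic, assuming a 4096-byte PAGE_SIZE:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed; matches common x86 configs */

/* Mirror of the new order_objects(): how many size-byte objects fit
 * in a 2^order-page slab once 'reserved' tail bytes are set aside. */
static int order_objects(int order, unsigned long size, int reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

int main(void)
{
        /* 128-byte objects with a 16-byte rcu_head reserve:
         * (4096 - 16) / 128 = 31 objects, down from 32. */
        printf("reserved 16: %d objects\n", order_objects(0, 128, 16));
        printf("reserved  0: %d objects\n", order_objects(0, 128, 0));
        return 0;
}
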
@@ -641,7 +646,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
return 1;
start = page_address(page);
- length = (PAGE_SIZE << compound_order(page));
+ length = (PAGE_SIZE << compound_order(page)) - s->reserved;
end = start + length;
remainder = length % s->size;
if (!remainder)
@@ -722,7 +727,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
return 0;
}
- maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+ maxobj = order_objects(compound_order(page), s->size, s->reserved);
if (page->objects > maxobj) {
slab_err(s, page, "objects %u > max %u",
s->name, page->objects, maxobj);
@@ -772,7 +777,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
nr++;
}
- max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+ max_objects = order_objects(compound_order(page), s->size, s->reserved);
if (max_objects > MAX_OBJS_PER_PAGE)
max_objects = MAX_OBJS_PER_PAGE;
@@ -1273,21 +1278,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__free_pages(page, order);
}
+#define need_reserve_slab_rcu \
+ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
static void rcu_free_slab(struct rcu_head *h)
{
struct page *page;
- page = container_of((struct list_head *)h, struct page, lru);
+ if (need_reserve_slab_rcu)
+ page = virt_to_head_page(h);
+ else
+ page = container_of((struct list_head *)h, struct page, lru);
+
__free_slab(page->slab, page);
}
static void free_slab(struct kmem_cache *s, struct page *page)
{
if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
- /*
- * RCU free overloads the RCU head over the LRU
- */
- struct rcu_head *head = (void *)&page->lru;
+ struct rcu_head *head;
+
+ if (need_reserve_slab_rcu) {
+ int order = compound_order(page);
+ int offset = (PAGE_SIZE << order) - s->reserved;
+
+ VM_BUG_ON(s->reserved != sizeof(*head));
+ head = page_address(page) + offset;
+ } else {
+ /*
+ * RCU free overloads the RCU head over the LRU
+ */
+ head = (void *)&page->lru;
+ }
call_rcu(head, rcu_free_slab);
} else
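
These two hunks carry the core of the RCU change. When struct rcu_head
no longer fits over page->lru (need_reserve_slab_rcu), free_slab()
places the head in the reserved tail of the slab itself, and
rcu_free_slab() recovers the page from that interior pointer via
virt_to_head_page(); on configurations where the head still fits, the
old lru-overlay path is unchanged. A userspace sketch of the offset
arithmetic, using a hypothetical stand-in for struct rcu_head and an
assumed 4096-byte page:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed */

struct fake_rcu_head {          /* hypothetical rcu_head stand-in */
        void *next;
        void (*func)(void *);
};

int main(void)
{
        int order = 0;                               /* compound_order(page) */
        int reserved = sizeof(struct fake_rcu_head); /* s->reserved */

        /* The head occupies the last 'reserved' bytes of the slab,
         * exactly the bytes order_objects() kept free of objects. */
        unsigned long offset = (PAGE_SIZE << order) - reserved;

        printf("slab size %lu, rcu_head at offset %lu\n",
               PAGE_SIZE << order, offset);
        return 0;
}
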
@@ -2012,13 +2034,13 @@ static int slub_nomerge;
* the smallest order which will fit the object.
*/
static inline int slab_order(int size, int min_objects,
- int max_order, int fract_leftover)
+ int max_order, int fract_leftover, int reserved)
{
int order;
int rem;
int min_order = slub_min_order;
- if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+ if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
for (order = max(min_order,
@@ -2027,10 +2049,10 @@ static inline int slab_order(int size, int min_objects,
unsigned long slab_size = PAGE_SIZE << order;
- if (slab_size < min_objects * size)
+ if (slab_size < min_objects * size + reserved)
continue;
- rem = slab_size % size;
+ rem = (slab_size - reserved) % size;
if (rem <= slab_size / fract_leftover)
break;
@@ -2040,7 +2062,7 @@ static inline int slab_order(int size, int min_objects,
return order;
}
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
{
int order;
int min_objects;
@@ -2058,14 +2080,14 @@ static inline int calculate_order(int size)
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
- max_objects = (PAGE_SIZE << slub_max_order)/size;
+ max_objects = order_objects(slub_max_order, size, reserved);
min_objects = min(min_objects, max_objects);
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
order = slab_order(size, min_objects,
- slub_max_order, fraction);
+ slub_max_order, fraction, reserved);
if (order <= slub_max_order)
return order;
fraction /= 2;
@@ -2077,14 +2099,14 @@ static inline int calculate_order(int size)
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
- order = slab_order(size, 1, slub_max_order, 1);
+ order = slab_order(size, 1, slub_max_order, 1, reserved);
if (order <= slub_max_order)
return order;
/*
* Doh this slab cannot be placed using slub_max_order.
*/
- order = slab_order(size, 1, MAX_ORDER, 1);
+ order = slab_order(size, 1, MAX_ORDER, 1, reserved);
if (order < MAX_ORDER)
return order;
return -ENOSYS;
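
calculate_order() and slab_order() now thread the reserve through the
fit test: an order qualifies only if min_objects plus the reserve fit,
and the leftover after packing (reserve excluded) stays within
slab_size / fract_leftover. A simplified userspace mirror, assuming a
4096-byte PAGE_SIZE and omitting the slub_min_order and
MAX_OBJS_PER_PAGE clamping done by the kernel version:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed */

/* Simplified slab_order(): smallest order in [0, max_order] whose
 * waste, after subtracting the reserved tail, is at most
 * slab_size / fract_leftover. Returns max_order + 1 on failure,
 * mirroring the "order <= slub_max_order" checks in the caller. */
static int slab_order(int size, int min_objects, int max_order,
                      int fract_leftover, int reserved)
{
        int order;

        for (order = 0; order <= max_order; order++) {
                unsigned long slab_size = PAGE_SIZE << order;
                unsigned long rem;

                if (slab_size < (unsigned long)min_objects * size + reserved)
                        continue;
                rem = (slab_size - reserved) % size;
                if (rem <= slab_size / fract_leftover)
                        break;
        }
        return order;
}

int main(void)
{
        /* 700-byte objects, 8 wanted, 16 bytes reserved, waste <= 1/16:
         * order 0 cannot hold 8 * 700 + 16 bytes; order 1 wastes
         * (8192 - 16) % 700 = 476 <= 8192 / 16 = 512, so order 1 wins. */
        printf("order = %d\n", slab_order(700, 8, 3, 16, 16));
        return 0;
}
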
@@ -2335,7 +2357,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
if (forced_order >= 0)
order = forced_order;
else
- order = calculate_order(size);
+ order = calculate_order(size, s->reserved);
if (order < 0)
return 0;
@@ -2353,8 +2375,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
/*
* Determine the number of objects per slab
*/
- s->oo = oo_make(order, size);
- s->min = oo_make(get_order(size), size);
+ s->oo = oo_make(order, size, s->reserved);
+ s->min = oo_make(get_order(size), size, s->reserved);
if (oo_objects(s->oo) > oo_objects(s->max))
s->max = s->oo;
@@ -2373,6 +2395,10 @@ static int kmem_cache_open(struct kmem_cache *s,
s->objsize = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
+ s->reserved = 0;
+
+ if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+ s->reserved = sizeof(struct rcu_head);
if (!calculate_sizes(s, -1))
goto error;
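
Note the ordering here: s->reserved must be initialized before the
calculate_sizes() call directly below it, since calculate_order() and
oo_make() both consume it. The reserve is non-zero only for
SLAB_DESTROY_BY_RCU caches, and only on configurations where struct
rcu_head is too large to overlay page->lru.
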
@@ -4014,6 +4040,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(destroy_by_rcu);
+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", s->reserved);
+}
+SLAB_ATTR_RO(reserved);
+
#ifdef CONFIG_SLUB_DEBUG
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
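
With &reserved_attr wired into slab_attrs[] in the next hunk, the
reserve becomes visible from userspace: a SLAB_DESTROY_BY_RCU cache on
an affected configuration would report sizeof(struct rcu_head) under
/sys/kernel/slab/<cache>/reserved, while every other cache reports 0.
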
@@ -4300,6 +4332,7 @@ static struct attribute *slab_attrs[] = {
&reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr,
&shrink_attr.attr,
+ &reserved_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
&total_objects_attr.attr,
&slabs_attr.attr,