path: root/mm/slab.h
author    Christoph Lameter <cl@linux.com>  2014-08-06 16:04:07 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 18:01:13 -0700
commit    44c5356fb460053112ab87c9601df1605054edca (patch)
tree      d99acf7525a8092a2a7527c6d5599b330ba9262a /mm/slab.h
parent    1536cb39338aff16b0e30cc6708da03b268337f7 (diff)
slab common: add functions for kmem_cache_node access
The patchset provides two new functions in mm/slab.h and modifies SLAB and
SLUB to use them. The kmem_cache_node structure is shared between both
allocators, and the use of common accessors will allow us to move more code
into slab_common.c in the future.

This patch (of 3): these functions make it possible to eliminate repeated
code in both SLAB and SLUB and also allow for the insertion of debugging
code that may be needed during development.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
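As a rough sketch of the cleanup this enables (schematic only, not lifted
verbatim from the series; the nr_partial sum is an invented example), an
open-coded per-node loop such as

	for (node = 0; node < nr_node_ids; node++) {
		struct kmem_cache_node *n = s->node[node];

		if (!n)
			continue;
		count += n->nr_partial;
	}

collapses into

	for_each_kmem_cache_node(s, node, n)
		count += n->nr_partial;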
Diffstat (limited to 'mm/slab.h')
-rw-r--r--  mm/slab.h  17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/slab.h b/mm/slab.h
index 961a3fb1f5a2..3f9766e393a3 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -262,7 +262,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 }
 #endif
 
-
+#ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.
  */
@@ -294,5 +294,20 @@ struct kmem_cache_node {
 };
 
+static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
+{
+	return s->node[node];
+}
+
+/*
+ * Iterator over all nodes. The body will be executed for each node that has
+ * a kmem_cache_node structure allocated (which is true for all online nodes)
+ */
+#define for_each_kmem_cache_node(__s, __node, __n) \
+	for (__node = 0; __n = get_node(__s, __node), __node < nr_node_ids; __node++) \
+		if (__n)
+
+#endif
+
 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 void slab_stop(struct seq_file *m, void *p);
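
To see the new helpers in action outside the kernel, here is a minimal
userspace mock. This is an illustration under simplified assumptions, not
kernel code: the structs are reduced to a bare node array, MAX_NUMNODES and
nr_partial are stand-ins, and nr_node_ids is kept below MAX_NUMNODES because
the macro's comma expression reads node[__node] once more before its bound
test terminates the loop.

/*
 * Minimal userspace mock of the accessors added by this patch. The kernel
 * structs are reduced to the bare minimum; MAX_NUMNODES and the sparse
 * population of node[] are assumptions made purely for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 4

struct kmem_cache_node {
	unsigned long nr_partial;	/* stand-in for the real bookkeeping */
};

struct kmem_cache {
	struct kmem_cache_node *node[MAX_NUMNODES];
};

/*
 * Kept below MAX_NUMNODES: the macro calls get_node() once with
 * __node == nr_node_ids before the bound test ends the loop.
 */
static int nr_node_ids = 3;

/* Mirrors the accessor introduced in mm/slab.h */
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/* Mirrors for_each_kmem_cache_node(): body runs only where a node exists */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __n = get_node(__s, __node), __node < nr_node_ids; __node++) \
		if (__n)

int main(void)
{
	struct kmem_cache cache = { { NULL } };
	struct kmem_cache_node *n;
	int node;

	/* Populate only nodes 0 and 2, as on a machine with offline nodes */
	cache.node[0] = calloc(1, sizeof(*n));
	cache.node[2] = calloc(1, sizeof(*n));
	cache.node[0]->nr_partial = 3;
	cache.node[2]->nr_partial = 7;

	/* Prints nodes 0 and 2; node 1 has no kmem_cache_node and is skipped */
	for_each_kmem_cache_node(&cache, node, n)
		printf("node %d: nr_partial = %lu\n", node, n->nr_partial);

	free(cache.node[0]);
	free(cache.node[2]);
	return 0;
}

Built with a plain cc, this prints the two populated nodes and silently
skips the hole at node 1, which is exactly the pattern the rest of the
series goes on to exploit in SLAB and SLUB.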