path: root/mm/slab.c
author     Paul Mundt <lethal@linux-sh.org>  2007-07-20 10:11:58 +0900
committer  Paul Mundt <lethal@linux-sh.org>  2007-07-20 10:11:58 +0900
commit     20c2df83d25c6a95affe6157a4c9cac4cf5ffaac (patch)
tree       415c4453d2b17a50abe7a3e515177e1fa337bd67 /mm/slab.c
parent     64fb98fc40738ae1a98bcea9ca3145b89fb71524 (diff)
mm: Remove slab destructors from kmem_cache_create().
Slab destructors were no longer supported after Christoph's
c59def9f222d44bb7e2f0a559f2906191a0862d7 change. They've been BUGs for
both slab and slub, and slob never supported them either.

This rips out support for the dtor pointer from kmem_cache_create()
completely and fixes up every single callsite in the kernel (there were
about 224, not including the slab allocator definitions themselves, or
the documentation references).

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
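For illustration, a typical callsite fixup under this change looks roughly
like the sketch below. The cache name, object type, and constructor are
hypothetical and not taken from this diff; only the dropped trailing dtor
argument reflects the commit.

    /* Hypothetical example callsite, for illustration only. */
    static void my_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
    {
            memset(obj, 0, sizeof(struct my_obj));
    }

    /* Before this commit (the trailing NULL was the now-removed dtor): */
    cachep = kmem_cache_create("my_cache", sizeof(struct my_obj), 0,
                               SLAB_HWCACHE_ALIGN, my_ctor, NULL);

    /* After this commit, kmem_cache_create() takes only the constructor: */
    cachep = kmem_cache_create("my_cache", sizeof(struct my_obj), 0,
                               SLAB_HWCACHE_ALIGN, my_ctor);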
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index c3feeaab387..bde271c001b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1484,7 +1484,7 @@ void __init kmem_cache_init(void)
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ NULL);
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
@@ -1492,7 +1492,7 @@ void __init kmem_cache_init(void)
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ NULL);
}
slab_early_init = 0;
@@ -1510,7 +1510,7 @@ void __init kmem_cache_init(void)
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
- NULL, NULL);
+ NULL);
}
#ifdef CONFIG_ZONE_DMA
sizes->cs_dmacachep = kmem_cache_create(
@@ -1519,7 +1519,7 @@ void __init kmem_cache_init(void)
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
SLAB_PANIC,
- NULL, NULL);
+ NULL);
#endif
sizes++;
names++;
@@ -2101,12 +2101,10 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects (not implemented anymore).
*
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache
- * and the @dtor is run before the pages are handed back.
+ * The @ctor is run when new pages are allocated by the cache.
*
* @name must be valid until the cache is destroyed. This implies that
* the module calling this has to destroy the cache before getting unloaded.
@@ -2126,8 +2124,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags,
- void (*ctor)(void*, struct kmem_cache *, unsigned long),
- void (*dtor)(void*, struct kmem_cache *, unsigned long))
+ void (*ctor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
struct kmem_cache *cachep = NULL, *pc;
@@ -2136,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- size > KMALLOC_MAX_SIZE || dtor) {
+ size > KMALLOC_MAX_SIZE) {
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();