 Documentation/kernel-parameters.txt |   2
 include/linux/debugobjects.h        |  90
 init/main.c                         |   3
 lib/Kconfig.debug                   |  23
 lib/Makefile                        |   1
 lib/debugobjects.c                  | 890
 mm/page_alloc.c                     |  10
 mm/slab.c                           |  10
 mm/slub.c                           |   3
 mm/vmalloc.c                        |   2
 10 files changed, 1030 insertions(+), 4 deletions(-)
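
Before the patch body, a brief illustration of how the new API is meant to be consumed. The sketch below is not part of this patch: the foo_* struct, function and descriptor names are invented for the example, and only struct debug_obj_descr and the debug_object_* calls come from the header added below. A subsystem describes its object type once, mirrors the object's life cycle with init/activate/deactivate/free calls, and may supply fixup callbacks that repair (or at least report) violations such as activating an already active object.

/*
 * Hypothetical usage sketch -- not part of this patch. The debug_object_*
 * calls and struct debug_obj_descr are the API added by this patch; the
 * foo_* names are invented for illustration only.
 */
#include <linux/debugobjects.h>
#include <linux/slab.h>

struct foo_widget {
	int pending;
	/* ... real payload ... */
};

static struct debug_obj_descr foo_debug_descr;

/*
 * Called by the core when debug_object_activate() finds the object
 * already ACTIVE; returning 1 reports that the situation was fixed up.
 */
static int foo_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct foo_widget *w = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/* deactivate the stale instance so tracking stays consistent */
		debug_object_deactivate(w, &foo_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr foo_debug_descr = {
	.name		= "foo_widget",
	.fixup_activate	= foo_fixup_activate,
};

static void foo_widget_init(struct foo_widget *w)
{
	debug_object_init(w, &foo_debug_descr);		/* tracker state: INIT */
	w->pending = 0;
}

static void foo_widget_start(struct foo_widget *w)
{
	debug_object_activate(w, &foo_debug_descr);	/* INIT/INACTIVE -> ACTIVE */
	w->pending = 1;
}

static void foo_widget_stop(struct foo_widget *w)
{
	w->pending = 0;
	debug_object_deactivate(w, &foo_debug_descr);	/* ACTIVE -> INACTIVE */
}

static void foo_widget_release(struct foo_widget *w)
{
	debug_object_free(w, &foo_debug_descr);		/* drop the tracker entry */
	kfree(w);
}

With CONFIG_DEBUG_OBJECTS_FREE enabled, the mm hooks at the end of the patch (debug_check_no_obj_freed() in slab, slub, page_alloc and vmalloc) additionally catch the case where memory containing a still-active tracked object is freed.
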
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 0ba0861b5d1..a3c35446e75 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -561,6 +561,8 @@ and is between 256 and 4096 characters. It is defined in the file 1 will print _a lot_ more information - normally only useful to kernel developers. + debug_objects [KNL] Enable object debugging + decnet.addr= [HW,NET] Format: <area>[,<node>] See also Documentation/networking/decnet.txt. diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h new file mode 100644 index 00000000000..8c243aaa86a --- /dev/null +++ b/include/linux/debugobjects.h @@ -0,0 +1,90 @@ +#ifndef _LINUX_DEBUGOBJECTS_H +#define _LINUX_DEBUGOBJECTS_H + +#include <linux/list.h> +#include <linux/spinlock.h> + +enum debug_obj_state { + ODEBUG_STATE_NONE, + ODEBUG_STATE_INIT, + ODEBUG_STATE_INACTIVE, + ODEBUG_STATE_ACTIVE, + ODEBUG_STATE_DESTROYED, + ODEBUG_STATE_NOTAVAILABLE, + ODEBUG_STATE_MAX, +}; + +struct debug_obj_descr; + +/** + * struct debug_obj - representaion of an tracked object + * @node: hlist node to link the object into the tracker list + * @state: tracked object state + * @object: pointer to the real object + * @descr: pointer to an object type specific debug description structure + */ +struct debug_obj { + struct hlist_node node; + enum debug_obj_state state; + void *object; + struct debug_obj_descr *descr; +}; + +/** + * struct debug_obj_descr - object type specific debug description structure + * @name: name of the object typee + * @fixup_init: fixup function, which is called when the init check + * fails + * @fixup_activate: fixup function, which is called when the activate check + * fails + * @fixup_destroy: fixup function, which is called when the destroy check + * fails + * @fixup_free: fixup function, which is called when the free check + * fails + */ +struct debug_obj_descr { + const char *name; + + int (*fixup_init) (void *addr, enum debug_obj_state state); + int (*fixup_activate) (void *addr, enum debug_obj_state state); + int (*fixup_destroy) (void *addr, enum debug_obj_state state); + int (*fixup_free) (void *addr, enum debug_obj_state state); +}; + +#ifdef CONFIG_DEBUG_OBJECTS +extern void debug_object_init (void *addr, struct debug_obj_descr *descr); +extern void +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); +extern void debug_object_activate (void *addr, struct debug_obj_descr *descr); +extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); +extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); +extern void debug_object_free (void *addr, struct debug_obj_descr *descr); + +extern void debug_objects_early_init(void); +extern void debug_objects_mem_init(void); +#else +static inline void +debug_object_init (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_activate (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } +static inline void +debug_object_free (void *addr, struct debug_obj_descr *descr) { } + +static inline void debug_objects_early_init(void) { } +static inline void debug_objects_mem_init(void) { } +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_FREE +extern void 
debug_check_no_obj_freed(const void *address, unsigned long size); +#else +static inline void +debug_check_no_obj_freed(const void *address, unsigned long size) { } +#endif + +#endif diff --git a/init/main.c b/init/main.c index dff253cfcd9..a87d4ca5c36 100644 --- a/init/main.c +++ b/init/main.c @@ -52,6 +52,7 @@ #include <linux/unwind.h> #include <linux/buffer_head.h> #include <linux/debug_locks.h> +#include <linux/debugobjects.h> #include <linux/lockdep.h> #include <linux/pid_namespace.h> #include <linux/device.h> @@ -543,6 +544,7 @@ asmlinkage void __init start_kernel(void) */ unwind_init(); lockdep_init(); + debug_objects_early_init(); cgroup_init_early(); local_irq_disable(); @@ -638,6 +640,7 @@ asmlinkage void __init start_kernel(void) enable_debug_pagealloc(); cpu_hotplug_init(); kmem_cache_init(); + debug_objects_mem_init(); idr_init_cache(); setup_per_cpu_pageset(); numa_policy_init(); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 754cc0027f2..3e132b0a59c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -194,6 +194,29 @@ config TIMER_STATS (it defaults to deactivated on bootup and will only be activated if some application like powertop activates it explicitly). +config DEBUG_OBJECTS + bool "Debug object operations" + depends on DEBUG_KERNEL + help + If you say Y here, additional code will be inserted into the + kernel to track the life time of various objects and validate + the operations on those objects. + +config DEBUG_OBJECTS_SELFTEST + bool "Debug objects selftest" + depends on DEBUG_OBJECTS + help + This enables the selftest of the object debug code. + +config DEBUG_OBJECTS_FREE + bool "Debug objects in freed memory" + depends on DEBUG_OBJECTS + help + This enables checks whether a k/v free operation frees an area + which contains an object which has not been deactivated + properly. This can make kmalloc/kfree-intensive workloads + much slower. + config DEBUG_SLAB bool "Debug slab memory allocations" depends on DEBUG_KERNEL && SLAB diff --git a/lib/Makefile b/lib/Makefile index 0ae4eb047aa..74b0cfb1fcc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_PLIST) += plist.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o +obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o ifneq ($(CONFIG_HAVE_DEC_LOCK),y) lib-y += dec_and_lock.o diff --git a/lib/debugobjects.c b/lib/debugobjects.c new file mode 100644 index 00000000000..a76a5e122ae --- /dev/null +++ b/lib/debugobjects.c @@ -0,0 +1,890 @@ +/* + * Generic infrastructure for lifetime debugging of objects. 
+ * + * Started by Thomas Gleixner + * + * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> + * + * For licencing details see kernel-base/COPYING + */ +#include <linux/debugobjects.h> +#include <linux/interrupt.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/hash.h> + +#define ODEBUG_HASH_BITS 14 +#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) + +#define ODEBUG_POOL_SIZE 512 +#define ODEBUG_POOL_MIN_LEVEL 256 + +#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT +#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) +#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) + +struct debug_bucket { + struct hlist_head list; + spinlock_t lock; +}; + +static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; + +static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE]; + +static DEFINE_SPINLOCK(pool_lock); + +static HLIST_HEAD(obj_pool); + +static int obj_pool_min_free = ODEBUG_POOL_SIZE; +static int obj_pool_free = ODEBUG_POOL_SIZE; +static int obj_pool_used; +static int obj_pool_max_used; +static struct kmem_cache *obj_cache; + +static int debug_objects_maxchain __read_mostly; +static int debug_objects_fixups __read_mostly; +static int debug_objects_warnings __read_mostly; +static int debug_objects_enabled __read_mostly; +static struct debug_obj_descr *descr_test __read_mostly; + +static int __init enable_object_debug(char *str) +{ + debug_objects_enabled = 1; + return 0; +} +early_param("debug_objects", enable_object_debug); + +static const char *obj_states[ODEBUG_STATE_MAX] = { + [ODEBUG_STATE_NONE] = "none", + [ODEBUG_STATE_INIT] = "initialized", + [ODEBUG_STATE_INACTIVE] = "inactive", + [ODEBUG_STATE_ACTIVE] = "active", + [ODEBUG_STATE_DESTROYED] = "destroyed", + [ODEBUG_STATE_NOTAVAILABLE] = "not available", +}; + +static int fill_pool(void) +{ + gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; + struct debug_obj *new; + + if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) + return obj_pool_free; + + if (unlikely(!obj_cache)) + return obj_pool_free; + + while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { + + new = kmem_cache_zalloc(obj_cache, gfp); + if (!new) + return obj_pool_free; + + spin_lock(&pool_lock); + hlist_add_head(&new->node, &obj_pool); + obj_pool_free++; + spin_unlock(&pool_lock); + } + return obj_pool_free; +} + +/* + * Lookup an object in the hash bucket. + */ +static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) +{ + struct hlist_node *node; + struct debug_obj *obj; + int cnt = 0; + + hlist_for_each_entry(obj, node, &b->list, node) { + cnt++; + if (obj->object == addr) + return obj; + } + if (cnt > debug_objects_maxchain) + debug_objects_maxchain = cnt; + + return NULL; +} + +/* + * Allocate a new object. If the pool is empty and no refill possible, + * switch off the debugger. 
+ */ +static struct debug_obj * +alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) +{ + struct debug_obj *obj = NULL; + int retry = 0; + +repeat: + spin_lock(&pool_lock); + if (obj_pool.first) { + obj = hlist_entry(obj_pool.first, typeof(*obj), node); + + obj->object = addr; + obj->descr = descr; + obj->state = ODEBUG_STATE_NONE; + hlist_del(&obj->node); + + hlist_add_head(&obj->node, &b->list); + + obj_pool_used++; + if (obj_pool_used > obj_pool_max_used) + obj_pool_max_used = obj_pool_used; + + obj_pool_free--; + if (obj_pool_free < obj_pool_min_free) + obj_pool_min_free = obj_pool_free; + } + spin_unlock(&pool_lock); + + if (fill_pool() && !obj && !retry++) + goto repeat; + + return obj; +} + +/* + * Put the object back into the pool or give it back to kmem_cache: + */ +static void free_object(struct debug_obj *obj) +{ + unsigned long idx = (unsigned long)(obj - obj_static_pool); + + if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { + spin_lock(&pool_lock); + hlist_add_head(&obj->node, &obj_pool); + obj_pool_free++; + obj_pool_used--; + spin_unlock(&pool_lock); + } else { + spin_lock(&pool_lock); + obj_pool_used--; + spin_unlock(&pool_lock); + kmem_cache_free(obj_cache, obj); + } +} + +/* + * We run out of memory. That means we probably have tons of objects + * allocated. + */ +static void debug_objects_oom(void) +{ + struct debug_bucket *db = obj_hash; + struct hlist_node *node, *tmp; + struct debug_obj *obj; + unsigned long flags; + int i; + + printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); + + for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { + spin_lock_irqsave(&db->lock, flags); + hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + hlist_del(&obj->node); + free_object(obj); + } + spin_unlock_irqrestore(&db->lock, flags); + } +} + +/* + * We use the pfn of the address for the hash. That way we can check + * for freed objects simply by checking the affected bucket. + */ +static struct debug_bucket *get_bucket(unsigned long addr) +{ + unsigned long hash; + + hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); + return &obj_hash[hash]; +} + +static void debug_print_object(struct debug_obj *obj, char *msg) +{ + static int limit; + + if (limit < 5 && obj->descr != descr_test) { + limit++; + printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg, + obj_states[obj->state], obj->descr->name); + WARN_ON(1); + } + debug_objects_warnings++; +} + +/* + * Try to repair the damage, so we have a better chance to get useful + * debug output. 
+ */ +static void +debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), + void * addr, enum debug_obj_state state) +{ + if (fixup) + debug_objects_fixups += fixup(addr, state); +} + +static void debug_object_is_on_stack(void *addr, int onstack) +{ + void *stack = current->stack; + int is_on_stack; + static int limit; + + if (limit > 4) + return; + + is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE)); + + if (is_on_stack == onstack) + return; + + limit++; + if (is_on_stack) + printk(KERN_WARNING + "ODEBUG: object is on stack, but not annotated\n"); + else + printk(KERN_WARNING + "ODEBUG: object is not on stack, but annotated\n"); + WARN_ON(1); +} + +static void +__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) { + obj = alloc_object(addr, db, descr); + if (!obj) { + debug_objects_enabled = 0; + spin_unlock_irqrestore(&db->lock, flags); + debug_objects_oom(); + return; + } + debug_object_is_on_stack(addr, onstack); + } + + switch (obj->state) { + case ODEBUG_STATE_NONE: + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_INIT; + break; + + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "init"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_init, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "init"); + break; + default: + break; + } + + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_init - debug checks when an object is initialized + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_init(void *addr, struct debug_obj_descr *descr) +{ + if (!debug_objects_enabled) + return; + + __debug_object_init(addr, descr, 0); +} + +/** + * debug_object_init_on_stack - debug checks when an object on stack is + * initialized + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) +{ + if (!debug_objects_enabled) + return; + + __debug_object_init(addr, descr, 1); +} + +/** + * debug_object_activate - debug checks when an object is activated + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_activate(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_ACTIVE; + break; + + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "activate"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_activate, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "activate"); + break; + default: + break; + } + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + spin_unlock_irqrestore(&db->lock, flags); + /* + * This happens when a static object is 
activated. We + * let the type specific code decide whether this is + * true or not. + */ + debug_object_fixup(descr->fixup_activate, addr, + ODEBUG_STATE_NOTAVAILABLE); +} + +/** + * debug_object_deactivate - debug checks when an object is deactivated + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (obj) { + switch (obj->state) { + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + case ODEBUG_STATE_ACTIVE: + obj->state = ODEBUG_STATE_INACTIVE; + break; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "deactivate"); + break; + default: + break; + } + } else { + struct debug_obj o = { .object = addr, + .state = ODEBUG_STATE_NOTAVAILABLE, + .descr = descr }; + + debug_print_object(&o, "deactivate"); + } + + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_destroy - debug checks when an object is destroyed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_destroy(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) + goto out_unlock; + + switch (obj->state) { + case ODEBUG_STATE_NONE: + case ODEBUG_STATE_INIT: + case ODEBUG_STATE_INACTIVE: + obj->state = ODEBUG_STATE_DESTROYED; + break; + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "destroy"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_destroy, addr, state); + return; + + case ODEBUG_STATE_DESTROYED: + debug_print_object(obj, "destroy"); + break; + default: + break; + } +out_unlock: + spin_unlock_irqrestore(&db->lock, flags); +} + +/** + * debug_object_free - debug checks when an object is freed + * @addr: address of the object + * @descr: pointer to an object specific debug description structure + */ +void debug_object_free(void *addr, struct debug_obj_descr *descr) +{ + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + + if (!debug_objects_enabled) + return; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj) + goto out_unlock; + + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "free"); + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_free, addr, state); + return; + default: + hlist_del(&obj->node); + free_object(obj); + break; + } +out_unlock: + spin_unlock_irqrestore(&db->lock, flags); +} + +#ifdef CONFIG_DEBUG_OBJECTS_FREE +static void __debug_check_no_obj_freed(const void *address, unsigned long size) +{ + unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; + struct hlist_node *node, *tmp; + struct debug_obj_descr *descr; + enum debug_obj_state state; + struct debug_bucket *db; + struct debug_obj *obj; + int cnt; + + saddr = (unsigned long) address; + eaddr = saddr + size; + paddr = saddr & ODEBUG_CHUNK_MASK; + 
chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); + chunks >>= ODEBUG_CHUNK_SHIFT; + + for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { + db = get_bucket(paddr); + +repeat: + cnt = 0; + spin_lock_irqsave(&db->lock, flags); + hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { + cnt++; + oaddr = (unsigned long) obj->object; + if (oaddr < saddr || oaddr >= eaddr) + continue; + + switch (obj->state) { + case ODEBUG_STATE_ACTIVE: + debug_print_object(obj, "free"); + descr = obj->descr; + state = obj->state; + spin_unlock_irqrestore(&db->lock, flags); + debug_object_fixup(descr->fixup_free, + (void *) oaddr, state); + goto repeat; + default: + hlist_del(&obj->node); + free_object(obj); + break; + } + } + spin_unlock_irqrestore(&db->lock, flags); + if (cnt > debug_objects_maxchain) + debug_objects_maxchain = cnt; + } +} + +void debug_check_no_obj_freed(const void *address, unsigned long size) +{ + if (debug_objects_enabled) + __debug_check_no_obj_freed(address, size); +} +#endif + +#ifdef CONFIG_DEBUG_FS + +static int debug_stats_show(struct seq_file *m, void *v) +{ + seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); + seq_printf(m, "warnings :%d\n", debug_objects_warnings); + seq_printf(m, "fixups :%d\n", debug_objects_fixups); + seq_printf(m, "pool_free :%d\n", obj_pool_free); + seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); + seq_printf(m, "pool_used :%d\n", obj_pool_used); + seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); + return 0; +} + +static int debug_stats_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, debug_stats_show, NULL); +} + +static const struct file_operations debug_stats_fops = { + .open = debug_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init debug_objects_init_debugfs(void) +{ + struct dentry *dbgdir, *dbgstats; + + if (!debug_objects_enabled) + return 0; + + dbgdir = debugfs_create_dir("debug_objects", NULL); + if (!dbgdir) + return -ENOMEM; + + dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, + &debug_stats_fops); + if (!dbgstats) + goto err; + + return 0; + +err: + debugfs_remove(dbgdir); + + return -ENOMEM; +} +__initcall(debug_objects_init_debugfs); + +#else +static inline void debug_objects_init_debugfs(void) { } +#endif + +#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST + +/* Random data structure for the self test */ +struct self_test { + unsigned long dummy1[6]; + int static_init; + unsigned long dummy2[3]; +}; + +static __initdata struct debug_obj_descr descr_type_test; + +/* + * fixup_init is called when: + * - an active object is initialized + */ +static int __init fixup_init(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_init(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +/* + * fixup_activate is called when: + * - an active object is activated + * - an unknown object is activated (might be a statically initialized object) + */ +static int __init fixup_activate(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_NOTAVAILABLE: + if (obj->static_init == 1) { + debug_object_init(obj, &descr_type_test); + debug_object_activate(obj, &descr_type_test); + /* + * Real code should return 0 here ! This is + * not a fixup of some bad behaviour. 
We + * merily call the debug_init function to keep + * track of the object. + */ + return 1; + } else { + /* Real code needs to emit a warning here */ + } + return 0; + + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_activate(obj, &descr_type_test); + return 1; + + default: + return 0; + } +} + +/* + * fixup_destroy is called when: + * - an active object is destroyed + */ +static int __init fixup_destroy(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_destroy(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +/* + * fixup_free is called when: + * - an active object is freed + */ +static int __init fixup_free(void *addr, enum debug_obj_state state) +{ + struct self_test *obj = addr; + + switch (state) { + case ODEBUG_STATE_ACTIVE: + debug_object_deactivate(obj, &descr_type_test); + debug_object_free(obj, &descr_type_test); + return 1; + default: + return 0; + } +} + +static int +check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) +{ + struct debug_bucket *db; + struct debug_obj *obj; + unsigned long flags; + int res = -EINVAL; + + db = get_bucket((unsigned long) addr); + + spin_lock_irqsave(&db->lock, flags); + + obj = lookup_object(addr, db); + if (!obj && state != ODEBUG_STATE_NONE) { + printk(KERN_ERR "ODEBUG: selftest object not found\n"); + WARN_ON(1); + goto out; + } + if (obj && obj->state != state) { + printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", + obj->state, state); + WARN_ON(1); + goto out; + } + if (fixups != debug_objects_fixups) { + printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", + fixups, debug_objects_fixups); + WARN_ON(1); + goto out; + } + if (warnings != debug_objects_warnings) { + printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", + warnings, debug_objects_warnings); + WARN_ON(1); + goto out; + } + res = 0; +out: + spin_unlock_irqrestore(&db->lock, flags); + if (res) + debug_objects_enabled = 0; + return res; +} + +static __initdata struct debug_obj_descr descr_type_test = { + .name = "selftest", + .fixup_init = fixup_init, + .fixup_activate = fixup_activate, + .fixup_destroy = fixup_destroy, + .fixup_free = fixup_free, +}; + +static __initdata struct self_test obj = { .static_init = 0 }; + +static void __init debug_objects_selftest(void) +{ + int fixups, oldfixups, warnings, oldwarnings; + unsigned long flags; + + local_irq_save(flags); + + fixups = oldfixups = debug_objects_fixups; + warnings = oldwarnings = debug_objects_warnings; + descr_test = &descr_type_test; + + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings)) + goto out; + debug_object_deactivate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings)) + goto out; + debug_object_destroy(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings)) + goto out; + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, 
ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_deactivate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) + goto out; + debug_object_free(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) + goto out; + + obj.static_init = 1; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings)) + goto out; + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) + goto out; + debug_object_free(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) + goto out; + +#ifdef CONFIG_DEBUG_OBJECTS_FREE + debug_object_init(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) + goto out; + debug_object_activate(&obj, &descr_type_test); + if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) + goto out; + __debug_check_no_obj_freed(&obj, sizeof(obj)); + if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) + goto out; +#endif + printk(KERN_INFO "ODEBUG: selftest passed\n"); + +out: + debug_objects_fixups = oldfixups; + debug_objects_warnings = oldwarnings; + descr_test = NULL; + + local_irq_restore(flags); +} +#else +static inline void debug_objects_selftest(void) { } +#endif + +/* + * Called during early boot to initialize the hash buckets and link + * the static object pool objects into the poll list. After this call + * the object tracker is fully operational. + */ +void __init debug_objects_early_init(void) +{ + int i; + + for (i = 0; i < ODEBUG_HASH_SIZE; i++) + spin_lock_init(&obj_hash[i].lock); + + for (i = 0; i < ODEBUG_POOL_SIZE; i++) + hlist_add_head(&obj_static_pool[i].node, &obj_pool); +} + +/* + * Called after the kmem_caches are functional to setup a dedicated + * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag + * prevents that the debug code is called on kmem_cache_free() for the + * debug tracker objects to avoid recursive calls. 
+ */ +void __init debug_objects_mem_init(void) +{ + if (!debug_objects_enabled) + return; + + obj_cache = kmem_cache_create("debug_objects_cache", + sizeof (struct debug_obj), 0, + SLAB_DEBUG_OBJECTS, NULL); + + if (!obj_cache) + debug_objects_enabled = 0; + else + debug_objects_selftest(); +} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0a502e99ee2..bdd5c432c42 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -45,6 +45,7 @@ #include <linux/fault-inject.h> #include <linux/page-isolation.h> #include <linux/memcontrol.h> +#include <linux/debugobjects.h> #include <asm/tlbflush.h> #include <asm/div64.h> @@ -532,8 +533,11 @@ static void __free_pages_ok(struct page *page, unsigned int order) if (reserved) return; - if (!PageHighMem(page)) + if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } arch_free_page(page, order); kernel_map_pages(page, 1 << order, 0); @@ -995,8 +999,10 @@ static void free_hot_cold_page(struct page *page, int cold) if (free_pages_check(page)) return; - if (!PageHighMem(page)) + if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page), PAGE_SIZE); + debug_check_no_obj_freed(page_address(page), PAGE_SIZE); + } arch_free_page(page, 0); kernel_map_pages(page, 1, 0); diff --git a/mm/slab.c b/mm/slab.c index 39d20f8a079..919a995d1e6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -110,6 +110,7 @@ #include <linux/fault-inject.h> #include <linux/rtmutex.h> #include <linux/reciprocal_div.h> +#include <linux/debugobjects.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> @@ -174,12 +175,14 @@ SLAB_CACHE_DMA | \ SLAB_STORE_USER | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ + SLAB_DEBUG_OBJECTS) #else # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD) + SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ + SLAB_DEBUG_OBJECTS) #endif /* @@ -3760,6 +3763,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) local_irq_save(flags); debug_check_no_locks_freed(objp, obj_size(cachep)); + if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(objp, obj_size(cachep)); __cache_free(cachep, objp); local_irq_restore(flags); } @@ -3785,6 +3790,7 @@ void kfree(const void *objp) kfree_debugcheck(objp); c = virt_to_cache(objp); debug_check_no_locks_freed(objp, obj_size(c)); + debug_check_no_obj_freed(objp, obj_size(c)); __cache_free(c, (void *)objp); local_irq_restore(flags); } diff --git a/mm/slub.c b/mm/slub.c index b145e798bf3..70db2897c1e 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -19,6 +19,7 @@ #include <linux/cpuset.h> #include <linux/mempolicy.h> #include <linux/ctype.h> +#include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/memory.h> @@ -1747,6 +1748,8 @@ static __always_inline void slab_free(struct kmem_cache *s, local_irq_save(flags); c = get_cpu_slab(s, smp_processor_id()); debug_check_no_locks_freed(object, c->objsize); + if (!(s->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(object, s->objsize); if (likely(page == c->page && c->node >= 0)) { object[c->offset] = c->freelist; c->freelist = object; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e33e0ae69ad..2a39cf128ab 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -15,6 +15,7 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/seq_file.h> +#include 
<linux/debugobjects.h>
 #include <linux/vmalloc.h>
 #include <linux/kallsyms.h>
@@ -394,6 +395,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	}
 
 	debug_check_no_locks_freed(addr, area->size);
+	debug_check_no_obj_freed(addr, area->size);
 
 	if (deallocate_pages) {
 		int i;
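
As a footnote to the free-hook plumbing above: __debug_check_no_obj_freed() maps the freed range onto page-sized chunks and scans one hash bucket per chunk. The following is a minimal userspace sketch of that chunk arithmetic only, with a made-up address range and assuming ODEBUG_CHUNK_SHIFT of 12 (a 4K PAGE_SHIFT); it does not reproduce the bucket scan itself.

/* Standalone sketch of the chunk arithmetic used by
 * __debug_check_no_obj_freed(); the values below are invented. */
#include <stdio.h>

#define ODEBUG_CHUNK_SHIFT	12	/* assumed PAGE_SHIFT */
#define ODEBUG_CHUNK_SIZE	(1UL << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

int main(void)
{
	unsigned long saddr = 0x12345678UL;		/* hypothetical freed range */
	unsigned long size  = 3 * ODEBUG_CHUNK_SIZE + 100;
	unsigned long eaddr = saddr + size;

	/* round the start down to a chunk boundary ... */
	unsigned long paddr = saddr & ODEBUG_CHUNK_MASK;
	/* ... and round the covered length up to whole chunks */
	unsigned long chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1))
					>> ODEBUG_CHUNK_SHIFT;

	/* the kernel code would now check get_bucket(paddr) for each chunk,
	 * advancing paddr by ODEBUG_CHUNK_SIZE, looking for tracked objects
	 * that fall inside [saddr, eaddr) */
	printf("range [%#lx, %#lx) covers %lu chunk(s) from %#lx\n",
	       saddr, eaddr, chunks, paddr);
	return 0;
}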