Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug                 |  31
-rw-r--r--   lib/Makefile                      |   3
-rw-r--r--   lib/bitmap.c                      | 182
-rw-r--r--   lib/cpumask.c                     |  45
-rw-r--r--   lib/extable.c                     |   1
-rw-r--r--   lib/find_next_bit.c               | 177
-rw-r--r--   lib/hweight.c                     |  53
-rw-r--r--   lib/kobject.c                     |  60
-rw-r--r--   lib/kobject_uevent.c              |   2
-rw-r--r--   lib/kref.c                        |   7
-rw-r--r--   lib/radix-tree.c                  |  49
-rw-r--r--   lib/reed_solomon/reed_solomon.c   |  11
-rw-r--r--   lib/string.c                      |   1
-rw-r--r--   lib/swiotlb.c                     |  32
14 files changed, 485 insertions(+), 169 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a314e663d51..d57fd9181b1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -78,13 +78,17 @@ config SCHEDSTATS
 	  this adds.
 
 config DEBUG_SLAB
-	bool "Debug memory allocations"
+	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
+config DEBUG_SLAB_LEAK
+	bool "Memory leak debugging"
+	depends on DEBUG_SLAB
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT
@@ -153,22 +157,9 @@ config DEBUG_INFO
 
 	  If unsure, say N.
 
-config DEBUG_IOREMAP
-	bool "Enable ioremap() debugging"
-	depends on DEBUG_KERNEL && PARISC
-	help
-	  Enabling this option will cause the kernel to distinguish between
-	  ioremapped and physical addresses.  It will print a backtrace (at
-	  most one every 10 seconds), hopefully allowing you to see which
-	  drivers need work.  Fixing all these problems is a prerequisite
-	  for turning on USE_HPPA_IOREMAP.  The warnings are harmless;
-	  the kernel has enough information to fix the broken drivers
-	  automatically, but we'd like to make it more efficient by not
-	  having to do that.
-
 config DEBUG_FS
 	bool "Debug Filesystem"
-	depends on DEBUG_KERNEL && SYSFS
+	depends on SYSFS
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into.  Enable this option to be able to read and
@@ -195,6 +186,16 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers.
 	  If you don't debug the kernel, you can say N.
 
+config UNWIND_INFO
+	bool "Compile the kernel with frame unwind information"
+	depends on !IA64
+	depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850)
+	help
+	  If you say Y here the resulting kernel image will be slightly larger
+	  but not slower, and it will give very useful debugging information.
+	  If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame unwind information or frame pointers.
+
 config FORCED_INLINING
 	bool "Force gcc to inline functions marked 'inline'"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 648b2c1242f..b830c9a1554 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -7,6 +7,8 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
 	 sha1.o
 
+lib-$(CONFIG_SMP) += cpumask.o
+
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
 obj-y += sort.o parser.o halfmd4.o iomap_copy.o
@@ -21,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 48e708381d4..ed2ae3b0cd0 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_subset);
 
-#if BITS_PER_LONG == 32
 int __bitmap_weight(const unsigned long *bitmap, int bits)
 {
 	int k, w = 0, lim = bits/BITS_PER_LONG;
 
 	for (k = 0; k < lim; k++)
-		w += hweight32(bitmap[k]);
+		w += hweight_long(bitmap[k]);
 
 	if (bits % BITS_PER_LONG)
-		w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+		w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
 
 	return w;
 }
-#else
-int __bitmap_weight(const unsigned long *bitmap, int bits)
-{
-	int k, w = 0, lim = bits/BITS_PER_LONG;
-
-	for (k = 0; k < lim; k++)
-		w += hweight64(bitmap[k]);
-
-	if (bits % BITS_PER_LONG)
-		w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
-
-	return w;
-}
-#endif
 EXPORT_SYMBOL(__bitmap_weight);
 
 /*
@@ -676,84 +661,143 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
-/**
- * bitmap_find_free_region - find a contiguous aligned mem region
- *	@bitmap: an array of unsigned longs corresponding to the bitmap
- *	@bits: number of bits in the bitmap
- *	@order: region size to find (size is actually 1<<order)
+/*
+ * Common code for bitmap_*_region() routines.
+ *	bitmap: array of unsigned longs corresponding to the bitmap
+ *	pos: the beginning of the region
+ *	order: region size (log base 2 of number of bits)
+ *	reg_op: operation(s) to perform on that region of bitmap
  *
- * This is used to allocate a memory region from a bitmap.  The idea is
- * that the region has to be 1<<order sized and 1<<order aligned (this
- * makes the search algorithm much faster).
+ * Can set, verify and/or release a region of bits in a bitmap,
+ * depending on which combination of REG_OP_* flag bits is set.
  *
- * The region is marked as set bits in the bitmap if a free one is
- * found.
+ * A region of a bitmap is a sequence of bits in the bitmap, of
+ * some size '1 << order' (a power of two), aligned to that same
+ * '1 << order' power of two.
  *
- * Returns either beginning of region or negative error
+ * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
+ * Returns 0 in all other cases and reg_ops.
  */
-int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
-{
-	unsigned long mask;
-	int pages = 1 << order;
-	int i;
 
-	if(pages > BITS_PER_LONG)
-		return -EINVAL;
+enum {
+	REG_OP_ISFREE,		/* true if region is all zero bits */
+	REG_OP_ALLOC,		/* set all bits in region */
+	REG_OP_RELEASE,		/* clear all bits in region */
+};
 
-	/* make a mask of the order */
-	mask = (1ul << (pages - 1));
+static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+	int nbits_reg;		/* number of bits in region */
+	int index;		/* index first long of region in bitmap */
+	int offset;		/* bit offset region in bitmap[index] */
+	int nlongs_reg;		/* num longs spanned by region in bitmap */
+	int nbitsinlong;	/* num bits of region in each spanned long */
+	unsigned long mask;	/* bitmask for one long of region */
+	int i;			/* scans bitmap by longs */
+	int ret = 0;		/* return value */
+
+	/*
+	 * Either nlongs_reg == 1 (for small orders that fit in one long)
+	 * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
+	 */
+	nbits_reg = 1 << order;
+	index = pos / BITS_PER_LONG;
+	offset = pos - (index * BITS_PER_LONG);
+	nlongs_reg = BITS_TO_LONGS(nbits_reg);
+	nbitsinlong = min(nbits_reg, BITS_PER_LONG);
+
+	/*
+	 * Can't do "mask = (1UL << nbitsinlong) - 1", as that
+	 * overflows if nbitsinlong == BITS_PER_LONG.
+	 */
+	mask = (1UL << (nbitsinlong - 1));
 	mask += mask - 1;
+	mask <<= offset;
 
-	/* run up the bitmap pages bits at a time */
-	for (i = 0; i < bits; i += pages) {
-		int index = i/BITS_PER_LONG;
-		int offset = i - (index * BITS_PER_LONG);
-		if((bitmap[index] & (mask << offset)) == 0) {
-			/* set region in bimap */
-			bitmap[index] |= (mask << offset);
-			return i;
+	switch (reg_op) {
+	case REG_OP_ISFREE:
+		for (i = 0; i < nlongs_reg; i++) {
+			if (bitmap[index + i] & mask)
+				goto done;
 		}
+		ret = 1;	/* all bits in region free (zero) */
+		break;
+
+	case REG_OP_ALLOC:
+		for (i = 0; i < nlongs_reg; i++)
+			bitmap[index + i] |= mask;
+		break;
+
+	case REG_OP_RELEASE:
+		for (i = 0; i < nlongs_reg; i++)
+			bitmap[index + i] &= ~mask;
+		break;
 	}
-	return -ENOMEM;
+done:
+	return ret;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@bits: number of bits in the bitmap
+ *	@order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one).  Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Return the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+	int pos;		/* scans bitmap by regions of size order */
+
+	for (pos = 0; pos < bits; pos += (1 << order))
+		if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+			break;
+	if (pos == bits)
+		return -ENOMEM;
+	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
+	return pos;
 }
 EXPORT_SYMBOL(bitmap_find_free_region);
 
 /**
- * bitmap_release_region - release allocated bitmap region
- *	@bitmap: a pointer to the bitmap
- *	@pos: the beginning of the region
- *	@order: the order of the bits to release (number is 1<<order)
+ * bitmap_release_region - release allocated bitmap region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@pos: beginning of bit region to release
+ *	@order: region size (log base 2 of number of bits) to release
  *
  * This is the complement to __bitmap_find_free_region and releases
  * the found region (by clearing it in the bitmap).
+ *
+ * No return value.
  */
 void bitmap_release_region(unsigned long *bitmap, int pos, int order)
 {
-	int pages = 1 << order;
-	unsigned long mask = (1ul << (pages - 1));
-	int index = pos/BITS_PER_LONG;
-	int offset = pos - (index * BITS_PER_LONG);
-	mask += mask - 1;
-	bitmap[index] &= ~(mask << offset);
+	__reg_op(bitmap, pos, order, REG_OP_RELEASE);
 }
 EXPORT_SYMBOL(bitmap_release_region);
 
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@pos: beginning of bit region to allocate
+ *	@order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Return 0 on success, or -EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
 int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
 {
-	int pages = 1 << order;
-	unsigned long mask = (1ul << (pages - 1));
-	int index = pos/BITS_PER_LONG;
-	int offset = pos - (index * BITS_PER_LONG);
-
-	/* We don't do regions of pages > BITS_PER_LONG.  The
-	 * algorithm would be a simple look for multiple zeros in the
-	 * array, but there's no driver today that needs this.  If you
-	 * trip this BUG(), you get to code it... */
-	BUG_ON(pages > BITS_PER_LONG);
-	mask += mask - 1;
-	if (bitmap[index] & (mask << offset))
+	if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
 		return -EBUSY;
-	bitmap[index] |= (mask << offset);
+	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
 	return 0;
 }
 EXPORT_SYMBOL(bitmap_allocate_region);
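As a self-contained aside (not part of the patch): the region semantics above can be exercised in plain userspace C. The sketch below uses hypothetical names (region_mask and the demo in main) and, for brevity, only handles regions that fit in a single word; it walks the same REG_OP_ISFREE / REG_OP_ALLOC / REG_OP_RELEASE sequence the kernel code performs.

    #include <assert.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Mask for a region of (1 << order) bits starting at pos; assumes the
     * region fits in one word, as small orders do. */
    static unsigned long region_mask(int pos, int order)
    {
        unsigned long mask = 1UL << ((1 << order) - 1);
        mask += mask - 1;       /* avoids overflow of (1UL << n) - 1 */
        return mask << (pos % BITS_PER_LONG);
    }

    int main(void)
    {
        unsigned long map = 0;
        int pos, order = 2;     /* look for a free, aligned 4-bit region */

        map |= region_mask(0, 2);       /* bits 0-3 already occupied */
        for (pos = 0; pos < (int)BITS_PER_LONG; pos += 1 << order)
            if (!(map & region_mask(pos, order)))
                break;                  /* REG_OP_ISFREE succeeded */
        map |= region_mask(pos, order); /* REG_OP_ALLOC */
        printf("allocated region at bit %d\n", pos);    /* prints 4 */
        map &= ~region_mask(pos, order);        /* REG_OP_RELEASE */
        assert(map == region_mask(0, 2));
        return 0;
    }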
diff --git a/lib/cpumask.c b/lib/cpumask.c
new file mode 100644
index 00000000000..3a67dc5ada7
--- /dev/null
+++ b/lib/cpumask.c
@@ -0,0 +1,45 @@
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+
+int __first_cpu(const cpumask_t *srcp)
+{
+	return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
+}
+EXPORT_SYMBOL(__first_cpu);
+
+int __next_cpu(int n, const cpumask_t *srcp)
+{
+	return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
+}
+EXPORT_SYMBOL(__next_cpu);
+
+/*
+ * Find the highest possible smp_processor_id()
+ *
+ * Note: if we're prepared to assume that cpu_possible_map never changes
+ * (reasonable) then this function should cache its return value.
+ */
+int highest_possible_processor_id(void)
+{
+	unsigned int cpu;
+	unsigned highest = 0;
+
+	for_each_cpu_mask(cpu, cpu_possible_map)
+		highest = cpu;
+	return highest;
+}
+EXPORT_SYMBOL(highest_possible_processor_id);
+
+int __any_online_cpu(const cpumask_t *mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, *mask) {
+		if (cpu_online(cpu))
+			break;
+	}
+	return cpu;
+}
+EXPORT_SYMBOL(__any_online_cpu);
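An illustrative userspace counterpart of the scan above, assuming a toy word-sized mask instead of a real cpumask_t (next_cpu_from is a hypothetical name). The clamp to NR_CPUS is the property a for_each_cpu_mask()-style loop relies on to terminate.

    #include <stdio.h>

    #define NR_CPUS 8       /* illustrative, small */

    /* Next set bit at or above 'start', or NR_CPUS when none remains -
     * the clamp mirrors the min_t(int, NR_CPUS, ...) in __next_cpu(). */
    static int next_cpu_from(unsigned mask, int start)
    {
        int cpu;

        for (cpu = start; cpu < NR_CPUS; cpu++)
            if (mask & (1u << cpu))
                return cpu;
        return NR_CPUS;
    }

    int main(void)
    {
        unsigned possible = 0x16;       /* CPUs 1, 2 and 4 "possible" */
        int cpu;

        /* open-coded equivalent of for_each_cpu_mask() over the toy mask */
        for (cpu = next_cpu_from(possible, 0); cpu < NR_CPUS;
             cpu = next_cpu_from(possible, cpu + 1))
            printf("cpu %d\n", cpu);    /* prints 1, 2, 4 */
        return 0;
    }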
diff --git a/lib/extable.c b/lib/extable.c
index 18df57c029d..01c08b5836f 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -1,5 +1,4 @@
 /*
- * lib/extable.c
  * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
  *
  * Copyright (C) 2004 Paul Mackerras, IBM Corp.
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index c05b4b19cf6..bda0d71a251 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -11,48 +11,171 @@
 
 #include <linux/bitops.h>
 #include <linux/module.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
 
-int find_next_bit(const unsigned long *addr, int size, int offset)
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+		unsigned long offset)
 {
-	const unsigned long *base;
-	const int NBITS = sizeof(*addr) * 8;
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
-	base = addr;
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
 	if (offset) {
-		int suboffset;
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
 
-		addr += offset / NBITS;
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
 
-		suboffset = offset % NBITS;
-		if (suboffset) {
-			tmp = *addr;
-			tmp >>= suboffset;
-			if (tmp)
-				goto finish;
-		}
+EXPORT_SYMBOL(find_next_bit);
 
-		addr++;
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+		unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (BITS_PER_LONG - offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
+
+EXPORT_SYMBOL(find_next_zero_bit);
 
-	while ((tmp = *addr) == 0)
-		addr++;
+#ifdef __BIG_ENDIAN
 
-	offset = (addr - base) * NBITS;
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(const unsigned long * x)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) __swab64p((u64 *) x);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) __swab32p((u32 *) x);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) __swab64((u64) y);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) __swab32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
 
- finish:
-	/* count the remaining bits without using __ffs() since that takes a 32-bit arg */
-	while (!(tmp & 0xff)) {
-		offset += 8;
-		tmp >>= 8;
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
+		long size, unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= (BITS_PER_LONG - 1UL);
+	if (offset) {
+		tmp = ext2_swabp(p++);
+		tmp |= (~0UL >> (BITS_PER_LONG - offset));
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
-	while (!(tmp & 1)) {
-		offset++;
-		tmp >>= 1;
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
+	if (!size)
+		return result;
+	tmp = ext2_swabp(p);
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size; /* Nope. Skip ffz */
+found_middle:
+	return result + ffz(tmp);
 
-	return offset;
+found_middle_swap:
+	return result + ffz(ext2_swab(tmp));
 }
-EXPORT_SYMBOL(find_next_bit);
+
+EXPORT_SYMBOL(generic_find_next_zero_le_bit);
+
+#endif /* __BIG_ENDIAN */
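For reference, the contract of find_next_bit() is easiest to pin down with a naive one-bit-at-a-time version; the userspace sketch below (my_find_next_bit is a hypothetical name) is the slow definition that the optimized word-at-a-time code above must agree with.

    #include <assert.h>
    #include <string.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

    /* Same contract as above: first set bit >= offset, or 'size' if none. */
    static unsigned long my_find_next_bit(const unsigned long *addr,
                                          unsigned long size,
                                          unsigned long offset)
    {
        unsigned long bit;

        for (bit = offset; bit < size; bit++)
            if (addr[BITOP_WORD(bit)] & (1UL << (bit % BITS_PER_LONG)))
                return bit;
        return size;
    }

    int main(void)
    {
        unsigned long map[2];

        memset(map, 0, sizeof(map));
        map[1] |= 1UL << 3;     /* set bit BITS_PER_LONG + 3 */

        assert(my_find_next_bit(map, 2 * BITS_PER_LONG, 0) ==
               BITS_PER_LONG + 3);
        /* nothing set past it: the "miss" case returns size */
        assert(my_find_next_bit(map, 2 * BITS_PER_LONG,
                                BITS_PER_LONG + 4) == 2 * BITS_PER_LONG);
        return 0;
    }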
diff --git a/lib/hweight.c b/lib/hweight.c
new file mode 100644
index 00000000000..43825767170
--- /dev/null
+++ b/lib/hweight.c
@@ -0,0 +1,53 @@
+#include <linux/module.h>
+#include <asm/types.h>
+
+/**
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+	unsigned int res = w - ((w >> 1) & 0x55555555);
+	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+	res = (res + (res >> 4)) & 0x0F0F0F0F;
+	res = res + (res >> 8);
+	return (res + (res >> 16)) & 0x000000FF;
+}
+EXPORT_SYMBOL(hweight32);
+
+unsigned int hweight16(unsigned int w)
+{
+	unsigned int res = w - ((w >> 1) & 0x5555);
+	res = (res & 0x3333) + ((res >> 2) & 0x3333);
+	res = (res + (res >> 4)) & 0x0F0F;
+	return (res + (res >> 8)) & 0x00FF;
+}
+EXPORT_SYMBOL(hweight16);
+
+unsigned int hweight8(unsigned int w)
+{
+	unsigned int res = w - ((w >> 1) & 0x55);
+	res = (res & 0x33) + ((res >> 2) & 0x33);
+	return (res + (res >> 4)) & 0x0F;
+}
+EXPORT_SYMBOL(hweight8);
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+	__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+	res = res + (res >> 8);
+	res = res + (res >> 16);
+	return (res + (res >> 32)) & 0x00000000000000FFul;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+EXPORT_SYMBOL(hweight64);
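The hweight32() arithmetic above is the classic parallel bit-count (SWAR): each step sums adjacent bit fields of doubling width. A userspace sketch verifying the same expression against the obvious loop (hweight32_demo and naive_weight are hypothetical names):

    #include <assert.h>
    #include <stdint.h>

    /* Same parallel-sum popcount as hweight32 above. */
    static unsigned int hweight32_demo(uint32_t w)
    {
        uint32_t res = w - ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        return (res + (res >> 16)) & 0x000000FF;
    }

    /* The obvious definition: count set bits one at a time. */
    static unsigned int naive_weight(uint32_t w)
    {
        unsigned int n = 0;

        for (; w; w >>= 1)
            n += w & 1;
        return n;
    }

    int main(void)
    {
        uint32_t w;

        for (w = 0; w < 100000; w += 7)
            assert(hweight32_demo(w) == naive_weight(w));
        assert(hweight32_demo(0xFFFFFFFF) == 32);
        return 0;
    }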
diff --git a/lib/kobject.c b/lib/kobject.c
index efe67fa96a7..25204a41a9b 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -194,6 +194,17 @@ int kobject_add(struct kobject * kobj)
 		unlink(kobj);
 		if (parent)
 			kobject_put(parent);
+
+		/* be noisy on error issues */
+		if (error == -EEXIST)
+			printk("kobject_add failed for %s with -EEXIST, "
+			       "don't try to register things with the "
+			       "same name in the same directory.\n",
+			       kobject_name(kobj));
+		else
+			printk("kobject_add failed for %s (%d)\n",
+			       kobject_name(kobj), error);
+		dump_stack();
 	}
 
 	return error;
@@ -207,18 +218,13 @@ int kobject_add(struct kobject * kobj)
 
 int kobject_register(struct kobject * kobj)
 {
-	int error = 0;
+	int error = -EINVAL;
 	if (kobj) {
 		kobject_init(kobj);
 		error = kobject_add(kobj);
-		if (error) {
-			printk("kobject_register failed for %s (%d)\n",
-			       kobject_name(kobj),error);
-			dump_stack();
-		} else
+		if (!error)
 			kobject_uevent(kobj, KOBJ_ADD);
-	} else
-		error = -EINVAL;
+	}
 	return error;
 }
 
@@ -379,6 +385,44 @@ void kobject_put(struct kobject * kobj)
 }
 
+static void dir_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct kobj_type dir_ktype = {
+	.release	= dir_release,
+	.sysfs_ops	= NULL,
+	.default_attrs	= NULL,
+};
+
+/**
+ * kobject_add_dir - add sub directory of object.
+ * @parent: object in which a directory is created.
+ * @name: directory name.
+ *
+ * Add a plain directory object as child of given object.
+ */
+struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
+{
+	struct kobject *k;
+
+	if (!parent)
+		return NULL;
+
+	k = kzalloc(sizeof(*k), GFP_KERNEL);
+	if (!k)
+		return NULL;
+
+	k->parent = parent;
+	k->ktype = &dir_ktype;
+	kobject_set_name(k, name);
+	kobject_register(k);
+
+	return k;
+}
+EXPORT_SYMBOL_GPL(kobject_add_dir);
+
 /**
  * kset_init - initialize a kset for use
  * @k: kset
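The kobject_register() rework above follows a common error-handling shape: default the result to failure, let the callee report its own errors, and only announce success. A hedged userspace sketch of the same shape (add_thing and register_thing are hypothetical names, not kernel API):

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical callee: reports its own failures, as kobject_add() now does */
    static int add_thing(const char *name)
    {
        if (!name[0]) {
            fprintf(stderr, "add_thing failed for %s (%d)\n", name, -EEXIST);
            return -EEXIST;
        }
        return 0;
    }

    /* caller defaults to -EINVAL and only announces success, mirroring the
     * reworked kobject_register() */
    static int register_thing(const char *name)
    {
        int error = -EINVAL;

        if (name) {
            error = add_thing(name);
            if (!error)
                printf("registered %s\n", name);
        }
        return error;
    }

    int main(void)
    {
        return register_thing("demo") ? 1 : 0;
    }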
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 086a0c6e888..982226daf93 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -26,6 +26,8 @@
 #define NUM_ENVP			32	/* number of env pointers */
 
 #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+u64 uevent_seqnum;
+char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
 static DEFINE_SPINLOCK(sequence_lock);
 static struct sock *uevent_sock;
 
diff --git a/lib/kref.c b/lib/kref.c
index 0d07cc31c81..4a467faf136 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -52,7 +52,12 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 	WARN_ON(release == NULL);
 	WARN_ON(release == (void (*)(struct kref *))kfree);
 
-	if (atomic_dec_and_test(&kref->refcount)) {
+	/*
+	 * if current count is one, we are the last user and can release object
+	 * right now, avoiding an atomic operation on 'refcount'
+	 */
+	if ((atomic_read(&kref->refcount) == 1) ||
+	    (atomic_dec_and_test(&kref->refcount))) {
 		release(kref);
 		return 1;
 	}
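A minimal userspace sketch of the patched kref_put() logic using C11 atomics (struct ref and ref_put are hypothetical names). The fast path is only sound under the kref contract assumed by the patch: a caller that observes a count of 1 must be the sole remaining holder, so no concurrent get/put on the object can race with the release.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ref {
        atomic_int refcount;
    };

    /* If we can see a count of 1, skip the atomic decrement entirely;
     * otherwise fall back to decrement-and-test (last one releases). */
    static int ref_put(struct ref *r, void (*release)(struct ref *))
    {
        if (atomic_load(&r->refcount) == 1 ||
            atomic_fetch_sub(&r->refcount, 1) == 1) {
            release(r);
            return 1;
        }
        return 0;
    }

    static void release_ref(struct ref *r)
    {
        printf("released\n");
        free(r);
    }

    int main(void)
    {
        struct ref *r = malloc(sizeof(*r));

        atomic_init(&r->refcount, 2);
        ref_put(r, release_ref);        /* drops 2 -> 1, no release */
        ref_put(r, release_ref);        /* sees 1: fast-path release */
        return 0;
    }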
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1e5b17dc7e3..7097bb239e4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -37,7 +37,6 @@
 #else
 #define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
 #endif
-#define RADIX_TREE_TAGS		2
 
 #define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
@@ -48,7 +47,7 @@ struct radix_tree_node {
 	unsigned int	count;
 	void		*slots[RADIX_TREE_MAP_SIZE];
-	unsigned long	tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS];
+	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
 struct radix_tree_path {
@@ -135,17 +134,20 @@ out:
 	return ret;
 }
 
-static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	__set_bit(offset, node->tags[tag]);
 }
 
-static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	__clear_bit(offset, node->tags[tag]);
 }
 
-static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	return test_bit(offset, node->tags[tag]);
 }
@@ -154,7 +156,7 @@ static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
  * Returns 1 if any slot in the node has this tag set.
  * Otherwise returns 0.
  */
-static inline int any_tag_set(struct radix_tree_node *node, int tag)
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
 {
 	int idx;
 	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
@@ -180,7 +182,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 {
 	struct radix_tree_node *node;
 	unsigned int height;
-	char tags[RADIX_TREE_TAGS];
+	char tags[RADIX_TREE_MAX_TAGS];
 	int tag;
 
 	/* Figure out what the height should be.  */
@@ -197,7 +199,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 	 * Prepare the tag status of the top-level node for propagation
 	 * into the newly-pushed top-level node(s)
 	 */
-	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 		tags[tag] = 0;
 		if (any_tag_set(root->rnode, tag))
 			tags[tag] = 1;
@@ -211,7 +213,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 		node->slots[0] = root->rnode;
 
 		/* Propagate the aggregated tag info into the new root */
-		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (tags[tag])
 				tag_set(node, tag, 0);
 		}
@@ -349,14 +351,15 @@ EXPORT_SYMBOL(radix_tree_lookup);
 *	@index:		index key
 *	@tag: 		tag index
 *
- *	Set the search tag corresponging to @index in the radix tree.  From
+ *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ *	corresponding to @index in the radix tree.  From
 *	the root all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.   Setting a tag on a not-present
 *	item is a bug.
 */
 void *radix_tree_tag_set(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -390,7 +393,8 @@ EXPORT_SYMBOL(radix_tree_tag_set);
 *	@index:		index key
 *	@tag: 		tag index
 *
- *	Clear the search tag corresponging to @index in the radix tree.  If
+ *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ *	corresponding to @index in the radix tree.  If
 *	this causes the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
@@ -398,7 +402,7 @@ EXPORT_SYMBOL(radix_tree_tag_set);
 *	has the same return value and semantics as radix_tree_lookup().
 */
 void *radix_tree_tag_clear(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_node *slot;
@@ -450,7 +454,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
- * @tag: 		tag index
+ * @tag: 		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
@@ -459,7 +463,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
 *  -1: tag present, unset
 */
 int radix_tree_tag_get(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -592,7 +596,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
 */
 static unsigned int
 __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
-	unsigned int max_items, unsigned long *next_index, int tag)
+	unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift;
@@ -646,7 +650,7 @@ out:
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
- *	@tag:		the tag index
+ *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
@@ -654,7 +658,8 @@ out:
 */
 unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
-		unsigned long first_index, unsigned int max_items, int tag)
+		unsigned long first_index, unsigned int max_items,
+		unsigned int tag)
 {
 	const unsigned long max_index = radix_tree_maxindex(root->height);
 	unsigned long cur_index = first_index;
@@ -716,7 +721,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
-	char tags[RADIX_TREE_TAGS];
+	char tags[RADIX_TREE_MAX_TAGS];
 	int nr_cleared_tags;
 	int tag;
 	int offset;
@@ -751,7 +756,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	 * Clear all tags associated with the just-deleted item
 	 */
 	nr_cleared_tags = 0;
-	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 		tags[tag] = 1;
 		if (tag_get(pathp->node, tag, pathp->offset)) {
 			tag_clear(pathp->node, tag, pathp->offset);
@@ -763,7 +768,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	}
 
 	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
-		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (tags[tag])
 				continue;
 
@@ -801,7 +806,7 @@ EXPORT_SYMBOL(radix_tree_delete);
 *	@root:		radix tree root
 *	@tag:		tag to test
 */
-int radix_tree_tagged(struct radix_tree_root *root, int tag)
+int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 {
 	struct radix_tree_node *rnode;
 
 	rnode = root->rnode;
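The per-node tag arrays being renamed and widened above are plain bitmaps, one per tag, with one bit per slot. A userspace sketch of the same layout (MAP_SIZE and MAX_TAGS are illustrative constants standing in for RADIX_TREE_MAP_SIZE and RADIX_TREE_MAX_TAGS; the helpers mirror tag_set/tag_get/any_tag_set):

    #include <assert.h>
    #include <string.h>

    #define MAP_SIZE 64     /* slots per node, illustrative */
    #define MAX_TAGS 2      /* stand-in for RADIX_TREE_MAX_TAGS */
    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    #define TAG_LONGS ((MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct node {
        unsigned long tags[MAX_TAGS][TAG_LONGS];
    };

    static void tag_set(struct node *n, unsigned int tag, int offset)
    {
        n->tags[tag][offset / BITS_PER_LONG] |=
            1UL << (offset % BITS_PER_LONG);
    }

    static int tag_get(struct node *n, unsigned int tag, int offset)
    {
        return !!(n->tags[tag][offset / BITS_PER_LONG] &
                  (1UL << (offset % BITS_PER_LONG)));
    }

    /* counterpart of any_tag_set(): is the tag set on any slot? */
    static int any_tag_set(struct node *n, unsigned int tag)
    {
        unsigned int idx;

        for (idx = 0; idx < TAG_LONGS; idx++)
            if (n->tags[tag][idx])
                return 1;
        return 0;
    }

    int main(void)
    {
        struct node n;

        memset(&n, 0, sizeof(n));
        tag_set(&n, 1, 40);
        assert(tag_get(&n, 1, 40) && !tag_get(&n, 0, 40));
        assert(any_tag_set(&n, 1) && !any_tag_set(&n, 0));
        return 0;
    }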
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index f5fef948a41..f8ac9fa95de 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -44,12 +44,13 @@
 #include <linux/module.h>
 #include <linux/rslib.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
 #include <asm/semaphore.h>
 
 /* This list holds all currently allocated rs control structures */
 static LIST_HEAD (rslist);
 /* Protection for the list */
-static DECLARE_MUTEX(rslistlock);
+static DEFINE_MUTEX(rslistlock);
 
 /**
  * rs_init - Initialize a Reed-Solomon codec
@@ -161,7 +162,7 @@ errrs:
 */
 void free_rs(struct rs_control *rs)
 {
-	down(&rslistlock);
+	mutex_lock(&rslistlock);
 	rs->users--;
 	if(!rs->users) {
 		list_del(&rs->list);
@@ -170,7 +171,7 @@ void free_rs(struct rs_control *rs)
 		kfree(rs->genpoly);
 		kfree(rs);
 	}
-	up(&rslistlock);
+	mutex_unlock(&rslistlock);
 }
 
 /**
@@ -201,7 +202,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 	if (nroots < 0 || nroots >= (1<<symsize))
 		return NULL;
 
-	down(&rslistlock);
+	mutex_lock(&rslistlock);
 
 	/* Walk through the list and look for a matching entry */
 	list_for_each(tmp, &rslist) {
@@ -228,7 +229,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 		list_add(&rs->list, &rslist);
 	}
 out:
-	up(&rslistlock);
+	mutex_unlock(&rslistlock);
 	return rs;
 }
 
diff --git a/lib/string.c b/lib/string.c
index 037a48acedb..b3c28a3f633 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -403,7 +403,6 @@ char *strpbrk(const char *cs, const char *ct)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL(strpbrk);
 #endif
 
 #ifndef __HAVE_ARCH_STRSEP
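The reed_solomon change above swaps a semaphore used as a mutex for a real mutex. A loose userspace analogy with POSIX threads (compile with -pthread; listlock, nusers and get_user are hypothetical names): static initialization plays the role of DEFINE_MUTEX(), and lock/unlock replace the down()/up() pair.

    #include <pthread.h>
    #include <stdio.h>

    /* static initialization, the userspace counterpart of
     * DEFINE_MUTEX(rslistlock) replacing DECLARE_MUTEX() */
    static pthread_mutex_t listlock = PTHREAD_MUTEX_INITIALIZER;
    static int nusers;

    static void get_user(void)
    {
        pthread_mutex_lock(&listlock);      /* was: down(&rslistlock) */
        nusers++;
        pthread_mutex_unlock(&listlock);    /* was: up(&rslistlock) */
    }

    int main(void)
    {
        get_user();
        printf("users: %d\n", nusers);
        return 0;
    }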
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 0af497b6b9a..10625785eef 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -296,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	else
 		stride = 1;
 
-	if (!nslots)
-		BUG();
+	BUG_ON(!nslots);
 
 	/*
 	 * Find suitable number of IO TLB entries size that will fit this
@@ -416,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(buffer, dma_addr, size);
-		else if (dir != DMA_TO_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(dma_addr, buffer, size);
-		else if (dir != DMA_FROM_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
 	default:
 		BUG();
@@ -529,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	unsigned long dev_addr = virt_to_phys(ptr);
 	void *map;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	/*
 	 * If the pointer passed in happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -592,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
@@ -616,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -648,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr) + offset;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -696,8 +691,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	unsigned long dev_addr;
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
@@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
@@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
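All of the swiotlb hunks above are the same mechanical cleanup: an "if (cond) BUG();" pair becomes a single BUG_ON(cond), so the fatal predicate reads at a glance. A userspace sketch with assert() standing in for the kernel's BUG() (map_buffer is a hypothetical name):

    #include <assert.h>
    #include <stddef.h>

    /* userspace stand-in for the kernel macro: one expression instead of
     * an if () + BUG() pair */
    #define BUG_ON(cond) assert(!(cond))

    static int map_buffer(void *ptr, int dir)
    {
        BUG_ON(ptr == NULL);    /* was: if (ptr == NULL) BUG(); */
        BUG_ON(dir == 0);       /* was: if (dir == 0) BUG(); */
        return 0;
    }

    int main(void)
    {
        int x;

        return map_buffer(&x, 1);
    }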