author     Tony Luck <tony.luck@intel.com>  2005-10-31 10:51:57 -0800
committer  Tony Luck <tony.luck@intel.com>  2005-10-31 10:51:57 -0800
commit     c7fb577e2a6cb04732541f2dc402bd46747f7558 (patch)
tree       df3b1a1922ed13bfbcc45d08650c38beeb1a7bd1 /lib
parent     9cec58dc138d6fcad9f447a19c8ff69f6540e667 (diff)
parent     581c1b14394aee60aff46ea67d05483261ed6527 (diff)
Manual update from upstream:
Applied Al's change 06a544971fad0992fe8b92c5647538d573089dd4
to new location of swiotlb.c
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'lib')
 lib/Kconfig.debug          |  27
 lib/bitmap.c               | 166
 lib/extable.c              |   3
 lib/idr.c                  |  50
 lib/kobject.c              |   3
 lib/kobject_uevent.c       |   6
 lib/smp_processor_id.c     |   1
 lib/sort.c                 |   1
 lib/string.c               | 125
 lib/swiotlb.c              |   2
 lib/textsearch.c           |   2
 lib/vsprintf.c             |   1
 lib/zlib_inflate/inflate.c |   1

 13 files changed, 291 insertions(+), 97 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 016e89a44ac..156822e3cc7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -128,7 +128,7 @@ config DEBUG_HIGHMEM
 config DEBUG_BUGVERBOSE
 	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
 	depends on BUG
-	depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV
+	depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
 	default !EMBEDDED
 	help
 	  Say Y here to make BUG() panics output the file name and line number
@@ -168,13 +168,34 @@ config DEBUG_FS
 
 	  If unsure, say N.
 
+config DEBUG_VM
+	bool "Debug VM"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to debug the virtual-memory system.
+
+	  If unsure, say N.
+
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
 	default y if DEBUG_INFO && UML
 	help
 	  If you say Y here the resulting kernel image will be slightly larger
-	  and slower, but it might give very useful debugging information
-	  on some architectures or you use external debuggers.
+	  and slower, but it might give very useful debugging information on
+	  some architectures or if you use external debuggers.
 
 	  If you don't debug the kernel, you can say N.
 
+config RCU_TORTURE_TEST
+	tristate "torture tests for RCU"
+	depends on DEBUG_KERNEL
+	default n
+	help
+	  This option provides a kernel module that runs torture tests
+	  on the RCU infrastructure.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want RCU torture tests to start automatically
+	  at boot time (you probably don't).
+	  Say M if you want the RCU torture tests to build as a module.
+	  Say N if you are unsure.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index fb9371fdd44..23d3b1147fe 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -511,6 +511,172 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 }
 EXPORT_SYMBOL(bitmap_parselist);
 
+/*
+ * bitmap_pos_to_ord(buf, pos, bits)
+ *	@buf: pointer to a bitmap
+ *	@pos: a bit position in @buf (0 <= @pos < @bits)
+ *	@bits: number of valid bit positions in @buf
+ *
+ * Map the bit at position @pos in @buf (of length @bits) to the
+ * ordinal of which set bit it is.  If it is not set or if @pos
+ * is not a valid bit position, map to zero (0).
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @pos
+ * values 4 through 7 will get mapped to 0 through 3, respectively,
+ * and other @pos values will get mapped to 0.  When @pos value 7
+ * gets mapped to (returns) @ord value 3 in this example, that means
+ * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
+ *
+ * The bit positions 0 through @bits are valid positions in @buf.
+ */
+static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
+{
+	int ord = 0;
+
+	if (pos >= 0 && pos < bits) {
+		int i;
+
+		for (i = find_first_bit(buf, bits);
+		     i < pos;
+		     i = find_next_bit(buf, bits, i + 1))
+			ord++;
+		if (i > pos)
+			ord = 0;
+	}
+	return ord;
+}
+
+/**
+ * bitmap_ord_to_pos(buf, ord, bits)
+ *	@buf: pointer to bitmap
+ *	@ord: ordinal bit position (n-th set bit, n >= 0)
+ *	@bits: number of valid bit positions in @buf
+ *
+ * Map the ordinal offset of bit @ord in @buf to its position in @buf.
+ * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @ord
+ * values 0 through 3 will get mapped to 4 through 7, respectively,
+ * and all other @ord valuds will get mapped to 0.  When @ord value 3
+ * gets mapped to (returns) @pos value 7 in this example, that means
+ * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
+ *
+ * The bit positions 0 through @bits are valid positions in @buf.
+ */
+static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
+{
+	int pos = 0;
+
+	if (ord >= 0 && ord < bits) {
+		int i;
+
+		for (i = find_first_bit(buf, bits);
+		     i < bits && ord > 0;
+		     i = find_next_bit(buf, bits, i + 1))
+			ord--;
+		if (i < bits && ord == 0)
+			pos = i;
+	}
+
+	return pos;
+}
+
+/**
+ * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
+ *	@src: subset to be remapped
+ *	@dst: remapped result
+ *	@old: defines domain of map
+ *	@new: defines range of map
+ *	@bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new.  In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * If either of the @old and @new bitmaps are empty, or if @src and @dst
+ * point to the same location, then this routine does nothing.
+ *
+ * The positions of unset bits in @old are mapped to the position of
+ * the first set bit in @new.
+ *
+ * Apply the above specified mapping to @src, placing the result in
+ * @dst, clearing any bits previously set in @dst.
+ *
+ * The resulting value of @dst will have either the same weight as
+ * @src, or less weight in the general case that the mapping wasn't
+ * injective due to the weight of @new being less than that of @old.
+ * The resulting value of @dst will never have greater weight than
+ * that of @src, except perhaps in the case that one of the above
+ * conditions was not met and this routine just returned.
+ *
+ * For example, lets say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set.  This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions to 12 (the first set bit in @new.  So if say @src
+ * comes into this routine with bits 1, 5 and 7 set, then @dst should
+ * leave with bits 12, 13 and 15 set.
+ */
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
+		const unsigned long *old, const unsigned long *new,
+		int bits)
+{
+	int s;
+
+	if (bitmap_weight(old, bits) == 0)
+		return;
+	if (bitmap_weight(new, bits) == 0)
+		return;
+	if (dst == src)		/* following doesn't handle inplace remaps */
+		return;
+
+	bitmap_zero(dst, bits);
+	for (s = find_first_bit(src, bits);
+	     s < bits;
+	     s = find_next_bit(src, bits, s + 1)) {
+		int x = bitmap_pos_to_ord(old, s, bits);
+		int y = bitmap_ord_to_pos(new, x, bits);
+		set_bit(y, dst);
+	}
+}
+EXPORT_SYMBOL(bitmap_remap);
+
+/**
+ * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
+ *	@oldbit - bit position to be mapped
+ *	@old: defines domain of map
+ *	@new: defines range of map
+ *	@bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new.  In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * The positions of unset bits in @old are mapped to the position of
+ * the first set bit in @new.
+ *
+ * Apply the above specified mapping to bit position @oldbit, returning
+ * the new bit position.
+ *
+ * For example, lets say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set.  This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions to 12 (the first set bit in @new.  So if say @oldbit
+ * is 5, then this routine returns 13.
+ */
+int bitmap_bitremap(int oldbit, const unsigned long *old,
+				const unsigned long *new, int bits)
+{
+	int x = bitmap_pos_to_ord(old, oldbit, bits);
+	return bitmap_ord_to_pos(new, x, bits);
+}
+EXPORT_SYMBOL(bitmap_bitremap);
+
 /**
  * bitmap_find_free_region - find a contiguous aligned mem region
  * @bitmap: an array of unsigned longs corresponding to the bitmap
diff --git a/lib/extable.c b/lib/extable.c
index 3f677a8f0c3..18df57c029d 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -16,9 +16,6 @@
 #include <linux/sort.h>
 #include <asm/uaccess.h>
 
-extern struct exception_table_entry __start___ex_table[];
-extern struct exception_table_entry __stop___ex_table[];
-
 #ifndef ARCH_HAS_SORT_EXTABLE
 /*
  * The exception table needs to be sorted so that the binary
diff --git a/lib/idr.c b/lib/idr.c
index 6415d053e2b..d226259c3c2 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,20 +6,20 @@
  * Modified by George Anzinger to reuse immediately and to use
  * find bit instructions.  Also removed _irq on spinlocks.
  *
- * Small id to pointer translation service.  
+ * Small id to pointer translation service.
  *
- * It uses a radix tree like structure as a sparse array indexed 
+ * It uses a radix tree like structure as a sparse array indexed
  * by the id to obtain the pointer.  The bitmap makes allocating
- * a new id quick.  
+ * a new id quick.
 *
 * You call it to allocate an id (an int) an associate with that id a
 * pointer or what ever, we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
- * You can release ids at any time. When all ids are released, most of 
+ * You can release ids at any time. When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
- * don't need to go to the memory "store" during an id allocate, just 
+ * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
@@ -72,12 +72,12 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
  * If the system is REALLY out of memory this function returns 0,
  * otherwise 1.
  */
-int idr_pre_get(struct idr *idp, unsigned gfp_mask)
+int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < IDR_FREE_MAX) {
 		struct idr_layer *new;
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
-		if(new == NULL)
+		if (new == NULL)
 			return (0);
 		free_layer(idp, new);
 	}
@@ -107,7 +107,7 @@ static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
 		if (m == IDR_SIZE) {
 			/* no space available go back to previous layer. */
 			l++;
-			id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
+			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 			if (!(p = pa[l])) {
 				*starting_id = id;
 				return -2;
@@ -161,7 +161,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 {
 	struct idr_layer *p, *new;
 	int layers, v, id;
-	
+
 	id = starting_id;
 build_up:
 	p = idp->top;
@@ -225,6 +225,7 @@ build_up:
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	int rv;
+
 	rv = idr_get_new_above_int(idp, ptr, starting_id);
 	/*
 	 * This is a cheap hack until the IDR code can be fixed to
@@ -259,6 +260,7 @@ EXPORT_SYMBOL(idr_get_new_above);
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
 	int rv;
+
 	rv = idr_get_new_above_int(idp, ptr, 0);
 	/*
 	 * This is a cheap hack until the IDR code can be fixed to
@@ -306,11 +308,10 @@ static void sub_remove(struct idr *idp, int shift, int id)
 			free_layer(idp, **paa);
 			**paa-- = NULL;
 		}
-		if ( ! *paa )
+		if (!*paa)
 			idp->layers = 0;
-	} else {
+	} else
 		idr_remove_warning(id);
-	}
 }
 
 /**
@@ -326,9 +327,8 @@ void idr_remove(struct idr *idp, int id)
 	id &= MAX_ID_MASK;
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
-	if ( idp->top && idp->top->count == 1 && 
-	     (idp->layers > 1) &&
-	     idp->top->ary[0]){  // We can drop a layer
+	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
+	    idp->top->ary[0]) {  // We can drop a layer
 		p = idp->top->ary[0];
 		idp->top->bitmap = idp->top->count = 0;
@@ -337,7 +337,6 @@ void idr_remove(struct idr *idp, int id)
 		--idp->layers;
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-
 		p = alloc_layer(idp);
 		kmem_cache_free(idr_layer_cache, p);
 		return;
@@ -346,6 +345,19 @@ EXPORT_SYMBOL(idr_remove);
 
 /**
+ * idr_destroy - release all cached layers within an idr tree
+ * idp: idr handle
+ */
+void idr_destroy(struct idr *idp)
+{
+	while (idp->id_free_cnt) {
+		struct idr_layer *p = alloc_layer(idp);
+		kmem_cache_free(idr_layer_cache, p);
+	}
+}
+EXPORT_SYMBOL(idr_destroy);
+
+/**
  * idr_find - return pointer for given id
  * @idp: idr handle
  * @id: lookup key
@@ -378,8 +390,8 @@ void *idr_find(struct idr *idp, int id)
 }
 EXPORT_SYMBOL(idr_find);
 
-static void idr_cache_ctor(void * idr_layer,
-		kmem_cache_t *idr_layer_cache, unsigned long flags)
+static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+		unsigned long flags)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
@@ -387,7 +399,7 @@ static void idr_cache_ctor(void * idr_layer,
 static int init_id_cache(void)
 {
 	if (!idr_layer_cache)
-		idr_layer_cache = kmem_cache_create("idr_layer_cache", 
+		idr_layer_cache = kmem_cache_create("idr_layer_cache",
 			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
 	return 0;
 }
diff --git a/lib/kobject.c b/lib/kobject.c
index dd0917dd9fa..a181abed89f 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/stat.h>
+#include <linux/slab.h>
 
 /**
  * populate_dir - populate directory with attributes.
@@ -100,7 +101,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
  * @kobj:	kobject in question, with which to build the path
  * @gfp_mask:	the allocation type used to allocate the path
  */
-char *kobject_get_path(struct kobject *kobj, int gfp_mask)
+char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
 	char *path;
 	int len;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 04ca4429ddf..3ab375411e3 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -54,7 +54,7 @@ static char *action_to_string(enum kobject_action action)
 static struct sock *uevent_sock;
 
 /**
- * send_uevent - notify userspace by sending event trough netlink socket
+ * send_uevent - notify userspace by sending event through netlink socket
 *
 * @signal: signal name
 * @obj: object path (kobject)
@@ -62,7 +62,7 @@ static struct sock *uevent_sock;
 * @gfp_mask:
 */
 static int send_uevent(const char *signal, const char *obj,
-		       char **envp, int gfp_mask)
+		       char **envp, gfp_t gfp_mask)
 {
 	struct sk_buff *skb;
 	char *pos;
@@ -98,7 +98,7 @@ static int send_uevent(const char *signal, const char *obj,
 }
 
 static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action,
-			     struct attribute *attr, int gfp_mask)
+			     struct attribute *attr, gfp_t gfp_mask)
 {
 	char *path;
 	char *attrpath;
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 42c08ef828c..eddc9b3d387 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,6 +5,7 @@
 */
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/sched.h>
 
 unsigned int debug_smp_processor_id(void)
 {
diff --git a/lib/sort.c b/lib/sort.c
index ddc4d35df28..5f3b51ffa1d 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sort.h>
+#include <linux/slab.h>
 
 static void u32_swap(void *a, void *b, int size)
 {
diff --git a/lib/string.c b/lib/string.c
index d886ef157c1..037a48acedb 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -36,11 +36,13 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 	/* Yes, Virginia, it had better be unsigned */
 	unsigned char c1, c2;
 
-	c1 = 0;	c2 = 0;
+	c1 = c2 = 0;
 	if (len) {
 		do {
-			c1 = *s1; c2 = *s2;
-			s1++; s2++;
+			c1 = *s1;
+			c2 = *s2;
+			s1++;
+			s2++;
 			if (!c1)
 				break;
 			if (!c2)
@@ -55,7 +57,6 @@ int strnicmp(const char *s1, const char *s2, size_t len)
 	}
 	return (int)c1 - (int)c2;
 }
-
 EXPORT_SYMBOL(strnicmp);
 #endif
 
@@ -66,7 +67,7 @@ EXPORT_SYMBOL(strnicmp);
 * @src: Where to copy the string from
 */
 #undef strcpy
-char * strcpy(char * dest,const char *src)
+char *strcpy(char *dest, const char *src)
 {
 	char *tmp = dest;
 
@@ -91,12 +92,13 @@ EXPORT_SYMBOL(strcpy);
 * count, the remainder of @dest will be padded with %NUL.
 *
 */
-char * strncpy(char * dest,const char *src,size_t count)
+char *strncpy(char *dest, const char *src, size_t count)
 {
 	char *tmp = dest;
 
 	while (count) {
-		if ((*tmp = *src) != 0) src++;
+		if ((*tmp = *src) != 0)
+			src++;
 		tmp++;
 		count--;
 	}
@@ -122,7 +124,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
 	size_t ret = strlen(src);
 
 	if (size) {
-		size_t len = (ret >= size) ? size-1 : ret;
+		size_t len = (ret >= size) ? size - 1 : ret;
 		memcpy(dest, src, len);
 		dest[len] = '\0';
 	}
@@ -138,7 +140,7 @@ EXPORT_SYMBOL(strlcpy);
 * @src: The string to append to it
 */
 #undef strcat
-char * strcat(char * dest, const char * src)
+char *strcat(char *dest, const char *src)
 {
 	char *tmp = dest;
 
@@ -146,7 +148,6 @@ char * strcat(char * dest, const char * src)
 		dest++;
 	while ((*dest++ = *src++) != '\0')
 		;
-
 	return tmp;
 }
 EXPORT_SYMBOL(strcat);
@@ -162,7 +163,7 @@ EXPORT_SYMBOL(strcat);
 * Note that in contrast to strncpy, strncat ensures the result is
 * terminated.
 */
-char * strncat(char *dest, const char *src, size_t count)
+char *strncat(char *dest, const char *src, size_t count)
 {
 	char *tmp = dest;
 
@@ -176,7 +177,6 @@ char * strncat(char *dest, const char *src, size_t count)
 			}
 		}
 	}
-
 	return tmp;
 }
 EXPORT_SYMBOL(strncat);
@@ -216,15 +216,14 @@ EXPORT_SYMBOL(strlcat);
 * @ct: Another string
 */
 #undef strcmp
-int strcmp(const char * cs,const char * ct)
+int strcmp(const char *cs, const char *ct)
 {
-	register signed char __res;
+	signed char __res;
 
 	while (1) {
 		if ((__res = *cs - *ct++) != 0 || !*cs++)
 			break;
 	}
-
 	return __res;
 }
 EXPORT_SYMBOL(strcmp);
@@ -237,16 +236,15 @@ EXPORT_SYMBOL(strcmp);
 * @ct: Another string
 * @count: The maximum number of bytes to compare
 */
-int strncmp(const char * cs,const char * ct,size_t count)
+int strncmp(const char *cs, const char *ct, size_t count)
 {
-	register signed char __res = 0;
+	signed char __res = 0;
 
 	while (count) {
 		if ((__res = *cs - *ct++) != 0 || !*cs++)
 			break;
 		count--;
 	}
-
 	return __res;
 }
 EXPORT_SYMBOL(strncmp);
@@ -258,12 +256,12 @@ EXPORT_SYMBOL(strncmp);
 * @s: The string to be searched
 * @c: The character to search for
 */
-char * strchr(const char * s, int c)
+char *strchr(const char *s, int c)
 {
-	for(; *s != (char) c; ++s)
+	for (; *s != (char)c; ++s)
 		if (*s == '\0')
 			return NULL;
-	return (char *) s;
+	return (char *)s;
 }
 EXPORT_SYMBOL(strchr);
 #endif
@@ -274,7 +272,7 @@ EXPORT_SYMBOL(strchr);
 * @s: The string to be searched
 * @c: The character to search for
 */
-char * strrchr(const char * s, int c)
+char *strrchr(const char *s, int c)
 {
 	const char *p = s + strlen(s);
 	do {
@@ -296,8 +294,8 @@ EXPORT_SYMBOL(strrchr);
 char *strnchr(const char *s, size_t count, int c)
 {
 	for (; count-- && *s != '\0'; ++s)
-		if (*s == (char) c)
-			return (char *) s;
+		if (*s == (char)c)
+			return (char *)s;
 	return NULL;
 }
 EXPORT_SYMBOL(strnchr);
@@ -308,7 +306,7 @@ EXPORT_SYMBOL(strnchr);
 * strlen - Find the length of a string
 * @s: The string to be sized
 */
-size_t strlen(const char * s)
+size_t strlen(const char *s)
 {
 	const char *sc;
 
@@ -325,7 +323,7 @@ EXPORT_SYMBOL(strlen);
 * @s: The string to be sized
 * @count: The maximum number of bytes to search
 */
-size_t strnlen(const char * s, size_t count)
+size_t strnlen(const char *s, size_t count)
 {
 	const char *sc;
 
@@ -358,7 +356,6 @@ size_t strspn(const char *s, const char *accept)
 			return count;
 		++count;
 	}
-
 	return count;
 }
 
@@ -384,9 +381,8 @@ size_t strcspn(const char *s, const char *reject)
 		}
 		++count;
 	}
-
 	return count;
-}
+}
 EXPORT_SYMBOL(strcspn);
 
 #ifndef __HAVE_ARCH_STRPBRK
@@ -395,14 +391,14 @@ EXPORT_SYMBOL(strcspn);
 /**
 * strpbrk - Find the first occurrence of a set of characters
 * @cs: The string to be searched
 * @ct: The characters to search for
 */
-char * strpbrk(const char * cs,const char * ct)
+char *strpbrk(const char *cs, const char *ct)
 {
-	const char *sc1,*sc2;
+	const char *sc1, *sc2;
 
-	for( sc1 = cs; *sc1 != '\0'; ++sc1) {
-		for( sc2 = ct; *sc2 != '\0'; ++sc2) {
+	for (sc1 = cs; *sc1 != '\0'; ++sc1) {
+		for (sc2 = ct; *sc2 != '\0'; ++sc2) {
 			if (*sc1 == *sc2)
-				return (char *) sc1;
+				return (char *)sc1;
 		}
 	}
 	return NULL;
@@ -422,9 +418,10 @@ EXPORT_SYMBOL(strpbrk);
 * of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
 * Same semantics, slimmer shape. ;)
 */
-char * strsep(char **s, const char *ct)
+char *strsep(char **s, const char *ct)
 {
-	char *sbegin = *s, *end;
+	char *sbegin = *s;
+	char *end;
 
 	if (sbegin == NULL)
 		return NULL;
@@ -433,10 +430,8 @@ char * strsep(char **s, const char *ct)
 	if (end)
 		*end++ = '\0';
 	*s = end;
-
 	return sbegin;
 }
-
 EXPORT_SYMBOL(strsep);
 #endif
 
@@ -449,13 +444,12 @@ EXPORT_SYMBOL(strsep);
 *
 * Do not use memset() to access IO space, use memset_io() instead.
 */
-void * memset(void * s,int c,size_t count)
+void *memset(void *s, int c, size_t count)
 {
-	char *xs = (char *) s;
+	char *xs = s;
 
 	while (count--)
 		*xs++ = c;
-
 	return s;
 }
 EXPORT_SYMBOL(memset);
@@ -471,13 +465,13 @@ EXPORT_SYMBOL(memset);
 * You should not use this function to access IO space, use memcpy_toio()
 * or memcpy_fromio() instead.
 */
-void * memcpy(void * dest,const void *src,size_t count)
+void *memcpy(void *dest, const void *src, size_t count)
 {
-	char *tmp = (char *) dest, *s = (char *) src;
+	char *tmp = dest;
+	char *s = src;
 
 	while (count--)
 		*tmp++ = *s++;
-
 	return dest;
 }
 EXPORT_SYMBOL(memcpy);
@@ -492,23 +486,24 @@ EXPORT_SYMBOL(memcpy);
 *
 * Unlike memcpy(), memmove() copes with overlapping areas.
 */
-void * memmove(void * dest,const void *src,size_t count)
+void *memmove(void *dest, const void *src, size_t count)
 {
-	char *tmp, *s;
+	char *tmp;
+	const char *s;
 
 	if (dest <= src) {
-		tmp = (char *) dest;
-		s = (char *) src;
+		tmp = dest;
+		s = src;
 		while (count--)
 			*tmp++ = *s++;
-	}
-	else {
-		tmp = (char *) dest + count;
-		s = (char *) src + count;
+	} else {
+		tmp = dest;
+		tmp += count;
+		s = src;
+		s += count;
 		while (count--)
 			*--tmp = *--s;
-	}
-
+	}
 	return dest;
 }
 EXPORT_SYMBOL(memmove);
@@ -522,12 +517,12 @@ EXPORT_SYMBOL(memmove);
 * @count: The size of the area.
 */
 #undef memcmp
-int memcmp(const void * cs,const void * ct,size_t count)
+int memcmp(const void *cs, const void *ct, size_t count)
 {
 	const unsigned char *su1, *su2;
 	int res = 0;
 
-	for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
 		if ((res = *su1 - *su2) != 0)
 			break;
 	return res;
@@ -545,17 +540,17 @@ EXPORT_SYMBOL(memcmp);
 * returns the address of the first occurrence of @c, or 1 byte past
 * the area if @c is not found
 */
-void * memscan(void * addr, int c, size_t size)
+void *memscan(void *addr, int c, size_t size)
 {
-	unsigned char * p = (unsigned char *) addr;
+	unsigned char *p = addr;
 
 	while (size) {
 		if (*p == c)
-			return (void *) p;
+			return (void *)p;
 		p++;
 		size--;
 	}
-	return (void *) p;
+	return (void *)p;
 }
 EXPORT_SYMBOL(memscan);
 #endif
@@ -566,18 +561,18 @@ EXPORT_SYMBOL(memscan);
 * @s1: The string to be searched
 * @s2: The string to search for
 */
-char * strstr(const char * s1,const char * s2)
+char *strstr(const char *s1, const char *s2)
 {
 	int l1, l2;
 
 	l2 = strlen(s2);
 	if (!l2)
-		return (char *) s1;
+		return (char *)s1;
 	l1 = strlen(s1);
 	while (l1 >= l2) {
 		l1--;
-		if (!memcmp(s1,s2,l2))
-			return (char *) s1;
+		if (!memcmp(s1, s2, l2))
+			return (char *)s1;
 		s1++;
 	}
 	return NULL;
@@ -600,7 +595,7 @@ void *memchr(const void *s, int c, size_t n)
 	const unsigned char *p = s;
 	while (n-- != 0) {
 		if ((unsigned char)c == *p++) {
-			return (void *)(p-1);
+			return (void *)(p - 1);
 		}
 	}
 	return NULL;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5bdeaaea57f..57216f3544c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -433,7 +433,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, int flags)
+		       dma_addr_t *dma_handle, gfp_t flags)
 {
 	unsigned long dev_addr;
 	void *ret;
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 1e934c196f0..6f3093efbd7 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -254,7 +254,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
 * parameters or a ERR_PTR().
 */
 struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
-				     unsigned int len, int gfp_mask, int flags)
+				     unsigned int len, gfp_t gfp_mask, int flags)
 {
 	int err = -ENOENT;
 	struct ts_config *conf;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e4e9031dd9c..b07db5ca3f6 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -23,6 +23,7 @@
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 
+#include <asm/page.h>		/* for PAGE_SIZE */
 #include <asm/div64.h>
 
 /**
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 3d94cb90c1d..31b9e9054bf 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -3,7 +3,6 @@
 * For conditions of distribution and use, see copyright notice in zlib.h
 */
 
-#include <linux/module.h>
 #include <linux/zutil.h>
 #include "infblock.h"
 #include "infutil.h"
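
The mapping that the new bitmap_remap()/bitmap_bitremap() doc comments describe (n-th set bit of @old maps to the n-th set bit of @new, unset bits map to the first set bit of @new) can be checked with a small standalone program. This is a userspace sketch that re-implements the ord/pos helpers on a single unsigned long word; the names pos_to_ord() and ord_to_pos() only mirror the kernel helpers and are not the kernel code itself.

/* Userspace illustration of the remap semantics documented above,
 * limited to one unsigned long word (bit positions 0..63). */
#include <stdio.h>

/* ordinal of the set bit at position pos, 0 if pos is clear
 * (mirrors bitmap_pos_to_ord) */
static int pos_to_ord(unsigned long w, int pos)
{
	int i, ord = 0;

	if (!(w & (1UL << pos)))
		return 0;
	for (i = 0; i < pos; i++)
		if (w & (1UL << i))
			ord++;
	return ord;
}

/* position of the ord-th set bit, 0 if there is no such bit
 * (mirrors bitmap_ord_to_pos) */
static int ord_to_pos(unsigned long w, int ord)
{
	int i;

	for (i = 0; i < 64; i++)
		if ((w & (1UL << i)) && ord-- == 0)
			return i;
	return 0;
}

int main(void)
{
	unsigned long old = 0xf0;	/* bits 4-7 set: domain of the map  */
	unsigned long new = 0xf000;	/* bits 12-15 set: range of the map */
	unsigned long src = (1UL << 1) | (1UL << 5) | (1UL << 7);
	unsigned long dst = 0;
	int s;

	for (s = 0; s < 64; s++)
		if (src & (1UL << s))
			dst |= 1UL << ord_to_pos(new, pos_to_ord(old, s));

	/* expect 0xb000: bits 12, 13 and 15, as in the doc comment example */
	printf("dst = %#lx\n", dst);
	return 0;
}

Running it prints dst = 0xb000, matching the example in the bitmap_remap() comment: bit 5 (ordinal 1 in @old) goes to 13, bit 7 (ordinal 3) goes to 15, and bit 1, which is not set in @old, falls back to 12.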
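
The idr.c changes above (idr_pre_get() now taking a gfp_t and the new idr_destroy()) slot into the usual allocation pattern of this era of the API. Below is a hedged kernel-module sketch of that pattern, assuming the 2.6.14-era calls visible in the diff (idr_pre_get, idr_get_new, idr_remove, idr_destroy, idr_init); the identifiers example_idr and example_store are hypothetical.

/* Hypothetical sketch of the idr usage pattern behind this diff. */
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/errno.h>

static struct idr example_idr;

/* Allocate an id for ptr; returns the id or a negative errno. */
static int example_store(void *ptr)
{
	int id, err;

again:
	/* Preload free layers; idr_pre_get() now takes a gfp_t mask. */
	if (!idr_pre_get(&example_idr, GFP_KERNEL))
		return -ENOMEM;

	err = idr_get_new(&example_idr, ptr, &id);
	if (err == -EAGAIN)
		goto again;	/* layers were consumed by a racer, retry */
	if (err)
		return err;

	return id;	/* later looked up with idr_find(), freed with idr_remove() */
}

static int __init example_init(void)
{
	idr_init(&example_idr);
	return 0;
}

static void __exit example_exit(void)
{
	/* The new idr_destroy() releases the layers cached on the free list. */
	idr_destroy(&example_idr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The retry loop exists because idr_get_new() can return -EAGAIN when the preloaded layers were used up between idr_pre_get() and the allocation, which is exactly the "cheap hack" the in-tree comment in the diff refers to.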