author     Pekka Enberg <penberg@cs.helsinki.fi>                  2007-05-06 14:48:40 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-07 12:12:50 -0700
commit     fd76bab2fa6d8f3ef6b326a4c6ae442fa21d30a4
tree       66f310ab9d7cdadfb79486700f1e01df7923ec14
parent     e3ebadd95cb621e2c7436f3d3646447ac9d5c16d
slab: introduce krealloc
This introduces krealloc(), which reallocates memory while keeping the contents
unchanged. The allocator avoids reallocation if the new size still fits the
currently used cache. I also added a simple, non-optimized version to
mm/slob.c for compatibility.
[akpm@linux-foundation.org: fix warnings]
Acked-by: Josef Sipek <jsipek@fsl.cs.sunysb.edu>
Acked-by: Matt Mackall <mpm@selenic.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   include/linux/slab.h |  3
-rw-r--r--   mm/slab.c            | 49
-rw-r--r--   mm/slob.c            | 35
3 files changed, 84 insertions, 3 deletions
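
The commit message and the kerneldoc in the patch below describe the caller-visible contract: contents are preserved up to the smaller of the old and new sizes, a %NULL pointer makes krealloc() behave like kmalloc(), a zero size frees the object, and on failure the old object is left untouched. The following sketch is not part of the commit and uses made-up names (struct log_buf, log_buf_append); it only illustrates how a caller might rely on that contract to grow a buffer:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct log_buf {
	char   *data;	/* may start out NULL: krealloc(NULL, ...) acts like kmalloc() */
	size_t  len;	/* bytes in use */
	size_t  cap;	/* bytes allocated */
};

static int log_buf_append(struct log_buf *lb, const char *msg, size_t n, gfp_t gfp)
{
	if (lb->len + n > lb->cap) {
		size_t new_cap = 2 * lb->cap;
		char *tmp;

		if (new_cap < lb->len + n)
			new_cap = lb->len + n;

		/*
		 * On success the old buffer has been freed (or reused) and its
		 * first lb->len bytes are preserved; on failure NULL is returned
		 * and lb->data is still valid, so assign through a temporary
		 * instead of overwriting lb->data directly.
		 */
		tmp = krealloc(lb->data, new_cap, gfp);
		if (!tmp)
			return -ENOMEM;

		lb->data = tmp;
		lb->cap  = new_cap;
	}

	memcpy(lb->data + lb->len, msg, n);
	lb->len += n;
	return 0;
}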
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1ef822e31c7..2f8f60ff294 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -72,8 +72,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  */
 void *__kmalloc(size_t, gfp_t);
 void *__kzalloc(size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
-unsigned int ksize(const void *);
+size_t ksize(const void *);
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
diff --git a/mm/slab.c b/mm/slab.c
index 168bfe9d8ff..8b71a9c3daa 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3740,6 +3740,53 @@ EXPORT_SYMBOL(__kmalloc);
 #endif
 
 /**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.  If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc().  If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	struct kmem_cache *cache, *new_cache;
+	void *ret;
+
+	if (unlikely(!p))
+		return kmalloc_track_caller(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	cache = virt_to_cache(p);
+	new_cache = __find_general_cachep(new_size, flags);
+
+	/*
+	 * If new size fits in the current cache, bail out.
+	 */
+	if (likely(cache == new_cache))
+		return (void *)p;
+
+	/*
+	 * We are on the slow-path here so do not use __cache_alloc
+	 * because it bloats kernel text.
+	 */
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
+/**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
  * @objp: The previously allocated object.
@@ -4481,7 +4528,7 @@ const struct seq_operations slabstats_op = {
  * allocated with either kmalloc() or kmem_cache_alloc(). The object
  * must not be freed during the duration of the call.
  */
-unsigned int ksize(const void *objp)
+size_t ksize(const void *objp)
 {
 	if (unlikely(objp == NULL))
 		return 0;
diff --git a/mm/slob.c b/mm/slob.c
index 5adc29cb58d..03cce3d3d98 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,6 +190,39 @@ void *__kmalloc(size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ *
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * The contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.  If @p is %NULL, krealloc()
+ * behaves exactly like kmalloc().  If @size is 0 and @p is not a
+ * %NULL pointer, the object pointed to is freed.
+ */
+void *krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	void *ret;
+
+	if (unlikely(!p))
+		return kmalloc_track_caller(new_size, flags);
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return NULL;
+	}
+
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret) {
+		memcpy(ret, p, min(new_size, ksize(p)));
+		kfree(p);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(krealloc);
+
 void kfree(const void *block)
 {
 	bigblock_t *bb, **last = &bigblocks;
@@ -219,7 +252,7 @@ void kfree(const void *block)
 
 EXPORT_SYMBOL(kfree);
 
-unsigned int ksize(const void *block)
+size_t ksize(const void *block)
 {
 	bigblock_t *bb;
 	unsigned long flags;
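
The interesting part of the mm/slab.c version above is the fast path: the object's current cache (virt_to_cache()) is compared with the general cache that would back the requested size (__find_general_cachep()), and if they match the original pointer is returned with no allocation and no copy. A small hypothetical demonstration of that behaviour follows; it is not part of the commit, demo_krealloc_fast_path is a made-up name, and the concrete sizes assume a SLAB configuration whose general kmalloc caches include a 128-byte cache:

#include <linux/kernel.h>
#include <linux/slab.h>

static void demo_krealloc_fast_path(void)
{
	void *p = kmalloc(100, GFP_KERNEL);	/* typically served from the 128-byte cache */
	void *q;

	if (!p)
		return;

	/* ksize() (now returning size_t) reports the usable size, e.g. 128 here. */
	printk(KERN_INFO "usable size: %zu\n", ksize(p));

	/*
	 * 120 still maps to the same general cache, so no new allocation and
	 * no copy are expected; q should equal p on such a configuration.
	 */
	q = krealloc(p, 120, GFP_KERNEL);
	if (!q) {
		kfree(p);	/* on failure the original object is still valid */
		return;
	}

	/* A zero size frees the object and returns NULL, per the kerneldoc. */
	q = krealloc(q, 0, GFP_KERNEL);
}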