| author    | Pekka Enberg <penberg@cs.helsinki.fi>    | 2005-11-13 16:06:46 -0800 |
|-----------|------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@g5.osdl.org>    | 2005-11-13 18:14:12 -0800 |
| commit    | 065d41cb269e9debb18c6d5052e4de1088ae3d8f |                           |
| tree      | db3fbad420fc3dec144c76ecc51cc1d72e31aef9 |                           |
| parent    | 669ed17521b9b78cdbeac8a53c30599aca9527ce |                           |
[PATCH] slab: convert cache to page mapping macros
This patch converts object cache <-> page mapping macros to static inline
functions to make them more explicit and readable.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
mm/slab.c | 49
1 file changed, 32 insertions, 17 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 8a73dcfc6a2..15d25ae5b68 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,14 +565,29 @@ static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 #define BREAK_GFP_ORDER_LO 0
 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
 
-/* Macros for storing/retrieving the cachep and or slab from the
+/* Functions for storing/retrieving the cachep and or slab from the
  * global 'mem_map'. These are used to find the slab an obj belongs to.
  * With kfree(), these are used to find the cache which an obj belongs to.
  */
-#define SET_PAGE_CACHE(pg,x)  ((pg)->lru.next = (struct list_head *)(x))
-#define GET_PAGE_CACHE(pg)    ((kmem_cache_t *)(pg)->lru.next)
-#define SET_PAGE_SLAB(pg,x)   ((pg)->lru.prev = (struct list_head *)(x))
-#define GET_PAGE_SLAB(pg)     ((struct slab *)(pg)->lru.prev)
+static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
+{
+        page->lru.next = (struct list_head *)cache;
+}
+
+static inline struct kmem_cache *page_get_cache(struct page *page)
+{
+        return (struct kmem_cache *)page->lru.next;
+}
+
+static inline void page_set_slab(struct page *page, struct slab *slab)
+{
+        page->lru.prev = (struct list_head *)slab;
+}
+
+static inline struct slab *page_get_slab(struct page *page)
+{
+        return (struct slab *)page->lru.prev;
+}
 
 /* These are the default caches for kmalloc. Custom caches can have other sizes. */
 struct cache_sizes malloc_sizes[] = {
@@ -1368,7 +1383,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
         /* Print some data about the neighboring objects, if they
          * exist:
          */
-        struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp));
+        struct slab *slabp = page_get_slab(virt_to_page(objp));
         int objnr;
 
         objnr = (objp-slabp->s_mem)/cachep->objsize;
@@ -2138,8 +2153,8 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
         i = 1 << cachep->gfporder;
         page = virt_to_page(objp);
         do {
-                SET_PAGE_CACHE(page, cachep);
-                SET_PAGE_SLAB(page, slabp);
+                page_set_cache(page, cachep);
+                page_set_slab(page, slabp);
                 page++;
         } while (--i);
 }
@@ -2269,14 +2284,14 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
         kfree_debugcheck(objp);
         page = virt_to_page(objp);
 
-        if (GET_PAGE_CACHE(page) != cachep) {
+        if (page_get_cache(page) != cachep) {
                 printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
-                                GET_PAGE_CACHE(page),cachep);
+                                page_get_cache(page),cachep);
                 printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-                printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name);
+                printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
                 WARN_ON(1);
         }
-        slabp = GET_PAGE_SLAB(page);
+        slabp = page_get_slab(page);
 
         if (cachep->flags & SLAB_RED_ZONE) {
                 if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
@@ -2628,7 +2643,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
                 struct slab *slabp;
                 unsigned int objnr;
 
-                slabp = GET_PAGE_SLAB(virt_to_page(objp));
+                slabp = page_get_slab(virt_to_page(objp));
                 l3 = cachep->nodelists[node];
                 list_del(&slabp->list);
                 objnr = (objp - slabp->s_mem) / cachep->objsize;
@@ -2744,7 +2759,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 #ifdef CONFIG_NUMA
         {
                 struct slab *slabp;
-                slabp = GET_PAGE_SLAB(virt_to_page(objp));
+                slabp = page_get_slab(virt_to_page(objp));
                 if (unlikely(slabp->nodeid != numa_node_id())) {
                         struct array_cache *alien = NULL;
                         int nodeid = slabp->nodeid;
@@ -2830,7 +2845,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
         page = virt_to_page(ptr);
         if (unlikely(!PageSlab(page)))
                 goto out;
-        if (unlikely(GET_PAGE_CACHE(page) != cachep))
+        if (unlikely(page_get_cache(page) != cachep))
                 goto out;
         return 1;
 out:
@@ -3026,7 +3041,7 @@ void kfree(const void *objp)
                 return;
         local_irq_save(flags);
         kfree_debugcheck(objp);
-        c = GET_PAGE_CACHE(virt_to_page(objp));
+        c = page_get_cache(virt_to_page(objp));
         __cache_free(c, (void*)objp);
         local_irq_restore(flags);
 }
@@ -3596,7 +3611,7 @@ unsigned int ksize(const void *objp)
         if (unlikely(objp == NULL))
                 return 0;
 
-        return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp)));
+        return obj_reallen(page_get_cache(virt_to_page(objp)));
 }
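As an aside for readers outside the kernel tree, the sketch below mimics the pattern the patch formalizes: a slab page's otherwise unused lru.next/lru.prev fields double as back-pointers to the owning cache and slab descriptor. The struct page, struct kmem_cache and struct slab definitions here are simplified stand-ins invented for illustration, not the kernel's; only the helper signatures mirror the ones added above.

```c
/* Standalone illustration only -- simplified stand-in types, not the
 * kernel's definitions. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct page       { struct list_head lru; };   /* stand-in for the real struct page */
struct kmem_cache { const char *name; };       /* stand-in for the real cache descriptor */
struct slab       { void *s_mem; };            /* stand-in for the real slab descriptor */

/* Same shape as the helpers the patch adds: the unused lru.next/lru.prev
 * of a slab page store back-pointers to its cache and slab descriptor. */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	return (struct slab *)page->lru.prev;
}

int main(void)
{
	struct kmem_cache cache = { .name = "example-cache" };
	struct slab slab = { .s_mem = NULL };
	struct page page;

	/* What set_slab_attr() does for every page backing a new slab. */
	page_set_cache(&page, &cache);
	page_set_slab(&page, &slab);

	/* What the kfree()/kmem_cache_free() paths do: map an object's page
	 * back to its owning cache and slab descriptor. */
	printf("cache: %s, slab: %p\n",
	       page_get_cache(&page)->name, (void *)page_get_slab(&page));

	/* Unlike the old macros, a call with swapped or wrong pointer types,
	 * e.g. page_set_cache(&cache, &page), is flagged by the compiler
	 * instead of being silently cast. */
	return 0;
}
```

Compared with the old SET_PAGE_*/GET_PAGE_* macros, the static inline form gives the compiler real parameter types to check, so passing the wrong kind of pointer is reported at build time rather than hidden by the macro's cast.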