commit     e12ba74d8ff3e2f73a583500d7095e406df4d093
tree       a0d3385b65f0b3e1e00b0bbf11b75e7538a93edb
parent     c361be55b3128474aa66d31092db330b07539103
author     Mel Gorman <mel@csn.ul.ie>  2007-10-16 01:25:52 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 09:43:00 -0700
Group short-lived and reclaimable kernel allocations

This patch marks a number of allocations that are either short-lived, such as
network buffers, or reclaimable, such as inode allocations. When something
like updatedb runs, long-lived and unmovable kernel allocations tend to be
spread throughout the address space, which increases fragmentation.

This patch groups these allocations together as much as possible by adding a
new migrate type. The MIGRATE_RECLAIMABLE type is for allocations that can be
reclaimed on demand but not moved; i.e. they can be "migrated" only by
deleting them and re-reading the information from elsewhere.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
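
The patch leaves callers with three ways to opt in to the new grouping: GFP_TEMPORARY for one-off page allocations, SLAB_TEMPORARY for whole slab caches, and set_migrateflags() for overriding the mobility bits of a single allocation. The sketch below is only illustrative (the example_* names and struct are hypothetical); it mirrors the call patterns used by the hunks that follow.

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/slab.h>

/*
 * 1. Short-lived page allocation: GFP_TEMPORARY is GFP_KERNEL plus
 *    __GFP_RECLAIMABLE, so the page is served from a MIGRATE_RECLAIMABLE
 *    pageblock where possible (see the fs/proc conversions below).
 */
static char *example_get_scratch_page(void)
{
        return (char *)__get_free_page(GFP_TEMPORARY);
}

/*
 * 2. Slab cache of short-lived objects: SLAB_TEMPORARY (an alias for
 *    SLAB_RECLAIM_ACCOUNT) makes slab/slub request __GFP_RECLAIMABLE
 *    pages for the cache's backing slabs (see the fs/jbd conversions).
 */
struct example_entry {
        unsigned long key;
        void *data;
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
        example_cachep = kmem_cache_create("example_entry",
                                sizeof(struct example_entry),
                                0, SLAB_TEMPORARY, NULL);
        return example_cachep ? 0 : -ENOMEM;
}

/*
 * 3. Per-call override: keep the caller's gfp_mask but replace its
 *    mobility bits with __GFP_RECLAIMABLE, as fs/buffer.c and
 *    lib/radix-tree.c do below.
 */
static struct example_entry *example_entry_alloc(gfp_t gfp_mask)
{
        return kmem_cache_alloc(example_cachep,
                        set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
}
```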
-rw-r--r-- | fs/buffer.c                     |  3
-rw-r--r-- | fs/dcache.c                     |  2
-rw-r--r-- | fs/jbd/journal.c                |  4
-rw-r--r-- | fs/jbd/revoke.c                 |  6
-rw-r--r-- | fs/proc/base.c                  | 13
-rw-r--r-- | fs/proc/generic.c               |  2
-rw-r--r-- | include/linux/gfp.h             | 15
-rw-r--r-- | include/linux/mmzone.h          |  6
-rw-r--r-- | include/linux/pageblock-flags.h |  2
-rw-r--r-- | include/linux/slab.h            |  4
-rw-r--r-- | kernel/cpuset.c                 |  2
-rw-r--r-- | lib/radix-tree.c                |  6
-rw-r--r-- | mm/page_alloc.c                 | 10
-rw-r--r-- | mm/shmem.c                      |  4
-rw-r--r-- | mm/slab.c                       |  2
-rw-r--r-- | mm/slub.c                       |  3
16 files changed, 56 insertions, 28 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a406cfd89e3..faceb5eecca 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3169,7 +3169,8 @@ static void recalc_bh_state(void)
 
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+                                set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
         if (ret) {
                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
                 get_cpu_var(bh_accounting).nr++;
diff --git a/fs/dcache.c b/fs/dcache.c
index 678d39deb60..7da0cf50873 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -903,7 +903,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
         struct dentry *dentry;
         char *dname;
 
-        dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL); 
+        dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
         if (!dentry)
                 return NULL;
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 06ab3c10b1b..a6be78c05dc 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1710,7 +1710,7 @@ static int journal_init_journal_head_cache(void)
         journal_head_cache = kmem_cache_create("journal_head",
                                 sizeof(struct journal_head),
                                 0,              /* offset */
-                                0,              /* flags */
+                                SLAB_TEMPORARY, /* flags */
                                 NULL);          /* ctor */
         retval = 0;
         if (journal_head_cache == 0) {
@@ -2006,7 +2006,7 @@ static int __init journal_init_handle_cache(void)
         jbd_handle_cache = kmem_cache_create("journal_handle",
                                 sizeof(handle_t),
                                 0,              /* offset */
-                                0,              /* flags */
+                                SLAB_TEMPORARY, /* flags */
                                 NULL);          /* ctor */
         if (jbd_handle_cache == NULL) {
                 printk(KERN_EMERG "JBD: failed to create handle cache\n");
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 62e13c8db13..ad2eacf570c 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -170,13 +170,15 @@ int __init journal_init_revoke_caches(void)
 {
         revoke_record_cache = kmem_cache_create("revoke_record",
                                            sizeof(struct jbd_revoke_record_s),
-                                           0, SLAB_HWCACHE_ALIGN, NULL);
+                                           0,
+                                           SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
+                                           NULL);
         if (revoke_record_cache == 0)
                 return -ENOMEM;
 
         revoke_table_cache = kmem_cache_create("revoke_table",
                                            sizeof(struct jbd_revoke_table_s),
-                                           0, 0, NULL);
+                                           0, SLAB_TEMPORARY, NULL);
         if (revoke_table_cache == 0) {
                 kmem_cache_destroy(revoke_record_cache);
                 revoke_record_cache = NULL;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e5d0953d4db..78fdfea1a7f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -492,7 +492,7 @@ static ssize_t proc_info_read(struct file * file, char __user * buf,
                 count = PROC_BLOCK_SIZE;
 
         length = -ENOMEM;
-        if (!(page = __get_free_page(GFP_KERNEL)))
+        if (!(page = __get_free_page(GFP_TEMPORARY)))
                 goto out;
 
         length = PROC_I(inode)->op.proc_read(task, (char*)page);
@@ -532,7 +532,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
                 goto out;
 
         ret = -ENOMEM;
-        page = (char *)__get_free_page(GFP_USER);
+        page = (char *)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 goto out;
 
@@ -602,7 +602,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
                 goto out;
 
         copied = -ENOMEM;
-        page = (char *)__get_free_page(GFP_USER);
+        page = (char *)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 goto out;
 
@@ -788,7 +788,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
                 /* No partial writes. */
                 return -EINVAL;
         }
-        page = (char*)__get_free_page(GFP_USER);
+        page = (char*)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 return -ENOMEM;
         length = -EFAULT;
@@ -954,7 +954,8 @@ static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt,
                             char __user *buffer, int buflen)
 {
         struct inode * inode;
-        char *tmp = (char*)__get_free_page(GFP_KERNEL), *path;
+        char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
+        char *path;
         int len;
 
         if (!tmp)
@@ -1726,7 +1727,7 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
                 goto out;
 
         length = -ENOMEM;
-        page = (char*)__get_free_page(GFP_USER);
+        page = (char*)__get_free_page(GFP_TEMPORARY);
         if (!page)
                 goto out;
 
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index b5e7155d30d..1bdb6243575 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -74,7 +74,7 @@ proc_file_read(struct file *file, char __user *buf, size_t nbytes,
                 nbytes = MAX_NON_LFS - pos;
 
         dp = PDE(inode);
-        if (!(page = (char*) __get_free_page(GFP_KERNEL)))
+        if (!(page = (char*) __get_free_page(GFP_TEMPORARY)))
                 return -ENOMEM;
 
         while ((nbytes > 0) && !eof) {
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index da8aa872eb6..f8ffcd401c5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -48,9 +48,10 @@ struct vm_area_struct;
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
 #define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE   ((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_MOVABLE    ((__force gfp_t)0x80000u) /* Page is movable */
+#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+#define __GFP_MOVABLE    ((__force gfp_t)0x100000u) /* Page is movable */
 
-#define __GFP_BITS_SHIFT 20     /* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 21     /* Room for 21 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -60,6 +61,8 @@ struct vm_area_struct;
 #define GFP_NOIO        (__GFP_WAIT)
 #define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
 #define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY   (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+                         __GFP_RECLAIMABLE)
 #define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                          __GFP_HIGHMEM)
@@ -80,7 +83,7 @@ struct vm_area_struct;
 #endif
 
 /* This mask makes up all the page movable related flags */
-#define GFP_MOVABLE_MASK (__GFP_MOVABLE)
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
 
 /* Control page allocator reclaim behavior */
 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
@@ -129,6 +132,12 @@ static inline enum zone_type gfp_zone(gfp_t flags)
         return base + ZONE_NORMAL;
 }
 
+static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
+{
+        BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+        return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+}
+
 /*
  * There is only one page-allocator function, and two main namespaces to
  * it.  The alloc_page*() variants return 'struct page *' and as such
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7d7e4fe0fda..4721e9aa3ce 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,10 +35,12 @@
 
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE     0
-#define MIGRATE_MOVABLE       1
-#define MIGRATE_TYPES         2
+#define MIGRATE_RECLAIMABLE   1
+#define MIGRATE_MOVABLE       2
+#define MIGRATE_TYPES         3
 #else
 #define MIGRATE_UNMOVABLE     0
+#define MIGRATE_UNRECLAIMABLE 0
 #define MIGRATE_MOVABLE       0
 #define MIGRATE_TYPES         1
 #endif
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 3619d52a425..5456da6b4ad 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -31,7 +31,7 @@
 
 /* Bit indices that affect a whole block of pages */
 enum pageblock_bits {
-        PB_range(PB_migrate, 1), /* 1 bit required for migrate types */
+        PB_range(PB_migrate, 2), /* 2 bits required for migrate types */
         NR_PAGEBLOCK_BITS
 };
 
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d859354b9e5..3a5bad3ad12 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,12 +24,14 @@
 #define SLAB_HWCACHE_ALIGN      0x00002000UL    /* Align objs on cache lines */
 #define SLAB_CACHE_DMA          0x00004000UL    /* Use GFP_DMA memory */
 #define SLAB_STORE_USER         0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT    0x00020000UL    /* Objects are reclaimable */
 #define SLAB_PANIC              0x00040000UL    /* Panic if kmem_cache_create() fails */
 #define SLAB_DESTROY_BY_RCU     0x00080000UL    /* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD         0x00100000UL    /* Spread some memory over cpuset */
 #define SLAB_TRACE              0x00200000UL    /* Trace allocations and frees */
 
+/* The following flags affect the page allocator grouping pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT    0x00020000UL            /* Objects are reclaimable */
+#define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8b2daac4de8..e196510aa40 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1463,7 +1463,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
         ssize_t retval = 0;
         char *s;
 
-        if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+        if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
                 return -ENOMEM;
 
         s = page;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 519d3f00ef9..6b26f9d3980 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -98,7 +98,8 @@ radix_tree_node_alloc(struct radix_tree_root *root)
         struct radix_tree_node *ret;
         gfp_t gfp_mask = root_gfp_mask(root);
 
-        ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+        ret = kmem_cache_alloc(radix_tree_node_cachep,
+                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
         if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
                 struct radix_tree_preload *rtp;
 
@@ -142,7 +143,8 @@ int radix_tree_preload(gfp_t gfp_mask)
         rtp = &__get_cpu_var(radix_tree_preloads);
         while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
                 preempt_enable();
-                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+                node = kmem_cache_alloc(radix_tree_node_cachep,
+                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
                 if (node == NULL)
                         goto out;
                 preempt_disable();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d575a3ee8dd..29f4de1423c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 
 static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 {
-        return ((gfp_flags & __GFP_MOVABLE) != 0);
+        WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
 #else
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-        [MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE   },
-        [MIGRATE_MOVABLE]   = { MIGRATE_UNMOVABLE },
+        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
+        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
+        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
 };
 
 /*
diff --git a/mm/shmem.c b/mm/shmem.c
index 855b93b3637..76ecbac0d55 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
          * might be reconsidered if it ever diverges from PAGE_SIZE.
          *
-         * __GFP_MOVABLE is masked out as swap vectors cannot move
+         * Mobility flags are masked out as swap vectors cannot move
          */
-        return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+        return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
                                 PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index 8fb56ae685d..e34bcb87a6e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
         flags |= cachep->gfpflags;
+        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+                flags |= __GFP_RECLAIMABLE;
 
         page = alloc_pages_node(nodeid, flags, cachep->gfporder);
         if (!page)
diff --git a/mm/slub.c b/mm/slub.c
index 19d3202ca2d..a90c4ffc957 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
         if (s->flags & SLAB_CACHE_DMA)
                 flags |= SLUB_DMA;
 
+        if (s->flags & SLAB_RECLAIM_ACCOUNT)
+                flags |= __GFP_RECLAIMABLE;
+
         if (node == -1)
                 page = alloc_pages(flags, s->order);
         else
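
For reference, the mapping added to mm/page_alloc.c above is pure bit arithmetic: __GFP_MOVABLE supplies bit 1 of the migrate type and __GFP_RECLAIMABLE supplies bit 0, so the three types stay mutually exclusive, and passing both flags together (which the new WARN_ON catches) would index past MIGRATE_TYPES. The following is a small user-space sketch, not kernel code, that mirrors the new gfpflags_to_migratetype() using the constants this patch defines:

```c
#include <assert.h>
#include <stdio.h>

/* Flag values as defined by this patch in include/linux/gfp.h. */
#define __GFP_RECLAIMABLE 0x80000u
#define __GFP_MOVABLE     0x100000u

/* Migrate types as defined in include/linux/mmzone.h. */
enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES };

/* Mirror of the new gfpflags_to_migratetype(). */
static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
        return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
                ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
        /* GFP_KERNEL sets neither mobility bit. */
        assert(gfpflags_to_migratetype(0) == MIGRATE_UNMOVABLE);
        /* GFP_TEMPORARY adds __GFP_RECLAIMABLE. */
        assert(gfpflags_to_migratetype(__GFP_RECLAIMABLE) == MIGRATE_RECLAIMABLE);
        /* User and page-cache allocations pass __GFP_MOVABLE. */
        assert(gfpflags_to_migratetype(__GFP_MOVABLE) == MIGRATE_MOVABLE);
        printf("all three migrate types map as expected\n");
        return 0;
}
```

When the free list for the requested type is empty, the fallbacks[] table above is walked in order; for both non-movable types MIGRATE_MOVABLE is the last resort, which limits how often movable pageblocks are polluted by unmovable or reclaimable pages.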