author | Christoph Lameter <clameter@engr.sgi.com> | 2006-01-18 17:42:29 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-18 19:20:17 -0800 |
commit | fc3012896337c83a056c496d7cfb0072e1591181 (patch) | |
tree | 5b774e59ba982fd4330eb96abace9cda9d744b0f /mm/mempolicy.c | |
parent | 053837fce7aa79025ed57656855df09f80175527 (diff) | |
[PATCH] Simplify migrate_page_add
Simplify migrate_page_add() after feedback from Hugh. This also allows us to
drop the vma parameter from migrate_page_add().
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 43 |
1 file changed, 7 insertions, 36 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 551cde40520..a683a66599b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -185,8 +185,8 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
 }
 
 static void gather_stats(struct page *, void *);
-static void migrate_page_add(struct vm_area_struct *vma,
-	struct page *page, struct list_head *pagelist, unsigned long flags);
+static void migrate_page_add(struct page *page, struct list_head *pagelist,
+				unsigned long flags);
 
 /* Scan through pages checking if pages follow certain conditions. */
 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
@@ -228,7 +228,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		if (flags & MPOL_MF_STATS)
 			gather_stats(page, private);
 		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-			migrate_page_add(vma, page, private, flags);
+			migrate_page_add(page, private, flags);
 		else
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -531,42 +531,13 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
  * page migration
  */
 
-/* Check if we are the only process mapping the page in question */
-static inline int single_mm_mapping(struct mm_struct *mm,
-		struct address_space *mapping)
-{
-	struct vm_area_struct *vma;
-	struct prio_tree_iter iter;
-	int rc = 1;
-
-	spin_lock(&mapping->i_mmap_lock);
-	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
-		if (mm != vma->vm_mm) {
-			rc = 0;
-			goto out;
-		}
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
-		if (mm != vma->vm_mm) {
-			rc = 0;
-			goto out;
-		}
-out:
-	spin_unlock(&mapping->i_mmap_lock);
-	return rc;
-}
-
-/*
- * Add a page to be migrated to the pagelist
- */
-static void migrate_page_add(struct vm_area_struct *vma,
-	struct page *page, struct list_head *pagelist, unsigned long flags)
+static void migrate_page_add(struct page *page, struct list_head *pagelist,
+				unsigned long flags)
 {
 	/*
-	 * Avoid migrating a page that is shared by others and not writable.
+	 * Avoid migrating a page that is shared with others.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
-	    mapping_writably_mapped(page->mapping) ||
-	    single_mm_mapping(vma->vm_mm, page->mapping)) {
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
 		if (isolate_lru_page(page))
 			list_add(&page->lru, pagelist);
 	}
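The diff above already contains the change; purely as an illustration, here is a minimal sketch (not part of the patch) that isolates the old and new admission tests side by side. The helper names old_can_migrate() and new_can_migrate() are hypothetical; the predicates they use (page->mapping, PageAnon(), mapping_writably_mapped(), single_mm_mapping(), page_mapcount()) are the ones visible in the diff.

#include <linux/mm.h>		/* struct page, PageAnon(), page_mapcount() */
#include <linux/fs.h>		/* mapping_writably_mapped() */
#include <linux/mempolicy.h>	/* MPOL_MF_MOVE_ALL */

/*
 * Hypothetical helpers, for illustration only: the admission test that
 * decides whether a page found during the policy scan may be queued
 * for migration.
 */
static int old_can_migrate(struct vm_area_struct *vma, struct page *page,
			   unsigned long flags)
{
	/* Old test: a set of special cases for file-backed sharing. */
	return (flags & MPOL_MF_MOVE_ALL) ||
	       !page->mapping ||			/* no address_space */
	       PageAnon(page) ||			/* anonymous page */
	       mapping_writably_mapped(page->mapping) ||/* writably mapped file */
	       single_mm_mapping(vma->vm_mm, page->mapping);
}

static int new_can_migrate(struct page *page, unsigned long flags)
{
	/*
	 * New test: move a page only if it is mapped exactly once,
	 * unless MPOL_MF_MOVE_ALL explicitly allows moving shared pages.
	 */
	return (flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1;
}

The practical effect is that the decision no longer needs the vma at all: its only use was vma->vm_mm passed to single_mm_mapping(), and dropping that walk also avoids taking mapping->i_mmap_lock on this path.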