author     Hugh Dickins <hugh.dickins@tiscali.co.uk>        2009-12-14 17:59:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-15 08:53:18 -0800
commit     31e855ea7173bdb0520f9684580423a9560f66e0 (patch)
tree       20ff327e2a58929696946971a8201da559785338 /mm/ksm.c
parent     93d17715a5b960d34220f2edba3e6cee9b5b1c58 (diff)
ksm: remove redundancies when merging page
There is no need for replace_page() to calculate a write-protected prot:
vm_page_prot must already be write-protected for an anonymous page (see
mm/memory.c do_anonymous_page() for similar reliance on vm_page_prot).
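
For context, the reliance on vm_page_prot that the message points to looks
roughly like this in the anonymous fault path (a paraphrased sketch of
do_anonymous_page()'s pte setup from kernels of that era, not a verbatim
quote of mm/memory.c):

	/*
	 * For a private anonymous mapping, vma->vm_page_prot carries no
	 * write bit (copy-on-write semantics), so a pte built from it is
	 * already write-protected; the write bit is only added explicitly
	 * when the VMA itself is writable.
	 */
	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));
	set_pte_at(mm, address, page_table, entry);

Since KSM only merges PageAnon pages, which live in such private mappings,
replace_page() can build the new pte directly from vma->vm_page_prot instead
of recomputing a write-protected prot.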
There is no need for try_to_merge_one_page() to get_page and put_page on
newpage and oldpage: in every case we already hold a reference to each of
them.  That is also why write_protect_page()'s raised-count check changes
from page_mapcount(page) + 2 + swapped to page_mapcount(page) + 1 + swapped:
with the extra get_page on oldpage gone, KSM holds only the single reference
taken when the page was looked up.
But some instinct makes me move try_to_merge_one_page()'s unlock_page of
oldpage down after replace_page(): that doesn't increase contention on the
ksm page, and makes thinking about the transition easier.
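
For orientation, the shape of try_to_merge_one_page() after this patch is
roughly the following (reconstructed from the diff below, with declarations
assumed and unrelated details trimmed, so read it as a sketch rather than
the exact source):

static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *oldpage,
				 struct page *newpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (!PageAnon(oldpage))
		goto out;

	/*
	 * No get_page()/put_page() pair: the caller already holds a
	 * reference to both oldpage and newpage.
	 */
	if (!trylock_page(oldpage))
		goto out;

	/* Write-protect and compare under the page lock... */
	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
	    pages_identical(oldpage, newpage))
		err = replace_page(vma, oldpage, newpage, orig_pte);

	/* ...and unlock only after replace_page() has run. */
	unlock_page(oldpage);
out:
	return err;
}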
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--   mm/ksm.c   26
1 file changed, 6 insertions, 20 deletions
@@ -647,7 +647,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	 * Check that no O_DIRECT or similar I/O is in progress on the
 	 * page
 	 */
-	if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
+	if (page_mapcount(page) + 1 + swapped != page_count(page)) {
 		set_pte_at_notify(mm, addr, ptep, entry);
 		goto out_unlock;
 	}
@@ -682,11 +682,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
 	pte_t *ptep;
 	spinlock_t *ptl;
 	unsigned long addr;
-	pgprot_t prot;
 	int err = -EFAULT;
 
-	prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);
-
 	addr = page_address_in_vma(oldpage, vma);
 	if (addr == -EFAULT)
 		goto out;
@@ -714,7 +711,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
 
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
 	ptep_clear_flush(vma, addr, ptep);
-	set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));
+	set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, vma->vm_page_prot));
 
 	page_remove_rmap(oldpage);
 	put_page(oldpage);
@@ -746,13 +743,9 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 
 	if (!(vma->vm_flags & VM_MERGEABLE))
 		goto out;
-
 	if (!PageAnon(oldpage))
 		goto out;
 
-	get_page(newpage);
-	get_page(oldpage);
-
 	/*
 	 * We need the page lock to read a stable PageSwapCache in
 	 * write_protect_page().  We use trylock_page() instead of
@@ -761,25 +754,18 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	 * then come back to this page when it is unlocked.
 	 */
 	if (!trylock_page(oldpage))
-		goto out_putpage;
+		goto out;
 	/*
 	 * If this anonymous page is mapped only here, its pte may need
 	 * to be write-protected.  If it's mapped elsewhere, all of its
 	 * ptes are necessarily already write-protected.  But in either
 	 * case, we need to lock and check page_count is not raised.
 	 */
-	if (write_protect_page(vma, oldpage, &orig_pte)) {
-		unlock_page(oldpage);
-		goto out_putpage;
-	}
-	unlock_page(oldpage);
-
-	if (pages_identical(oldpage, newpage))
+	if (write_protect_page(vma, oldpage, &orig_pte) == 0 &&
+	    pages_identical(oldpage, newpage))
 		err = replace_page(vma, oldpage, newpage, orig_pte);
 
-out_putpage:
-	put_page(oldpage);
-	put_page(newpage);
+	unlock_page(oldpage);
 out:
 	return err;
 }