author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2011-05-24 17:12:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-05-25 08:39:19 -0700
commit    746b18d421da7f27e948e8af1ad82b6d0309324d (patch)
tree      d4e756977673b7b8166c00e34a21d85c5e26ea6e
parent    6111e4ca6829a0e8b092b8e5eeb6b5366091f29c (diff)
mm: use refcounts for page_lock_anon_vma()
Convert page_lock_anon_vma() over to use refcounts.  This is done to
prepare for the conversion of anon_vma from spinlock to mutex.

Sadly this increases the cost of page_lock_anon_vma() from one to two
atomics; a follow-up patch addresses this, let's keep that simple for now.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
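[Editor's note] The heart of the conversion is the lookup pattern visible in the
mm/rmap.c hunk below: under rcu_read_lock(), the anon_vma is recovered from
page->mapping and pinned with atomic_inc_not_zero(), which refuses to take a
reference once the count has already dropped to zero.  The user-space sketch
below only illustrates that "take a reference only if still live" pattern; the
struct and helper names (obj, obj_tryget, obj_put) are invented for the example
and are not kernel API.

/*
 * Minimal sketch of the refcount pattern the patch switches to.
 * obj_tryget() mirrors atomic_inc_not_zero(): a lookup may only pin the
 * object while its refcount is still non-zero; once it has hit zero the
 * object is being torn down and the lookup must fail.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* plays the role of anon_vma->refcount */
};

/* Take a reference only if the count is still non-zero. */
static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already zero: lookup loses the race */
}

/* Drop a reference; the last put frees the object. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* creator's reference */

	if (obj_tryget(o))		/* succeeds while refcount > 0 */
		obj_put(o);

	obj_put(o);			/* last put frees the object */
	printf("refcount pattern exercised\n");
	return 0;
}

With the reference held, page_lock_anon_vma() becomes page_get_anon_vma() plus
anon_vma_lock(), which is why the changelog notes the cost rising from one
atomic to two.  In the kernel the tryget is only safe under rcu_read_lock()
because SLAB_DESTROY_BY_RCU keeps the memory typed as an anon_vma for the
duration of the RCU read-side section, as the comment in the patch explains.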
-rw-r--r--  mm/migrate.c  17
-rw-r--r--  mm/rmap.c     42
2 files changed, 31 insertions(+), 28 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 34132f8e910..e4a5c912983 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -721,15 +721,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* Only page_lock_anon_vma() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
*/
- anon_vma = page_lock_anon_vma(page);
+ anon_vma = page_get_anon_vma(page);
if (anon_vma) {
/*
- * Take a reference count on the anon_vma if the
- * page is mapped so that it is guaranteed to
- * exist when the page is remapped later
+ * Anon page
*/
- get_anon_vma(anon_vma);
- page_unlock_anon_vma(anon_vma);
} else if (PageSwapCache(page)) {
/*
* We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
- if (PageAnon(hpage)) {
- anon_vma = page_lock_anon_vma(hpage);
- if (anon_vma) {
- get_anon_vma(anon_vma);
- page_unlock_anon_vma(anon_vma);
- }
- }
+ if (PageAnon(hpage))
+ anon_vma = page_get_anon_vma(hpage);
try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
diff --git a/mm/rmap.c b/mm/rmap.c
index cc140811af5..d271845d7d1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -337,9 +337,9 @@ void __init anon_vma_init(void)
* that the anon_vma pointer from page->mapping is valid if there is a
* mapcount, we can dereference the anon_vma after observing those.
*/
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_get_anon_vma(struct page *page)
{
- struct anon_vma *anon_vma, *root_anon_vma;
+ struct anon_vma *anon_vma = NULL;
unsigned long anon_mapping;
rcu_read_lock();
@@ -350,30 +350,42 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- root_anon_vma = ACCESS_ONCE(anon_vma->root);
- spin_lock(&root_anon_vma->lock);
+ if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+ anon_vma = NULL;
+ goto out;
+ }
/*
* If this page is still mapped, then its anon_vma cannot have been
- * freed. But if it has been unmapped, we have no security against
- * the anon_vma structure being freed and reused (for another anon_vma:
- * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
- * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
- * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+ * freed. But if it has been unmapped, we have no security against the
+ * anon_vma structure being freed and reused (for another anon_vma:
+ * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+ * above cannot corrupt).
*/
- if (page_mapped(page))
- return anon_vma;
-
- spin_unlock(&root_anon_vma->lock);
+ if (!page_mapped(page)) {
+ put_anon_vma(anon_vma);
+ anon_vma = NULL;
+ }
out:
rcu_read_unlock();
- return NULL;
+
+ return anon_vma;
+}
+
+struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+ struct anon_vma *anon_vma = page_get_anon_vma(page);
+
+ if (anon_vma)
+ anon_vma_lock(anon_vma);
+
+ return anon_vma;
}
void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
anon_vma_unlock(anon_vma);
- rcu_read_unlock();
+ put_anon_vma(anon_vma);
}
/*