author     Nick Piggin <npiggin@suse.de>                        2007-05-16 22:11:21 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-17 05:23:06 -0700
commit     c97a9e10eaee328e6eea9f76acf7bacd7d48ef56
tree       f14bf796d087e130452a2e2457c75eb1eca27483
parent     ea125892a17f43919c726777ed1e4929d41e7984
mm: more rmap checking
Re-introduce rmap verification patches that Hugh removed when he removed
PG_map_lock. PG_map_lock actually isn't needed to synchronise access to
anonymous pages, because PG_locked and PTL together already do.
These checks were important in discovering and fixing a rare rmap corruption
in SLES9.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
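
The new assertions all test one invariant: once an anonymous page has been mapped, page->mapping holds the owning anon_vma pointer tagged with the PAGE_MAPPING_ANON bit, and page->index equals linear_page_index(vma, address) for the address it is mapped at. Below is a minimal user-space sketch of that invariant; every type and constant in it is a simplified stand-in for illustration, not the kernel's actual definition.

/* Hedged sketch, not kernel code: models the two BUG_ON checks that
 * __page_check_anon_rmap() performs on an already-mapped anon page. */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT        12
#define PAGE_MAPPING_ANON 1UL   /* low bit tags page->mapping as an anon_vma */

struct anon_vma { int unused; };

struct vm_area_struct {
        uintptr_t vm_start;             /* first user address covered by the VMA */
        uintptr_t vm_pgoff;             /* page offset of vm_start in the mapping */
        struct anon_vma *anon_vma;
};

struct page {
        uintptr_t mapping;              /* anon_vma pointer | PAGE_MAPPING_ANON */
        uintptr_t index;                /* linear page index within the mapping */
};

/* Models linear_page_index(): which page of the mapping does addr fall in? */
static uintptr_t linear_page_index(const struct vm_area_struct *vma,
                                   uintptr_t addr)
{
        return ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

/* The invariant __page_check_anon_rmap() asserts under CONFIG_DEBUG_VM. */
static void check_anon_rmap(const struct page *page,
                            const struct vm_area_struct *vma, uintptr_t addr)
{
        uintptr_t anon = (uintptr_t)vma->anon_vma + PAGE_MAPPING_ANON;

        assert(page->mapping == anon);                          /* right anon_vma? */
        assert(page->index == linear_page_index(vma, addr));   /* right offset? */
}

int main(void)
{
        struct anon_vma av;
        struct vm_area_struct vma = { 0x1000, 0, &av };
        struct page page = { (uintptr_t)&av + PAGE_MAPPING_ANON, 2 };

        check_anon_rmap(&page, &vma, 0x3000);   /* (0x3000-0x1000)>>12 == 2: passes */
        return 0;
}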
-rw-r--r--  include/linux/rmap.h | 13
-rw-r--r--  mm/memory.c          |  2
-rw-r--r--  mm/rmap.c            | 58
3 files changed, 62 insertions, 11 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bdd277223af..97347f22fc2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out
diff --git a/mm/memory.c b/mm/memory.c
index 1d647ab0ee7..cb94488ab96 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	page = vm_normal_page(vma, addr, pte);
 	if (page) {
 		get_page(page);
-		page_dup_rmap(page);
+		page_dup_rmap(page, vma, addr);
 		rss[!!PageAnon(page)]++;
 	}
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 1c1af92732d..850165d32b7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -530,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page,
 }
 
 /**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
@@ -553,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
@@ -573,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
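
A side note on the include/linux/rmap.h hunk: it uses the common kernel idiom where the same function name resolves to an out-of-line checking version under a debug config option (here the checked body lives in mm/rmap.c) and to a trivial static inline otherwise, so production builds keep the bare atomic_inc(). The single-file sketch below shows the shape of that split; DEBUG_CHECKS and counter_inc are illustrative names, not anything from the patch.

/* Sketch of the CONFIG_DEBUG_VM-style split. In the kernel the checked
 * version is only declared in the header and defined in a .c file;
 * both bodies appear here so the sketch compiles as one file. */
#include <assert.h>
#include <stdio.h>

struct counter { int value; };

#ifdef DEBUG_CHECKS
/* Debug build: out-of-line definition that validates before updating. */
void counter_inc(struct counter *c)
{
        assert(c->value >= 0);          /* plays the role of BUG_ON() */
        c->value++;
}
#else
/* Production build: the bare operation, inlined at every call site. */
static inline void counter_inc(struct counter *c)
{
        c->value++;
}
#endif

int main(void)
{
        struct counter c = { 0 };

        counter_inc(&c);                /* identical call site either way */
        printf("%d\n", c.value);        /* prints 1 */
        return 0;
}

Built with -DDEBUG_CHECKS the checked definition is used; built without, the inline one is. Call sites, like copy_one_pte() in the patch, stay identical in both configurations; only the cost of the call changes.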