path: root/mm/memory.c
author		Haggai Eran <haggaie@mellanox.com>	2012-10-08 16:33:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 16:22:58 +0900
commit		6bdb913f0a70a4dfb7f066fb15e2d6f960701d00 (patch)
tree		9a61960b27bf801794104b8bb8fccee1813f1b4b /mm/memory.c
parent		2ec74c3ef2d8c58d71e0e00336fb6b891192155a (diff)
mm: wrap calls to set_pte_at_notify with invalidate_range_start and invalidate_range_end
In order to allow sleeping during invalidate_page mmu notifier calls, we need to avoid calling it while holding the PT lock. In addition to its direct calls, invalidate_page can also be called as a substitute for a change_pte call, in case the notifier client hasn't implemented change_pte.

This patch drops the invalidate_page call from change_pte, and instead wraps all calls to change_pte with invalidate_range_start and invalidate_range_end calls.

Note that change_pte still cannot sleep after this patch, and that clients implementing change_pte should not act on it while more than one invalidate_range_start call is outstanding; otherwise they might miss a later invalidation.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Cc: Andrea Arcangeli <andrea@qumranet.com>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Or Gerlitz <ogerlitz@mellanox.com>
Cc: Haggai Eran <haggaie@mellanox.com>
Cc: Shachar Raindel <raindel@mellanox.com>
Cc: Liran Liss <liranl@mellanox.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
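[Editor's note, not part of the patch: a rough sketch of the client-side rule described above. A notifier client implementing change_pte can count outstanding invalidate_range_start calls and only act on change_pte when exactly one range invalidation — the one now wrapping change_pte itself — is in flight. struct my_notifier, my_update_mapping, my_notifier_ops and the locking scheme below are hypothetical names for illustration; the mmu_notifier_ops callback signatures match this kernel's linux/mmu_notifier.h.]

#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>

struct my_notifier {
	struct mmu_notifier mn;
	spinlock_t lock;			/* init with spin_lock_init() */
	int range_active;			/* outstanding invalidate_range_start calls */
};

/* Hypothetical helper: update the client's secondary mapping for @address. */
static void my_update_mapping(struct my_notifier *n, unsigned long address,
			      pte_t pte)
{
}

static void my_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct my_notifier *n = container_of(mn, struct my_notifier, mn);

	spin_lock(&n->lock);
	n->range_active++;
	/* Tear down secondary mappings covering [start, end) here. */
	spin_unlock(&n->lock);
}

static void my_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct my_notifier *n = container_of(mn, struct my_notifier, mn);

	spin_lock(&n->lock);
	n->range_active--;
	spin_unlock(&n->lock);
}

static void my_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
			  unsigned long address, pte_t pte)
{
	struct my_notifier *n = container_of(mn, struct my_notifier, mn);

	spin_lock(&n->lock);
	/*
	 * After this patch, change_pte always runs inside one
	 * invalidate_range_start/end pair.  If more than that one range
	 * invalidation is outstanding, a later invalidation may already
	 * cover this address, so installing the pte now could let the
	 * client miss it.
	 */
	if (n->range_active == 1)
		my_update_mapping(n, address, pte);
	spin_unlock(&n->lock);
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.invalidate_range_start	= my_invalidate_range_start,
	.invalidate_range_end	= my_invalidate_range_end,
	.change_pte		= my_change_pte,
};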
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index b03a4a21c1d0..01ec048ece8b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2527,6 +2527,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int ret = 0;
 	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;		/* For mmu_notifiers */
+	bool mmun_called = false;	/* For mmu_notifiers */
 
 	old_page = vm_normal_page(vma, address, orig_pte);
 	if (!old_page) {
@@ -2704,6 +2707,11 @@ gotten:
 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
+	mmun_start  = address & PAGE_MASK;
+	mmun_end    = (address & PAGE_MASK) + PAGE_SIZE;
+	mmun_called = true;
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
@@ -2766,14 +2774,12 @@ gotten:
 	} else
 		mem_cgroup_uncharge_page(new_page);
 
+	if (new_page)
+		page_cache_release(new_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (new_page) {
-		if (new_page == old_page)
-			/* cow happened, notify before releasing old_page */
-			mmu_notifier_invalidate_page(mm, address);
-		page_cache_release(new_page);
-	}
+	if (mmun_called)
+		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	if (old_page) {
 		/*
 		 * Don't let another task, with possibly unlocked vma,