path: root/mm/msync.c
author    Hugh Dickins <hugh@veritas.com>  2005-10-29 18:15:53 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-10-29 21:40:36 -0700
commit    0c942a4539c09adf09097315cc174aefd0eeedf7 (patch)
tree      b9b7d5093ca0a130ef2221f8932fabcf4291f6a6 /mm/msync.c
parent    e040f218bb49a6965a5b77edce05fe47a62dda39 (diff)
[PATCH] mm: msync_pte_range progress
Use latency breaking in msync_pte_range like that in copy_pte_range,
instead of the ugly CONFIG_PREEMPT filemap_msync alternatives.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
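For reference, the latency-breaking pattern the patch borrows from copy_pte_range can be sketched in plain C. This is a minimal illustrative sketch under assumed names, not kernel code: should_break(), yield_and_relock() and per_page_work() are hypothetical stand-ins for need_resched()/need_lockbreak(&vma->vm_mm->page_table_lock), cond_resched_lock() and the pte-walking loop body.

#define PAGE_SIZE 4096UL

/* Hypothetical stand-ins for the kernel primitives used by the patch. */
static int should_break(void) { return 0; }
static void yield_and_relock(void) { }
static void per_page_work(unsigned long addr) { (void)addr; }

static void walk_range(unsigned long addr, unsigned long end)
{
        int progress = 0;
again:
        do {
                /* Every ~64 units of work, poll for contention. */
                if (progress >= 64) {
                        progress = 0;
                        if (should_break())
                                break;  /* bail out; addr marks the resume point */
                }
                progress++;
                per_page_work(addr);
        } while (addr += PAGE_SIZE, addr != end);

        /* Reschedule point outside the inner loop; the lock is briefly dropped. */
        yield_and_relock();
        if (addr != end)
                goto again;     /* resume the walk where we left off */
}

Note how the patch itself also weights expensive iterations: after dirtying a page it bumps the counter with progress += 3, so write-heavy ranges reach the break-out check sooner than clean ones.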
Diffstat (limited to 'mm/msync.c')
-rw-r--r--  mm/msync.c | 38
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/mm/msync.c b/mm/msync.c
index 9cab3f2d586..3b5f1c521d4 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -26,12 +26,21 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 unsigned long addr, unsigned long end)
 {
         pte_t *pte;
+        int progress = 0;
 
+again:
         pte = pte_offset_map(pmd, addr);
         do {
                 unsigned long pfn;
                 struct page *page;
 
+                if (progress >= 64) {
+                        progress = 0;
+                        if (need_resched() ||
+                            need_lockbreak(&vma->vm_mm->page_table_lock))
+                                break;
+                }
+                progress++;
                 if (!pte_present(*pte))
                         continue;
                 if (!pte_maybe_dirty(*pte))
@@ -46,8 +55,12 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
         if (ptep_clear_flush_dirty(vma, addr, pte) ||
             page_test_and_clear_dirty(page))
                 set_page_dirty(page);
+        progress += 3;
 } while (pte++, addr += PAGE_SIZE, addr != end);
 pte_unmap(pte - 1);
+        cond_resched_lock(&vma->vm_mm->page_table_lock);
+        if (addr != end)
+                goto again;
 }
 
 static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -106,29 +119,6 @@ static void msync_page_range(struct vm_area_struct *vma,
         spin_unlock(&mm->page_table_lock);
 }
 
-#ifdef CONFIG_PREEMPT
-static inline void filemap_msync(struct vm_area_struct *vma,
-                                 unsigned long addr, unsigned long end)
-{
-        const size_t chunk = 64 * 1024;        /* bytes */
-        unsigned long next;
-
-        do {
-                next = addr + chunk;
-                if (next > end || next < addr)
-                        next = end;
-                msync_page_range(vma, addr, next);
-                cond_resched();
-        } while (addr = next, addr != end);
-}
-#else
-static inline void filemap_msync(struct vm_area_struct *vma,
-                                 unsigned long addr, unsigned long end)
-{
-        msync_page_range(vma, addr, end);
-}
-#endif
-
 /*
  * MS_SYNC syncs the entire file - including mappings.
  *
@@ -150,7 +140,7 @@ static int msync_interval(struct vm_area_struct *vma,
                 return -EBUSY;
 
         if (file && (vma->vm_flags & VM_SHARED)) {
-                filemap_msync(vma, addr, end);
+                msync_page_range(vma, addr, end);
 
                 if (flags & MS_SYNC) {
                         struct address_space *mapping = file->f_mapping;