author     Atsushi Nemoto <anemo@mba.ocn.ne.jp>        2006-12-12 17:14:55 +0000
committer  Linus Torvalds <torvalds@woody.osdl.org>    2006-12-13 09:27:08 -0800
commit     9de455b20705f36384a711d4a20bcf7ba1ab180b (patch)
tree       6eb3643514d12d06a69a5c889d612f66b68288e6 /mm
parent     77fff4ae2b7bba6d66a8287d9ab948e2b6c16145 (diff)
download   linux-3.10-9de455b20705f36384a711d4a20bcf7ba1ab180b.tar.gz
           linux-3.10-9de455b20705f36384a711d4a20bcf7ba1ab180b.tar.bz2
           linux-3.10-9de455b20705f36384a711d4a20bcf7ba1ab180b.zip
[PATCH] Pass vma argument to copy_user_highpage().
To allow a more effective copy_user_highpage() on certain architectures, a vma argument is added to the function and to cow_user_page(), allowing the implementation of these functions to check for the VM_EXEC bit.

The main part of this patch was originally written by Ralf Baechle; Atsushi Nemoto did the debugging.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
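For illustration only, here is a minimal sketch of the kind of architecture-specific override this change makes possible. It is not taken from any real port (the actual MIPS version is not part of this mm-only diff), and the flush_icache_page() call merely stands in for whatever cache maintenance a given architecture would do; the point is that the new vma argument lets the expensive work be limited to VM_EXEC mappings:

/* Hypothetical arch override, not the actual MIPS code from this series. */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom = kmap_atomic(from, KM_USER0);
	void *vto = kmap_atomic(to, KM_USER1);

	copy_page(vto, vfrom);			/* plain data copy */

	/*
	 * Only executable mappings need the instruction cache brought
	 * back in sync with the freshly written data; pure data pages
	 * can skip this work entirely.
	 */
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page(vma, to);	/* illustrative hook */

	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}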
Diffstat (limited to 'mm')
-rw-r--r--   mm/hugetlb.c   6
-rw-r--r--   mm/memory.c    10
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 089092d152a..cb362f761f1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -44,14 +44,14 @@ static void clear_huge_page(struct page *page, unsigned long addr)
}
static void copy_huge_page(struct page *dst, struct page *src,
- unsigned long addr)
+ unsigned long addr, struct vm_area_struct *vma)
{
int i;
might_sleep();
for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
cond_resched();
- copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+ copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
}
}
@@ -442,7 +442,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
}
spin_unlock(&mm->page_table_lock);
- copy_huge_page(new_page, old_page, address);
+ copy_huge_page(new_page, old_page, address, vma);
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & HPAGE_MASK);
diff --git a/mm/memory.c b/mm/memory.c
index bf6100236e6..c00bac66ce9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1441,7 +1441,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
/*
* If the source page was a PFN mapping, we don't have
@@ -1464,9 +1464,9 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(dst);
return;
-
+
}
- copy_user_highpage(dst, src, va);
+ copy_user_highpage(dst, src, va, vma);
}
/*
@@ -1577,7 +1577,7 @@ gotten:
new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!new_page)
goto oom;
- cow_user_page(new_page, old_page, address);
+ cow_user_page(new_page, old_page, address, vma);
}
/*
@@ -2200,7 +2200,7 @@ retry:
page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!page)
goto oom;
- copy_user_highpage(page, new_page, address);
+ copy_user_highpage(page, new_page, address, vma);
page_cache_release(new_page);
new_page = page;
anon = 1;
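The matching change outside mm/ is not shown here because the diffstat is limited to 'mm', but the generic fallback for architectures that do not supply their own copy_user_highpage() is threaded through in the same way. Roughly, and written from memory of that era's interface rather than copied from include/linux/highmem.h, the fallback just accepts and ignores the new parameter behind the usual __HAVE_ARCH_COPY_USER_HIGHPAGE-style override hook:

/* Sketch of the generic fallback; treat the details as illustrative. */
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
				      unsigned long vaddr,
				      struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);	/* vma is unused here */
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
#endif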