author     Hugh Dickins <hugh.dickins@tiscali.co.uk>        2009-09-21 17:03:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-22 07:17:40 -0700
commit     6e919717c82c5773ac671816c8392c70d261685f (patch)
tree       76e22da3ba5000e4ea408315723cc67f3e4b6352 /mm/mlock.c
parent     58fa879e1e640a1856f736b418984ebeccee1c95 (diff)
mm: m(un)lock avoid ZERO_PAGE
I'm still reluctant to clutter __get_user_pages() with another flag, just
to avoid touching ZERO_PAGE count in mlock(); though we can add that later
if it shows up as an issue in practice.

But when mlocking, we can test page->mapping slightly earlier, to avoid
the potentially bouncy rescheduling of lock_page on ZERO_PAGE - mlock
didn't lock_page in olden ZERO_PAGE days, so we might have regressed.

And when munlocking, it turns out that FOLL_DUMP coincidentally does
what's needed to avoid all updates to ZERO_PAGE, so use that here also.

Plus add comment suggested by KAMEZAWA Hiroyuki.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Rik van Riel <riel@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@suse.de>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
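For context, here is a minimal user-space sketch (not part of this patch) of the
situation the message describes: a read-only anonymous mapping whose untouched
pages are backed by the shared ZERO_PAGE on kernels of this era, so that mlock()
and munlock() drive the kernel paths changed below. The mapping size and the
page-touch loop are illustrative assumptions, not anything taken from the patch.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t len = 16 * (size_t)pagesize;

	/* Read-only anonymous mapping: read faults map the shared zero page. */
	char *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	volatile char sink = 0;
	for (size_t off = 0; off < len; off += (size_t)pagesize)
		sink ^= p[off];		/* touch each page without writing */
	(void)sink;

	/* mlock() walks the range via __mlock_vma_pages_range(). */
	if (mlock(p, len))
		perror("mlock");

	/* munlock() walks it via munlock_vma_pages_range(). */
	if (munlock(p, len))
		perror("munlock");

	munmap(p, len);
	return 0;
}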
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--  mm/mlock.c  49
1 file changed, 36 insertions, 13 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 22041aa9f5c..bd6f0e466f6 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -198,17 +198,26 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		for (i = 0; i < ret; i++) {
 			struct page *page = pages[i];
 
-			lock_page(page);
-			/*
-			 * Because we lock page here and migration is blocked
-			 * by the elevated reference, we need only check for
-			 * file-cache page truncation. This page->mapping
-			 * check also neatly skips over the ZERO_PAGE(),
-			 * though if that's common we'd prefer not to lock it.
-			 */
-			if (page->mapping)
-				mlock_vma_page(page);
-			unlock_page(page);
+			if (page->mapping) {
+				/*
+				 * That preliminary check is mainly to avoid
+				 * the pointless overhead of lock_page on the
+				 * ZERO_PAGE: which might bounce very badly if
+				 * there is contention. However, we're still
+				 * dirtying its cacheline with get/put_page:
+				 * we'll add another __get_user_pages flag to
+				 * avoid it if that case turns out to matter.
+				 */
+				lock_page(page);
+				/*
+				 * Because we lock page here and migration is
+				 * blocked by the elevated reference, we need
+				 * only check for file-cache page truncation.
+				 */
+				if (page->mapping)
+					mlock_vma_page(page);
+				unlock_page(page);
+			}
 			put_page(page);	/* ref from get_user_pages() */
 		}
 
@@ -309,9 +318,23 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 	vma->vm_flags &= ~VM_LOCKED;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		struct page *page = follow_page(vma, addr, FOLL_GET);
-		if (page) {
+		struct page *page;
+		/*
+		 * Although FOLL_DUMP is intended for get_dump_page(),
+		 * it just so happens that its special treatment of the
+		 * ZERO_PAGE (returning an error instead of doing get_page)
+		 * suits munlock very well (and if somehow an abnormal page
+		 * has sneaked into the range, we won't oops here: great).
+		 */
+		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+		if (page && !IS_ERR(page)) {
 			lock_page(page);
+			/*
+			 * Like in __mlock_vma_pages_range(),
+			 * because we lock page here and migration is
+			 * blocked by the elevated reference, we need
+			 * only check for file-cache page truncation.
+			 */
 			if (page->mapping)
 				munlock_vma_page(page);
 			unlock_page(page);