author     Oleg Nesterov <oleg@redhat.com>  2015-11-05 18:48:14 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-11-05 19:34:48 -0800
commit     87e8827b37c0c391d9915d0dc6a06c9b5f9cac65 (patch)
tree       9164f29c1f758212177668b95eb3363bda501240 /mm/mmap.c
parent     9fd745d450e7e2b0d2f1b386b886e7d568b64404 (diff)
mm: fix the racy mm->locked_vm change in acct_stack_growth()

"mm->locked_vm += grow" and vm_stat_account() in acct_stack_growth() are not safe; multiple threads using the same ->mm can run this code at the same time while trying to expand different vmas under down_read(mmap_sem). This means that one of the "locked_vm += grow" updates can be lost and we can miss munlock_vma_pages_all() later.

Move this code into the caller(s) under mm->page_table_lock. All other updates to ->locked_vm hold mmap_sem for writing.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  12
1 file changed, 8 insertions, 4 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 3ec19b601be0..d1ac22485998 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2138,10 +2138,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
if (security_vm_enough_memory_mm(mm, grow))
return -ENOMEM;
- /* Ok, everything looks good - let it rip */
- if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
return 0;
}
@@ -2202,6 +2198,10 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
* against concurrent vma expansions.
*/
spin_lock(&vma->vm_mm->page_table_lock);
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
+ vm_stat_account(vma->vm_mm, vma->vm_flags,
+ vma->vm_file, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
anon_vma_interval_tree_post_update_vma(vma);
@@ -2273,6 +2273,10 @@ int expand_downwards(struct vm_area_struct *vma,
* against concurrent vma expansions.
*/
spin_lock(&vma->vm_mm->page_table_lock);
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
+ vm_stat_account(vma->vm_mm, vma->vm_flags,
+ vma->vm_file, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_start = address;
vma->vm_pgoff -= grow;