From 191c542442fdf53cc3c496c00be13367fd9cd42d Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 13 Feb 2012 03:58:52 +0000
Subject: mm: collapse security_vm_enough_memory() variants into a single function

Collapse security_vm_enough_memory() variants into a single function.

Signed-off-by: Al Viro
Signed-off-by: James Morris
---
 mm/mmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index 3f758c7f4c8..db05495d6d0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1235,7 +1235,7 @@ munmap_back:
 	 */
 	if (accountable_mapping(file, vm_flags)) {
 		charged = len >> PAGE_SHIFT;
-		if (security_vm_enough_memory(charged))
+		if (security_vm_enough_memory_mm(mm, charged))
 			return -ENOMEM;
 		vm_flags |= VM_ACCOUNT;
 	}
@@ -2169,7 +2169,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;

-	if (security_vm_enough_memory(len >> PAGE_SHIFT))
+	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;

 	/* Can we just expand an old private anonymous mapping? */
--
cgit v1.2.3


From 4040153087478993cbf0809f444400a3c808074c Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 13 Feb 2012 03:58:52 +0000
Subject: security: trim security.h

Trim security.h

Signed-off-by: Al Viro
Signed-off-by: James Morris
---
 mm/mmap.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index db05495d6d0..694a8625ab0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -935,6 +935,19 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 }
 #endif /* CONFIG_PROC_FS */

+/*
+ * If a hint addr is less than mmap_min_addr change hint to be as
+ * low as possible but still greater than mmap_min_addr
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+	hint &= PAGE_MASK;
+	if (((void *)hint != NULL) &&
+	    (hint < mmap_min_addr))
+		return PAGE_ALIGN(mmap_min_addr);
+	return hint;
+}
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
--
cgit v1.2.3


From 835ee7978cb47de94cf70232a694f19295d2993f Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 5 Mar 2012 06:39:47 +0000
Subject: VM_GROWS{UP,DOWN} shouldn't be set on shmem VMAs

Signed-off-by: Al Viro
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index 3f758c7f4c8..22e1a0b2f70 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1293,6 +1293,8 @@ munmap_back:
 		pgoff = vma->vm_pgoff;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
+		if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+			goto free_vma;
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
--
cgit v1.2.3

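For illustration, the round_hint_to_min() helper moved into mm/mmap.c by the "security: trim security.h" patch above is self-contained enough to run in user space. A minimal sketch, with PAGE_SIZE and the mmap_min_addr value reduced to stand-in constants invented for this demonstration (the kernel takes them from the architecture and from the vm.mmap_min_addr sysctl), and the kernel's (void *) NULL test written as a plain zero check:

  #include <stdio.h>

  #define PAGE_SIZE     4096UL
  #define PAGE_MASK     (~(PAGE_SIZE - 1))
  #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

  static unsigned long mmap_min_addr = 65536;   /* stand-in sysctl value */

  /* Mirrors the helper above: a non-NULL hint below mmap_min_addr is
   * bumped up to the first page boundary at or above mmap_min_addr. */
  static unsigned long round_hint_to_min(unsigned long hint)
  {
      hint &= PAGE_MASK;
      if (hint != 0 && hint < mmap_min_addr)
          return PAGE_ALIGN(mmap_min_addr);
      return hint;
  }

  int main(void)
  {
      printf("%lu\n", round_hint_to_min(0));      /* 0: NULL hint kept */
      printf("%lu\n", round_hint_to_min(5000));   /* 65536: raised */
      printf("%lu\n", round_hint_to_min(200000)); /* 196608: aligned, kept */
      return 0;
  }
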
From ce8fea7aa4ad9e3b40999a08622ef27c77159659 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Tue, 6 Mar 2012 12:28:52 -0800
Subject: mmap: EINVAL not ENOMEM when rejecting VM_GROWS

Currently the error is -ENOMEM when rejecting VM_GROWSDOWN|VM_GROWSUP
from shared anonymous: hoist the file case's -EINVAL up for both.

Signed-off-by: Hugh Dickins
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index 22e1a0b2f70..09ce2cae07c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1266,8 +1266,9 @@ munmap_back:
 	vma->vm_pgoff = pgoff;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);

+	error = -EINVAL;	/* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
 	if (file) {
-		error = -EINVAL;
 		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
 			goto free_vma;
 		if (vm_flags & VM_DENYWRITE) {
--
cgit v1.2.3


From 83cd904d271ba960c53f3adbb037f3486518f1e6 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka
Date: Sun, 4 Mar 2012 19:52:03 -0500
Subject: mm: fix find_vma_prev

Commit 6bd4837de96e ("mm: simplify find_vma_prev()") broke memory
management on PA-RISC.

After application of the patch, programs that allocate big arrays on
the stack crash with segfault, for example, this will crash if
compiled without optimization:

  int main()
  {
	char array[200000];
	array[199999] = 0;
	return 0;
  }

The reason is that PA-RISC has an up-growing stack and the stack is
usually the last memory area. In the above example, a page fault
happens above the stack.

Previously, if we passed too high an address to find_vma_prev, it
returned NULL and stored the last VMA in *pprev. After the "simplify
find_vma_prev" change, it stores NULL in *pprev. Consequently, the
stack area is not found and it is not expanded, as it used to be
before the change.

This patch restores the old behavior and makes find_vma_prev return
the last VMA in *pprev if the requested address is higher than the
address of any other VMA.

Signed-off-by: Mikulas Patocka
Acked-by: KOSAKI Motohiro
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index 09ce2cae07c..da15a79b144 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1608,7 +1608,6 @@ EXPORT_SYMBOL(find_vma);

 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
  */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1617,7 +1616,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 	struct vm_area_struct *vma;

 	vma = find_vma(mm, addr);
-	*pprev = vma ? vma->vm_prev : NULL;
+	if (vma) {
+		*pprev = vma->vm_prev;
+	} else {
+		struct rb_node *rb_node = mm->mm_rb.rb_node;
+		*pprev = NULL;
+		while (rb_node) {
+			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+			rb_node = rb_node->rb_right;
+		}
+	}
 	return vma;
 }
--
cgit v1.2.3

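The fallback added to find_vma_prev() above amounts to descending the rightmost spine of the VMA tree to reach the highest-addressed VMA. A user-space sketch of that descent, using a plain binary tree as a stand-in for the kernel's rb-tree (the node layout and addresses are invented here):

  #include <stdio.h>

  struct vma {
      unsigned long vm_start;
      struct vma *left, *right;   /* toy tree links, not rb_node */
  };

  /* Descend right from the root: the last node visited is the
   * highest-addressed VMA, which becomes *pprev when find_vma()
   * returned NULL for an address above every VMA. */
  static struct vma *highest_vma(struct vma *root)
  {
      struct vma *prev = NULL;

      while (root) {
          prev = root;
          root = root->right;
      }
      return prev;
  }

  int main(void)
  {
      struct vma low  = { 0x10000, NULL, NULL };
      struct vma high = { 0x70000, NULL, NULL };
      struct vma root = { 0x40000, &low, &high };

      printf("%#lx\n", highest_vma(&root)->vm_start);   /* 0x70000 */
      return 0;
  }

This is exactly the property the PA-RISC stack expansion relied on: the caller gets the last VMA back even when the faulting address lies beyond it.
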
From 853f5e264018113b1f96f05551b07a74b836c7fc Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 5 Mar 2012 14:03:47 -0500
Subject: VM: don't bother with feeding upper limit to tlb_finish_mmu() in exit_mmap()

no point, really - the only instance that cares about those arguments
of tlb_finish_mmu() is itanic, and there we explicitly check whether
it's called from exit_mmap() (i.e. whether ->fullmm is set), in which
case we ignore those arguments completely.

Signed-off-by: Al Viro
---
 mm/mmap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index da15a79b144..2b2b45eb816 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2253,7 +2253,7 @@ void exit_mmap(struct mm_struct *mm)
 	vm_unacct_memory(nr_accounted);

 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
-	tlb_finish_mmu(&tlb, 0, end);
+	tlb_finish_mmu(&tlb, 0, -1);

 	/*
 	 * Walk the list again, actually closing and freeing it,
--
cgit v1.2.3


From 6e8bb0193af3f308ef22817a5560422d33e58b90 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Mon, 5 Mar 2012 13:41:15 -0500
Subject: VM: make unmap_vmas() return void

same story - nobody uses the return value, and it's been pointless
since "mm: Remove i_mmap_lock lockbreak" went in.

Signed-off-by: Al Viro
---
 mm/mmap.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index 2b2b45eb816..9365a8fe370 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2224,7 +2224,6 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather tlb;
 	struct vm_area_struct *vma;
 	unsigned long nr_accounted = 0;
-	unsigned long end;

 	/* mm's last user has gone, and its about to be pulled down */
 	mmu_notifier_release(mm);
@@ -2249,7 +2248,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
+	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);

 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
--
cgit v1.2.3


From f44d21985eb6af7361d3785e26923355172147bd Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Wed, 21 Mar 2012 16:33:56 -0700
Subject: mm: do not reset cached_hole_size when vma is unmapped

In the current code, cached_hole_size is set to the maximum value if
the unmapped vma is below free_area_cache, so the next search will
start from the base address. Actually, we can keep cached_hole_size,
so that if the next required size is more than cached_hole_size, the
search can still start from free_area_cache.

Signed-off-by: Xiao Guangrong
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: Michal Hocko
Cc: Hillf Danton
Cc: Andrea Arcangeli
Cc: KAMEZAWA Hiroyuki
Cc: Rik van Riel
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index da15a79b144..4f31764d838 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1426,10 +1426,8 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
 	/*
 	 * Is this a new hole at the lowest possible address?
 	 */
-	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
 		mm->free_area_cache = addr;
-		mm->cached_hole_size = ~0UL;
-	}
 }
--
cgit v1.2.3

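The effect of keeping cached_hole_size can be shown in isolation. A minimal user-space sketch mirroring the post-patch arch_unmap_area(), with the two mm_struct fields and all addresses reduced to invented stand-in values:

  #include <stdio.h>

  /* Stand-ins for the two mm_struct fields involved. */
  struct mm {
      unsigned long free_area_cache;   /* where the next search starts */
      unsigned long cached_hole_size;  /* largest free hole known below it */
  };

  #define TASK_UNMAPPED_BASE 0x10000UL

  /* Post-patch behaviour: a newly freed region below free_area_cache
   * only lowers the search start; cached_hole_size is left intact, so
   * a later request larger than the remembered holes can still begin
   * at free_area_cache instead of rescanning from the base. */
  static void arch_unmap_area(struct mm *mm, unsigned long addr)
  {
      if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
          mm->free_area_cache = addr;
  }

  int main(void)
  {
      struct mm mm = { 0x40000, 0x2000 };

      arch_unmap_area(&mm, 0x20000);
      printf("cache=%#lx hole=%#lx\n",
             mm.free_area_cache, mm.cached_hole_size);
      /* prints cache=0x20000 hole=0x2000: the hole hint survives */
      return 0;
  }
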
Peter Anvin" Cc: Michal Hocko Cc: Hillf Danton Cc: Andrea Arcangeli Cc: KAMEZAWA Hiroyuki Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) (limited to 'mm/mmap.c') diff --git a/mm/mmap.c b/mm/mmap.c index 4f31764d838..9e0c0de2e7e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1442,7 +1442,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - unsigned long addr = addr0; + unsigned long addr = addr0, start_addr; /* requested length too big for entire address space */ if (len > TASK_SIZE) @@ -1466,22 +1466,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, mm->free_area_cache = mm->mmap_base; } +try_again: /* either no address requested or can't fit in requested address hole */ - addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; - /* make sure it can fit in the remaining address space */ - if (addr > len) { - vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); - } - - if (mm->mmap_base < len) - goto bottomup; - - addr = mm->mmap_base-len; + if (addr < len) + goto fail; + addr -= len; do { /* * Lookup failure means no vma is above this address, @@ -1501,7 +1493,21 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = vma->vm_start-len; } while (len < vma->vm_start); -bottomup: +fail: + /* + * if hint left us with no space for the requested + * mapping then try again: + * + * Note: this is different with the case of bottomup + * which does the fully line-search, but we use find_vma + * here that causes some holes skipped. + */ + if (start_addr != mm->mmap_base) { + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = 0; + goto try_again; + } + /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario -- cgit v1.2.3 From 40716e29243de46720e5773797791466c28904ec Mon Sep 17 00:00:00 2001 From: Steven Truelove Date: Wed, 21 Mar 2012 16:34:14 -0700 Subject: hugetlbfs: fix alignment of huge page requests When calling shmget() with SHM_HUGETLB, shmget aligns the request size to PAGE_SIZE, but this is not sufficient. Modify hugetlb_file_setup() to align requests to the huge page size, and to accept an address argument so that all alignment checks can be performed in hugetlb_file_setup(), rather than in its callers. Change newseg() and mmap_pgoff() to match the new prototype and eliminate a now redundant alignment check. 
From 40716e29243de46720e5773797791466c28904ec Mon Sep 17 00:00:00 2001
From: Steven Truelove
Date: Wed, 21 Mar 2012 16:34:14 -0700
Subject: hugetlbfs: fix alignment of huge page requests

When calling shmget() with SHM_HUGETLB, shmget aligns the request size
to PAGE_SIZE, but this is not sufficient.

Modify hugetlb_file_setup() to align requests to the huge page size,
and to accept an address argument so that all alignment checks can be
performed in hugetlb_file_setup(), rather than in its callers. Change
newseg() and mmap_pgoff() to match the new prototype and eliminate a
now redundant alignment check.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Steven Truelove
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index 9e0c0de2e7e..a19cc271e79 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1099,9 +1099,9 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		 * A dummy user value is used because we are not locking
 		 * memory so no accounting is necessary
 		 */
-		len = ALIGN(len, huge_page_size(&default_hstate));
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-						&user, HUGETLB_ANONHUGE_INODE);
+		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+						VM_NORESERVE, &user,
+						HUGETLB_ANONHUGE_INODE);
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 	}
--
cgit v1.2.3


From 88f6b4c32e531dc5b06bd05144f790847a1fdaeb Mon Sep 17 00:00:00 2001
From: Kautuk Consul
Date: Wed, 21 Mar 2012 16:34:16 -0700
Subject: mmap.c: fix comment for __insert_vm_struct()

The comment above __insert_vm_struct seems to suggest that this
function is also going to link the VMA with the anon_vma, but this is
not true. This function only links the VMA to the mm->mm_rb tree and
the mm->mmap linked list.

[akpm@linux-foundation.org: improve comment layout and text]
Signed-off-by: Kautuk Consul
Acked-by: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mmap.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'mm/mmap.c')

diff --git a/mm/mmap.c b/mm/mmap.c
index a19cc271e79..230f0bac06b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -451,9 +451,8 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }

 /*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
+ * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
+ * mm's list and rbtree. It has already been inserted into the prio_tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
--
cgit v1.2.3
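As a footnote to the hugetlbfs alignment fix above: the rounding moved into hugetlb_file_setup() is the usual power-of-two ALIGN() arithmetic. A standalone sketch, assuming a 2 MB huge page size for the example (the kernel reads the real size from the hstate):

  #include <stdio.h>

  /* Round x up to the next multiple of a, where a is a power of two. */
  #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
      unsigned long huge = 2UL << 20;   /* assumed 2 MB huge page */

      printf("%lu\n", ALIGN(1UL, huge));        /* 2097152 */
      printf("%lu\n", ALIGN(huge, huge));       /* 2097152 */
      printf("%lu\n", ALIGN(huge + 1, huge));   /* 4194304 */
      return 0;
  }

Aligning only to PAGE_SIZE, as shmget() used to, would leave requests like the middle case unchanged but let the last case through at 2097153 bytes, short of a full second huge page.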