Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/mmap.c                10
-rw-r--r--  arch/i386/mm/hugetlbpage.c        34
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c       34
-rw-r--r--  arch/sh/kernel/sys_sh.c            8
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c    8
-rw-r--r--  arch/x86_64/ia32/ia32_aout.c       1
-rw-r--r--  arch/x86_64/kernel/sys_x86_64.c    9
7 files changed, 93 insertions, 11 deletions
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 32c4b0e35b3..3de7f84b53c 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -73,7 +73,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vma || addr + len <= vma->vm_start))
return addr;
}
- start_addr = addr = mm->free_area_cache;
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ }
full_search:
if (do_align)
@@ -90,6 +95,7 @@ full_search:
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -101,6 +107,8 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
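
The arm hunk above, and the sh, sparc64, ppc64 and x86_64 bottom-up hunks further down, all add the same bookkeeping around free_area_cache: remember the largest hole skipped below the cache point, and restart the scan from the bottom when a request would fit into it. The following is a condensed sketch of that bottom-up policy, not the code of any one architecture; the sketch_unmapped_area_bottomup() name is made up, and per-arch details (COLOUR_ALIGN/HPAGE alignment, the MAP_32BIT window, hugepage range checks) are left out. The sh, sparc64 and x86_64 hunks express the restart condition as len <= mm->cached_hole_size before reading free_area_cache, which is equivalent.

	static unsigned long
	sketch_unmapped_area_bottomup(struct mm_struct *mm, unsigned long len)
	{
		struct vm_area_struct *vma;
		unsigned long start_addr, addr;

		if (len > mm->cached_hole_size) {
			/* no gap below the cache point fits: resume where we left off */
			start_addr = addr = mm->free_area_cache;
		} else {
			/* a big-enough gap was skipped earlier: rescan from the bottom */
			start_addr = addr = TASK_UNMAPPED_BASE;
			mm->cached_hole_size = 0;
		}

	full_search:
		for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
			if (TASK_SIZE - len < addr) {
				/* ran off the top: retry once from the bottom, then give up */
				if (start_addr != TASK_UNMAPPED_BASE) {
					start_addr = addr = TASK_UNMAPPED_BASE;
					mm->cached_hole_size = 0;
					goto full_search;
				}
				return -ENOMEM;
			}
			if (!vma || addr + len <= vma->vm_start) {
				/* found a gap: remember where the next search should start */
				mm->free_area_cache = addr + len;
				return addr;
			}
			/* remember the largest hole passed over so far */
			if (addr + mm->cached_hole_size < vma->vm_start)
				mm->cached_hole_size = vma->vm_start - addr;
			addr = vma->vm_end;
		}
	}
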
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c
index 5aa06001a4b..3b099f32b94 100644
--- a/arch/i386/mm/hugetlbpage.c
+++ b/arch/i386/mm/hugetlbpage.c
@@ -140,7 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
struct vm_area_struct *vma;
unsigned long start_addr;
- start_addr = mm->free_area_cache;
+ if (len > mm->cached_hole_size) {
+ start_addr = mm->free_area_cache;
+ } else {
+ start_addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ }
full_search:
addr = ALIGN(start_addr, HPAGE_SIZE);
@@ -154,6 +159,7 @@ full_search:
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -162,6 +168,8 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
addr = ALIGN(vma->vm_end, HPAGE_SIZE);
}
}
@@ -173,12 +181,17 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev_vma;
unsigned long base = mm->mmap_base, addr = addr0;
+ unsigned long largest_hole = mm->cached_hole_size;
int first_time = 1;
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
mm->free_area_cache = base;
+ if (len <= largest_hole) {
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
try_again:
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
@@ -199,13 +212,21 @@ try_again:
* vma->vm_start, use it:
*/
if (addr + len <= vma->vm_start &&
- (!prev_vma || (addr >= prev_vma->vm_end)))
+ (!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- else
+ mm->cached_hole_size = largest_hole;
+ return (mm->free_area_cache = addr);
+ } else {
/* pull free_area_cache down to the first hole */
- if (mm->free_area_cache == vma->vm_end)
+ if (mm->free_area_cache == vma->vm_end) {
mm->free_area_cache = vma->vm_start;
+ mm->cached_hole_size = largest_hole;
+ }
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+ largest_hole = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = (vma->vm_start - len) & HPAGE_MASK;
@@ -218,6 +239,7 @@ fail:
*/
if (first_time) {
mm->free_area_cache = base;
+ largest_hole = 0;
first_time = 0;
goto try_again;
}
@@ -228,6 +250,7 @@ fail:
* allocations.
*/
mm->free_area_cache = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = ~0UL;
addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
len, pgoff, flags);
@@ -235,6 +258,7 @@ fail:
* Restore the topdown base:
*/
mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
return addr;
}
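
The hugetlb top-down path changed above (and almost identically in the ppc64 hunks below) keeps the running maximum in a local largest_hole and only copies it into mm->cached_hole_size at points where the value is known to be meaningful: on success, or when free_area_cache is pulled down past a vma it was resting on. A condensed sketch of that policy follows, again with a made-up function name and without the HPAGE alignment, the try_again retry or the bottom-up fallback.

	static unsigned long
	sketch_unmapped_area_topdown(struct mm_struct *mm, unsigned long len)
	{
		struct vm_area_struct *vma, *prev_vma;
		unsigned long base = mm->mmap_base, addr;
		unsigned long largest_hole = mm->cached_hole_size;

		/* don't allow allocations above the current base */
		if (mm->free_area_cache > base)
			mm->free_area_cache = base;

		if (len <= largest_hole) {
			/* a gap at least this big exists somewhere above the
			 * cache point: restart from the top so it can be found */
			largest_hole = 0;
			mm->free_area_cache = base;
		}

		/* make sure it can fit in the remaining address space */
		if (mm->free_area_cache < len)
			return -ENOMEM;	/* the real code retries from base first,
					 * then falls back to the bottom-up search */
		addr = mm->free_area_cache - len;

		do {
			vma = find_vma_prev(mm, addr, &prev_vma);
			if (!vma || (addr + len <= vma->vm_start &&
				     (!prev_vma || addr >= prev_vma->vm_end))) {
				/* publish both hints: the address and the hole size */
				mm->cached_hole_size = largest_hole;
				return (mm->free_area_cache = addr);
			}
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
			/* remember the largest hole we saw so far */
			if (addr + largest_hole < vma->vm_start)
				largest_hole = vma->vm_start - addr;
			/* try just below the current vma->vm_start */
			addr = vma->vm_start - len;
		} while (len <= vma->vm_start);

		return -ENOMEM;	/* same retry/fallback as above in the real code */
	}

The ~0UL stores in the fallback paths mark the hint as untrustworthy: any length compares as <= ~0UL, so the next search in either direction restarts from its base rather than from whatever the fallback left behind in free_area_cache.
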
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index b4ab766f598..fdcfe97c75c 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -292,7 +292,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
&& !is_hugepage_only_range(mm, addr,len))
return addr;
}
- start_addr = addr = mm->free_area_cache;
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ }
full_search:
vma = find_vma(mm, addr);
@@ -316,6 +321,8 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
vma = vma->vm_next;
}
@@ -323,6 +330,7 @@ full_search:
/* Make sure we didn't miss any holes */
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -344,6 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct vm_area_struct *vma, *prev_vma;
struct mm_struct *mm = current->mm;
unsigned long base = mm->mmap_base, addr = addr0;
+ unsigned long largest_hole = mm->cached_hole_size;
int first_time = 1;
/* requested length too big for entire address space */
@@ -364,6 +373,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
+ if (len <= largest_hole) {
+ largest_hole = 0;
+ mm->free_area_cache = base;
+ }
try_again:
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
@@ -392,13 +405,21 @@ hugepage_recheck:
* vma->vm_start, use it:
*/
if (addr+len <= vma->vm_start &&
- (!prev_vma || (addr >= prev_vma->vm_end)))
+ (!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
- return (mm->free_area_cache = addr);
- else
+ mm->cached_hole_size = largest_hole;
+ return (mm->free_area_cache = addr);
+ } else {
/* pull free_area_cache down to the first hole */
- if (mm->free_area_cache == vma->vm_end)
+ if (mm->free_area_cache == vma->vm_end) {
mm->free_area_cache = vma->vm_start;
+ mm->cached_hole_size = largest_hole;
+ }
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + largest_hole < vma->vm_start)
+ largest_hole = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
@@ -411,6 +432,7 @@ fail:
*/
if (first_time) {
mm->free_area_cache = base;
+ largest_hole = 0;
first_time = 0;
goto try_again;
}
@@ -421,11 +443,13 @@ fail:
* allocations.
*/
mm->free_area_cache = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = ~0UL;
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
/*
* Restore the topdown base:
*/
mm->free_area_cache = base;
+ mm->cached_hole_size = ~0UL;
return addr;
}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index df5ac294c37..917b2f32f26 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -79,6 +79,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vma || addr + len <= vma->vm_start))
return addr;
}
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ }
if (flags & MAP_PRIVATE)
addr = PAGE_ALIGN(mm->free_area_cache);
else
@@ -95,6 +99,7 @@ full_search:
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -106,6 +111,9 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
addr = vma->vm_end;
if (!(flags & MAP_PRIVATE))
addr = COLOUR_ALIGN(addr);
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 0077f02f4b3..5f8c822a2b4 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -84,6 +84,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
return addr;
}
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ }
start_addr = addr = mm->free_area_cache;
task_size -= len;
@@ -103,6 +107,7 @@ full_search:
if (task_size < addr) {
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -114,6 +119,9 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
addr = vma->vm_end;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
diff --git a/arch/x86_64/ia32/ia32_aout.c b/arch/x86_64/ia32/ia32_aout.c
index 1965efc974d..c12edf5d97f 100644
--- a/arch/x86_64/ia32/ia32_aout.c
+++ b/arch/x86_64/ia32/ia32_aout.c
@@ -312,6 +312,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ current->mm->cached_hole_size = 0;
set_mm_counter(current->mm, rss, 0);
current->mm->mmap = NULL;
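
The one-line a.out loader change above is the initialisation side of the scheme: wherever free_area_cache is reset for a new address-space layout, cached_hole_size is reset alongside it so the two hints stay consistent. Something like the following hypothetical helper captures the pairing; the real code simply assigns the two fields directly.

	/* Hypothetical helper, for illustration only: the actual loaders
	 * assign the two fields directly when an mm's layout is set up. */
	static inline void reset_unmapped_area_hints(struct mm_struct *mm,
						     unsigned long base)
	{
		mm->free_area_cache = base;	/* next search starts here */
		mm->cached_hole_size = 0;	/* no skipped hole is known yet */
	}
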
diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c
index d9798dd433f..cc7821c6885 100644
--- a/arch/x86_64/kernel/sys_x86_64.c
+++ b/arch/x86_64/kernel/sys_x86_64.c
@@ -105,6 +105,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
(!vma || addr + len <= vma->vm_start))
return addr;
}
+ if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
+ && len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = begin;
+ }
addr = mm->free_area_cache;
if (addr < begin)
addr = begin;
@@ -120,6 +125,7 @@ full_search:
*/
if (start_addr != begin) {
start_addr = addr = begin;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
@@ -131,6 +137,9 @@ full_search:
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
addr = vma->vm_end;
}
}