author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-03-18 13:27:37 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-03-18 13:28:13 +0100
commit		0fb1d9bcbcf701a45835aa150c57ca54ea685bfa (patch)
tree		a2821e3d10918d4b76e6329da42a45cfbb9f19cd /arch
parent		f481bfafd36e621d6cbc62d4b25f74811410aef7 (diff)
[S390] make page table upgrade work again
Now that TASK_SIZE gives the current size of the address space, the upgrade of a 64-bit process from 3 to 4 page table levels needs to use the arch_mmap_check hook to catch large mmap lengths. The get_unmapped_area* functions need to check for -ENOMEM from arch_get_unmapped_area*, upgrade the page table and retry.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
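For illustration only (not part of this commit): the scenario the patch addresses is a 64-bit process whose address space starts with 3 page table levels (a 4TB limit) requesting a mapping large enough to need 4 levels (the 8PB limit, 1UL << 53). A minimal user-space sketch that would exercise the new upgrade-and-retry path on s390 follows; the 5TB size and MAP_NORESERVE are illustrative choices, and whether the call succeeds also depends on overcommit settings.

/*
 * Hypothetical test program, not part of this patch: request a mapping
 * larger than the initial 3-level address space limit so that the
 * arch_mmap_check()/-ENOMEM retry path has to upgrade the page table.
 * Assumes a 64-bit process; success also depends on overcommit settings.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 5UL << 40;	/* 5TB, beyond the 4TB 3-level limit */
	void *p;

	/* MAP_NORESERVE: no backing memory is committed for the mapping. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("mapped %zu bytes at %p\n", len, p);
	munmap(p, len);
	return 0;
}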
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/include/asm/mman.h	 5
-rw-r--r--	arch/s390/mm/mmap.c	44
2 files changed, 35 insertions(+), 14 deletions(-)
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 7839767d837..da01432e8f4 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -22,4 +22,9 @@
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+int s390_mmap_check(unsigned long addr, unsigned long len);
+#define arch_mmap_check(addr,len,flags) s390_mmap_check(addr,len)
+#endif
+
#endif /* __S390_MMAN_H__ */
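For context, arch_mmap_check() is an optional per-architecture hook consulted by the generic mmap path before an address is chosen; architectures that do not define the macro get a no-op. The standalone model below is a hypothetical sketch of that pattern (names, limits and the caller are illustrative only, not kernel source):

/* Hypothetical standalone model of the arch_mmap_check() hook pattern.
 * Assumes 64-bit unsigned long; limits are illustrative only. */
#include <stdio.h>

/* Stand-in for the s390 check declared above: a length that does not fit
 * the current 3-level limit would trigger a page table upgrade. */
static int s390_mmap_check_model(unsigned long addr, unsigned long len)
{
	(void)addr;
	if (len >= (1UL << 42))		/* pretend 4TB is the current limit */
		printf("would upgrade page table to 4 levels\n");
	return 0;			/* 0 = request may proceed */
}

/* An architecture opts in by defining the macro, as the header above does. */
#define arch_mmap_check(addr, len, flags) s390_mmap_check_model(addr, len)

/* Model of the generic caller: consult the hook, abort on non-zero. */
static long do_mmap_model(unsigned long addr, unsigned long len,
			  unsigned long flags)
{
	int error = arch_mmap_check(addr, len, flags);

	if (error)
		return error;
	return 0;	/* address selection would happen here */
}

int main(void)
{
	printf("rc = %ld\n", do_mmap_model(0, 1UL << 43, 0));
	return 0;
}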
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 346dd0c5cbd..e008d236cc1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -89,42 +89,58 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
#else
+int s390_mmap_check(unsigned long addr, unsigned long len)
+{
+	if (!test_thread_flag(TIF_31BIT) &&
+	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+		return crst_table_upgrade(current->mm, 1UL << 53);
+	return 0;
+}
+
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
+	unsigned long area;
	int rc;
-	addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
-	if (addr & ~PAGE_MASK)
-		return addr;
-	if (unlikely(mm->context.asce_limit < addr + len)) {
-		rc = crst_table_upgrade(mm, addr + len);
+	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+	if (!(area & ~PAGE_MASK))
+		return area;
+	if (area == -ENOMEM &&
+	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+		/* Upgrade the page table to 4 levels and retry. */
+		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
+		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
-	return addr;
+	return area;
}
static unsigned long
-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
				const unsigned long len, const unsigned long pgoff,
				const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0;
+	unsigned long area;
	int rc;
-	addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
-	if (addr & ~PAGE_MASK)
-		return addr;
-	if (unlikely(mm->context.asce_limit < addr + len)) {
-		rc = crst_table_upgrade(mm, addr + len);
+	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+	if (!(area & ~PAGE_MASK))
+		return area;
+	if (area == -ENOMEM &&
+	    !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) {
+		/* Upgrade the page table to 4 levels and retry. */
+		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
+		area = arch_get_unmapped_area_topdown(filp, addr, len,
+						      pgoff, flags);
	}
-	return addr;
+	return area;
}
/*
* This function, called very early during the creation of a new