 arch/xtensa/include/asm/pgtable.h |  4 ++++
 arch/xtensa/kernel/syscall.c      | 41 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions, 0 deletions
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index c90ea5bfa1b..d7546c94da5 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -410,6 +410,10 @@ typedef pte_t *pte_addr_t;
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
+/* We provide our own get_unmapped_area to cope with
+ * SHM area cache aliasing for userland.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #include <asm-generic/pgtable.h>
 
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 54fa8425cee..5d3f7a119ed 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -36,6 +36,10 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
 #include <uapi/asm/unistd.h>
 };
 
+#define COLOUR_ALIGN(addr, pgoff) \
+	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
+	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
 	unsigned long ret;
@@ -52,3 +56,40 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct vm_area_struct *vmm;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+	if (!addr)
+		addr = TASK_UNMAPPED_BASE;
+
+	if (flags & MAP_SHARED)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+		/* At this point: (!vmm || addr < vmm->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+		if (!vmm || addr + len <= vmm->vm_start)
+			return addr;
+		addr = vmm->vm_end;
+		if (flags & MAP_SHARED)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
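
The heart of the patch is the COLOUR_ALIGN macro: it rounds the candidate address up to the next SHMLBA boundary and then adds the cache-colour offset implied by the file offset, so every shared mapping of the same pages lands on the same cache colour and virtual aliases cannot hold stale copies of one physical line. Below is a minimal userspace sketch of that arithmetic, separate from the patch itself; the SHMLBA and PAGE_SHIFT values are illustrative assumptions (the real ones come from the Xtensa headers and depend on the cache geometry).

	/* Standalone illustration of the colour-alignment arithmetic.
	 * PAGE_SHIFT and SHMLBA are assumed values for the example only. */
	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
	#define SHMLBA		(4UL << PAGE_SHIFT)	/* assumed 16 KiB alias span */

	#define COLOUR_ALIGN(addr, pgoff) \
		((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
		 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

	int main(void)
	{
		unsigned long addr = 0x20001234;	/* arbitrary candidate address */
		unsigned long pgoff = 3;		/* file offset, in pages */
		unsigned long aligned = COLOUR_ALIGN(addr, pgoff);

		/* The result is never below addr, and its offset within the
		 * SHMLBA span equals the offset implied by pgoff, so the new
		 * mapping shares a colour with other mappings of the file. */
		printf("addr    = %#lx\n", addr);
		printf("aligned = %#lx (colour %#lx, wanted %#lx)\n",
		       aligned, aligned & (SHMLBA - 1),
		       (pgoff << PAGE_SHIFT) & (SHMLBA - 1));
		return 0;
	}

With the assumed constants, 0x20001234 rounds up to 0x20004000 and the pgoff colour 0x3000 is added, giving 0x20007000. arch_get_unmapped_area() then applies this alignment both to the initial hint and after each occupied VMA it skips in its first-fit search, so MAP_SHARED requests only ever return colour-correct addresses.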