author    David Vrabel <david.vrabel@citrix.com>  2015-02-19 13:06:53 +0000
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-02-19 15:04:49 -0800
commit    e3a1f6cac1fe20e7ac01d96c914c25726723a64e (patch)
tree      b0041715d2f98db2dda0e01e880385212a4723b4 /arch/x86
parent    2b9fb532d4168e8974fe49709e2c4c8d5352a64c (diff)
x86: pte_protnone() and pmd_protnone() must check entry is not present
Since _PAGE_PROTNONE aliases _PAGE_GLOBAL it is only valid if
_PAGE_PRESENT is clear.  Make pte_protnone() and pmd_protnone() check
for this.

This fixes a 64-bit Xen PV guest regression introduced by 8a0516ed8b90
("mm: convert p[te|md]_numa users to p[te|md]_protnone_numa").  Any
userspace process would endlessly fault.

In a 64-bit PV guest, userspace page table entries have _PAGE_GLOBAL
set by the hypervisor.  This meant that any fault on a present
userspace entry (e.g., a write to a read-only mapping) would be
misinterpreted as a NUMA hinting fault and the fault would not be
correctly handled, resulting in the access endlessly faulting.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
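To make the aliasing concrete, here is a minimal user-space C sketch
(not kernel code): the flag values are simplified stand-ins for the
real x86 definitions, where _PAGE_PRESENT is bit 0 and _PAGE_PROTNONE
reuses the _PAGE_GLOBAL bit (bit 8), and pte_protnone_old() and
pte_protnone_fixed() are hypothetical names mirroring the before and
after of this patch:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the x86 flag bits: _PAGE_PRESENT is bit 0
 * and _PAGE_PROTNONE reuses the _PAGE_GLOBAL bit (bit 8), so the bit
 * only means "prot none" when the entry is not present. */
#define _PAGE_PRESENT  0x001UL
#define _PAGE_GLOBAL   0x100UL
#define _PAGE_PROTNONE _PAGE_GLOBAL

/* Pre-patch check: any entry with the bit set looks like prot none. */
static int pte_protnone_old(uint64_t flags)
{
	return (flags & _PAGE_PROTNONE) != 0;
}

/* Post-patch check: the bit only counts when _PAGE_PRESENT is clear. */
static int pte_protnone_fixed(uint64_t flags)
{
	return (flags & (_PAGE_PROTNONE | _PAGE_PRESENT)) == _PAGE_PROTNONE;
}

int main(void)
{
	/* A present userspace entry with _PAGE_GLOBAL set, as the
	 * hypervisor creates for a 64-bit Xen PV guest. */
	uint64_t xen_pv_pte = _PAGE_PRESENT | _PAGE_GLOBAL;

	printf("old check:   %d  (misreads a present entry as prot none)\n",
	       pte_protnone_old(xen_pv_pte));
	printf("fixed check: %d\n", pte_protnone_fixed(xen_pv_pte));
	return 0;
}

Built with a plain cc, the old check reports 1 for the present global
entry while the fixed check reports 0, which is exactly why the fault
path was being misrouted into NUMA hinting before this patch.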
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/pgtable.h | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 67fc3d2b0aab..a0c35bf6cb92 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -476,12 +476,14 @@ static inline int pmd_present(pmd_t pmd)
  */
 static inline int pte_protnone(pte_t pte)
 {
-	return pte_flags(pte) & _PAGE_PROTNONE;
+	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+		== _PAGE_PROTNONE;
 }
 
 static inline int pmd_protnone(pmd_t pmd)
 {
-	return pmd_flags(pmd) & _PAGE_PROTNONE;
+	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+		== _PAGE_PROTNONE;
 }
 
 #endif /* CONFIG_NUMA_BALANCING */