author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2016-03-09 14:08:32 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-03-09 15:43:42 -0800
commit    ac343e882a8377caef5fa75d9093cb77e9d4bf6d (patch)
tree      dbed5dafd7715f1fac5e83670eddc8f9a57548d8
parent    0a2e280b6d8ea4afef07c749070705d6af403b7f (diff)
memremap: check pfn validity before passing to pfn_to_page()
In memremap's helper function try_ram_remap(), we dereference a struct page pointer that was derived from a PFN that is known to be covered by a 'System RAM' iomem region, and is thus assumed to be a 'valid' PFN, i.e., a PFN that has a struct page associated with it and is covered by the kernel direct mapping.

However, the assumption that there is a 1:1 relation between the System RAM iomem region and the kernel direct mapping is not universally valid on all architectures, and on ARM and arm64, 'System RAM' may include regions for which pfn_valid() returns false.

Generally speaking, both __va() and pfn_to_page() should only ever be called on PFNs/physical addresses for which pfn_valid() returns true, so add that check to try_ram_remap().

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  kernel/memremap.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 60baf4d3401e..6cf54615a9c4 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -29,10 +29,10 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 
 static void *try_ram_remap(resource_size_t offset, size_t size)
 {
-	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
+	unsigned long pfn = PHYS_PFN(offset);
 
 	/* In the simple case just return the existing linear address */
-	if (!PageHighMem(page))
+	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
 		return __va(offset);
 	return NULL; /* fallback to ioremap_cache */
 }
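
For reference, a sketch of try_ram_remap() as it reads after this change, reconstructed from the hunk above; the surrounding file context, and any comments beyond what the hunk itself shows, are assumptions for illustration:

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	/* Convert the physical address to a page frame number. */
	unsigned long pfn = PHYS_PFN(offset);

	/*
	 * Only dereference the struct page if the PFN actually has one
	 * (pfn_valid()), then hand back the linear-map address for
	 * non-highmem pages.
	 */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

Because pfn_valid() is evaluated before pfn_to_page(), the struct page is never touched for PFNs that lack one, which is exactly the ARM/arm64 'System RAM' hazard described in the commit message.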