author     Yinghai Lu <yinghai@kernel.org>                   2012-10-22 16:35:18 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2012-10-31 10:02:56 -0700
commit     368845fde9e704288f370df57988767aab6042b4 (patch)
tree       0ba42c19ff563daa24bdc08c2c1d7cf3bbaabd7d
parent     c87ece5a158f3907193202d84f2a316a4c363768 (diff)
download   linux-3.10-368845fde9e704288f370df57988767aab6042b4.tar.gz
           linux-3.10-368845fde9e704288f370df57988767aab6042b4.tar.bz2
           linux-3.10-368845fde9e704288f370df57988767aab6042b4.zip
x86, mm: Trim memory in memblock to be page aligned
commit 6ede1fd3cb404c0016de6ac529df46d561bd558b upstream.

We will not map partial pages, so we need to make sure that memblock
allocation will not hand those bytes out.

Also, we will use for_each_mem_pfn_range() to loop over and map the
memory ranges, so keep them consistent.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/CAE9FiQVZirvaBMFYRfXMmWEcHbKSicQEHz4VAwUv0xFCk51ZNw@mail.gmail.com
Acked-by: Jacob Shin <jacob.shin@amd.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
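Illustration (not part of the patch): the trim is plain alignment arithmetic. Each region's base is rounded up and its end rounded down to the page boundary; if no full page is left, the region is dropped. A minimal user-space sketch of that arithmetic, assuming a 4 KiB page size and power-of-two helpers named round_up_to()/round_down_to() in place of the kernel's round_up()/round_down() macros:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel's round_up()/round_down(); both assume
 * 'align' is a power of two, which PAGE_SIZE always is. */
static uint64_t round_up_to(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

static uint64_t round_down_to(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);
}

int main(void)
{
	uint64_t page = 4096;			/* assumed PAGE_SIZE of 4 KiB */
	uint64_t base = 0x1234, end = 0x5678;	/* hypothetical unaligned region */
	uint64_t start = round_up_to(base, page);	/* 0x2000 */
	uint64_t stop  = round_down_to(end, page);	/* 0x5000 */

	if (start < stop)
		printf("trimmed to [0x%llx, 0x%llx)\n",
		       (unsigned long long)start, (unsigned long long)stop);
	else
		printf("region covers no full page, dropped\n");
	return 0;
}

So a region [0x1234, 0x5678) shrinks to [0x2000, 0x5000), and a region smaller than one page disappears entirely, which is what the memblock_trim_memory(PAGE_SIZE) call below does to memblock.memory.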
-rw-r--r--  arch/x86/kernel/e820.c    |  3
-rw-r--r--  include/linux/memblock.h  |  1
-rw-r--r--  mm/memblock.c             | 24
3 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 62d61e9976e..298dc000378 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1076,6 +1076,9 @@ void __init memblock_x86_fill(void)
 		memblock_add(ei->addr, ei->size);
 	}
 
+	/* throw away partial pages */
+	memblock_trim_memory(PAGE_SIZE);
+
 	memblock_dump_all();
 }
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 19dc455b4f3..c948c440ea2 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
diff --git a/mm/memblock.c b/mm/memblock.c
index 280d3d7835d..11e5bd174f3 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -908,6 +908,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+	int i;
+	phys_addr_t start, end, orig_start, orig_end;
+	struct memblock_type *mem = &memblock.memory;
+
+	for (i = 0; i < mem->cnt; i++) {
+		orig_start = mem->regions[i].base;
+		orig_end = mem->regions[i].base + mem->regions[i].size;
+		start = round_up(orig_start, align);
+		end = round_down(orig_end, align);
+
+		if (start == orig_start && end == orig_end)
+			continue;
+
+		if (start < end) {
+			mem->regions[i].base = start;
+			mem->regions[i].size = end - start;
+		} else {
+			memblock_remove_region(mem, i);
+			i--;
+		}
+	}
+}
 
 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
 {
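As an aside (illustrative only, not kernel code): a stand-alone toy model of the loop above, run on a plain array instead of struct memblock_type, makes the three per-region outcomes visible (left alone, shrunk, removed) and shows why i is decremented after a removal, so the entry that slides into slot i is not skipped:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct region { uint64_t base, size; };

static void trim(struct region *r, int *cnt, uint64_t align)
{
	for (int i = 0; i < *cnt; i++) {
		uint64_t start = (r[i].base + align - 1) & ~(align - 1);
		uint64_t end = (r[i].base + r[i].size) & ~(align - 1);

		if (start < end) {
			r[i].base = start;
			r[i].size = end - start;
		} else {
			/* region no longer covers a full page: compact the
			 * array and re-examine whatever moved into slot i */
			memmove(&r[i], &r[i + 1], (*cnt - i - 1) * sizeof(*r));
			(*cnt)--;
			i--;
		}
	}
}

int main(void)
{
	struct region r[] = {
		{ 0x1000, 0x3000 },	/* already aligned: untouched */
		{ 0x1234, 0x4444 },	/* unaligned ends: shrunk */
		{ 0x2100, 0x0200 },	/* smaller than a page: removed */
	};
	int cnt = 3;

	trim(r, &cnt, 4096);
	for (int i = 0; i < cnt; i++)
		printf("[0x%llx, 0x%llx)\n",
		       (unsigned long long)r[i].base,
		       (unsigned long long)(r[i].base + r[i].size));
	return 0;
}

The real code does the compaction through memblock_remove_region() rather than memmove(), but the index handling is the same.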