author	Christian Borntraeger <borntraeger@de.ibm.com>	2008-01-26 14:11:00 +0100
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-01-26 14:11:12 +0100
commit	5fd9c6e214547a32d3da6ee4284c79004d667bc8 (patch)
tree	def2e218393c9867e9db939f1d379c005895a912 /arch/s390/mm
parent	8ffd74a0924e4e04f6455eb2d2187a9564678d01 (diff)
[S390] Change vmalloc definitions
Currently the vmalloc area starts at a dynamic address that depends on
the memory size. There is also an 8MB security hole (a guard gap) after
the end of physical memory to catch out-of-bounds accesses.
We can simplify the code by putting the vmalloc area explicitly at the
top of the kernel mapping and setting the vmalloc size to a fixed value
of 128MB/128GB for 31-bit/64-bit systems. Part of the vmalloc area will
be used for the vmem_map. This leaves an area of 96MB/1GB for normal
vmalloc allocations.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
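The diffstat below is limited to arch/s390/mm, so the companion header
changes are not shown here. As a rough illustration of the fixed layout
the message describes, here is a minimal sketch of the kind of
definitions involved; the 31-bit addresses and exact macro bodies are
assumptions for illustration, not the committed values:

/*
 * Illustrative sketch: a fixed vmalloc/vmem_map layout in the spirit of
 * this patch. The real definitions live in include/asm-s390/pgtable.h,
 * outside this diffstat; all numbers below are assumed, not committed.
 */
#define VMALLOC_START	0x78000000UL	/* identity mapping must end below this */
#define VMALLOC_END	0x7e000000UL	/* leaves 96MB of normal vmalloc space */
#define VMEM_MAP_MAX	0x80000000UL	/* top of the 31-bit kernel mapping */

/* The page array (vmem_map) occupies the top of the vmalloc area. */
#define VMEM_MAP	((struct page *) VMALLOC_END)
/* One struct page per physical page below VMALLOC_START. */
#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))

With a fixed VMALLOC_START the identity mapping can never grow into the
vmalloc area, which is exactly what the new range check in
insert_memory_segment() below enforces.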
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/vmem.c	20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index fb9c5a85aa5..ee625c8c3b2 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -15,10 +15,6 @@
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
-
-static struct page *vmem_map;
 static DEFINE_MUTEX(vmem_mutex);
 
 struct memory_segment {
@@ -188,8 +184,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = vmem_map + PFN_DOWN(start);
-	map_end = vmem_map + PFN_DOWN(start + size);
+	map_start = VMEM_MAP + PFN_DOWN(start);
+	map_end = VMEM_MAP + PFN_DOWN(start + size);
 
 	start_addr = (unsigned long) map_start & PAGE_MASK;
 	end_addr = PFN_ALIGN((unsigned long) map_end);
@@ -254,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
 {
 	struct memory_segment *tmp;
 
-	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	if (seg->start + seg->size >= VMALLOC_START ||
 	    seg->start + seg->size < seg->start)
 		return -ERANGE;
 
@@ -357,17 +353,15 @@ out:
 
 /*
  * map whole physical memory to virtual memory (identity mapping)
+ * we reserve enough space in the vmalloc area for vmemmap to hotplug
+ * additional memory segments.
  */
 void __init vmem_map_init(void)
 {
-	unsigned long map_size;
 	int i;
 
-	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
-	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
-	vmem_map = (struct page *) vmalloc_end;
-	NODE_DATA(0)->node_mem_map = vmem_map;
-
+	BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
+	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
 		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
 }
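In the vmem_add_mem_map() hunk, the map_start/map_end arithmetic
resolves a physical range to its slice of the VMEM_MAP array and rounds
that slice outward to whole pages. A stand-alone sketch of the same
calculation follows; the helper name vmem_map_span is hypothetical, not
part of the patch:

#include <linux/mm.h>		/* struct page */
#include <linux/pfn.h>		/* PFN_DOWN, PFN_ALIGN */
#include <asm/page.h>		/* PAGE_MASK */
#include <asm/pgtable.h>	/* VMEM_MAP */

/*
 * Compute the page-aligned virtual span of VMEM_MAP entries backing the
 * physical range [start, start + size), mirroring vmem_add_mem_map().
 */
static void vmem_map_span(unsigned long start, unsigned long size,
			  unsigned long *start_addr, unsigned long *end_addr)
{
	struct page *map_start = VMEM_MAP + PFN_DOWN(start);
	struct page *map_end = VMEM_MAP + PFN_DOWN(start + size);

	/* Round down/up to whole pages so the span can be mapped 1:1. */
	*start_addr = (unsigned long) map_start & PAGE_MASK;
	*end_addr = PFN_ALIGN((unsigned long) map_end);
}

Rounding map_start down with PAGE_MASK and map_end up with PFN_ALIGN
guarantees that every struct page the new segment touches lies on a
fully mapped page of the array.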