author     Paolo Bonzini <pbonzini@redhat.com>  2015-12-02 13:00:54 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2015-12-02 13:12:30 +0100
commit     0c2d70c448b7853a91cfa63659aa3cc6630fb9be
tree       7327dbaa1da19f0e397dfef405a45755ff61a92c
parent     21a24302e85024dd7b2a151158adbc1f5dc5c4dd
translate-all: ensure host page mask is always extended with 1's
Anthony reported that >4GB guests on Xen with 32bit QEMU broke after
commit 4ed023c ("Round up RAMBlock sizes to host page sizes", 2015-11-05).

In that patch sizes are masked against qemu_host_page_size/mask which
are uintptr_t, and thus 32bit on a 32bit QEMU, even though the ram space
might be bigger than 4GB on Xen.

Since ram_addr_t is not available on user-mode emulation targets, ensure
that we get a sign extension when masking away the low bits of the address.
Remove the ~10 year old scary comment that the type of these variables
is probably wrong, with another equally scary comment.  The new comment
however does not have "???" in it, which is arguably an improvement.

For completeness use the alignment macros in linux-user and bsd-user
instead of manually doing an &.  linux-user and bsd-user are not affected
by the Xen issue, however.

Reviewed-by: Juan Quintela <quintela@redhat.com>
Reported-by: Anthony PERARD <anthony.perard@citrix.com>
Fixes: 4ed023ce2a39ab5812d33cf4d819def168965a7f
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
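To see the truncation the message describes, here is a small standalone C sketch
(not part of the patch; the address, page size and mask variables below are
made-up stand-ins for qemu_host_page_size/mask): masking a 64-bit address with
an unsigned 32-bit mask zero-extends the mask and drops the bits above 4GB,
while a signed mask sign-extends and keeps them.

/* Standalone illustration only: on a 32-bit host, uintptr_t is 32 bits wide,
 * so the old unsigned mask zero-extends and loses the bits above 4GB; the
 * signed mask sign-extends and preserves them.  Values are made-up examples.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x140001234ULL;           /* a guest ram address above 4GB */
    uint32_t page_size = 4096;                /* stand-in for qemu_host_page_size */

    uint32_t old_mask = ~(page_size - 1);     /* old scheme: zero-extends to 64 bits */
    int32_t  new_mask = -(int32_t)page_size;  /* new scheme: sign-extends to 64 bits */

    printf("old mask: 0x%" PRIx64 "\n", addr & old_mask); /* 0x1000: high bits lost */
    printf("new mask: 0x%" PRIx64 "\n", addr & new_mask); /* 0x140001000: preserved */
    return 0;
}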
-rw-r--r--  bsd-user/elfload.c      | 3 +--
-rw-r--r--  include/exec/cpu-all.h  | 8 +++++---
-rw-r--r--  linux-user/elfload.c    | 3 +--
-rw-r--r--  linux-user/mmap.c       | 4 +---
-rw-r--r--  translate-all.c         | 6 +++---
-rw-r--r--  translate-common.c      | 2 +-
6 files changed, 12 insertions(+), 14 deletions(-)
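Before the patch body, a minimal sketch of how REAL_HOST_PAGE_ALIGN behaves once
the mask is a signed intptr_t, as the diff below makes it.  The macro and
variable names mirror include/exec/cpu-all.h, but this standalone harness and
the 4 KiB page size are assumptions for illustration only.

/* Standalone sketch of the alignment macro with the new signed mask.
 * Names mirror include/exec/cpu-all.h; this harness is not from the tree.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t qemu_real_host_page_size;
static intptr_t qemu_real_host_page_mask;      /* signed, as in the patch */

#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
                                    qemu_real_host_page_mask)

int main(void)
{
    qemu_real_host_page_size = 4096;           /* assumed 4 KiB host page */
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;

    /* Rounding up a 64-bit length keeps the bits above 4GB even when
     * intptr_t is only 32 bits wide, because the mask sign-extends. */
    uint64_t len = 0x140000a00ULL;
    printf("0x%" PRIx64 "\n", (uint64_t)REAL_HOST_PAGE_ALIGN(len)); /* 0x140001000 */
    return 0;
}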
diff --git a/bsd-user/elfload.c b/bsd-user/elfload.c
index 351aab12e7..59a7bdf0cc 100644
--- a/bsd-user/elfload.c
+++ b/bsd-user/elfload.c
@@ -740,8 +740,7 @@ static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
size must be known */
if (qemu_real_host_page_size < qemu_host_page_size) {
abi_ulong end_addr, end_addr1;
- end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
- ~(qemu_real_host_page_size - 1);
+ end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
end_addr = HOST_PAGE_ALIGN(elf_bss);
if (end_addr1 < end_addr) {
mmap((void *)g2h(end_addr1), end_addr - end_addr1,
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index f9998b9732..83b1781afc 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -174,11 +174,13 @@ extern unsigned long reserved_va;
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
-/* ??? These should be the larger of uintptr_t and target_ulong. */
+/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
+ * when intptr_t is 32-bit and we are aligning a long long.
+ */
extern uintptr_t qemu_real_host_page_size;
-extern uintptr_t qemu_real_host_page_mask;
+extern intptr_t qemu_real_host_page_mask;
extern uintptr_t qemu_host_page_size;
-extern uintptr_t qemu_host_page_mask;
+extern intptr_t qemu_host_page_mask;
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index d68f5a16ca..8b17c0e94b 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1478,8 +1478,7 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
host_start = (uintptr_t) g2h(elf_bss);
host_end = (uintptr_t) g2h(last_bss);
- host_map_start = (host_start + qemu_real_host_page_size - 1);
- host_map_start &= -qemu_real_host_page_size;
+ host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
if (host_map_start < host_end) {
void *p = mmap((void *)host_map_start, host_end - host_map_start,
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 5606bcd164..7b459d5100 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -444,9 +444,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
/* If so, truncate the file map at eof aligned with
the hosts real pagesize. Additional anonymous maps
will be created beyond EOF. */
- len = (sb.st_size - offset);
- len += qemu_real_host_page_size - 1;
- len &= ~(qemu_real_host_page_size - 1);
+ len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
}
}
diff --git a/translate-all.c b/translate-all.c
index a940bd2e5e..042a8576ac 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -118,7 +118,7 @@ typedef struct PageDesc {
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
uintptr_t qemu_host_page_size;
-uintptr_t qemu_host_page_mask;
+intptr_t qemu_host_page_mask;
/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];
@@ -326,14 +326,14 @@ void page_size_init(void)
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
qemu_real_host_page_size = getpagesize();
- qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
+ qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
if (qemu_host_page_size == 0) {
qemu_host_page_size = qemu_real_host_page_size;
}
if (qemu_host_page_size < TARGET_PAGE_SIZE) {
qemu_host_page_size = TARGET_PAGE_SIZE;
}
- qemu_host_page_mask = ~(qemu_host_page_size - 1);
+ qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
static void page_init(void)
diff --git a/translate-common.c b/translate-common.c
index 619feb466e..171222d037 100644
--- a/translate-common.c
+++ b/translate-common.c
@@ -21,7 +21,7 @@
#include "qom/cpu.h"
uintptr_t qemu_real_host_page_size;
-uintptr_t qemu_real_host_page_mask;
+intptr_t qemu_real_host_page_mask;
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */