author	Aurelien Jarno <aurelien@aurel32.net>	2010-07-30 21:09:10 +0200
committer	Aurelien Jarno <aurelien@aurel32.net>	2010-07-30 21:09:10 +0200
commit	18e9ea8a3f36b0a3845e1ac6d8acd180063bed8f (patch)
tree	12c3184c133ef04baee6682eebb3f99fec130081
parent	5d5c99300d91d67c3f70058d2431a633028e3a9e (diff)
linux-user: fix build on hosts not using guest base
Commit 68a1c816868b3e35a1da698af412b29e61b1948a broke qemu on hosts not using guest base, because it uses reserved_va unconditionally in mmap.c. To avoid too many #ifdef/#endif blocks, define RESERVED_VA as either reserved_va or 0ul, and use it instead of reserved_va, similarly to what has been done with guest_base/GUEST_BASE.
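The trick can be shown in isolation. Below is a minimal, self-contained sketch of the pattern (the CONFIG_USE_GUEST_BASE guard name, the initial value of reserved_va and the find_vma() helper are assumptions made for the example, not the actual QEMU code): on hosts built without guest base the macro expands to the constant 0ul, so every if (RESERVED_VA) branch folds away at compile time and the otherwise-undeclared reserved_va variable is never referenced.

/* Standalone sketch of the reserved_va/RESERVED_VA macro pattern.
 * CONFIG_USE_GUEST_BASE and find_vma() are illustrative names only. */
#include <stdio.h>

#ifdef CONFIG_USE_GUEST_BASE
unsigned long reserved_va = 0x100000ul;  /* in QEMU this is set at runtime */
#define RESERVED_VA reserved_va
#else
#define RESERVED_VA 0ul                  /* constant: dead branches fold away */
#endif

/* Reject a mapping that would not fit below the reserved area;
 * with no reserved area, pass the start address through unchanged. */
static unsigned long find_vma(unsigned long start, unsigned long size)
{
    if (RESERVED_VA) {                   /* becomes if (0ul) without guest base */
        if (start + size > RESERVED_VA) {
            return (unsigned long)-1;
        }
    }
    return start;
}

int main(void)
{
    printf("0x%lx\n", find_vma(0x10000ul, 0x1000ul));
    return 0;
}

The advantage over wrapping each use of reserved_va in #ifdef/#endif is that both configurations keep the same code paths visible to the compiler: the reserved-VA handling is still parsed and type-checked everywhere, but the optimizer drops it entirely on hosts that never define reserved_va.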
-rw-r--r--	cpu-all.h	2
-rw-r--r--	linux-user/mmap.c	14
2 files changed, 9 insertions, 7 deletions
diff --git a/cpu-all.h b/cpu-all.h
index 224ca40c1d..67a32664d5 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -629,8 +629,10 @@ extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
#define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
#else
#define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
#endif
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 39da6dfb40..e10a6ef2e2 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -225,13 +225,13 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
int prot;
int looped = 0;
- if (size > reserved_va) {
+ if (size > RESERVED_VA) {
return (abi_ulong)-1;
}
last_addr = start;
for (addr = start; last_addr + size != addr; addr += qemu_host_page_size) {
- if (last_addr + size >= reserved_va
+ if (last_addr + size >= RESERVED_VA
|| (abi_ulong)(last_addr + size) < last_addr) {
if (looped) {
return (abi_ulong)-1;
@@ -271,7 +271,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
size = HOST_PAGE_ALIGN(size);
- if (reserved_va) {
+ if (RESERVED_VA) {
return mmap_find_vma_reserved(start, size);
}
@@ -651,7 +651,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
ret = 0;
/* unmap what we can */
if (real_start < real_end) {
- if (reserved_va) {
+ if (RESERVED_VA) {
mmap_reserve(real_start, real_end - real_start);
} else {
ret = munmap(g2h(real_start), real_end - real_start);
@@ -679,7 +679,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
flags,
g2h(new_addr));
- if (reserved_va && host_addr != MAP_FAILED) {
+ if (RESERVED_VA && host_addr != MAP_FAILED) {
/* If new and old addresses overlap then the above mremap will
already have failed with EINVAL. */
mmap_reserve(old_addr, old_size);
@@ -701,7 +701,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
}
} else {
int prot = 0;
- if (reserved_va && old_size < new_size) {
+ if (RESERVED_VA && old_size < new_size) {
abi_ulong addr;
for (addr = old_addr + old_size;
addr < old_addr + new_size;
@@ -711,7 +711,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
}
if (prot == 0) {
host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
- if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
+ if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
mmap_reserve(old_addr + old_size, new_size - old_size);
}
} else {