author    Paolo Bonzini <pbonzini@redhat.com>    2013-11-07 17:14:36 +0100
committer Michael S. Tsirkin <mst@redhat.com>    2013-12-10 12:29:56 +0200
commit    03f4995781a64e106e6f73864a1e9c4163dac53b (patch)
tree      003ca5ab7066202a511bd6becf71a5602184d35e
parent    92b8e39c7f582e15f9e9423bc9fd3f186536b073 (diff)
split definitions for exec.c and translate-all.c radix trees
The exec.c and translate-all.c radix trees are quite different, and the
exec.c one in particular is not limited to the CPU---it can be used also
by devices that do DMA, and in that case the address space is not limited
to TARGET_PHYS_ADDR_SPACE_BITS bits.  We want to make exec.c's radix
trees 64-bit wide.  As a first step, stop sharing the constants between
exec.c and translate-all.c.  exec.c gets P_L2_* constants, translate-all.c
gets V_L2_*, for consistency with the existing V_L1_* symbols.  Though
actually in the softmmu case translate-all.c is also indexed by physical
addresses...

This patch has no semantic change.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-rw-r--r--  exec.c           | 29
-rw-r--r--  translate-all.c  | 32
-rw-r--r--  translate-all.h  |  7
3 files changed, 39 insertions(+), 29 deletions(-)
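
To make the new constants concrete, here is a minimal standalone C sketch
(not part of the patch, and not QEMU code) showing how P_L2_BITS, P_L2_SIZE
and P_L2_LEVELS split a page index into one slot per radix-tree level.  The
values of ADDR_SPACE_BITS and TARGET_PAGE_BITS below are assumptions chosen
for illustration, and the walk itself is hypothetical.

/*
 * Standalone sketch: how P_L2_BITS and P_L2_LEVELS carve a page index
 * into per-level radix-tree slots.  ADDR_SPACE_BITS and TARGET_PAGE_BITS
 * are illustrative assumptions, not taken from any particular target.
 */
#include <stdio.h>
#include <stdint.h>

#define ADDR_SPACE_BITS   64   /* assumed physical address width */
#define TARGET_PAGE_BITS  12   /* assumed 4 KiB pages */

#define P_L2_BITS   10
#define P_L2_SIZE   (1 << P_L2_BITS)
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

int main(void)
{
    uint64_t addr  = 0x123456789abcULL;
    uint64_t index = addr >> TARGET_PAGE_BITS;   /* page index */
    int level;

    printf("P_L2_LEVELS = %d\n", P_L2_LEVELS);
    /* Walk from the top level down, extracting 10 bits per level. */
    for (level = P_L2_LEVELS - 1; level >= 0; level--) {
        unsigned slot = (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
        printf("level %d -> slot %u\n", level, slot);
    }
    return 0;
}

With the assumed 4 KiB pages and a 64-bit physical address space this yields
six levels of 1024-entry tables, matching the P_L2_LEVELS formula the patch
introduces in exec.c.
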
diff --git a/exec.c b/exec.c
index f4b9ef25f5..060f3f3430 100644
--- a/exec.c
+++ b/exec.c
@@ -88,7 +88,15 @@ struct PhysPageEntry {
uint16_t ptr : 15;
};
-typedef PhysPageEntry Node[L2_SIZE];
+/* Size of the L2 (and L3, etc) page tables. */
+#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
+
+#define P_L2_BITS 10
+#define P_L2_SIZE (1 << P_L2_BITS)
+
+#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
+
+typedef PhysPageEntry Node[P_L2_SIZE];
struct AddressSpaceDispatch {
/* This is a multi-level map on the physical address space.
@@ -155,7 +163,7 @@ static uint16_t phys_map_node_alloc(void)
ret = next_map.nodes_nb++;
assert(ret != PHYS_MAP_NODE_NIL);
assert(ret != next_map.nodes_nb_alloc);
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < P_L2_SIZE; ++i) {
next_map.nodes[ret][i].is_leaf = 0;
next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
}
@@ -168,13 +176,13 @@ static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
{
PhysPageEntry *p;
int i;
- hwaddr step = (hwaddr)1 << (level * L2_BITS);
+ hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
lp->ptr = phys_map_node_alloc();
p = next_map.nodes[lp->ptr];
if (level == 0) {
- for (i = 0; i < L2_SIZE; i++) {
+ for (i = 0; i < P_L2_SIZE; i++) {
p[i].is_leaf = 1;
p[i].ptr = PHYS_SECTION_UNASSIGNED;
}
@@ -182,9 +190,9 @@ static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
} else {
p = next_map.nodes[lp->ptr];
}
- lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
+ lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
- while (*nb && lp < &p[L2_SIZE]) {
+ while (*nb && lp < &p[P_L2_SIZE]) {
if ((*index & (step - 1)) == 0 && *nb >= step) {
lp->is_leaf = true;
lp->ptr = leaf;
@@ -218,7 +226,7 @@ static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
return &sections[PHYS_SECTION_UNASSIGNED];
}
p = nodes[lp.ptr];
- lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
+ lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
}
return &sections[lp.ptr];
}
@@ -1778,7 +1786,12 @@ void address_space_destroy_dispatch(AddressSpace *as)
static void memory_map_init(void)
{
system_memory = g_malloc(sizeof(*system_memory));
- memory_region_init(system_memory, NULL, "system", INT64_MAX);
+
+ assert(ADDR_SPACE_BITS <= 64);
+
+ memory_region_init(system_memory, NULL, "system",
+ ADDR_SPACE_BITS == 64 ?
+ UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
address_space_init(&address_space_memory, system_memory, "memory");
system_io = g_malloc(sizeof(*system_io));
diff --git a/translate-all.c b/translate-all.c
index aeda54dfbd..1c63d78b7d 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -96,12 +96,16 @@ typedef struct PageDesc {
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif
+/* Size of the L2 (and L3, etc) page tables. */
+#define V_L2_BITS 10
+#define V_L2_SIZE (1 << V_L2_BITS)
+
/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
- ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
+ ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
#if V_L1_BITS_REM < 4
-#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
+#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif
@@ -395,18 +399,18 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
/* Level 2..N-1. */
- for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
+ for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
void **p = *lp;
if (p == NULL) {
if (!alloc) {
return NULL;
}
- ALLOC(p, sizeof(void *) * L2_SIZE);
+ ALLOC(p, sizeof(void *) * V_L2_SIZE);
*lp = p;
}
- lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
+ lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
}
pd = *lp;
@@ -414,13 +418,13 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
if (!alloc) {
return NULL;
}
- ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
+ ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
*lp = pd;
}
#undef ALLOC
- return pd + (index & (L2_SIZE - 1));
+ return pd + (index & (V_L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
@@ -655,14 +659,14 @@ static void page_flush_tb_1(int level, void **lp)
if (level == 0) {
PageDesc *pd = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
pd[i].first_tb = NULL;
invalidate_page_bitmap(pd + i);
}
} else {
void **pp = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
page_flush_tb_1(level - 1, pp + i);
}
}
@@ -673,7 +677,7 @@ static void page_flush_tb(void)
int i;
for (i = 0; i < V_L1_SIZE; i++) {
- page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
+ page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
}
}
@@ -1600,7 +1604,7 @@ static int walk_memory_regions_1(struct walk_memory_regions_data *data,
if (level == 0) {
PageDesc *pd = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
int prot = pd[i].flags;
pa = base | (i << TARGET_PAGE_BITS);
@@ -1614,9 +1618,9 @@ static int walk_memory_regions_1(struct walk_memory_regions_data *data,
} else {
void **pp = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
pa = base | ((abi_ulong)i <<
- (TARGET_PAGE_BITS + L2_BITS * level));
+ (TARGET_PAGE_BITS + V_L2_BITS * level));
rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
if (rc != 0) {
return rc;
@@ -1639,7 +1643,7 @@ int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
for (i = 0; i < V_L1_SIZE; i++) {
int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
- V_L1_SHIFT / L2_BITS - 1, l1_map + i);
+ V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
if (rc != 0) {
return rc;
diff --git a/translate-all.h b/translate-all.h
index 5c38819eb8..f7e5932d65 100644
--- a/translate-all.h
+++ b/translate-all.h
@@ -19,13 +19,6 @@
#ifndef TRANSLATE_ALL_H
#define TRANSLATE_ALL_H
-/* Size of the L2 (and L3, etc) page tables. */
-#define L2_BITS 10
-#define L2_SIZE (1 << L2_BITS)
-
-#define P_L2_LEVELS \
- (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
-
/* translate-all.c */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len);
void cpu_unlink_tb(CPUState *cpu);