author    Avi Kivity <avi@redhat.com>    2012-02-09 17:34:32 +0200
committer Avi Kivity <avi@redhat.com>    2012-02-29 13:44:43 +0200
commit    54688b1ec1f468c7272b837ff57298068aaedf5f (patch)
tree      fcab2c1f03137f97b0913c89046ff42c3c094123
parent    50c1e1491e1981ecba14a477897681d8d0602500 (diff)
memory: change memory registration to rebuild the memory map on each change
Instead of incrementally building the memory map, rebuild it every time. This
allows later simplification, since the code need not consider overlaying a
previous mapping. It is also RCU friendly.

With large memory guests this can get expensive, since the operation is
O(mem size), but this will be optimized later.

As a side effect subpage and L2 leaks are fixed here.

Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  exec.c  |  50
1 file changed, 49 insertions(+), 1 deletion(-)
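For orientation, here is a minimal stand-alone sketch of the cycle this patch establishes. It is toy code, not QEMU's actual listener dispatch: the section names, the flat slot table, and the helper names are invented for illustration. It only mirrors the shape of the change below, where core_begin() wipes the whole map and core_region_nop()/core_region_add() then register every section again, so one topology update costs O(mem size):

#include <stdio.h>

#define NSECTIONS 3

/* Toy "memory map": one slot per section instead of the real multi-level table. */
static const char *phys_map[NSECTIONS];

/* ~ core_begin() + destroy_all_mappings(): tear the whole map down first. */
static void update_begin(void)
{
    for (int i = 0; i < NSECTIONS; i++) {
        phys_map[i] = NULL;
    }
}

/* ~ core_region_nop()/core_region_add(): (re)register one section, changed or not. */
static void register_section(int slot, const char *name)
{
    phys_map[slot] = name;
}

int main(void)
{
    const char *sections[NSECTIONS] = { "ram", "vga", "pci-hole" };

    /* One topology update: rebuild everything from scratch, O(#sections). */
    update_begin();
    for (int i = 0; i < NSECTIONS; i++) {
        register_section(i, sections[i]);
    }

    for (int i = 0; i < NSECTIONS; i++) {
        printf("slot %d: %s\n", i, phys_map[i] ? phys_map[i] : "(empty)");
    }
    return 0;
}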
diff --git a/exec.c b/exec.c
index 6726afd347..b36c3012f2 100644
--- a/exec.c
+++ b/exec.c
@@ -2520,6 +2520,53 @@ static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
} \
} while (0)
+static void destroy_page_desc(PhysPageDesc pd)
+{
+    unsigned io_index = pd.phys_offset & ~TARGET_PAGE_MASK;
+    MemoryRegion *mr = io_mem_region[io_index];
+
+    if (mr->subpage) {
+        subpage_t *subpage = container_of(mr, subpage_t, iomem);
+        memory_region_destroy(&subpage->iomem);
+        g_free(subpage);
+    }
+}
+
+static void destroy_l2_mapping(void **lp, unsigned level)
+{
+    unsigned i;
+    void **p;
+    PhysPageDesc *pd;
+
+    if (!*lp) {
+        return;
+    }
+
+    if (level > 0) {
+        p = *lp;
+        for (i = 0; i < L2_SIZE; ++i) {
+            destroy_l2_mapping(&p[i], level - 1);
+        }
+        g_free(p);
+    } else {
+        pd = *lp;
+        for (i = 0; i < L2_SIZE; ++i) {
+            destroy_page_desc(pd[i]);
+        }
+        g_free(pd);
+    }
+    *lp = NULL;
+}
+
+static void destroy_all_mappings(void)
+{
+    unsigned i;
+
+    for (i = 0; i < P_L1_SIZE; ++i) {
+        destroy_l2_mapping(&l1_phys_map[i], P_L1_SHIFT / L2_BITS - 1);
+    }
+}
+
/* register physical memory.
For RAM, 'size' must be a multiple of the target page size.
If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
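As a stand-alone aside, the snippet below illustrates the phys_offset bit-splitting that the (truncated) comment above describes and that destroy_page_desc() relies on. The page size and the descriptor value are assumptions made up for the example; only the masking arithmetic mirrors the patch, where a non-zero value below the page boundary selects the io_mem_region[] slot:

#include <inttypes.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                            /* assumed: 4 KiB target pages */
#define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))

int main(void)
{
    /* Hypothetical descriptor value; the non-zero low bits (3) mark an I/O slot. */
    uint64_t phys_offset = ((uint64_t)0x1234 << TARGET_PAGE_BITS) | 0x3;

    uint64_t io_index  = phys_offset & ~TARGET_PAGE_MASK; /* same mask destroy_page_desc() applies */
    uint64_t page_part = phys_offset &  TARGET_PAGE_MASK; /* page-aligned remainder */

    printf("page-aligned part 0x%" PRIx64 ", io_index %" PRIu64 "\n", page_part, io_index);
    return 0;
}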
@@ -3490,6 +3537,7 @@ static void io_mem_init(void)
static void core_begin(MemoryListener *listener)
{
+    destroy_all_mappings();
}
static void core_commit(MemoryListener *listener)
@@ -3505,12 +3553,12 @@ static void core_region_add(MemoryListener *listener,
static void core_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
-    cpu_register_physical_memory_log(section, false);
}
static void core_region_nop(MemoryListener *listener,
MemoryRegionSection *section)
{
+    cpu_register_physical_memory_log(section, section->readonly);
}
static void core_log_start(MemoryListener *listener,