author | Alex Bennée <alex.bennee@linaro.org> | 2016-10-27 16:10:16 +0100
---|---|---
committer | Paolo Bonzini <pbonzini@redhat.com> | 2016-10-31 15:00:25 +0100
commit | ba051fb5e56d5ff5e4fa672d37954452e58543b2 (patch) |
tree | bc0e27a6e1ef932b5801e235a6ffaf5f2672e456 /exec.c |
parent | 14e6fe12a705c065fecdfd2a97199728123d4d9a (diff) |
tcg: move locking for tb_invalidate_phys_page_range up
In the linux-user case, everything that touches 'l1_map' and tweaks
PageDesc entries is protected by the memory lock (mmap_lock). In SoftMMU
mode we previously relied on single-threaded behaviour; with MTTCG we now
use tb_lock().
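To make the two regimes concrete, here is a minimal sketch; the wrapper name invalidate_page_locked is hypothetical, but the lock pairing mirrors the breakpoint_invalidate hunk in the diff below:

```c
/* Hypothetical wrapper (not a real QEMU symbol): shows which lock(s)
 * guard l1_map/PageDesc updates in each build configuration. Types and
 * lock helpers come from QEMU's internal headers. */
static void invalidate_page_locked(tb_page_addr_t start, tb_page_addr_t end)
{
#if defined(CONFIG_USER_ONLY)
    mmap_lock();   /* linux-user: the memory lock protects l1_map */
    tb_lock();
    tb_invalidate_phys_page_range(start, end, 0);
    tb_unlock();
    mmap_unlock();
#else
    tb_lock();     /* SoftMMU: tb_lock replaces the old single-threaded
                    * assumption under MTTCG */
    tb_invalidate_phys_page_range(start, end, 0);
    tb_unlock();
#endif
}
```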
As a result we need to do a little refactoring and push the taking of
this lock up the call tree. This requires slightly different entry points
for the SoftMMU and user-mode cases of tb_invalidate_phys_range.
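The shape of that refactoring, reduced to a hedged sketch (both function names here are hypothetical stand-ins, not the real QEMU symbols):

```c
/* The lock moves out of the leaf and into a single entry point, so
 * callers that already hold tb_lock can reach the leaf directly
 * without recursive locking. */
static void invalidate_range_locked(tb_page_addr_t start, tb_page_addr_t end)
{
    /* precondition: caller holds tb_lock (and mmap_lock for user-mode) */
    /* ... walk l1_map, tweak PageDesc entries, unlink TBs ... */
}

void invalidate_range(tb_page_addr_t start, tb_page_addr_t end)
{
    tb_lock();
    invalidate_range_locked(start, end);
    tb_unlock();
}
```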
This also means user-mode breakpoint insertion needs to take two locks,
but since it previously took none, this is an improvement (see the first
hunk of the diff below).
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20161027151030.20863-20-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'exec.c')
-rw-r--r-- | exec.c | 16 |
1 file changed, 16 insertions, 0 deletions
@@ -687,7 +687,11 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
 #if defined(CONFIG_USER_ONLY)
 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 {
+    mmap_lock();
+    tb_lock();
     tb_invalidate_phys_page_range(pc, pc + 1, 0);
+    tb_unlock();
+    mmap_unlock();
 }
 #else
 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
@@ -696,6 +700,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
     hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     if (phys != -1) {
+        /* Locks grabbed by tb_invalidate_phys_addr */
         tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                 phys | (pc & ~TARGET_PAGE_MASK));
     }
@@ -1988,7 +1993,11 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                                uint64_t val, unsigned size)
 {
+    bool locked = false;
+
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+        locked = true;
+        tb_lock();
         tb_invalidate_phys_page_fast(ram_addr, size);
     }
     switch (size) {
@@ -2004,6 +2013,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
     default:
         abort();
     }
+
+    if (locked) {
+        tb_unlock();
+    }
+
     /* Set both VGA and migration bits for simplicity and to remove
      * the notdirty callback faster.
      */
@@ -2477,7 +2491,9 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
         cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
     }
     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+        tb_lock();
         tb_invalidate_phys_range(addr, addr + length);
+        tb_unlock();
         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
     }
     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
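One detail of the notdirty_mem_write hunk above is worth spelling out: tb_lock() is only taken when the page may still hold translated code, and a local flag keeps the lock/unlock pair balanced across the intervening store. A distilled sketch (the function name and the elided store are illustrative only):

```c
/* Conditional-lock pattern from notdirty_mem_write: lock only when
 * translated code must be invalidated, remember that we did, and
 * release after the guest store completes. */
static void write_and_maybe_invalidate(hwaddr ram_addr, uint64_t val,
                                       unsigned size)
{
    bool locked = false;

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        locked = true;
        tb_lock();
        tb_invalidate_phys_page_fast(ram_addr, size);
    }

    /* ... perform the guest store of 'val' (the switch in the real code) ... */

    if (locked) {
        tb_unlock();
    }
}
```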