commit    dc36664b156b6aa2b55f2bca5fd0c643b6417ddb
tree      bb319daf3cd759c2d91dd541bb2ee24d8ca4ee1a
parent    100d9fdc18f28d813f9d22025d783a7cdcc4bb4b
parent    6a928d25b6d8bc3729c3d28326c6db13b9481059
author    SeokYeon Hwang <syeon.hwang@samsung.com>  2016-12-20 10:13:15 +0900
committer SeokYeon Hwang <syeon.hwang@samsung.com>  2016-12-20 10:13:15 +0900

    Merge tag 'v2.8.0-rc4' into develop

    v2.8.0-rc4 release

    Change-Id: I0158b5078d1af545dc32a51f10d2f8f0b96543a6
    Signed-off-by: SeokYeon Hwang <syeon.hwang@samsung.com>
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c | 202
1 file changed, 131 insertions(+), 71 deletions(-)
diff --git a/exec.c b/exec.c
index 04e34a3c70..2d52b335f2 100644
--- a/exec.c
+++ b/exec.c
@@ -94,6 +94,11 @@ static MemoryRegion io_mem_unassigned;
#endif
+#ifdef TARGET_PAGE_BITS_VARY
+int target_page_bits;
+bool target_page_bits_decided;
+#endif
+
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
@@ -103,8 +108,37 @@ __thread CPUState *current_cpu;
2 = Adaptive rate instruction counting. */
int use_icount;
+bool set_preferred_target_page_bits(int bits)
+{
+ /* The target page size is the lowest common denominator for all
+ * the CPUs in the system, so we can only make it smaller, never
+ * larger. And we can't make it smaller once we've committed to
+ * a particular size.
+ */
+#ifdef TARGET_PAGE_BITS_VARY
+ assert(bits >= TARGET_PAGE_BITS_MIN);
+ if (target_page_bits == 0 || target_page_bits > bits) {
+ if (target_page_bits_decided) {
+ return false;
+ }
+ target_page_bits = bits;
+ }
+#endif
+ return true;
+}
+
#if !defined(CONFIG_USER_ONLY)
+static void finalize_target_page_bits(void)
+{
+#ifdef TARGET_PAGE_BITS_VARY
+ if (target_page_bits == 0) {
+ target_page_bits = TARGET_PAGE_BITS_MIN;
+ }
+ target_page_bits_decided = true;
+#endif
+}
+
typedef struct PhysPageEntry PhysPageEntry;
struct PhysPageEntry {
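The negotiation implemented by set_preferred_target_page_bits() and finalize_target_page_bits() above is easiest to see in isolation. A minimal standalone sketch of the same logic, with TARGET_PAGE_BITS_MIN fixed at 10 purely for this demo (the real minimum is per-target):

    /* demo.c: standalone restatement of the page-bits negotiation above */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS_MIN 10      /* demo value; per-target in QEMU */

    static int target_page_bits;
    static bool target_page_bits_decided;

    static bool set_preferred_target_page_bits(int bits)
    {
        assert(bits >= TARGET_PAGE_BITS_MIN);
        if (target_page_bits == 0 || target_page_bits > bits) {
            if (target_page_bits_decided) {
                return false;            /* too late: size already committed */
            }
            target_page_bits = bits;     /* shrink to the smallest request */
        }
        return true;
    }

    static void finalize_target_page_bits(void)
    {
        if (target_page_bits == 0) {
            target_page_bits = TARGET_PAGE_BITS_MIN;
        }
        target_page_bits_decided = true;
    }

    int main(void)
    {
        set_preferred_target_page_bits(16);   /* first CPU asks for 64K pages */
        set_preferred_target_page_bits(12);   /* second CPU shrinks that to 4K */
        finalize_target_page_bits();
        printf("%d\n", target_page_bits);                    /* prints 12 */
        printf("%d\n", set_preferred_target_page_bits(10));  /* prints 0 */
        return 0;
    }

Requests can only shrink the page size, and only until the first finalize; asking for a size at or above the current choice stays a successful no-op even afterwards.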
@@ -154,7 +188,7 @@ typedef struct subpage_t {
MemoryRegion iomem;
AddressSpace *as;
hwaddr base;
- uint16_t sub_section[TARGET_PAGE_SIZE];
+ uint16_t sub_section[];
} subpage_t;
#define PHYS_SECTION_UNASSIGNED 0
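With TARGET_PAGE_BITS_VARY, TARGET_PAGE_SIZE is no longer a compile-time constant, so the fixed-size sub_section array above has to become a C99 flexible array member; the matching single allocation shows up in the subpage_init() hunk further down. The pattern in isolation (demo types, not the QEMU structs):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct demo_subpage {
        uint64_t base;
        uint16_t sub_section[];       /* length chosen at run time */
    } demo_subpage;

    /* One allocation covers header and array, as subpage_init() now does. */
    static demo_subpage *demo_subpage_new(size_t entries)
    {
        return calloc(1, sizeof(demo_subpage) + entries * sizeof(uint16_t));
    }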
@@ -256,7 +290,7 @@ static void phys_page_set(AddressSpaceDispatch *d,
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
* and update our entry so we can skip it and go directly to the destination.
*/
-static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
+static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
unsigned valid_ptr = P_L2_SIZE;
int valid = 0;
@@ -276,7 +310,7 @@ static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *com
valid_ptr = i;
valid++;
if (p[i].skip) {
- phys_page_compact(&p[i], nodes, compacted);
+ phys_page_compact(&p[i], nodes);
}
}
@@ -308,10 +342,8 @@ static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *com
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
- DECLARE_BITMAP(compacted, nodes_nb);
-
if (d->phys_map.skip) {
- phys_page_compact(&d->phys_map, d->map.nodes, compacted);
+ phys_page_compact(&d->phys_map, d->map.nodes);
}
}
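As the hunks above show, the deleted 'compacted' bitmap was only passed down the recursion and never otherwise used. Its declaration was also a variable-length array in disguise, since nodes_nb is a runtime value; paraphrasing include/qemu/bitmap.h, DECLARE_BITMAP expands roughly as:

    #define BITS_PER_LONG       (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(nr)   (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

    /* so the deleted line declared, in effect:
     *     unsigned long compacted[BITS_TO_LONGS(nodes_nb)];   i.e. a VLA
     */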
@@ -321,9 +353,9 @@ static inline bool section_covers_addr(const MemoryRegionSection *section,
/* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
* the section must cover the entire address space.
*/
- return section->size.hi ||
+ return int128_gethi(section->size) ||
range_covers_byte(section->offset_within_address_space,
- section->size.lo, addr);
+ int128_getlo(section->size), addr);
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
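The switch from section->size.hi to int128_gethi() matters because Int128 has two host-dependent representations and the direct field access only compiles for the struct one. Roughly, following include/qemu/int128.h of this release:

    #include <stdint.h>

    #ifdef CONFIG_INT128
    typedef __int128_t Int128;
    static inline int64_t  int128_gethi(Int128 a) { return a >> 64; }
    static inline uint64_t int128_getlo(Int128 a) { return (uint64_t)a; }
    #else
    typedef struct Int128 { uint64_t lo; int64_t hi; } Int128;
    static inline int64_t  int128_gethi(Int128 a) { return a.hi; }
    static inline uint64_t int128_getlo(Int128 a) { return a.lo; }
    #endif

The truthiness test is unchanged: a non-zero high half still means the section covers the entire 2^64 address space.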
@@ -462,7 +494,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen)
{
MemoryRegionSection *section;
- AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
section = address_space_translate_internal(d, addr, xlat, plen, false);
@@ -599,32 +631,11 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
}
#endif
-static int cpu_get_free_index(void)
-{
- CPUState *some_cpu;
- int cpu_index = 0;
-
- CPU_FOREACH(some_cpu) {
- cpu_index++;
- }
- return cpu_index;
-}
-
-void cpu_exec_exit(CPUState *cpu)
+void cpu_exec_unrealizefn(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
- cpu_list_lock();
- if (cpu->node.tqe_prev == NULL) {
- /* there is nothing to undo since cpu_exec_init() hasn't been called */
- cpu_list_unlock();
- return;
- }
-
- QTAILQ_REMOVE(&cpus, cpu, node);
- cpu->node.tqe_prev = NULL;
- cpu->cpu_index = UNASSIGNED_CPU_INDEX;
- cpu_list_unlock();
+ cpu_list_remove(cpu);
if (cc->vmsd != NULL) {
vmstate_unregister(NULL, cc->vmsd, cpu);
@@ -634,11 +645,8 @@ void cpu_exec_exit(CPUState *cpu)
}
}
-void cpu_exec_init(CPUState *cpu, Error **errp)
+void cpu_exec_initfn(CPUState *cpu)
{
- CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
- Error *local_err ATTRIBUTE_UNUSED = NULL;
-
cpu->as = NULL;
cpu->num_ases = 0;
@@ -659,14 +667,13 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
cpu->memory = system_memory;
object_ref(OBJECT(cpu->memory));
#endif
+}
- cpu_list_lock();
- if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
- cpu->cpu_index = cpu_get_free_index();
- assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
- }
- QTAILQ_INSERT_TAIL(&cpus, cpu, node);
- cpu_list_unlock();
+void cpu_exec_realizefn(CPUState *cpu, Error **errp)
+{
+ CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
+
+ cpu_list_add(cpu);
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
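The cpu_exec_init() split above follows the usual QOM shape: work that cannot fail moves to instance init, work that can fail or needs the final configuration moves to realize, and teardown mirrors realize. A hedged sketch of how a target might wire the new hooks up (all demo_* names are invented; actual call sites vary per target):

    static void demo_cpu_initfn(Object *obj)
    {
        cpu_exec_initfn(CPU(obj));            /* address-space bookkeeping */
    }

    static void demo_cpu_realizefn(DeviceState *dev, Error **errp)
    {
        cpu_exec_realizefn(CPU(dev), errp);   /* cpu_list_add() + vmstate */
    }

    static void demo_cpu_unrealizefn(DeviceState *dev, Error **errp)
    {
        cpu_exec_unrealizefn(CPU(dev));       /* cpu_list_remove() + unregister */
    }

One practical effect visible above: the open-coded cpu_index assignment under cpu_list_lock() is gone, folded into cpu_list_add()/cpu_list_remove().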
@@ -678,23 +685,15 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
#endif
}
-#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
- tb_invalidate_phys_page_range(pc, pc + 1, 0);
-}
-#else
-static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
-{
- MemTxAttrs attrs;
- hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
- if (phys != -1) {
- tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
- phys | (pc & ~TARGET_PAGE_MASK));
- }
+ /* Flush the whole TB as this will not have race conditions
+ * even if we don't have proper locking yet.
+ * Ideally we would just invalidate the TBs for the
+ * specified PC.
+ */
+ tb_flush(cpu);
}
-#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
@@ -900,11 +899,13 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...)
fprintf(stderr, "\n");
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
if (qemu_log_separate()) {
+ qemu_log_lock();
qemu_log("qemu: fatal: ");
qemu_log_vprintf(fmt, ap2);
qemu_log("\n");
log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
qemu_log_flush();
+ qemu_log_unlock();
qemu_log_close();
}
va_end(ap2);
@@ -1218,6 +1219,15 @@ void qemu_mutex_unlock_ramlist(void)
}
#ifdef __linux__
+static int64_t get_file_size(int fd)
+{
+ int64_t size = lseek(fd, 0, SEEK_END);
+ if (size < 0) {
+ return -errno;
+ }
+ return size;
+}
+
static void *file_ram_alloc(RAMBlock *block,
ram_addr_t memory,
const char *path,
@@ -1229,7 +1239,7 @@ static void *file_ram_alloc(RAMBlock *block,
char *c;
void *area = MAP_FAILED;
int fd = -1;
- int64_t page_size;
+ int64_t file_size;
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_setg(errp,
@@ -1284,25 +1294,47 @@ static void *file_ram_alloc(RAMBlock *block,
*/
}
- page_size = qemu_fd_getpagesize(fd);
- block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
+ block->page_size = qemu_fd_getpagesize(fd);
+ block->mr->align = block->page_size;
+#if defined(__s390x__)
+ if (kvm_enabled()) {
+ block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
+ }
+#endif
- if (memory < page_size) {
+ file_size = get_file_size(fd);
+
+ if (memory < block->page_size) {
error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
- "or larger than page size 0x%" PRIx64,
- memory, page_size);
+ "or larger than page size 0x%zx",
+ memory, block->page_size);
goto error;
}
- memory = ROUND_UP(memory, page_size);
+ if (file_size > 0 && file_size < memory) {
+ error_setg(errp, "backing store %s size 0x%" PRIx64
+ " does not match 'size' option 0x" RAM_ADDR_FMT,
+ path, file_size, memory);
+ goto error;
+ }
+
+ memory = ROUND_UP(memory, block->page_size);
/*
* ftruncate is not supported by hugetlbfs in older
* hosts, so don't bother bailing out on errors.
* If anything goes wrong with it under other filesystems,
* mmap will fail.
+ *
+ * Do not truncate the non-empty backend file to avoid corrupting
+ * the existing data in the file. Disabling shrinking is not
+ * enough. For example, the current vNVDIMM implementation stores
+ * the guest NVDIMM labels at the end of the backend file. If the
+ * backend file is later extended, QEMU will not be able to find
+ * those labels. Therefore, extending the non-empty backend file
+ * is disabled as well.
*/
- if (ftruncate(fd, memory)) {
+ if (!file_size && ftruncate(fd, memory)) {
perror("ftruncate");
}
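Condensed, the new backing-file handling above has three cases. A standalone restatement (backing_file_size_ok() is a name invented for this sketch):

    #include <stdbool.h>
    #include <stdint.h>

    static bool backing_file_size_ok(int64_t file_size, uint64_t memory)
    {
        if (file_size <= 0) {
            return true;    /* empty file, or size unknown: nothing to check */
        }
        /* Never truncate or extend a non-empty file: the vNVDIMM labels
         * described above sit at its end. Larger than 'memory' is fine. */
        return (uint64_t)file_size >= memory;
    }

Note the real code only calls ftruncate() when file_size is exactly 0, so a pre-sized backing file is left untouched in every case.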
@@ -1449,6 +1481,11 @@ void qemu_ram_unset_idstr(RAMBlock *block)
}
}
+size_t qemu_ram_pagesize(RAMBlock *rb)
+{
+ return rb->page_size;
+}
+
static int memory_try_enable_merging(void *addr, size_t len)
{
if (!machine_mem_merge(current_machine)) {
@@ -1616,10 +1653,8 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
+ /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
- if (kvm_enabled()) {
- kvm_setup_guest_memory(new_block->host, new_block->max_length);
- }
#ifdef CONFIG_HAX
/*
* In Hax, the qemu allocate the virtual address, and HAX kernel
@@ -1705,6 +1740,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
+ new_block->page_size = getpagesize();
new_block->host = host;
if (host) {
new_block->flags |= RAM_PREALLOC;
@@ -1989,7 +2025,11 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
uint64_t val, unsigned size)
{
+ bool locked = false;
+
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+ locked = true;
+ tb_lock();
tb_invalidate_phys_page_fast(ram_addr, size);
}
switch (size) {
@@ -2005,6 +2045,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
default:
abort();
}
+
+ if (locked) {
+ tb_unlock();
+ }
+
/* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
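The locked flag keeps tb_lock off the fast path: when the page no longer holds translated code, no lock is taken at all. When it is taken, the guest write itself stays under the lock, presumably so a concurrent translation cannot observe the page between the invalidation and the updated bytes, and it is dropped before the dirty-bitmap update. The same shape with a portable mutex standing in for tb_lock (demo names throughout):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t demo_tb_lock = PTHREAD_MUTEX_INITIALIZER;

    static void demo_notdirty_write(bool page_has_code)
    {
        bool locked = false;

        if (page_has_code) {              /* slow path only */
            locked = true;
            pthread_mutex_lock(&demo_tb_lock);
            /* invalidate translations covering the page here */
        }
        /* perform the guest write here, under the lock when taken */
        if (locked) {
            pthread_mutex_unlock(&demo_tb_lock);
        }
        /* dirty-bitmap bookkeeping follows, outside the lock */
    }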
@@ -2065,6 +2110,12 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
continue;
}
cpu->watchpoint_hit = wp;
+
+ /* The tb_lock will be reset when cpu_loop_exit or
+ * cpu_loop_exit_noexc longjmp back into the cpu_exec
+ * main loop.
+ */
+ tb_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
@@ -2252,8 +2303,7 @@ static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
subpage_t *mmio;
- mmio = g_malloc0(sizeof(subpage_t));
-
+ mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
mmio->as = as;
mmio->base = base;
memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
@@ -2358,7 +2408,7 @@ static void tcg_commit(MemoryListener *listener)
* may have split the RCU critical section.
*/
d = atomic_rcu_read(&cpuas->as->dispatch);
- cpuas->memory_dispatch = d;
+ atomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu, 1);
}
@@ -2473,7 +2523,9 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+ tb_lock();
tb_invalidate_phys_range(addr, addr + length);
+ tb_unlock();
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
@@ -2845,6 +2897,14 @@ void cpu_register_map_client(QEMUBH *bh)
void cpu_exec_init_all(void)
{
qemu_mutex_init(&ram_list.mutex);
+ /* The data structures we set up here depend on knowing the page size,
+ * so no more changes can be made after this point.
+ * In an ideal world, nothing we did before we had finished the
+ * machine setup would care about the target page size, and we could
+ * do this much later, rather than requiring board models to state
+ * up front what their requirements are.
+ */
+ finalize_target_page_bits();
io_mem_init();
memory_map_init();
qemu_mutex_init(&map_client_list_lock);