author    | Peter Maydell <peter.maydell@linaro.org> | 2015-01-10 21:02:23 +0000
committer | Peter Maydell <peter.maydell@linaro.org> | 2015-01-10 21:02:23 +0000
commit    | aaf03019175949eda5087329448b8a0033b89479 (patch)
tree      | 5e83271283bc2c67171c5ffd8e50e1ee4b977711 /exec.c
parent    | 97052d64e4fb934bcf25f4a6b42dc06f6ecbf9eb (diff)
parent    | a1666142db623365b2e7619f01c1cbb72d62b514 (diff)
download  | qemu-aaf03019175949eda5087329448b8a0033b89479.tar.gz
          | qemu-aaf03019175949eda5087329448b8a0033b89479.tar.bz2
          | qemu-aaf03019175949eda5087329448b8a0033b89479.zip
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
pc: resizeable ROM blocks
This makes ROM blocks resizeable. This infrastructure is required for other
functionality we have queued.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
# gpg: Signature made Thu 08 Jan 2015 11:19:24 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg: aka "Michael S. Tsirkin <mst@redhat.com>"
* remotes/mst/tags/for_upstream:
acpi-build: make ROMs RAM blocks resizeable
memory: API to allocate resizeable RAM MR
arch_init: support resizing on incoming migration
exec: qemu_ram_alloc_resizeable, qemu_ram_resize
exec: split length -> used_length/max_length
exec: cpu_physical_memory_set/clear_dirty_range
memory: add memory_region_set_size
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
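
For orientation, here is a minimal caller-side sketch of the two exec.c entry points this pull request adds, qemu_ram_alloc_resizeable() and qemu_ram_resize(), using the signatures visible in the diff below. The helper and callback names (alloc_rom_block, grow_rom_block, rom_resized_cb) are hypothetical, not taken from the series, and the declarations are assumed to come from QEMU's internal RAM headers.

/* Hypothetical usage sketch -- rom_resized_cb, alloc_rom_block and
 * grow_rom_block are illustrative names, not part of this series. */
static void rom_resized_cb(const char *id, uint64_t length, void *host)
{
    /* Called by qemu_ram_resize() after used_length and the memory
     * region size have already been updated; adjust device state here. */
}

static ram_addr_t alloc_rom_block(MemoryRegion *mr, ram_addr_t size,
                                  ram_addr_t max_size, Error **errp)
{
    /* max_size is reserved up front; only 'size' (used_length) is
     * actually used and migrated, and may change across reboots. */
    return qemu_ram_alloc_resizeable(size, max_size, rom_resized_cb,
                                     mr, errp);
}

static void grow_rom_block(ram_addr_t base, ram_addr_t new_size, Error **errp)
{
    /* Only legal before the guest may have seen the size, e.g. on
     * incoming migration or right after reset.  Fails with -EINVAL if
     * the block is not RAM_RESIZEABLE or new_size exceeds max_length. */
    qemu_ram_resize(base, new_size, errp);
}
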
Diffstat (limited to 'exec.c')
-rw-r--r-- | exec.c | 136
1 file changed, 107 insertions(+), 29 deletions(-)
@@ -75,6 +75,11 @@ static MemoryRegion io_mem_unassigned;
 /* RAM is mmap-ed with MAP_SHARED */
 #define RAM_SHARED (1 << 1)
 
+/* Only a portion of RAM (used_length) is actually used, and migrated.
+ * This used_length size can change across reboots.
+ */
+#define RAM_RESIZEABLE (1 << 2)
+
 #endif
 
 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
@@ -812,11 +817,11 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 
     /* The list is protected by the iothread lock here. */
     block = ram_list.mru_block;
-    if (block && addr - block->offset < block->length) {
+    if (block && addr - block->offset < block->max_length) {
         goto found;
     }
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (addr - block->offset < block->length) {
+        if (addr - block->offset < block->max_length) {
             goto found;
         }
     }
@@ -850,7 +855,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
 {
     if (length == 0)
         return;
-    cpu_physical_memory_clear_dirty_range(start, length, client);
+    cpu_physical_memory_clear_dirty_range_type(start, length, client);
 
     if (tcg_enabled()) {
         tlb_reset_dirty_range_all(start, length);
@@ -1186,7 +1191,7 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         ram_addr_t end, next = RAM_ADDR_MAX;
 
-        end = block->offset + block->length;
+        end = block->offset + block->max_length;
 
         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
             if (next_block->offset >= end) {
@@ -1214,7 +1219,7 @@ ram_addr_t last_ram_offset(void)
     ram_addr_t last = 0;
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next)
-        last = MAX(last, block->offset + block->length);
+        last = MAX(last, block->offset + block->max_length);
 
     return last;
 }
@@ -1296,6 +1301,49 @@ static int memory_try_enable_merging(void *addr, size_t len)
     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
 }
 
+/* Only legal before guest might have detected the memory size: e.g. on
+ * incoming migration, or right after reset.
+ *
+ * As memory core doesn't know how is memory accessed, it is up to
+ * resize callback to update device state and/or add assertions to detect
+ * misuse, if necessary.
+ */
+int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
+{
+    RAMBlock *block = find_ram_block(base);
+
+    assert(block);
+
+    if (block->used_length == newsize) {
+        return 0;
+    }
+
+    if (!(block->flags & RAM_RESIZEABLE)) {
+        error_setg_errno(errp, EINVAL,
+                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
+                         " in != 0x" RAM_ADDR_FMT, block->idstr,
+                         newsize, block->used_length);
+        return -EINVAL;
+    }
+
+    if (block->max_length < newsize) {
+        error_setg_errno(errp, EINVAL,
+                         "Length too large: %s: 0x" RAM_ADDR_FMT
+                         " > 0x" RAM_ADDR_FMT, block->idstr,
+                         newsize, block->max_length);
+        return -EINVAL;
+    }
+
+    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
+    block->used_length = newsize;
+    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
+    memory_region_set_size(block->mr, newsize);
+    if (block->resized) {
+        block->resized(block->idstr, newsize, block->host);
+    }
+    return 0;
+}
+
 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
 {
     RAMBlock *block;
@@ -1305,13 +1353,14 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
 
     /* This assumes the iothread lock is taken here too. */
     qemu_mutex_lock_ramlist();
-    new_block->offset = find_ram_offset(new_block->length);
+    new_block->offset = find_ram_offset(new_block->max_length);
 
     if (!new_block->host) {
         if (xen_enabled()) {
-            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
+            xen_ram_alloc(new_block->offset, new_block->max_length,
+                          new_block->mr);
         } else {
-            new_block->host = phys_mem_alloc(new_block->length,
+            new_block->host = phys_mem_alloc(new_block->max_length,
                                              &new_block->mr->align);
             if (!new_block->host) {
                 error_setg_errno(errp, errno,
@@ -1320,13 +1369,13 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                 qemu_mutex_unlock_ramlist();
                 return -1;
             }
-            memory_try_enable_merging(new_block->host, new_block->length);
+            memory_try_enable_merging(new_block->host, new_block->max_length);
         }
     }
 
     /* Keep the list sorted from biggest to smallest block. */
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (block->length < new_block->length) {
+        if (block->max_length < new_block->max_length) {
             break;
         }
     }
@@ -1350,14 +1399,15 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                                                old_ram_size, new_ram_size);
         }
     }
-    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
+    cpu_physical_memory_set_dirty_range(new_block->offset,
+                                        new_block->used_length);
 
-    qemu_ram_setup_dump(new_block->host, new_block->length);
-    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
-    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
+    qemu_ram_setup_dump(new_block->host, new_block->max_length);
+    qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
+    qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
 
     if (kvm_enabled()) {
-        kvm_setup_guest_memory(new_block->host, new_block->length);
+        kvm_setup_guest_memory(new_block->host, new_block->max_length);
     }
 
     return new_block->offset;
@@ -1391,7 +1441,8 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
-    new_block->length = size;
+    new_block->used_length = size;
+    new_block->max_length = size;
     new_block->flags = share ? RAM_SHARED : 0;
     new_block->host = file_ram_alloc(new_block, size,
                                      mem_path, errp);
@@ -1410,7 +1461,12 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
 }
 #endif
 
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+static
+ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
+                                   void (*resized)(const char*,
+                                                   uint64_t length,
+                                                   void *host),
+                                   void *host, bool resizeable,
                                    MemoryRegion *mr, Error **errp)
 {
     RAMBlock *new_block;
@@ -1418,14 +1474,21 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     Error *local_err = NULL;
 
     size = TARGET_PAGE_ALIGN(size);
+    max_size = TARGET_PAGE_ALIGN(max_size);
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
-    new_block->length = size;
+    new_block->resized = resized;
+    new_block->used_length = size;
+    new_block->max_length = max_size;
+    assert(max_size >= size);
     new_block->fd = -1;
     new_block->host = host;
     if (host) {
         new_block->flags |= RAM_PREALLOC;
     }
+    if (resizeable) {
+        new_block->flags |= RAM_RESIZEABLE;
+    }
     addr = ram_block_add(new_block, &local_err);
     if (local_err) {
         g_free(new_block);
@@ -1435,9 +1498,24 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     return addr;
 }
 
+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+                                   MemoryRegion *mr, Error **errp)
+{
+    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
+}
+
 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
 {
-    return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
+    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
+}
+
+ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
+                                     void (*resized)(const char*,
+                                                     uint64_t length,
+                                                     void *host),
+                                     MemoryRegion *mr, Error **errp)
+{
+    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
 }
 
 void qemu_ram_free_from_ptr(ram_addr_t addr)
@@ -1475,11 +1553,11 @@ void qemu_ram_free(ram_addr_t addr)
                 xen_invalidate_map_cache_entry(block->host);
 #ifndef _WIN32
             } else if (block->fd >= 0) {
-                munmap(block->host, block->length);
+                munmap(block->host, block->max_length);
                 close(block->fd);
 #endif
             } else {
-                qemu_anon_ram_free(block->host, block->length);
+                qemu_anon_ram_free(block->host, block->max_length);
             }
             g_free(block);
             break;
@@ -1499,7 +1577,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         offset = addr - block->offset;
-        if (offset < block->length) {
+        if (offset < block->max_length) {
             vaddr = ramblock_ptr(block, offset);
             if (block->flags & RAM_PREALLOC) {
                 ;
@@ -1575,7 +1653,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
             return xen_map_cache(addr, 0, 0);
         } else if (block->host == NULL) {
            block->host =
-                xen_map_cache(block->offset, block->length, 1);
+                xen_map_cache(block->offset, block->max_length, 1);
         }
     }
     return ramblock_ptr(block, addr - block->offset);
@@ -1594,9 +1672,9 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
         RAMBlock *block;
 
         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-            if (addr - block->offset < block->length) {
-                if (addr - block->offset + *size > block->length)
-                    *size = block->length - addr + block->offset;
+            if (addr - block->offset < block->max_length) {
+                if (addr - block->offset + *size > block->max_length)
+                    *size = block->max_length - addr + block->offset;
                 return ramblock_ptr(block, addr - block->offset);
             }
         }
@@ -1619,7 +1697,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
     }
 
     block = ram_list.mru_block;
-    if (block && block->host && host - block->host < block->length) {
+    if (block && block->host && host - block->host < block->max_length) {
         goto found;
     }
 
@@ -1628,7 +1706,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
         if (block->host == NULL) {
             continue;
         }
-        if (host - block->host < block->length) {
+        if (host - block->host < block->max_length) {
             goto found;
         }
     }
@@ -2882,7 +2960,7 @@ void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     RAMBlock *block;
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        func(block->host, block->offset, block->length, opaque);
+        func(block->host, block->offset, block->used_length, opaque);
     }
 }
 #endif
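
The comment above qemu_ram_resize() in the hunk at line 1301 leaves it to the resize callback to update device state. A hedged sketch of what such a callback might look like on the device side follows; MyRomState and my_rom_resized are hypothetical names, not part of this series.

/* Hypothetical device-side resize callback; MyRomState and my_rom_resized
 * are illustrative only.  By the time this runs, qemu_ram_resize() has
 * already updated the block's used_length, the dirty bitmap and the memory
 * region size, so the device only has to adopt the new length. */
typedef struct MyRomState {
    uint64_t size;   /* length the device believes is in use */
    void *host;      /* host pointer into the RAM block */
} MyRomState;

static MyRomState my_rom;

static void my_rom_resized(const char *id, uint64_t length, void *host)
{
    /* Only legal before the guest may have observed the size, i.e. on
     * incoming migration or right after reset (see the comment above
     * qemu_ram_resize() in the diff). */
    my_rom.size = length;
    my_rom.host = host;
}
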