| author | Gu Zheng <guz.fnst@cn.fujitsu.com> | 2016-05-12 09:18:13 +0530 |
|---|---|---|
| committer | David Gibson <david@gibson.dropbear.id.au> | 2016-05-30 14:03:59 +1000 |
| commit | 4c055ab54fae39b6329c57bcb5334d59b920463e (patch) | |
| tree | d1bb4c50138f6283bda97737937c32733f943414 /kvm-all.c | |
| parent | 9dfeca7c6b1d3a8f36531bbbac0322a9907bcd86 (diff) | |
cpu: Reclaim vCPU objects
KVM vCPUs cannot be safely removed once they have been created, so on CPU unplug
we do not close the KVM vCPU fd. Instead, we record it in a list of parked vCPUs
and mark it as stopped, so that it can be reused for a subsequent CPU hot-add
request if possible. This is also the approach the KVM developers suggested:
https://www.mail-archive.com/kvm@vger.kernel.org/msg102839.html
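To make the parking idea concrete, here is a minimal, self-contained sketch of the mechanism. It is not the QEMU code itself: the helper names park_vcpu_fd() and get_vcpu_fd() are hypothetical, and the BSD <sys/queue.h> LIST macros stand in for QEMU's QLIST.

    /* Sketch of fd parking: an unplugged vCPU's fd is remembered, not closed,
     * and handed back on a later hot-add for the same vCPU id. */
    #include <stdlib.h>
    #include <sys/queue.h>

    struct parked_vcpu {
        unsigned long vcpu_id;           /* architectural vCPU id */
        int fd;                          /* open KVM vCPU fd, never closed */
        LIST_ENTRY(parked_vcpu) node;
    };

    static LIST_HEAD(, parked_vcpu) parked_vcpus =
        LIST_HEAD_INITIALIZER(parked_vcpus);

    /* On unplug: remember the fd instead of closing it. */
    static void park_vcpu_fd(unsigned long vcpu_id, int fd)
    {
        struct parked_vcpu *p = calloc(1, sizeof(*p));
        p->vcpu_id = vcpu_id;
        p->fd = fd;
        LIST_INSERT_HEAD(&parked_vcpus, p, node);
    }

    /* On hot-add: reuse a parked fd for this vcpu_id if one exists;
     * otherwise return -1 so the caller issues KVM_CREATE_VCPU. */
    static int get_vcpu_fd(unsigned long vcpu_id)
    {
        struct parked_vcpu *p;

        LIST_FOREACH(p, &parked_vcpus, node) {
            if (p->vcpu_id == vcpu_id) {
                int fd = p->fd;
                LIST_REMOVE(p, node);
                free(p);
                return fd;
            }
        }
        return -1;
    }

This mirrors the kvm_destroy_vcpu()/kvm_get_vcpu() pair added by the patch below, minus the kvm_run unmapping and the real KVM ioctl fallback.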
Signed-off-by: Chen Fan <chen.fan.fnst@cn.fujitsu.com>
Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
[- Explicit CPU_REMOVE() from qemu_kvm/tcg_destroy_vcpu()
isn't needed as it is done from cpu_exec_exit()
- Use iothread mutex instead of global mutex during
destroy
- Don't cleanup vCPU object from vCPU thread context
but leave it to the callers (device_add/device_del)]
Reviewed-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
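The bracketed notes above describe a deliberate split: the vCPU thread only tears down (or parks) its own KVM resources, while freeing the CPU object is left to the device_add/device_del caller, serialized by the iothread mutex rather than a dedicated lock. A generic pthread sketch of that "thread tears down its state, caller frees the object" pattern follows; all names here are hypothetical, this is not QEMU code, and a real vCPU loop would block rather than spin.

    /* Generic "worker tears down its state, controller frees the object" pattern. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct vcpu {
        pthread_t thread;
        bool unplug_requested;       /* set by the controller under big_lock */
    };

    /* Stands in for the iothread mutex. */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *vcpu_thread_fn(void *opaque)
    {
        struct vcpu *cpu = opaque;

        for (;;) {
            pthread_mutex_lock(&big_lock);
            bool unplug = cpu->unplug_requested;
            pthread_mutex_unlock(&big_lock);
            if (unplug) {
                /* Tear down or park thread-local resources here,
                 * but never free(cpu) from inside its own thread. */
                break;
            }
            /* ... run guest code; a real loop would block, not spin ... */
        }
        return NULL;
    }

    /* device_del path: request unplug, wait for the thread, then free. */
    static void unplug_vcpu(struct vcpu *cpu)
    {
        pthread_mutex_lock(&big_lock);
        cpu->unplug_requested = true;
        pthread_mutex_unlock(&big_lock);

        pthread_join(cpu->thread, NULL);
        free(cpu);                   /* cleanup happens in the caller's context */
    }

    int main(void)
    {
        struct vcpu *cpu = calloc(1, sizeof(*cpu));
        pthread_create(&cpu->thread, NULL, vcpu_thread_fn, cpu);
        unplug_vcpu(cpu);
        return 0;
    }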
Diffstat (limited to 'kvm-all.c')
-rw-r--r-- | kvm-all.c | 57 |
1 file changed, 56 insertions(+), 1 deletion(-)
@@ -61,6 +61,12 @@
 
 #define KVM_MSI_HASHTAB_SIZE    256
 
+struct KVMParkedVcpu {
+    unsigned long vcpu_id;
+    int kvm_fd;
+    QLIST_ENTRY(KVMParkedVcpu) node;
+};
+
 struct KVMState
 {
     AccelState parent_obj;
@@ -94,6 +100,7 @@ struct KVMState
     QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 #endif
     KVMMemoryListener memory_listener;
+    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 };
 
 KVMState *kvm_state;
@@ -237,6 +244,53 @@ static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
     return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 }
 
+int kvm_destroy_vcpu(CPUState *cpu)
+{
+    KVMState *s = kvm_state;
+    long mmap_size;
+    struct KVMParkedVcpu *vcpu = NULL;
+    int ret = 0;
+
+    DPRINTF("kvm_destroy_vcpu\n");
+
+    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
+    if (mmap_size < 0) {
+        ret = mmap_size;
+        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
+        goto err;
+    }
+
+    ret = munmap(cpu->kvm_run, mmap_size);
+    if (ret < 0) {
+        goto err;
+    }
+
+    vcpu = g_malloc0(sizeof(*vcpu));
+    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
+    vcpu->kvm_fd = cpu->kvm_fd;
+    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
+err:
+    return ret;
+}
+
+static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
+{
+    struct KVMParkedVcpu *cpu;
+
+    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
+        if (cpu->vcpu_id == vcpu_id) {
+            int kvm_fd;
+
+            QLIST_REMOVE(cpu, node);
+            kvm_fd = cpu->kvm_fd;
+            g_free(cpu);
+            return kvm_fd;
+        }
+    }
+
+    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
+}
+
 int kvm_init_vcpu(CPUState *cpu)
 {
     KVMState *s = kvm_state;
@@ -245,7 +299,7 @@ int kvm_init_vcpu(CPUState *cpu)
 
     DPRINTF("kvm_init_vcpu\n");
 
-    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
+    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
     if (ret < 0) {
         DPRINTF("kvm_create_vcpu failed\n");
         goto err;
@@ -1501,6 +1555,7 @@ static int kvm_init(MachineState *ms)
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_INIT(&s->kvm_sw_breakpoints);
 #endif
+    QLIST_INIT(&s->kvm_parked_vcpus);
     s->vmfd = -1;
     s->fd = qemu_open("/dev/kvm", O_RDWR);
     if (s->fd == -1) {
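For context on the munmap() in kvm_destroy_vcpu(): the kvm_run control block is mmap'ed from the vCPU fd, so the mapping is dropped on unplug while the fd itself stays open; when the parked fd is later handed back by kvm_get_vcpu(), kvm_init_vcpu() maps kvm_run again. A standalone sketch of that fd/mmap lifecycle against the raw KVM ioctls follows (assumes a host with /dev/kvm; error handling omitted).

    /* Standalone sketch of the vCPU fd / kvm_run lifecycle this patch relies on.
     * Error handling is trimmed and no guest code is actually run. */
    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu_fd = ioctl(vm, KVM_CREATE_VCPU, 0);       /* vCPU id 0 */

        long mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu_fd, 0);

        /* "Unplug": drop the kvm_run mapping but keep vcpu_fd open (parked). */
        munmap(run, mmap_size);

        /* "Hot-add" later: reuse the same fd and simply map kvm_run again. */
        run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, vcpu_fd, 0);
        printf("reused vcpu fd %d, kvm_run at %p\n", vcpu_fd, (void *)run);

        munmap(run, mmap_size);
        close(vcpu_fd);
        close(vm);
        close(kvm);
        return 0;
    }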