author     Anthony Liguori <anthony@codemonkey.ws>    2013-08-29 17:21:51 -0500
committer  Anthony Liguori <anthony@codemonkey.ws>    2013-08-29 17:21:51 -0500
commit     b5d54bd42158b90b239bb6ce9c13072eb3a53fd2 (patch)
tree       a4c6db6a9ee012c4873ff7dee67e2e63b32aa7b6 /target-i386
parent     e560992f21437380857ae490c907810d99459df5 (diff)
parent     821c808bd1863efc2c1e977800ae77db633a185c (diff)
Merge remote-tracking branch 'qemu-kvm/uq/master' into stable-1.5
* qemu-kvm/uq/master:
kvm-stub: fix compilation
kvm: shorten the parameter list for get_real_device()
kvm: i386: fix LAPIC TSC deadline timer save/restore
kvm-all.c: max_cpus should not exceed KVM vcpu limit
kvm: Simplify kvm_handle_io
kvm: x86: fix setting IA32_FEATURE_CONTROL with nested VMX disabled
kvm: add KVM_IRQFD_FLAG_RESAMPLE support
kvm: migrate vPMU state
target-i386: remove tabs from target-i386/cpu.h
Initialize IA32_FEATURE_CONTROL MSR in reset and migration
Conflicts:
target-i386/cpu.h
target-i386/kvm.c
aliguori: fixup trivial conflicts due to whitespace and added cpu
argument
Signed-off-by: Anthony Liguori <anthony@codemonkey.ws>
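
Background note (not part of the commit message): two of the merged patches deal with MSR_IA32_FEATURE_CONTROL, which is only meaningful when the CPU supports VMX or SMX, so the kvm.c hunk below gates the MSR on guest CPUID leaf 1 ECX. A stand-alone C sketch of that test follows; the bit positions come from the Intel SDM, and the needs_feature_control() helper and main() driver are illustrative, not taken from the patch.

/* Stand-alone sketch (not QEMU source): save/restore of
 * MSR_IA32_FEATURE_CONTROL is only enabled when guest CPUID leaf 1 ECX
 * advertises VMX (bit 5) or SMX (bit 6).  QEMU's CPUID_EXT_VMX and
 * CPUID_EXT_SMX encode these same bits. */
#include <stdbool.h>
#include <stdio.h>

#define CPUID_EXT_VMX (1u << 5)   /* CPUID.1:ECX.VMX */
#define CPUID_EXT_SMX (1u << 6)   /* CPUID.1:ECX.SMX */

static bool needs_feature_control(unsigned int cpuid_1_ecx)
{
    /* Same condition as kvm_arch_init_vcpu() in the diff below. */
    return (cpuid_1_ecx & CPUID_EXT_VMX) || (cpuid_1_ecx & CPUID_EXT_SMX);
}

int main(void)
{
    printf("ecx=0x20 -> %d\n", needs_feature_control(0x20)); /* VMX set: 1 */
    printf("ecx=0x00 -> %d\n", needs_feature_control(0x00)); /* neither: 0 */
    return 0;
}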
Diffstat (limited to 'target-i386')
-rw-r--r--   target-i386/cpu.h     | 181
-rw-r--r--   target-i386/kvm.c     | 139
-rw-r--r--   target-i386/machine.c |  66
3 files changed, 300 insertions(+), 86 deletions(-)
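
The vPMU patch ("kvm: migrate vPMU state") sizes the saved state from CPUID leaf 0x0A: bits 7:0 of EAX give the architectural PMU version and bits 15:8 the number of general-purpose counters, capped at MAX_GP_COUNTERS, as the kvm_arch_init_vcpu() hunk below shows. A minimal host-side sketch of the same decoding, assuming an x86 host and GCC/Clang's <cpuid.h>; this program is illustrative and not part of the commit.

/* Decode CPUID leaf 0x0A the same way kvm_arch_init_vcpu() below does,
 * to find the architectural-PMU version and the number of
 * general-purpose counters on the host. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 0x0A not available");
        return 1;
    }
    unsigned int version     = eax & 0xff;        /* PMU version, 0 = none */
    unsigned int gp_counters = (eax >> 8) & 0xff; /* GP counter count      */
    printf("PMU version %u, %u general-purpose counters\n",
           version, gp_counters);
    return 0;
}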
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 8a3d0fda32..5723eff9a8 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -37,9 +37,9 @@
 #define TARGET_HAS_ICE 1
 
 #ifdef TARGET_X86_64
-#define ELF_MACHINE EM_X86_64
+#define ELF_MACHINE     EM_X86_64
 #else
-#define ELF_MACHINE EM_386
+#define ELF_MACHINE     EM_386
 #endif
 
 #define CPUArchState struct CPUX86State
@@ -98,10 +98,10 @@
 #define DESC_TSS_BUSY_MASK (1 << 9)
 
 /* eflags masks */
-#define CC_C 0x0001
-#define CC_P 0x0004
-#define CC_A 0x0010
-#define CC_Z 0x0040
+#define CC_C    0x0001
+#define CC_P    0x0004
+#define CC_A    0x0010
+#define CC_Z    0x0040
 #define CC_S    0x0080
 #define CC_O    0x0800
 
@@ -109,14 +109,14 @@
 #define IOPL_SHIFT 12
 #define VM_SHIFT   17
 
-#define TF_MASK 0x00000100
-#define IF_MASK 0x00000200
-#define DF_MASK 0x00000400
-#define IOPL_MASK 0x00003000
-#define NT_MASK 0x00004000
-#define RF_MASK 0x00010000
-#define VM_MASK 0x00020000
-#define AC_MASK 0x00040000
+#define TF_MASK                 0x00000100
+#define IF_MASK                 0x00000200
+#define DF_MASK                 0x00000400
+#define IOPL_MASK               0x00003000
+#define NT_MASK                 0x00004000
+#define RF_MASK                 0x00010000
+#define VM_MASK                 0x00020000
+#define AC_MASK                 0x00040000
 #define VIF_MASK                0x00080000
 #define VIP_MASK                0x00100000
 #define ID_MASK                 0x00200000
@@ -238,28 +238,28 @@
 #define DR7_TYPE_IO_RW   0x2
 #define DR7_TYPE_DATA_RW 0x3
 
-#define PG_PRESENT_BIT 0
-#define PG_RW_BIT 1
-#define PG_USER_BIT 2
-#define PG_PWT_BIT 3
-#define PG_PCD_BIT 4
-#define PG_ACCESSED_BIT 5
-#define PG_DIRTY_BIT 6
-#define PG_PSE_BIT 7
-#define PG_GLOBAL_BIT 8
-#define PG_NX_BIT 63
+#define PG_PRESENT_BIT  0
+#define PG_RW_BIT       1
+#define PG_USER_BIT     2
+#define PG_PWT_BIT      3
+#define PG_PCD_BIT      4
+#define PG_ACCESSED_BIT 5
+#define PG_DIRTY_BIT    6
+#define PG_PSE_BIT      7
+#define PG_GLOBAL_BIT   8
+#define PG_NX_BIT       63
 
 #define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
-#define PG_RW_MASK (1 << PG_RW_BIT)
-#define PG_USER_MASK (1 << PG_USER_BIT)
-#define PG_PWT_MASK (1 << PG_PWT_BIT)
-#define PG_PCD_MASK (1 << PG_PCD_BIT)
+#define PG_RW_MASK       (1 << PG_RW_BIT)
+#define PG_USER_MASK     (1 << PG_USER_BIT)
+#define PG_PWT_MASK      (1 << PG_PWT_BIT)
+#define PG_PCD_MASK      (1 << PG_PCD_BIT)
 #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
-#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
-#define PG_PSE_MASK (1 << PG_PSE_BIT)
-#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
+#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
+#define PG_PSE_MASK      (1 << PG_PSE_BIT)
+#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
 #define PG_HI_USER_MASK  0x7ff0000000000000LL
-#define PG_NX_MASK (1LL << PG_NX_BIT)
+#define PG_NX_MASK       (1LL << PG_NX_BIT)
 
 #define PG_ERROR_W_BIT     1
 
@@ -269,45 +269,48 @@
 #define PG_ERROR_RSVD_MASK 0x08
 #define PG_ERROR_I_D_MASK  0x10
 
-#define MCG_CTL_P (1ULL<<8)   /* MCG_CAP register available */
-#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_CTL_P       (1ULL<<8)   /* MCG_CAP register available */
+#define MCG_SER_P       (1ULL<<24)  /* MCA recovery/new status bits */
 
-#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
-#define MCE_BANKS_DEF 10
+#define MCE_CAP_DEF     (MCG_CTL_P|MCG_SER_P)
+#define MCE_BANKS_DEF   10
 
-#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
-#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
-#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
+#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
 
-#define MCI_STATUS_VAL (1ULL<<63)  /* valid error */
-#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
-#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
-#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
-#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
-#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
-#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
+#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
+#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
+#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
+#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
+#define MCI_STATUS_S     (1ULL<<56)  /* Signaled machine check */
+#define MCI_STATUS_AR    (1ULL<<55)  /* Action required */
 
 /* MISC register defines */
-#define MCM_ADDR_SEGOFF 0 /* segment offset */
-#define MCM_ADDR_LINEAR 1 /* linear address */
-#define MCM_ADDR_PHYS 2 /* physical address */
-#define MCM_ADDR_MEM 3 /* memory address */
-#define MCM_ADDR_GENERIC 7 /* generic */
+#define MCM_ADDR_SEGOFF  0      /* segment offset */
+#define MCM_ADDR_LINEAR  1      /* linear address */
+#define MCM_ADDR_PHYS    2      /* physical address */
+#define MCM_ADDR_MEM     3      /* memory address */
+#define MCM_ADDR_GENERIC 7      /* generic */
 
 #define MSR_IA32_TSC                    0x10
 #define MSR_IA32_APICBASE               0x1b
 #define MSR_IA32_APICBASE_BSP           (1<<8)
 #define MSR_IA32_APICBASE_ENABLE        (1<<11)
 #define MSR_IA32_APICBASE_BASE          (0xfffff<<12)
+#define MSR_IA32_FEATURE_CONTROL        0x0000003a
 #define MSR_TSC_ADJUST                  0x0000003b
 #define MSR_IA32_TSCDEADLINE            0x6e0
 
-#define MSR_MTRRcap 0xfe
-#define MSR_MTRRcap_VCNT 8
-#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
-#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
+#define MSR_P6_PERFCTR0                 0xc1
+
+#define MSR_MTRRcap                     0xfe
+#define MSR_MTRRcap_VCNT                8
+#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
+#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)
 
 #define MSR_IA32_SYSENTER_CS            0x174
 #define MSR_IA32_SYSENTER_ESP           0x175
@@ -317,35 +320,45 @@
 #define MSR_MCG_STATUS                  0x17a
 #define MSR_MCG_CTL                     0x17b
 
+#define MSR_P6_EVNTSEL0                 0x186
+
 #define MSR_IA32_PERF_STATUS            0x198
 
-#define MSR_IA32_MISC_ENABLE 0x1a0
+#define MSR_IA32_MISC_ENABLE            0x1a0
 /* Indicates good rep/movs microcode on some processors: */
 #define MSR_IA32_MISC_ENABLE_DEFAULT    1
 
-#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
-#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
-
-#define MSR_MTRRfix64K_00000 0x250
-#define MSR_MTRRfix16K_80000 0x258
-#define MSR_MTRRfix16K_A0000 0x259
-#define MSR_MTRRfix4K_C0000 0x268
-#define MSR_MTRRfix4K_C8000 0x269
-#define MSR_MTRRfix4K_D0000 0x26a
-#define MSR_MTRRfix4K_D8000 0x26b
-#define MSR_MTRRfix4K_E0000 0x26c
-#define MSR_MTRRfix4K_E8000 0x26d
-#define MSR_MTRRfix4K_F0000 0x26e
-#define MSR_MTRRfix4K_F8000 0x26f
+#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)
+
+#define MSR_MTRRfix64K_00000            0x250
+#define MSR_MTRRfix16K_80000            0x258
+#define MSR_MTRRfix16K_A0000            0x259
+#define MSR_MTRRfix4K_C0000             0x268
+#define MSR_MTRRfix4K_C8000             0x269
+#define MSR_MTRRfix4K_D0000             0x26a
+#define MSR_MTRRfix4K_D8000             0x26b
+#define MSR_MTRRfix4K_E0000             0x26c
+#define MSR_MTRRfix4K_E8000             0x26d
+#define MSR_MTRRfix4K_F0000             0x26e
+#define MSR_MTRRfix4K_F8000             0x26f
 
 #define MSR_PAT                         0x277
 
-#define MSR_MTRRdefType 0x2ff
+#define MSR_MTRRdefType                 0x2ff
+
+#define MSR_CORE_PERF_FIXED_CTR0        0x309
+#define MSR_CORE_PERF_FIXED_CTR1        0x30a
+#define MSR_CORE_PERF_FIXED_CTR2        0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL    0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS     0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL       0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL   0x390
 
-#define MSR_MC0_CTL 0x400
-#define MSR_MC0_STATUS 0x401
-#define MSR_MC0_ADDR 0x402
-#define MSR_MC0_MISC 0x403
+#define MSR_MC0_CTL                     0x400
+#define MSR_MC0_STATUS                  0x401
+#define MSR_MC0_ADDR                    0x402
+#define MSR_MC0_MISC                    0x403
 
 #define MSR_EFER                        0xc0000080
@@ -724,6 +737,9 @@ typedef struct {
 #define CPU_NB_REGS CPU_NB_REGS32
 #endif
 
+#define MAX_FIXED_COUNTERS 3
+#define MAX_GP_COUNTERS    (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
+
 #define NB_MMU_MODES 3
 
 typedef enum TPRAccess {
@@ -817,6 +833,15 @@ typedef struct CPUX86State {
     uint64_t mcg_status;
     uint64_t msr_ia32_misc_enable;
+    uint64_t msr_ia32_feature_control;
+
+    uint64_t msr_fixed_ctr_ctrl;
+    uint64_t msr_global_ctrl;
+    uint64_t msr_global_status;
+    uint64_t msr_global_ovf_ctrl;
+    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
+    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
+    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
 
     /* exception/interrupt handling */
     int error_code;
@@ -1089,7 +1114,7 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
 #define cpu_gen_code cpu_x86_gen_code
 #define cpu_signal_handler cpu_x86_signal_handler
 #define cpu_list x86_cpu_list
-#define cpudef_setup  x86_cpudef_setup
+#define cpudef_setup x86_cpudef_setup
 
 /* MMU modes definitions */
 #define MMU_MODE0_SUFFIX _kernel
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 0b6eb0155c..749aa09a21 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -65,12 +65,16 @@ static bool has_msr_star;
 static bool has_msr_hsave_pa;
 static bool has_msr_tsc_adjust;
 static bool has_msr_tsc_deadline;
+static bool has_msr_feature_control;
 static bool has_msr_async_pf_en;
 static bool has_msr_pv_eoi_en;
 static bool has_msr_misc_enable;
 static bool has_msr_kvm_steal_time;
 static int lm_capable_kernel;
 
+static bool has_msr_architectural_pmu;
+static uint32_t num_architectural_pmu_counters;
+
 bool kvm_allows_irq0_override(void)
 {
     return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
@@ -597,6 +601,25 @@
             break;
         }
     }
+
+    if (limit >= 0x0a) {
+        uint32_t ver;
+
+        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
+        if ((ver & 0xff) > 0) {
+            has_msr_architectural_pmu = true;
+            num_architectural_pmu_counters = (ver & 0xff00) >> 8;
+
+            /* Shouldn't be more than 32, since that's the number of bits
+             * available in EBX to tell us _which_ counters are available.
+             * Play it safe.
+             */
+            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
+                num_architectural_pmu_counters = MAX_GP_COUNTERS;
+            }
+        }
+    }
+
     cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
 
     for (i = 0x80000000; i <= limit; i++) {
@@ -660,6 +683,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
 
     qemu_add_vm_change_state_handler(cpu_update_state, env);
 
+    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
+    if (c) {
+        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
+                                  !!(c->ecx & CPUID_EXT_SMX);
+    }
+
     cpuid_data.cpuid.padding = 0;
     r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
     if (r) {
@@ -1060,6 +1089,26 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
     entry->data = value;
 }
 
+static int kvm_put_tscdeadline_msr(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    struct {
+        struct kvm_msrs info;
+        struct kvm_msr_entry entries[1];
+    } msr_data;
+    struct kvm_msr_entry *msrs = msr_data.entries;
+
+    if (!has_msr_tsc_deadline) {
+        return 0;
+    }
+
+    kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+
+    msr_data.info.nmsrs = 1;
+
+    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+}
+
 static int kvm_put_msrs(X86CPU *cpu, int level)
 {
     CPUX86State *env = &cpu->env;
@@ -1068,7 +1117,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         struct kvm_msr_entry entries[100];
     } msr_data;
     struct kvm_msr_entry *msrs = msr_data.entries;
-    int n = 0;
+    int n = 0, i;
 
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
@@ -1083,9 +1132,6 @@
     if (has_msr_tsc_adjust) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
    }
-    if (has_msr_tsc_deadline) {
-        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
-    }
     if (has_msr_misc_enable) {
         kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                           env->msr_ia32_misc_enable);
@@ -1110,9 +1156,8 @@
         }
     }
     /*
-     * The following paravirtual MSRs have side effects on the guest or are
-     * too heavy for normal writeback. Limit them to reset or full state
-     * updates.
+     * The following MSRs have side effects on the guest or are too heavy
+     * for normal writeback. Limit them to reset or full state updates.
      */
     if (level >= KVM_PUT_RESET_STATE) {
         kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
@@ -1130,6 +1175,33 @@
             kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
                               env->steal_time_msr);
         }
+        if (has_msr_architectural_pmu) {
+            /* Stop the counter.  */
+            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+            /* Set the counter values.  */
+            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+                kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
+                                  env->msr_fixed_counters[i]);
+            }
+            for (i = 0; i < num_architectural_pmu_counters; i++) {
+                kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
+                                  env->msr_gp_counters[i]);
+                kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
+                                  env->msr_gp_evtsel[i]);
+            }
+            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
+                              env->msr_global_status);
+            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+                              env->msr_global_ovf_ctrl);
+
+            /* Now start the PMU.  */
+            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
+                              env->msr_fixed_ctr_ctrl);
+            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
+                              env->msr_global_ctrl);
+        }
         if (hyperv_hypercall_available(cpu)) {
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID, 0);
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL, 0);
@@ -1137,6 +1209,10 @@
         if (cpu->hyperv_vapic) {
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE, 0);
         }
+        if (has_msr_feature_control) {
+            kvm_msr_entry_set(&msrs[n++], MSR_IA32_FEATURE_CONTROL,
+                              env->msr_ia32_feature_control);
+        }
     }
     if (env->mcg_cap) {
         int i;
@@ -1361,6 +1437,9 @@
     if (has_msr_misc_enable) {
         msrs[n++].index = MSR_IA32_MISC_ENABLE;
     }
+    if (has_msr_feature_control) {
+        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
+    }
 
     if (!env->tsc_valid) {
         msrs[n++].index = MSR_IA32_TSC;
@@ -1386,6 +1465,19 @@
     if (has_msr_kvm_steal_time) {
         msrs[n++].index = MSR_KVM_STEAL_TIME;
     }
+    if (has_msr_architectural_pmu) {
+        msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
+        msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
+        msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
+        msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+            msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
+        }
+        for (i = 0; i < num_architectural_pmu_counters; i++) {
+            msrs[n++].index = MSR_P6_PERFCTR0 + i;
+            msrs[n++].index = MSR_P6_EVNTSEL0 + i;
+        }
+    }
 
     if (env->mcg_cap) {
         msrs[n++].index = MSR_MCG_STATUS;
@@ -1402,7 +1494,8 @@
     }
 
     for (i = 0; i < ret; i++) {
-        switch (msrs[i].index) {
+        uint32_t index = msrs[i].index;
+        switch (index) {
         case MSR_IA32_SYSENTER_CS:
             env->sysenter_cs = msrs[i].data;
             break;
@@ -1459,6 +1552,9 @@
         case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
+        case MSR_IA32_FEATURE_CONTROL:
+            env->msr_ia32_feature_control = msrs[i].data;
+            break;
         default:
             if (msrs[i].index >= MSR_MC0_CTL &&
                 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
@@ -1474,6 +1570,27 @@
         case MSR_KVM_STEAL_TIME:
             env->steal_time_msr = msrs[i].data;
             break;
+        case MSR_CORE_PERF_FIXED_CTR_CTRL:
+            env->msr_fixed_ctr_ctrl = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_GLOBAL_CTRL:
+            env->msr_global_ctrl = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_GLOBAL_STATUS:
+            env->msr_global_status = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+            env->msr_global_ovf_ctrl = msrs[i].data;
+            break;
+        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
+            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
+            break;
+        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
+            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
+            break;
+        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
+            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
+            break;
         }
     }
 
@@ -1724,6 +1841,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
             return ret;
         }
     }
+
+    ret = kvm_put_tscdeadline_msr(x86_cpu);
+    if (ret < 0) {
+        return ret;
+    }
+
     ret = kvm_put_vcpu_events(x86_cpu, level);
     if (ret < 0) {
         return ret;
diff --git a/target-i386/machine.c b/target-i386/machine.c
index f9ec581faa..dc81cde535 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -435,6 +435,14 @@ static bool misc_enable_needed(void *opaque)
     return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
 }
 
+static bool feature_control_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_ia32_feature_control != 0;
+}
+
 static const VMStateDescription vmstate_msr_ia32_misc_enable = {
     .name = "cpu/msr_ia32_misc_enable",
     .version_id = 1,
@@ -446,6 +454,58 @@ static const VMStateDescription vmstate_msr_ia32_misc_enable = {
     }
 };
 
+static const VMStateDescription vmstate_msr_ia32_feature_control = {
+    .name = "cpu/msr_ia32_feature_control",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool pmu_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
+        env->msr_global_status || env->msr_global_ovf_ctrl) {
+        return true;
+    }
+    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+        if (env->msr_fixed_counters[i]) {
+            return true;
+        }
+    }
+    for (i = 0; i < MAX_GP_COUNTERS; i++) {
+        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_architectural_pmu = {
+    .name = "cpu/msr_architectural_pmu",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
+        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
+        VMSTATE_UINT64(env.msr_global_status, X86CPU),
+        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
+        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
+        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_x86_cpu = {
     .name = "cpu",
     .version_id = 12,
@@ -571,6 +631,12 @@ const VMStateDescription vmstate_x86_cpu = {
         }, {
             .vmsd = &vmstate_msr_ia32_misc_enable,
             .needed = misc_enable_needed,
+        }, {
+            .vmsd = &vmstate_msr_ia32_feature_control,
+            .needed = feature_control_needed,
+        }, {
+            .vmsd = &vmstate_msr_architectural_pmu,
+            .needed = pmu_enable_needed,
         } , {
             /* empty */
         }
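
One construct in the kvm_get_msrs() hunk above deserves a note: case ranges such as "case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:" are a GNU C extension (accepted by GCC and Clang, the compilers QEMU targets), not standard C. A self-contained illustration follows, with the relevant constants inlined from cpu.h above; the classify_msr() helper and main() driver are hypothetical.

/* Stand-alone sketch: "case lo ... hi" matches every value in the
 * inclusive range, equivalent to listing each case individually. */
#include <stdio.h>

static const char *classify_msr(unsigned int index)
{
    /* MSR_P6_PERFCTR0 = 0xc1 and MAX_GP_COUNTERS =
     * MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0 = 0x198 - 0x186 = 18,
     * as defined in the cpu.h hunk above. */
    enum { MSR_P6_PERFCTR0 = 0xc1, MAX_GP_COUNTERS = 0x198 - 0x186 };

    switch (index) {
    case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
        return "general-purpose PMU counter";
    default:
        return "other MSR";
    }
}

int main(void)
{
    printf("0xc1 -> %s\n", classify_msr(0xc1)); /* first GP counter */
    printf("0x10 -> %s\n", classify_msr(0x10)); /* MSR_IA32_TSC     */
    return 0;
}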