author    Chanho Park <chanho61.park@samsung.com>  2014-12-10 15:42:55 +0900
committer Chanho Park <chanho61.park@samsung.com>  2014-12-10 15:42:55 +0900
commit    0d6a2f7e595218b5632ba7005128470e65138951
tree      596b09930ef1538e6606450e2d8b88ec2e296a9b /target-i386
parent    16b1353a36171ae06d63fd309f4772dbfb1da113
Imported Upstream version 2.2.0 (tags: upstream/2.2.0, upstream/2.2.1)
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu-qom.h    |   5
-rw-r--r--  target-i386/cpu.c        | 169
-rw-r--r--  target-i386/cpu.h        |  74
-rw-r--r--  target-i386/fpu_helper.c |  21
-rw-r--r--  target-i386/gdbstub.c    |   2
-rw-r--r--  target-i386/helper.c     |  30
-rw-r--r--  target-i386/kvm.c        | 120
-rw-r--r--  target-i386/machine.c    |  91
-rw-r--r--  target-i386/seg_helper.c |  90
-rw-r--r--  target-i386/translate.c  |   6
10 files changed, 516 insertions(+), 92 deletions(-)
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index 71a1b97cf..b557b619c 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -92,6 +92,7 @@ typedef struct X86CPU {
bool enforce_cpuid;
bool expose_kvm;
bool migratable;
+ bool host_features;
/* if true the CPUID code directly forwards host cache leaves to the guest */
bool cache_info_passthrough;
@@ -129,6 +130,7 @@ extern struct VMStateDescription vmstate_x86_cpu;
* @cpu: vCPU the interrupt is to be handled by.
*/
void x86_cpu_do_interrupt(CPUState *cpu);
+bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
int cpuid, void *opaque);
@@ -150,4 +152,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void x86_cpu_exec_enter(CPUState *cpu);
+void x86_cpu_exec_exit(CPUState *cpu);
+
#endif
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 6d008ab5e..e9df33e5c 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -257,10 +257,10 @@ static const char *svm_feature_name[] = {
};
static const char *cpuid_7_0_ebx_feature_name[] = {
- "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
- "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
- NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
+ "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
+ NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
static const char *cpuid_apm_edx_feature_name[] = {
@@ -426,6 +426,12 @@ static const ExtSaveArea ext_save_areas[] = {
.offset = 0x3c0, .size = 0x40 },
[4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
.offset = 0x400, .size = 0x40 },
+ [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = 0x440, .size = 0x40 },
+ [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = 0x480, .size = 0x200 },
+ [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = 0x680, .size = 0x400 },
};
const char *get_register_name_32(unsigned int reg)
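The three new ExtSaveArea entries hard-code the XSAVE layout that hardware advertises through CPUID leaf 0xD: for state component n, subleaf n returns the component's size in EAX and its offset into the XSAVE area in EBX, which is where the 0x440/0x480/0x680 offsets for opmask, ZMM_Hi256 and Hi16_ZMM come from. A minimal standalone sketch of that enumeration (illustrative, not QEMU code; assumes GCC or Clang on an x86 host):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned eax, ebx, ecx, edx;
    unsigned n;

    for (n = 2; n <= 7; n++) {          /* AVX .. Hi16_ZMM components */
        __cpuid_count(0xD, n, eax, ebx, ecx, edx);
        if (eax != 0) {                 /* EAX = size, EBX = offset */
            printf("component %u: offset 0x%03x size 0x%x\n", n, ebx, eax);
        }
    }
    return 0;
}

On an AVX-512-capable machine this prints offset 0x440/size 0x40 for component 5, matching the table above.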
@@ -436,14 +442,6 @@ const char *get_register_name_32(unsigned int reg)
return x86_reg_info_32[reg].name;
}
-/* collects per-function cpuid data
- */
-typedef struct model_features_t {
- uint32_t *guest_feat;
- uint32_t *host_feat;
- FeatureWord feat_word;
-} model_features_t;
-
/* KVM-specific features that are automatically added to all CPU models
* when KVM is enabled.
*/
@@ -461,14 +459,21 @@ static uint32_t kvm_default_features[FEATURE_WORDS] = {
/* Features that are not added by default to any CPU model when KVM is enabled.
*/
static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
+ [FEAT_1_EDX] = CPUID_ACPI,
[FEAT_1_ECX] = CPUID_EXT_MONITOR,
+ [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
};
-void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
+void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
{
kvm_default_features[w] &= ~features;
}
+void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
+{
+ kvm_default_unset_features[w] &= ~features;
+}
+
/*
* Returns the set of feature flags that are supported and migratable by
* QEMU, for a given FeatureWord.
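The rename makes both directions of the KVM-compat machinery explicit: kvm_default_features lists bits that are auto-enabled, kvm_default_unset_features lists bits (now including ACPI and SVM) that are auto-disabled, and each has a helper so machine-type compat code can opt out. A hedged sketch of a call site (the function name and its placement in hw/i386/ are illustrative):

static void pc_compat_2_1(MachineState *machine)
{
    /* keep SVM visible in "-cpu host" on this older machine type
     * instead of letting it be auto-disabled */
    x86_cpu_compat_kvm_no_autodisable(FEAT_8000_0001_ECX, CPUID_EXT3_SVM);
}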
@@ -535,8 +540,8 @@ void host_cpuid(uint32_t function, uint32_t count,
* otherwise the string is assumed to be sized by a terminating nul.
* Return lexical ordering of *s1:*s2.
*/
-static int sstrcmp(const char *s1, const char *e1, const char *s2,
- const char *e2)
+static int sstrcmp(const char *s1, const char *e1,
+ const char *s2, const char *e2)
{
for (;;) {
if (!*s1 || !*s2 || *s1 != *s2)
@@ -592,7 +597,8 @@ static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
}
static void add_flagname_to_bitmaps(const char *flagname,
- FeatureWordArray words)
+ FeatureWordArray words,
+ Error **errp)
{
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
@@ -603,7 +609,7 @@ static void add_flagname_to_bitmaps(const char *flagname,
}
}
if (w == FEATURE_WORDS) {
- fprintf(stderr, "CPU feature %s not found\n", flagname);
+ error_setg(errp, "CPU feature %s not found", flagname);
}
}
@@ -679,10 +685,11 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 16,
.model = 2,
.stepping = 3,
+ /* Missing: CPUID_HT */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
- CPUID_PSE36 | CPUID_VME | CPUID_HT,
+ CPUID_PSE36 | CPUID_VME,
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
CPUID_EXT_POPCNT,
@@ -698,8 +705,9 @@ static X86CPUDefinition builtin_x86_defs[] = {
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
+ /* Missing: CPUID_SVM_LBRV */
.features[FEAT_SVM] =
- CPUID_SVM_NPT | CPUID_SVM_LBRV,
+ CPUID_SVM_NPT,
.xlevel = 0x8000001A,
.model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
},
@@ -710,15 +718,16 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 6,
.model = 15,
.stepping = 11,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
- CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
- CPUID_HT | CPUID_TM | CPUID_PBE,
+ CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
+ /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
+ * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
- CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
- CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
+ CPUID_EXT_CX16,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
.features[FEAT_8000_0001_ECX] =
@@ -793,13 +802,15 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 6,
.model = 14,
.stepping = 8,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
.features[FEAT_1_EDX] =
PPRO_FEATURES | CPUID_VME |
- CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
- CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
+ CPUID_SS,
+ /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
+ * CPUID_EXT_PDCM, CPUID_EXT_VMX */
.features[FEAT_1_ECX] =
- CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
- CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
.features[FEAT_8000_0001_EDX] =
CPUID_EXT2_NX,
.xlevel = 0x80000008,
@@ -872,14 +883,16 @@ static X86CPUDefinition builtin_x86_defs[] = {
.family = 6,
.model = 28,
.stepping = 2,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
.features[FEAT_1_EDX] =
PPRO_FEATURES |
- CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
- CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
+ CPUID_ACPI | CPUID_SS,
/* Some CPUs got no CPUID_SEP */
+ /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
+ * CPUID_EXT_XTPR */
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
- CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
CPUID_EXT_MOVBE,
.features[FEAT_8000_0001_EDX] =
(PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
@@ -1254,6 +1267,9 @@ void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
}
}
+static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ bool migratable_only);
+
#ifdef CONFIG_KVM
static int cpu_x86_fill_model_id(char *str)
@@ -1310,26 +1326,23 @@ static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
dc->props = host_x86_cpu_properties;
}
-static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
- bool migratable_only);
-
static void host_x86_cpu_initfn(Object *obj)
{
X86CPU *cpu = X86_CPU(obj);
CPUX86State *env = &cpu->env;
KVMState *s = kvm_state;
- FeatureWord w;
assert(kvm_enabled());
+ /* We can't fill the features array here because we don't know yet if
+ * "migratable" is true or false.
+ */
+ cpu->host_features = true;
+
env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
- for (w = 0; w < FEATURE_WORDS; w++) {
- env->features[w] =
- x86_cpu_get_supported_feature_word(w, cpu->migratable);
- }
object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
@@ -1716,9 +1729,9 @@ static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
if (value < min || value > max) {
error_setg(errp, "Property %s.%s doesn't take value %" PRId64
- " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
- object_get_typename(obj), name ? name : "null",
- value, min, max);
+ " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
+ object_get_typename(obj), name ? name : "null",
+ value, min, max);
return;
}
cpu->hyperv_spinlock_attempts = value;
@@ -1761,9 +1774,9 @@ static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
while (featurestr) {
char *val;
if (featurestr[0] == '+') {
- add_flagname_to_bitmaps(featurestr + 1, plus_features);
+ add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
} else if (featurestr[0] == '-') {
- add_flagname_to_bitmaps(featurestr + 1, minus_features);
+ add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
} else if ((val = strchr(featurestr, '='))) {
*val = 0; val++;
feat2prop(featurestr);
@@ -1808,8 +1821,8 @@ static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
}
if (numvalue < min) {
error_report("hv-spinlocks value shall always be >= 0x%x"
- ", fixup will be removed in future versions",
- min);
+ ", fixup will be removed in future versions",
+ min);
numvalue = min;
}
snprintf(num, sizeof(num), "%" PRId32, numvalue);
@@ -1828,6 +1841,13 @@ static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
featurestr = strtok(NULL, ",");
}
+ if (cpu->host_features) {
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ env->features[w] =
+ x86_cpu_get_supported_feature_word(w, cpu->migratable);
+ }
+ }
+
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] |= plus_features[w];
env->features[w] &= ~minus_features[w];
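With the lazy fill above, "-cpu host,+x,-y" resolves in a fixed order: the host-supported feature words are computed only here, once cpu->migratable has its final value, then explicit +flags are ORed in and -flags masked out, so a user request always beats the host default. A reduced sketch of that precedence for one feature word (illustrative, not QEMU code):

#include <assert.h>
#include <stdint.h>

static uint32_t resolve_word(uint32_t host, uint32_t plus, uint32_t minus)
{
    uint32_t w = host;   /* x86_cpu_get_supported_feature_word() result */
    w |= plus;           /* "+flag" requests */
    w &= ~minus;         /* "-flag" requests win over everything */
    return w;
}

int main(void)
{
    /* host supports bits 0-1; the user asked for +bit2 and -bit0 */
    assert(resolve_word(0x3, 0x4, 0x1) == 0x6);
    return 0;
}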
@@ -1839,7 +1859,7 @@ static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
* if flags, suppress names undefined in featureset.
*/
static void listflags(char *buf, int bufsize, uint32_t fbits,
- const char **featureset, uint32_t flags)
+ const char **featureset, uint32_t flags)
{
const char **p = &featureset[31];
char *q, *b, bit;
@@ -2572,7 +2592,7 @@ static void x86_cpu_reset(CPUState *s)
for (i = 0; i < 8; i++) {
env->fptags[i] = 1;
}
- env->fpuc = 0x37f;
+ cpu_set_fpuc(env, 0x37f);
env->mxcsr = 0x1f80;
env->xstate_bv = XSTATE_FP | XSTATE_SSE;
@@ -2588,6 +2608,16 @@ static void x86_cpu_reset(CPUState *s)
env->xcr0 = 1;
+ /*
+ * SDM 11.11.5 requires:
+ * - IA32_MTRR_DEF_TYPE MSR.E = 0
+ * - IA32_MTRR_PHYSMASKn.V = 0
+ * All other bits are undefined. For simplification, zero it all.
+ */
+ env->mtrr_deftype = 0;
+ memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+ memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
if (s->cpu_index == 0) {
@@ -2678,6 +2708,13 @@ static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
}
#endif
+
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
@@ -2685,6 +2722,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
CPUX86State *env = &cpu->env;
Error *local_err = NULL;
+ static bool ht_warned;
if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
env->cpuid_level = 7;
@@ -2693,9 +2731,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
* CPUID[1].EDX.
*/
- if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
- env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
- env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
+ if (IS_AMD_CPU(env)) {
env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
& CPUID_EXT2_AMD_ALIASES);
@@ -2724,6 +2760,20 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
mce_init(cpu);
qemu_init_vcpu(cs);
+ /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
+ * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
+ * based on inputs (sockets, cores, threads), it is still better to give
+ * users a warning.
+ *
+ * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
+ * cs->nr_threads hasn't been populated yet and the check would be wrong.
+ */
+ if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
+ error_report("AMD CPU doesn't support hyperthreading. Please configure"
+ " -smp options properly.");
+ ht_warned = true;
+ }
+
x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
goto out;
@@ -2825,9 +2875,6 @@ static void x86_cpu_initfn(Object *obj)
if (tcg_enabled() && !inited) {
inited = 1;
optimize_flags_init();
-#ifndef CONFIG_USER_ONLY
- cpu_set_debug_excp_handler(breakpoint_handler);
-#endif
}
}
@@ -2865,8 +2912,14 @@ static bool x86_cpu_has_work(CPUState *cs)
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
- return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_POLL)) &&
+#if !defined(CONFIG_USER_ONLY)
+ if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ apic_poll_irq(cpu->apic_state);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
+ }
+#endif
+
+ return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(cs->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
@@ -2905,6 +2958,7 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
cc->do_interrupt = x86_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
@@ -2924,6 +2978,11 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_x86_cpu;
#endif
cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
+#ifndef CONFIG_USER_ONLY
+ cc->debug_excp_handler = breakpoint_handler;
+#endif
+ cc->cpu_exec_enter = x86_cpu_exec_enter;
+ cc->cpu_exec_exit = x86_cpu_exec_exit;
}
static const TypeInfo x86_cpu_type_info = {
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index e634d83e8..015f5b527 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -337,6 +337,8 @@
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
+#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
+
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
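MSR_MTRRphysIndex() is the inverse of the two macros above it: variable-range MTRR n owns the MSR pair 0x200 + 2n (base) and 0x200 + 2n + 1 (mask), so clearing bit 0 and halving the distance from 0x200 recovers n from either address; kvm.c relies on this when decoding a case range of MTRR MSRs. A self-checking sketch (the defines are copied from this header so the snippet stands alone):

#include <assert.h>

#define MSR_MTRRphysBase(reg)   (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)   (0x200 + 2 * (reg) + 1)
#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)

int main(void)
{
    unsigned n;

    for (n = 0; n < 8; n++) {   /* 8 == MSR_MTRRcap_VCNT */
        assert(MSR_MTRRphysIndex(MSR_MTRRphysBase(n)) == n);
        assert(MSR_MTRRphysIndex(MSR_MTRRphysMask(n)) == n);
    }
    return 0;
}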
@@ -393,6 +395,9 @@
#define XSTATE_YMM (1ULL << 2)
#define XSTATE_BNDREGS (1ULL << 3)
#define XSTATE_BNDCSR (1ULL << 4)
+#define XSTATE_OPMASK (1ULL << 5)
+#define XSTATE_ZMM_Hi256 (1ULL << 6)
+#define XSTATE_Hi16_ZMM (1ULL << 7)
/* CPUID feature words */
@@ -558,9 +563,13 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EBX_INVPCID (1U << 10)
#define CPUID_7_0_EBX_RTM (1U << 11)
#define CPUID_7_0_EBX_MPX (1U << 14)
+#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */
#define CPUID_7_0_EBX_RDSEED (1U << 18)
#define CPUID_7_0_EBX_ADX (1U << 19)
#define CPUID_7_0_EBX_SMAP (1U << 20)
+#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
+#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
+#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)
@@ -705,6 +714,24 @@ typedef union {
} XMMReg;
typedef union {
+ uint8_t _b[32];
+ uint16_t _w[16];
+ uint32_t _l[8];
+ uint64_t _q[4];
+ float32 _s[8];
+ float64 _d[4];
+} YMMReg;
+
+typedef union {
+ uint8_t _b[64];
+ uint16_t _w[32];
+ uint32_t _l[16];
+ uint64_t _q[8];
+ float32 _s[16];
+ float64 _d[8];
+} ZMMReg;
+
+typedef union {
uint8_t _b[8];
uint16_t _w[4];
uint32_t _l[2];
@@ -723,6 +750,20 @@ typedef struct BNDCSReg {
} BNDCSReg;
#ifdef HOST_WORDS_BIGENDIAN
+#define ZMM_B(n) _b[63 - (n)]
+#define ZMM_W(n) _w[31 - (n)]
+#define ZMM_L(n) _l[15 - (n)]
+#define ZMM_S(n) _s[15 - (n)]
+#define ZMM_Q(n) _q[7 - (n)]
+#define ZMM_D(n) _d[7 - (n)]
+
+#define YMM_B(n) _b[31 - (n)]
+#define YMM_W(n) _w[15 - (n)]
+#define YMM_L(n) _l[7 - (n)]
+#define YMM_S(n) _s[7 - (n)]
+#define YMM_Q(n) _q[3 - (n)]
+#define YMM_D(n) _d[3 - (n)]
+
#define XMM_B(n) _b[15 - (n)]
#define XMM_W(n) _w[7 - (n)]
#define XMM_L(n) _l[3 - (n)]
@@ -735,6 +776,20 @@ typedef struct BNDCSReg {
#define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)]
#else
+#define ZMM_B(n) _b[n]
+#define ZMM_W(n) _w[n]
+#define ZMM_L(n) _l[n]
+#define ZMM_S(n) _s[n]
+#define ZMM_Q(n) _q[n]
+#define ZMM_D(n) _d[n]
+
+#define YMM_B(n) _b[n]
+#define YMM_W(n) _w[n]
+#define YMM_L(n) _l[n]
+#define YMM_S(n) _s[n]
+#define YMM_Q(n) _q[n]
+#define YMM_D(n) _d[n]
+
#define XMM_B(n) _b[n]
#define XMM_W(n) _w[n]
#define XMM_L(n) _l[n]
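The new YMM_*/ZMM_* accessors repeat the existing XMM pattern: on big-endian hosts the element index is mirrored so that, for example, ZMM_Q(0) always names the guest's least-significant qword no matter where _q[0] sits in host memory. A reduced sketch of the pattern on a 128-bit register (illustrative, not QEMU code):

#include <inttypes.h>
#include <stdio.h>

typedef union {
    uint8_t  _b[16];
    uint64_t _q[2];
} Reg;

#ifdef HOST_WORDS_BIGENDIAN
#define Q(n) _q[1 - (n)]      /* mirror the index on big-endian hosts */
#else
#define Q(n) _q[n]
#endif

int main(void)
{
    Reg r = { ._q = { 0, 0 } };

    r.Q(0) = 0x1122334455667788ULL;   /* guest's low qword */
    r.Q(1) = 0x99aabbccddeeff00ULL;   /* guest's high qword */
    printf("%016" PRIx64 " %016" PRIx64 "\n",
           (uint64_t)r.Q(1), (uint64_t)r.Q(0));
    return 0;
}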
@@ -773,6 +828,8 @@ typedef struct {
#define NB_MMU_MODES 3
+#define NB_OPMASK_REGS 8
+
typedef enum TPRAccess {
TPR_ACCESS_READ,
TPR_ACCESS_WRITE,
@@ -837,6 +894,12 @@ typedef struct CPUX86State {
XMMReg ymmh_regs[CPU_NB_REGS];
+ uint64_t opmask_regs[NB_OPMASK_REGS];
+ YMMReg zmmh_regs[CPU_NB_REGS];
+#ifdef TARGET_X86_64
+ ZMMReg hi16_zmm_regs[CPU_NB_REGS];
+#endif
+
/* sysenter registers */
uint32_t sysenter_cs;
target_ulong sysenter_esp;
@@ -930,7 +993,7 @@ typedef struct CPUX86State {
/* MTRRs */
uint64_t mtrr_fixed[11];
uint64_t mtrr_deftype;
- MTRRVar mtrr_var[8];
+ MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
/* For KVM */
uint32_t mp_state;
@@ -1041,7 +1104,7 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
}
static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
- int sipi_vector)
+ uint8_t sipi_vector)
{
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
@@ -1119,7 +1182,7 @@ static inline int hw_breakpoint_len(unsigned long dr7, int index)
void hw_breakpoint_insert(CPUX86State *env, int index);
void hw_breakpoint_remove(CPUX86State *env, int index);
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update);
-void breakpoint_handler(CPUX86State *env);
+void breakpoint_handler(CPUState *cs);
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
@@ -1249,6 +1312,7 @@ void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
/* cc_helper.c */
extern const uint8_t parity_table[256];
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+void update_fp_status(CPUX86State *env);
static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
@@ -1284,6 +1348,7 @@ static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
/* fpu_helper.c */
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
+void cpu_set_fpuc(CPUX86State *env, uint16_t val);
/* svm_helper.c */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
@@ -1300,7 +1365,8 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
uint32_t feat_add, uint32_t feat_remove);
-void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features);
+void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features);
+void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features);
/* Return name of 32-bit register, from a R_* constant */
diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c
index 1b2900d5d..1d4eee397 100644
--- a/target-i386/fpu_helper.c
+++ b/target-i386/fpu_helper.c
@@ -537,7 +537,7 @@ uint32_t helper_fnstcw(CPUX86State *env)
return env->fpuc;
}
-static void update_fp_status(CPUX86State *env)
+void update_fp_status(CPUX86State *env)
{
int rnd_type;
@@ -575,8 +575,7 @@ static void update_fp_status(CPUX86State *env)
void helper_fldcw(CPUX86State *env, uint32_t val)
{
- env->fpuc = val;
- update_fp_status(env);
+ cpu_set_fpuc(env, val);
}
void helper_fclex(CPUX86State *env)
@@ -595,7 +594,7 @@ void helper_fninit(CPUX86State *env)
{
env->fpus = 0;
env->fpstt = 0;
- env->fpuc = 0x37f;
+ cpu_set_fpuc(env, 0x37f);
env->fptags[0] = 1;
env->fptags[1] = 1;
env->fptags[2] = 1;
@@ -1013,11 +1012,11 @@ void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
int i, fpus, fptag;
if (data32) {
- env->fpuc = cpu_lduw_data(env, ptr);
+ cpu_set_fpuc(env, cpu_lduw_data(env, ptr));
fpus = cpu_lduw_data(env, ptr + 4);
fptag = cpu_lduw_data(env, ptr + 8);
} else {
- env->fpuc = cpu_lduw_data(env, ptr);
+ cpu_set_fpuc(env, cpu_lduw_data(env, ptr));
fpus = cpu_lduw_data(env, ptr + 2);
fptag = cpu_lduw_data(env, ptr + 4);
}
@@ -1046,7 +1045,7 @@ void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
/* fninit */
env->fpus = 0;
env->fpstt = 0;
- env->fpuc = 0x37f;
+ cpu_set_fpuc(env, 0x37f);
env->fptags[0] = 1;
env->fptags[1] = 1;
env->fptags[2] = 1;
@@ -1157,7 +1156,7 @@ void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64)
raise_exception(env, EXCP0D_GPF);
}
- env->fpuc = cpu_lduw_data(env, ptr);
+ cpu_set_fpuc(env, cpu_lduw_data(env, ptr));
fpus = cpu_lduw_data(env, ptr + 2);
fptag = cpu_lduw_data(env, ptr + 4);
env->fpstt = (fpus >> 11) & 7;
@@ -1257,6 +1256,12 @@ void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
}
+void cpu_set_fpuc(CPUX86State *env, uint16_t val)
+{
+ env->fpuc = val;
+ update_fp_status(env);
+}
+
void helper_ldmxcsr(CPUX86State *env, uint32_t val)
{
cpu_set_mxcsr(env, val);
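cpu_set_fpuc() exists so that no caller can write env->fpuc without also re-deriving the softfloat state in update_fp_status(); the part of the control word that actually changes rounding is the RC field in bits 10..11. A standalone sketch of that field (illustrative, not QEMU code):

#include <assert.h>
#include <string.h>

/* bits 10..11 of the x87 control word select the rounding mode */
static const char *x87_rounding(unsigned fpuc)
{
    switch ((fpuc >> 10) & 3) {
    case 0:  return "nearest-even";
    case 1:  return "down";
    case 2:  return "up";
    default: return "toward-zero";
    }
}

int main(void)
{
    /* the reset value 0x37f used throughout selects round-to-nearest */
    assert(strcmp(x87_rounding(0x037f), "nearest-even") == 0);
    assert(strcmp(x87_rounding(0x0f7f), "toward-zero") == 0);
    return 0;
}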
diff --git a/target-i386/gdbstub.c b/target-i386/gdbstub.c
index 19fe9adc3..ff99cfb00 100644
--- a/target-i386/gdbstub.c
+++ b/target-i386/gdbstub.c
@@ -203,7 +203,7 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
case IDX_FP_REGS + 8:
- env->fpuc = ldl_p(mem_buf);
+ cpu_set_fpuc(env, ldl_p(mem_buf));
return 4;
case IDX_FP_REGS + 9:
tmp = ldl_p(mem_buf);
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 47b982b43..345bda188 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -615,8 +615,8 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
if (!(pdpe & PG_PRESENT_MASK)) {
goto do_fault;
}
- rsvd_mask |= PG_HI_USER_MASK | PG_NX_MASK;
- if (pdpe & rsvd_mask) {
+ rsvd_mask |= PG_HI_USER_MASK;
+ if (pdpe & (rsvd_mask | PG_NX_MASK)) {
goto do_fault_rsvd;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
@@ -1011,9 +1011,10 @@ bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
return hit_enabled;
}
-void breakpoint_handler(CPUX86State *env)
+void breakpoint_handler(CPUState *cs)
{
- CPUState *cs = CPU(x86_env_get_cpu(env));
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
CPUBreakpoint *bp;
if (cs->watchpoint_hit) {
@@ -1261,3 +1262,24 @@ void do_cpu_sipi(X86CPU *cpu)
{
}
#endif
+
+/* Frob eflags into and out of the CPU temporary format. */
+
+void x86_cpu_exec_enter(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ env->df = 1 - (2 * ((env->eflags >> 10) & 1));
+ CC_OP = CC_OP_EFLAGS;
+ env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+}
+
+void x86_cpu_exec_exit(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->eflags = cpu_compute_eflags(env);
+}
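On entry to the TCG execution loop the arithmetic flags are split out of env->eflags into the lazy CC_SRC/CC_OP pair, and DF (bit 10 of EFLAGS) becomes a +/-1 stride in env->df that string instructions use directly; x86_cpu_exec_exit() folds everything back into eflags. A self-checking sketch of the DF encoding (illustrative, not QEMU code):

#include <assert.h>

static int df_from_eflags(unsigned eflags)
{
    /* 1 - 2*DF: DF clear -> +1, DF set -> -1 */
    return 1 - 2 * ((eflags >> 10) & 1);
}

int main(void)
{
    assert(df_from_eflags(0x202) == 1);    /* CLD: string ops step up */
    assert(df_from_eflags(0x602) == -1);   /* STD: string ops step down */
    return 0;
}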
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 097fe1188..ccf36e871 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -79,6 +79,7 @@ static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
+static bool has_msr_mtrr;
static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;
@@ -739,6 +740,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
}
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ has_msr_mtrr = true;
+ }
+
return 0;
}
@@ -1026,6 +1031,9 @@ static int kvm_put_fpu(X86CPU *cpu)
#define XSAVE_YMMH_SPACE 144
#define XSAVE_BNDREGS 240
#define XSAVE_BNDCSR 256
+#define XSAVE_OPMASK 272
+#define XSAVE_ZMM_Hi256 288
+#define XSAVE_Hi16_ZMM 416
static int kvm_put_xsave(X86CPU *cpu)
{
@@ -1062,6 +1070,14 @@ static int kvm_put_xsave(X86CPU *cpu)
sizeof env->bnd_regs);
memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
sizeof(env->bndcs_regs));
+ memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs,
+ sizeof env->opmask_regs);
+ memcpy(&xsave->region[XSAVE_ZMM_Hi256], env->zmmh_regs,
+ sizeof env->zmmh_regs);
+#ifdef TARGET_X86_64
+ memcpy(&xsave->region[XSAVE_Hi16_ZMM], env->hi16_zmm_regs,
+ sizeof env->hi16_zmm_regs);
+#endif
r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
return r;
}
@@ -1183,7 +1199,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
- struct kvm_msr_entry entries[100];
+ struct kvm_msr_entry entries[150];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int n = 0, i;
@@ -1278,6 +1294,37 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
env->msr_hv_tsc);
}
+ if (has_msr_mtrr) {
+ kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRphysBase(i), env->mtrr_var[i].base);
+ kvm_msr_entry_set(&msrs[n++],
+ MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
+ }
+ }
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
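The scratch buffer grows from 100 to 150 kvm_msr_entry slots because the MTRR block alone can add 28 MSRs per vCPU. The arithmetic, as a self-checking sketch (MSR_MTRRcap_VCNT mirrored from cpu.h):

#include <assert.h>

#define MSR_MTRRcap_VCNT 8

int main(void)
{
    /* 1 deftype + 11 fixed-range + a base/mask pair per variable range */
    int mtrr_msrs = 1 + 11 + 2 * MSR_MTRRcap_VCNT;

    assert(mtrr_msrs == 28);
    return 0;
}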
@@ -1366,6 +1413,14 @@ static int kvm_get_xsave(X86CPU *cpu)
sizeof env->bnd_regs);
memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
sizeof(env->bndcs_regs));
+ memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK],
+ sizeof env->opmask_regs);
+ memcpy(env->zmmh_regs, &xsave->region[XSAVE_ZMM_Hi256],
+ sizeof env->zmmh_regs);
+#ifdef TARGET_X86_64
+ memcpy(env->hi16_zmm_regs, &xsave->region[XSAVE_Hi16_ZMM],
+ sizeof env->hi16_zmm_regs);
+#endif
return 0;
}
@@ -1484,7 +1539,7 @@ static int kvm_get_msrs(X86CPU *cpu)
CPUX86State *env = &cpu->env;
struct {
struct kvm_msrs info;
- struct kvm_msr_entry entries[100];
+ struct kvm_msr_entry entries[150];
} msr_data;
struct kvm_msr_entry *msrs = msr_data.entries;
int ret, i, n;
@@ -1572,6 +1627,24 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hv_tsc) {
msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
}
+ if (has_msr_mtrr) {
+ msrs[n++].index = MSR_MTRRdefType;
+ msrs[n++].index = MSR_MTRRfix64K_00000;
+ msrs[n++].index = MSR_MTRRfix16K_80000;
+ msrs[n++].index = MSR_MTRRfix16K_A0000;
+ msrs[n++].index = MSR_MTRRfix4K_C0000;
+ msrs[n++].index = MSR_MTRRfix4K_C8000;
+ msrs[n++].index = MSR_MTRRfix4K_D0000;
+ msrs[n++].index = MSR_MTRRfix4K_D8000;
+ msrs[n++].index = MSR_MTRRfix4K_E0000;
+ msrs[n++].index = MSR_MTRRfix4K_E8000;
+ msrs[n++].index = MSR_MTRRfix4K_F0000;
+ msrs[n++].index = MSR_MTRRfix4K_F8000;
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ msrs[n++].index = MSR_MTRRphysBase(i);
+ msrs[n++].index = MSR_MTRRphysMask(i);
+ }
+ }
msr_data.info.nmsrs = n;
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
@@ -1692,6 +1765,49 @@ static int kvm_get_msrs(X86CPU *cpu)
case HV_X64_MSR_REFERENCE_TSC:
env->msr_hv_tsc = msrs[i].data;
break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = msrs[i].data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[0] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ env->mtrr_fixed[1] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[2] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ env->mtrr_fixed[3] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C8000:
+ env->mtrr_fixed[4] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D0000:
+ env->mtrr_fixed[5] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D8000:
+ env->mtrr_fixed[6] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E0000:
+ env->mtrr_fixed[7] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E8000:
+ env->mtrr_fixed[8] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F0000:
+ env->mtrr_fixed[9] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[10] = msrs[i].data;
+ break;
+ case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
+ if (index & 1) {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
+ } else {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+ }
+ break;
}
}
diff --git a/target-i386/machine.c b/target-i386/machine.c
index 16d2f6a80..1c13b1435 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -60,6 +60,44 @@ static const VMStateDescription vmstate_ymmh_reg = {
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _n, _v) \
VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_ymmh_reg, XMMReg)
+static const VMStateDescription vmstate_zmmh_reg = {
+ .name = "zmmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(YMM_Q(0), YMMReg),
+ VMSTATE_UINT64(YMM_Q(1), YMMReg),
+ VMSTATE_UINT64(YMM_Q(2), YMMReg),
+ VMSTATE_UINT64(YMM_Q(3), YMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_zmmh_reg, YMMReg)
+
+#ifdef TARGET_X86_64
+static const VMStateDescription vmstate_hi16_zmm_reg = {
+ .name = "hi16_zmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_hi16_zmm_reg, ZMMReg)
+#endif
+
static const VMStateDescription vmstate_bnd_regs = {
.name = "bnd_regs",
.version_id = 1,
@@ -315,13 +353,13 @@ static int cpu_post_load(void *opaque, int version_id)
env->hflags &= ~HF_CPL_MASK;
env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
- /* XXX: restore FPU round state */
env->fpstt = (env->fpus_vmstate >> 11) & 7;
env->fpus = env->fpus_vmstate & ~0x3800;
env->fptag_vmstate ^= 0xff;
for(i = 0; i < 8; i++) {
env->fptags[i] = (env->fptag_vmstate >> i) & 1;
}
+ update_fp_status(env);
cpu_breakpoint_remove_all(cs, BP_CPU);
cpu_watchpoint_remove_all(cs, BP_CPU);
@@ -603,6 +641,52 @@ static const VMStateDescription vmstate_msr_hyperv_time = {
}
};
+static bool avx512_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < NB_OPMASK_REGS; i++) {
+ if (env->opmask_regs[i]) {
+ return true;
+ }
+ }
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+#define ENV_ZMMH(reg, field) (env->zmmh_regs[reg].YMM_Q(field))
+ if (ENV_ZMMH(i, 0) || ENV_ZMMH(i, 1) ||
+ ENV_ZMMH(i, 2) || ENV_ZMMH(i, 3)) {
+ return true;
+ }
+#ifdef TARGET_X86_64
+#define ENV_Hi16_ZMM(reg, field) (env->hi16_zmm_regs[reg].ZMM_Q(field))
+ if (ENV_Hi16_ZMM(i, 0) || ENV_Hi16_ZMM(i, 1) ||
+ ENV_Hi16_ZMM(i, 2) || ENV_Hi16_ZMM(i, 3) ||
+ ENV_Hi16_ZMM(i, 4) || ENV_Hi16_ZMM(i, 5) ||
+ ENV_Hi16_ZMM(i, 6) || ENV_Hi16_ZMM(i, 7)) {
+ return true;
+ }
+#endif
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_avx512 = {
+ .name = "cpu/avx512",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
+ VMSTATE_ZMMH_REGS_VARS(env.zmmh_regs, X86CPU, CPU_NB_REGS),
+#ifdef TARGET_X86_64
+ VMSTATE_Hi16_ZMM_REGS_VARS(env.hi16_zmm_regs, X86CPU, CPU_NB_REGS),
+#endif
+ VMSTATE_END_OF_LIST()
+ }
+};
+
VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
@@ -677,7 +761,7 @@ VMStateDescription vmstate_x86_cpu = {
/* MTRRs */
VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
- VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, 8, 8),
+ VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
/* KVM-related states */
VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
@@ -745,6 +829,9 @@ VMStateDescription vmstate_x86_cpu = {
}, {
.vmsd = &vmstate_msr_hyperv_time,
.needed = hyperv_time_enable_needed,
+ }, {
+ .vmsd = &vmstate_avx512,
+ .needed = avx512_needed,
} , {
/* empty */
}
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 2d970d0cb..c98eeb435 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -883,32 +883,23 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
}
if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
/* to inner privilege */
- if (ist != 0) {
- esp = get_rsp_from_tss(env, ist + 3);
- } else {
- esp = get_rsp_from_tss(env, dpl);
- }
- esp &= ~0xfLL; /* align stack */
- ss = 0;
new_stack = 1;
+ esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
+ ss = 0;
} else if ((e2 & DESC_C_MASK) || dpl == cpl) {
/* to same privilege */
if (env->eflags & VM_MASK) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
new_stack = 0;
- if (ist != 0) {
- esp = get_rsp_from_tss(env, ist + 3);
- } else {
- esp = env->regs[R_ESP];
- }
- esp &= ~0xfLL; /* align stack */
+ esp = env->regs[R_ESP];
dpl = cpl;
} else {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
new_stack = 0; /* avoid warning */
esp = 0; /* avoid warning */
}
+ esp &= ~0xfLL; /* align stack */
PUSHQ(esp, env->segs[R_SS].selector);
PUSHQ(esp, env->regs[R_ESP]);
@@ -1127,8 +1118,8 @@ static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
/* Since we emulate only user space, we cannot do more than
exiting the emulation with the suitable exception and error
- code */
- if (is_int) {
+ code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
+ if (is_int || intno == EXCP_SYSCALL) {
env->eip = next_eip;
}
}
@@ -1279,6 +1270,75 @@ void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ bool ret = false;
+
+#if !defined(CONFIG_USER_ONLY)
+ if (interrupt_request & CPU_INTERRUPT_POLL) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ }
+#endif
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
+ do_cpu_sipi(cpu);
+ } else if (env->hflags2 & HF2_GIF_MASK) {
+ if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter(cpu);
+ ret = true;
+ } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+ !(env->hflags2 & HF2_NMI_MASK)) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ env->hflags2 |= HF2_NMI_MASK;
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+ ret = true;
+ } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+ ret = true;
+ } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ (((env->hflags2 & HF2_VINTR_MASK) &&
+ (env->hflags2 & HF2_HIF_MASK)) ||
+ (!(env->hflags2 & HF2_VINTR_MASK) &&
+ (env->eflags & IF_MASK &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+ int intno;
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
+ cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
+ intno = cpu_get_pic_interrupt(env);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ /* ensure that no TB jump will be modified as
+ the program flow was changed */
+ ret = true;
+#if !defined(CONFIG_USER_ONLY)
+ } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+ (env->eflags & IF_MASK) &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+ int intno;
+ /* FIXME: this should respect TPR */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
+ intno = ldl_phys(cs->as, env->vm_vmcb
+ + offsetof(struct vmcb, control.int_vector));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing virtual hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ ret = true;
+#endif
+ }
+ }
+
+ return ret;
+}
+
void helper_enter_level(CPUX86State *env, int level, int data32,
target_ulong t1)
{
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 6fcd8245d..782f7d266 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -32,6 +32,9 @@
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
+#include "trace-tcg.h"
+
+
#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
@@ -7984,7 +7987,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
if (bp->pc == pc_ptr &&
!((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
gen_debug(dc, pc_ptr - dc->cs_base);
- break;
+ goto done_generating;
}
}
}
@@ -8035,6 +8038,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
}
if (tb->cflags & CF_LAST_IO)
gen_io_end();
+done_generating:
gen_tb_end(tb, num_insns);
*tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* we don't forget to fill the last values */