author     Joerg Roedel <joerg.roedel@amd.com>   2010-02-24 18:59:18 +0100
committer  Avi Kivity <avi@redhat.com>           2010-04-25 13:53:23 +0300
commit     7f5d8b5600b5294137886b46bf00ef811d0fdf32 (patch)
tree       8eb3d48b169c0c642014eeff3255f785cfa300ab /arch
parent     b44ea385d8cb187e04ec8d901d4c320c8b07c40b (diff)
KVM: SVM: Handle nested selective_cr0 intercept correctly
If we have the following situation with nested svm:

  1. Host KVM intercepts cr0 writes
  2. Guest hypervisor intercepts only selective cr0 writes

Then we get a cr0 write intercept which is handled on the host. But that intercept may actually be a selective cr0 intercept for the guest. This patch checks for this condition and injects a selective cr0 intercept if needed.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
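[Annotation] For context, a minimal userspace sketch of the check this patch introduces. It assumes only that SVM_CR0_SELECTIVE_MASK covers CR0.TS and CR0.MP, as defined in arch/x86/include/asm/svm.h; the helper name cr0_write_is_selective is illustrative and not part of the kernel:

#include <stdbool.h>

/* Illustrative copies of the relevant bit definitions; the kernel
 * gets these from asm/processor-flags.h and asm/svm.h. */
#define X86_CR0_MP (1UL << 1)   /* Monitor coProcessor */
#define X86_CR0_TS (1UL << 3)   /* Task Switched */
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)

/* Hypothetical helper: true when a cr0 write leaves TS and MP
 * unchanged, i.e. the kind of write the selective cr0 intercept
 * (SVM_EXIT_CR0_SEL_WRITE) is meant to catch, so it should be
 * offered to the L1 hypervisor instead of being handled in L0. */
static bool cr0_write_is_selective(unsigned long old_cr0,
				   unsigned long new_cr0)
{
	return (old_cr0 & SVM_CR0_SELECTIVE_MASK) ==
	       (new_cr0 & SVM_CR0_SELECTIVE_MASK);
}

The patch performs exactly this masked comparison inline in svm_set_cr0(), then asks nested_svm_exit_handled() whether L1 wants the exit.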
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kvm/svm.c   21
1 file changed, 21 insertions, 0 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b4aac5c7ad8..631d2e54449 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1043,6 +1043,27 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	if (is_nested(svm)) {
+		/*
+		 * We are here because we run in nested mode, the host kvm
+		 * intercepts cr0 writes but the l1 hypervisor does not.
+		 * But the L1 hypervisor may intercept selective cr0 writes.
+		 * This needs to be checked here.
+		 */
+		unsigned long old, new;
+
+		/* Remove bits that would trigger a real cr0 write intercept */
+		old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
+		new = cr0 & SVM_CR0_SELECTIVE_MASK;
+
+		if (old == new) {
+			/* cr0 write with ts and mp unchanged */
+			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+				return;
+		}
+	}
+
 #ifdef CONFIG_X86_64
 	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
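
[Annotation] For illustration, a worked instance of the masked comparison as a standalone program. The values, X86_CR0_PE, and the printed labels are examples chosen here, not kernel behavior or output:

#include <stdio.h>

#define X86_CR0_PE (1UL << 0)   /* Protection Enable */
#define X86_CR0_MP (1UL << 1)   /* Monitor coProcessor */
#define X86_CR0_TS (1UL << 3)   /* Task Switched */
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)

int main(void)
{
	/* Case 1: the write flips only CR0.PE; TS and MP are untouched,
	 * so the masked values match and the exit would be offered to
	 * L1 as SVM_EXIT_CR0_SEL_WRITE. Prints "selective". */
	unsigned long old = X86_CR0_TS;               /* TS=1 MP=0 PE=0 */
	unsigned long new = X86_CR0_TS | X86_CR0_PE;  /* TS=1 MP=0 PE=1 */

	printf("%s\n", (old & SVM_CR0_SELECTIVE_MASK) ==
		       (new & SVM_CR0_SELECTIVE_MASK) ? "selective" : "host");

	/* Case 2: the write clears CR0.TS; the masked values differ and
	 * the host keeps handling the write itself. Prints "host". */
	new = 0UL;
	printf("%s\n", (old & SVM_CR0_SELECTIVE_MASK) ==
		       (new & SVM_CR0_SELECTIVE_MASK) ? "selective" : "host");
	return 0;
}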