-rw-r--r--  arch/x86/kvm/lapic.c        2
-rw-r--r--  arch/x86/kvm/x86.c         13
-rw-r--r--  include/linux/kvm_host.h    2
-rw-r--r--  include/linux/kvm_types.h   1
-rw-r--r--  virt/kvm/kvm_main.c        47
5 files changed, 46 insertions, 19 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 02b51dd4e4a..f77df1c5de6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
- addr);
+ addr, sizeof(u8));
}
void kvm_lapic_init(void)
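The PV EOI area is a single guest byte, so this cache is now initialized with sizeof(u8). As a hedged sketch (kernel context assumed), the byte is later written through the same sized cache, in the style of the existing pv_eoi_put_user() helper in lapic.c:

	/* Illustrative only: write the one-byte PV EOI value through the
	 * cache that was validated for sizeof(u8) above. */
	static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
	{
		return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					      &val, sizeof(val));
	}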
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f19ac0aca60..e1721324c27 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+ sizeof(u32)))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
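The async page fault slot holds a 32-bit word, hence sizeof(u32) here. A hedged sketch of the matching cached read (hypothetical helper name; the real accessors live elsewhere in x86.c):

	/* Illustrative only: fetch the 4-byte async-PF word through the
	 * cache validated for sizeof(u32) at MSR-write time. */
	static int apf_get_reason(struct kvm_vcpu *vcpu, u32 *val)
	{
		return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data,
					     val, sizeof(*val));
	}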
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
gpa_offset = data & ~(PAGE_MASK | 1);
- /* Check that the address is 32-byte aligned. */
- if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
- break;
-
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
- &vcpu->arch.pv_time, data & ~1ULL))
+ &vcpu->arch.pv_time, data & ~1ULL,
+ sizeof(struct pvclock_vcpu_time_info)))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
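The dropped alignment check presumably existed only to keep the 32-byte pvclock_vcpu_time_info from straddling a page. With the length-aware cache, the init call itself verifies the whole region and arranges a slow-path fallback when it crosses pages, so the restriction is no longer needed. A standalone user-space model of the boundary condition involved (illustrative, assumes 4 KiB pages):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* Does [gpa, gpa + len) span a page boundary? */
	static bool crosses_page(uint64_t gpa, unsigned long len)
	{
		return (gpa >> PAGE_SHIFT) != ((gpa + len - 1) >> PAGE_SHIFT);
	}

	int main(void)
	{
		/* A 32-byte pvclock area starting 8 bytes before a page
		 * boundary: previously rejected, now handled. */
		printf("%d\n", crosses_page(PAGE_SIZE - 8, 32));	/* 1 */
		printf("%d\n", crosses_page(PAGE_SIZE, 32));		/* 0 */
		return 0;
	}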
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
- data & KVM_STEAL_VALID_BITS))
+ data & KVM_STEAL_VALID_BITS,
+ sizeof(struct kvm_steal_time)))
return 1;
vcpu->arch.st.msr_val = data;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cad77fe09d7..c1395825192 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- gpa_t gpa);
+ gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index fa7cc7244cb..b0bcce0ddc9 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
unsigned long hva;
+ unsigned long len;
struct kvm_memory_slot *memslot;
};
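gfn_to_hva_cache now remembers how many bytes were validated, so later cached accesses can be checked against that window. A user-space model of the guard the kvm_main.c hunks below add (assert standing in for the kernel's BUG_ON; struct name hypothetical):

	#include <assert.h>

	struct ghc_model {
		unsigned long gpa;
		unsigned long hva;
		unsigned long len;	/* bytes validated at init time */
	};

	/* Model of the check added to kvm_{read,write}_guest_cached(). */
	static void check_cached_len(const struct ghc_model *ghc,
				     unsigned long len)
	{
		assert(len <= ghc->len);	/* kernel: BUG_ON(len > ghc->len) */
	}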
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index adc68feb5c5..f18013f09e6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- gpa_t gpa)
+ gpa_t gpa, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int offset = offset_in_page(gpa);
- gfn_t gfn = gpa >> PAGE_SHIFT;
+ gfn_t start_gfn = gpa >> PAGE_SHIFT;
+ gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+ gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+ gfn_t nr_pages_avail;
ghc->gpa = gpa;
ghc->generation = slots->generation;
- ghc->memslot = gfn_to_memslot(kvm, gfn);
- ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
- if (!kvm_is_error_hva(ghc->hva))
+ ghc->len = len;
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
ghc->hva += offset;
- else
- return -EFAULT;
-
+ } else {
+ /*
+ * If the requested region crosses two memslots, we still
+ * verify that the entire region is valid here.
+ */
+ while (start_gfn <= end_gfn) {
+ ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+ &nr_pages_avail);
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
+ start_gfn += nr_pages_avail;
+ }
+ /* Use the slow path for cross page reads and writes. */
+ ghc->memslot = NULL;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
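The page-count arithmetic above is self-contained enough to model in user space; a compilable sketch of the same computation (assumes 4 KiB pages):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	typedef uint64_t gfn_t;

	/* Mirrors nr_pages_needed in kvm_gfn_to_hva_cache_init(). */
	static gfn_t pages_needed(uint64_t gpa, unsigned long len)
	{
		gfn_t start_gfn = gpa >> PAGE_SHIFT;
		gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;

		return end_gfn - start_gfn + 1;
	}

	int main(void)
	{
		printf("%llu\n", (unsigned long long)pages_needed(0x1000, 32));	/* 1 */
		printf("%llu\n", (unsigned long long)pages_needed(0x1ff8, 32));	/* 2 */
		return 0;
	}

If all needed pages fall inside one memslot, the hva is cached; otherwise every gfn in the range is still validated, but ghc->memslot is left NULL so that later accesses take the slow path.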
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
+ BUG_ON(len > ghc->len);
+
if (slots->generation != ghc->generation)
- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+ if (unlikely(!ghc->memslot))
+ return kvm_write_guest(kvm, ghc->gpa, data, len);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
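From a caller's point of view the fallback is invisible: a cached write either hits the precomputed hva or silently degrades to kvm_write_guest(). A hedged sketch (hypothetical record type and helper, kernel context assumed):

	/* Illustrative only: same call whether the fast or slow path runs. */
	struct my_record {
		u32 flags;
		u64 value;
	};

	static int update_guest_record(struct kvm *kvm,
				       struct gfn_to_hva_cache *ghc,
				       struct my_record *rec)
	{
		return kvm_write_guest_cached(kvm, ghc, rec, sizeof(*rec));
	}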
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
+ BUG_ON(len > ghc->len);
+
if (slots->generation != ghc->generation)
- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+ if (unlikely(!ghc->memslot))
+ return kvm_read_guest(kvm, ghc->gpa, data, len);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
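Putting the pieces together, the intended usage pattern after this patch is: validate the whole region once at init time, then read or write through the cache with any length up to the one validated. A hedged end-to-end sketch (hypothetical helper name, kernel context assumed):

	/* Illustrative only: init once with the full size; later cached
	 * accesses of up to that many bytes are legal, even when the
	 * region crosses a page boundary. */
	static int demo_read_area(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  gpa_t gpa, void *buf, unsigned long len)
	{
		int r;

		r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
		if (r)
			return r;

		/* Any later read/write must satisfy len <= ghc->len. */
		return kvm_read_guest_cached(kvm, ghc, buf, len);
	}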