author	Christoffer Dall <christoffer.dall@arm.com>	2018-11-26 18:21:22 +0100
committer	Marc Zyngier <marc.zyngier@arm.com>	2019-02-19 21:05:36 +0000
commit	accb99bcd0ca6d3ee412557b0c3f583a3abc0eb6 (patch)
tree	e997f4ed8b196b597a92bc89b22f00b70e78eee0 /virt
parent	e329fb75d519e3dc3eb11b22d5bb846516be3521 (diff)
KVM: arm/arm64: Simplify bg_timer programming
Instead of calling into kvm_timer_[un]schedule from the main kvm blocking path, test if the VCPU is on the wait queue from the load/put path and perform the background timer setup/cancel in this path.

This has the distinct advantage that we no longer race between load/put and schedule/unschedule, and that programming and cancelling of the bg_timer always happen when the timer state is not loaded.

Note that we must now remove the checks in kvm_timer_blocking that do not schedule a background timer if one of the timers can fire, because we no longer have a guarantee that kvm_vcpu_check_block() will be called before kvm_timer_blocking.

Reported-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
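To make the resulting ordering concrete, here is a minimal user-space C sketch of the idea, not kernel code: the struct fields, bg_timer_start()/bg_timer_cancel() helpers and the main() driver are illustrative stand-ins for the swait_active(kvm_arch_vcpu_wq(vcpu)) check, soft_timer_start() and soft_timer_cancel() used in the diff below.

/*
 * Sketch only: "arm the background timer from the put path, cancel it
 * from the load path".  All names here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
	bool on_waitqueue;	/* models swait_active(kvm_arch_vcpu_wq(vcpu)) */
	bool bg_timer_armed;	/* models the bg_timer soft timer */
};

static void bg_timer_start(struct vcpu *v)  { v->bg_timer_armed = true; }
static void bg_timer_cancel(struct vcpu *v) { v->bg_timer_armed = false; }

/* put path: the timer state is no longer loaded on the CPU */
static void vcpu_put(struct vcpu *v)
{
	/* Only set up the background timer if the VCPU is actually blocking. */
	if (v->on_waitqueue)
		bg_timer_start(v);
}

/* load path: the timer state is about to be loaded again */
static void vcpu_load(struct vcpu *v)
{
	bg_timer_cancel(v);
}

int main(void)
{
	struct vcpu v = { false, false };

	v.on_waitqueue = true;	/* the blocking path put us on the wait queue */
	vcpu_put(&v);		/* bg timer armed here, not from the blocking hook */
	printf("after put:  bg_timer_armed=%d\n", v.bg_timer_armed);

	v.on_waitqueue = false;	/* woken up */
	vcpu_load(&v);		/* bg timer cancelled before timer state is reloaded */
	printf("after load: bg_timer_armed=%d\n", v.bg_timer_armed);
	return 0;
}

Because the timer is only ever armed while the timer state is unloaded (in the put path) and cancelled before that state is loaded again, the arm/cancel cannot race with load/put the way schedule/unschedule could.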
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/arch_timer.c	35
-rw-r--r--	virt/kvm/arm/arm.c	2
2 files changed, 14 insertions(+), 23 deletions(-)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index b07ac4614e1c..4986028d9829 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -349,22 +349,12 @@ out:
  * thread is removed from its waitqueue and made runnable when there's a timer
  * interrupt to handle.
  */
-void kvm_timer_schedule(struct kvm_vcpu *vcpu)
+static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
-	vtimer_save_state(vcpu);
-
-	/*
-	 * No need to schedule a background timer if any guest timer has
-	 * already expired, because kvm_vcpu_block will return before putting
-	 * the thread to sleep.
-	 */
-	if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
-		return;
-
 	/*
 	 * If both timers are not capable of raising interrupts (disabled or
 	 * masked), then there's no more work for us to do.
@@ -373,12 +363,19 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
 		return;
 
 	/*
-	 * The guest timers have not yet expired, schedule a background timer.
+	 * At least one guest timer will expire. Schedule a background timer.
 	 * Set the earliest expiration time among the guest timers.
 	 */
 	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
 }
 
+static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	soft_timer_cancel(&timer->bg_timer);
+}
+
 static void vtimer_restore_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -401,15 +398,6 @@ out:
 	local_irq_restore(flags);
 }
 
-void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-
-	vtimer_restore_state(vcpu);
-
-	soft_timer_cancel(&timer->bg_timer);
-}
-
 static void set_cntvoff(u64 cntvoff)
 {
 	u32 low = lower_32_bits(cntvoff);
@@ -485,6 +473,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 
+	kvm_timer_unblocking(vcpu);
+
 	/* If the timer fired while we weren't running, inject it now */
 	if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
 		kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
@@ -527,6 +517,9 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	 */
 	soft_timer_cancel(&timer->phys_timer);
 
+	if (swait_active(kvm_arch_vcpu_wq(vcpu)))
+		kvm_timer_blocking(vcpu);
+
 	/*
 	 * The kernel may decide to run userspace after calling vcpu_put, so
 	 * we reset cntvoff to 0 to ensure a consistent read between user
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index b77db673bb03..9fbdb9e1c51f 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -335,13 +335,11 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	kvm_timer_schedule(vcpu);
 	kvm_vgic_v4_enable_doorbell(vcpu);
 }
 
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	kvm_timer_unschedule(vcpu);
 	kvm_vgic_v4_disable_doorbell(vcpu);
 }