author		Arnd Bergmann <arnd@arndb.de>	2009-06-12 09:53:47 +0200
committer	Arnd Bergmann <arnd@arndb.de>	2009-06-12 11:32:58 +0200
commit		5b02ee3d219f9e01b6e9146e25613822cfc2e5ce (patch)
tree		7ce9126738c3cf4b37d67170d0e4b34818c057a9 /arch/ia64
parent		26a28fa4fea5b8c65713aa50c124f76a88c7924d (diff)
parent		8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
asm-generic: merge branch 'master' of torvalds/linux-2.6
Fixes a merge conflict against the x86 tree caused by a fix to atomic.h which I renamed to atomic_long.h.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/hp/sim/hpsim_irq.c	3
-rw-r--r--	arch/ia64/include/asm/kvm_host.h	6
-rw-r--r--	arch/ia64/include/asm/pgtable.h	2
-rw-r--r--	arch/ia64/kernel/acpi.c	5
-rw-r--r--	arch/ia64/kernel/iosapic.c	10
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	3
-rw-r--r--	arch/ia64/kernel/msi_ia64.c	16
-rw-r--r--	arch/ia64/kvm/Kconfig	2
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	263
-rw-r--r--	arch/ia64/kvm/kvm_fw.c	28
-rw-r--r--	arch/ia64/kvm/lapic.h	6
-rw-r--r--	arch/ia64/kvm/optvfault.S	30
-rw-r--r--	arch/ia64/kvm/process.c	5
-rw-r--r--	arch/ia64/kvm/vcpu.c	20
-rw-r--r--	arch/ia64/kvm/vmm.c	12
-rw-r--r--	arch/ia64/kvm/vmm_ivt.S	18
-rw-r--r--	arch/ia64/kvm/vtlb.c	3
-rw-r--r--	arch/ia64/sn/kernel/irq.c	4
-rw-r--r--	arch/ia64/sn/kernel/msi_sn.c	8
19 files changed, 314 insertions(+), 130 deletions(-)
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index cc0a3182db3c..acb5047ab573 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -21,9 +21,10 @@ hpsim_irq_noop (unsigned int irq)
{
}
-static void
+static int
hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
{
+ return 0;
}
static struct hw_interrupt_type irq_type_hp_sim = {
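The hunk above is one instance of a tree-wide change picked up by this merge: irq_chip ->set_affinity callbacks now return int rather than void, 0 on success and a negative value on failure. A minimal sketch of the new contract (my_set_affinity and my_apply_affinity are hypothetical names used for illustration, not part of this patch):

static int my_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	/* Pick the first online CPU in the requested mask. */
	int cpu = cpumask_first_and(cpu_online_mask, mask);

	if (cpu >= nr_cpu_ids)
		return -1;			/* nothing usable: report failure */
	my_apply_affinity(irq, cpu);		/* hypothetical hardware hook */
	return 0;				/* affinity applied */
}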
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 4542651e6acb..5f43697aed30 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -371,6 +371,7 @@ struct kvm_vcpu_arch {
int last_run_cpu;
int vmm_tr_slot;
int vm_tr_slot;
+ int sn_rtc_tr_slot;
#define KVM_MP_STATE_RUNNABLE 0
#define KVM_MP_STATE_UNINITIALIZED 1
@@ -465,6 +466,7 @@ struct kvm_arch {
unsigned long vmm_init_rr;
int online_vcpus;
+ int is_sn2;
struct kvm_ioapic *vioapic;
struct kvm_vm_stat stat;
@@ -472,6 +474,7 @@ struct kvm_arch {
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
+ int iommu_flags;
struct hlist_head irq_ack_notifier_list;
unsigned long irq_sources_bitmap;
@@ -578,6 +581,8 @@ struct kvm_vmm_info{
kvm_vmm_entry *vmm_entry;
kvm_tramp_entry *tramp_entry;
unsigned long vmm_ivt;
+ unsigned long patch_mov_ar;
+ unsigned long patch_mov_ar_sn2;
};
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
@@ -585,7 +590,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);
-static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
#endif /* __ASSEMBLY__*/
#endif
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7a9bff47564f..0a9cc73d35c7 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -146,6 +146,8 @@
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+#define PAGE_KERNEL_UC __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX | \
+ _PAGE_MA_UC)
# ifndef __ASSEMBLY__
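PAGE_KERNEL_UC is PAGE_KERNEL with the uncacheable memory attribute (_PAGE_MA_UC) added, so a device register page can be mapped through a kernel PTE without caching. The kvm-ia64.c hunk later in this patch uses it to map the SN2 RTC; condensed from that hunk:

/* Build an uncached kernel PTE covering the SN2 RTC MMR page. */
rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));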
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5510317db37b..baec6f00f7f3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -636,7 +636,7 @@ void __init acpi_numa_arch_fixup(void)
* success: return IRQ number (>=0)
* failure: return < 0
*/
-int acpi_register_gsi(u32 gsi, int triggering, int polarity)
+int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
{
if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
return gsi;
@@ -678,7 +678,8 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
fadt = (struct acpi_table_fadt *)fadt_header;
- acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+ acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
+ ACPI_ACTIVE_LOW);
return 0;
}
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 166e0d839fa0..f92cef47bf86 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -329,7 +329,7 @@ unmask_irq (unsigned int irq)
}
-static void
+static int
iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
{
#ifdef CONFIG_SMP
@@ -343,15 +343,15 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
cpu = cpumask_first_and(cpu_online_mask, mask);
if (cpu >= nr_cpu_ids)
- return;
+ return -1;
if (irq_prepare_move(irq, cpu))
- return;
+ return -1;
dest = cpu_physical_id(cpu);
if (!iosapic_intr_info[irq].count)
- return; /* not an IOSAPIC interrupt */
+ return -1; /* not an IOSAPIC interrupt */
set_irq_affinity_info(irq, dest, redir);
@@ -376,7 +376,9 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
}
+
#endif
+ return 0;
}
/*
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index acc4d19ae62a..b448197728be 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -610,6 +610,9 @@ static struct irqaction ipi_irqaction = {
.name = "IPI"
};
+/*
+ * KVM uses this interrupt to force a cpu out of guest mode
+ */
static struct irqaction resched_irqaction = {
.handler = dummy_handler,
.flags = IRQF_DISABLED,
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 2b15e233f7fe..0f8ade9331ba 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,7 +12,7 @@
static struct irq_chip ia64_msi_chip;
#ifdef CONFIG_SMP
-static void ia64_set_msi_irq_affinity(unsigned int irq,
+static int ia64_set_msi_irq_affinity(unsigned int irq,
const cpumask_t *cpu_mask)
{
struct msi_msg msg;
@@ -20,10 +20,10 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
int cpu = first_cpu(*cpu_mask);
if (!cpu_online(cpu))
- return;
+ return -1;
if (irq_prepare_move(irq, cpu))
- return;
+ return -1;
read_msi_msg(irq, &msg);
@@ -39,6 +39,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
write_msi_msg(irq, &msg);
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+
+ return 0;
}
#endif /* CONFIG_SMP */
@@ -130,17 +132,17 @@ void arch_teardown_msi_irq(unsigned int irq)
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
int cpu = cpumask_first(mask);
if (!cpu_online(cpu))
- return;
+ return -1;
if (irq_prepare_move(irq, cpu))
- return;
+ return -1;
dmar_msi_read(irq, &msg);
@@ -151,6 +153,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
dmar_msi_write(irq, &msg);
cpumask_copy(irq_desc[irq].affinity, mask);
+
+ return 0;
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 0a2d6b86075a..64d520937874 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -23,7 +23,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HAVE_KVM && EXPERIMENTAL
+ depends on HAVE_KVM && MODULES && EXPERIMENTAL
# for device assignment:
depends on PCI
select PREEMPT_NOTIFIERS
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d20a5db4c4dd..80c57b0a21c4 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -41,6 +41,9 @@
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
#include "misc.h"
#include "vti.h"
@@ -65,6 +68,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ if (vcpu->kvm->arch.is_sn2)
+ return rtc_time();
+ else
+#endif
+ return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
static void kvm_flush_icache(unsigned long start, unsigned long len)
{
int l;
@@ -119,8 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
unsigned long saved_psr;
int slot;
- pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
- PAGE_KERNEL));
+ pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
local_irq_save(saved_psr);
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
@@ -283,6 +295,18 @@ static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
+static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
+{
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+ if (!test_and_set_bit(vector, &vpd->irr[0])) {
+ vcpu->arch.irq_new_pending = 1;
+ kvm_vcpu_kick(vcpu);
+ return 1;
+ }
+ return 0;
+}
+
/*
* offset: address offset to IPI space.
* value: deliver value.
@@ -292,20 +316,20 @@ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
{
switch (dm) {
case SAPIC_FIXED:
- kvm_apic_set_irq(vcpu, vector, 0);
break;
case SAPIC_NMI:
- kvm_apic_set_irq(vcpu, 2, 0);
+ vector = 2;
break;
case SAPIC_EXTINT:
- kvm_apic_set_irq(vcpu, 0, 0);
+ vector = 0;
break;
case SAPIC_INIT:
case SAPIC_PMI:
default:
printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
- break;
+ return;
}
+ __apic_accept_irq(vcpu, vector);
}
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
@@ -413,6 +437,23 @@ static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
+static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
+{
+ unsigned long pte, rtc_phys_addr, map_addr;
+ int slot;
+
+ map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
+ rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
+ pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
+ slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
+ vcpu->arch.sn_rtc_tr_slot = slot;
+ if (slot < 0) {
+ printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
+ slot = 0;
+ }
+ return slot;
+}
+
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
@@ -426,7 +467,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
if (irqchip_in_kernel(vcpu->kvm)) {
- vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+ vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
if (time_after(vcpu_now_itc, vpd->itm)) {
vcpu->arch.timer_check = 1;
@@ -447,10 +488,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
hrtimer_cancel(p_ht);
vcpu->arch.ht_active = 0;
- if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
+ kvm_cpu_has_pending_timer(vcpu))
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
- vcpu->arch.mp_state =
- KVM_MP_STATE_RUNNABLE;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
@@ -551,22 +592,35 @@ static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
if (r < 0)
goto out;
vcpu->arch.vm_tr_slot = r;
+
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ if (kvm->arch.is_sn2) {
+ r = kvm_sn2_setup_mappings(vcpu);
+ if (r < 0)
+ goto out;
+ }
+#endif
+
r = 0;
out:
return r;
-
}
static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
-
+ struct kvm *kvm = vcpu->kvm;
ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
-
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ if (kvm->arch.is_sn2)
+ ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
+#endif
}
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
+ unsigned long psr;
+ int r;
int cpu = smp_processor_id();
if (vcpu->arch.last_run_cpu != cpu ||
@@ -578,36 +632,27 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
vcpu->arch.host_rr6 = ia64_get_rr(RR6);
vti_set_rr6(vcpu->arch.vmm_rr);
- return kvm_insert_vmm_mapping(vcpu);
+ local_irq_save(psr);
+ r = kvm_insert_vmm_mapping(vcpu);
+ local_irq_restore(psr);
+ return r;
}
+
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
kvm_purge_vmm_mapping(vcpu);
vti_set_rr6(vcpu->arch.host_rr6);
}
-static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
union context *host_ctx, *guest_ctx;
int r;
- /*Get host and guest context with guest address space.*/
- host_ctx = kvm_get_host_context(vcpu);
- guest_ctx = kvm_get_guest_context(vcpu);
-
- r = kvm_vcpu_pre_transition(vcpu);
- if (r < 0)
- goto out;
- kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
- kvm_vcpu_post_transition(vcpu);
- r = 0;
-out:
- return r;
-}
-
-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
-{
- int r;
+ /*
+ * down_read() may sleep and return with interrupts enabled
+ */
+ down_read(&vcpu->kvm->slots_lock);
again:
if (signal_pending(current)) {
@@ -616,26 +661,31 @@ again:
goto out;
}
- /*
- * down_read() may sleep and return with interrupts enabled
- */
- down_read(&vcpu->kvm->slots_lock);
-
preempt_disable();
local_irq_disable();
- vcpu->guest_mode = 1;
+ /*Get host and guest context with guest address space.*/
+ host_ctx = kvm_get_host_context(vcpu);
+ guest_ctx = kvm_get_guest_context(vcpu);
+
+ clear_bit(KVM_REQ_KICK, &vcpu->requests);
+
+ r = kvm_vcpu_pre_transition(vcpu);
+ if (r < 0)
+ goto vcpu_run_fail;
+
+ up_read(&vcpu->kvm->slots_lock);
kvm_guest_enter();
- r = vti_vcpu_run(vcpu, kvm_run);
- if (r < 0) {
- local_irq_enable();
- preempt_enable();
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- goto out;
- }
+
+ /*
+ * Transition to the guest
+ */
+ kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+
+ kvm_vcpu_post_transition(vcpu);
vcpu->arch.launched = 1;
- vcpu->guest_mode = 0;
+ set_bit(KVM_REQ_KICK, &vcpu->requests);
local_irq_enable();
/*
@@ -646,9 +696,10 @@ again:
*/
barrier();
kvm_guest_exit();
- up_read(&vcpu->kvm->slots_lock);
preempt_enable();
+ down_read(&vcpu->kvm->slots_lock);
+
r = kvm_handle_exit(kvm_run, vcpu);
if (r > 0) {
@@ -657,12 +708,20 @@ again:
}
out:
+ up_read(&vcpu->kvm->slots_lock);
if (r > 0) {
kvm_resched(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
goto again;
}
return r;
+
+vcpu_run_fail:
+ local_irq_enable();
+ preempt_enable();
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ goto out;
}
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
@@ -788,6 +847,9 @@ struct kvm *kvm_arch_create_vm(void)
if (IS_ERR(kvm))
return ERR_PTR(-ENOMEM);
+
+ kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
kvm_init_vm(kvm);
kvm->arch.online_vcpus = 0;
@@ -884,7 +946,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
RESTORE_REGS(saved_gp);
vcpu->arch.irq_new_pending = 1;
- vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+ vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
set_bit(KVM_REQ_RESUME, &vcpu->requests);
vcpu_put(vcpu);
@@ -1043,10 +1105,6 @@ static void kvm_free_vmm_area(void)
}
}
-static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
int i;
@@ -1165,7 +1223,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
regs->cr_iip = PALE_RESET_ENTRY;
/*Initialize itc offset for vcpus*/
- itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+ itc_offset = 0UL - kvm_get_itc(vcpu);
for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
@@ -1237,6 +1295,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
local_irq_save(psr);
r = kvm_insert_vmm_mapping(vcpu);
+ local_irq_restore(psr);
if (r)
goto fail;
r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
@@ -1254,13 +1313,11 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
goto uninit;
kvm_purge_vmm_mapping(vcpu);
- local_irq_restore(psr);
return 0;
uninit:
kvm_vcpu_uninit(vcpu);
fail:
- local_irq_restore(psr);
return r;
}
@@ -1291,7 +1348,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->kvm = kvm;
cpu = get_cpu();
- vti_vcpu_load(vcpu, cpu);
r = vti_vcpu_setup(vcpu, id);
put_cpu();
@@ -1427,7 +1483,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
}
for (i = 0; i < 4; i++)
regs->insvc[i] = vcpu->arch.insvc[i];
- regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+ regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
SAVE_REGS(xtp);
SAVE_REGS(metaphysical_rr0);
SAVE_REGS(metaphysical_rr4);
@@ -1574,6 +1630,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
void kvm_arch_flush_shadow(struct kvm *kvm)
{
+ kvm_flush_remote_tlbs(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp,
@@ -1616,8 +1673,37 @@ out:
return 0;
}
+
+/*
+ * On SN2, the ITC isn't stable, so copy in fast path code to use the
+ * SN2 RTC, replacing the ITC-based default version.
+ */
+static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
+ struct module *module)
+{
+ unsigned long new_ar, new_ar_sn2;
+ unsigned long module_base;
+
+ if (!ia64_platform_is("sn2"))
+ return;
+
+ module_base = (unsigned long)module->module_core;
+
+ new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
+ new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
+
+ printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
+ "as source\n");
+
+ /*
+ * Copy the SN2 version of mov_ar into place. They are both
+ * the same size, so 6 bundles is sufficient (6 * 0x10).
+ */
+ memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
+}
+
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
- struct module *module)
+ struct module *module)
{
unsigned long module_base;
unsigned long vmm_size;
@@ -1639,6 +1725,7 @@ static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
return -EFAULT;
memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+ kvm_patch_vmm(vmm_info, module);
kvm_flush_icache(kvm_vmm_base, vmm_size);
/*Recalculate kvm_vmm_info based on new VMM*/
@@ -1792,38 +1879,24 @@ void kvm_arch_hardware_unsetup(void)
{
}
-static void vcpu_kick_intr(void *info)
-{
-#ifdef DEBUG
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
- printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
-#endif
-}
-
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
- int ipi_pcpu = vcpu->cpu;
- int cpu = get_cpu();
+ int me;
+ int cpu = vcpu->cpu;
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- if (vcpu->guest_mode && cpu != ipi_pcpu)
- smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+ me = get_cpu();
+ if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
+ if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
+ smp_send_reschedule(cpu);
put_cpu();
}
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
-
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-
- if (!test_and_set_bit(vec, &vpd->irr[0])) {
- vcpu->arch.irq_new_pending = 1;
- kvm_vcpu_kick(vcpu);
- return 1;
- }
- return 0;
+ return __apic_accept_irq(vcpu, irq->vector);
}
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
@@ -1836,20 +1909,18 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
return 0;
}
-struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
- unsigned long bitmap)
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
- struct kvm_vcpu *lvcpu = kvm->vcpus[0];
- int i;
-
- for (i = 1; i < kvm->arch.online_vcpus; i++) {
- if (!kvm->vcpus[i])
- continue;
- if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
- lvcpu = kvm->vcpus[i];
- }
+ return vcpu1->arch.xtp - vcpu2->arch.xtp;
+}
- return lvcpu;
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+ int short_hand, int dest, int dest_mode)
+{
+ struct kvm_lapic *target = vcpu->arch.apic;
+ return (dest_mode == 0) ?
+ kvm_apic_match_physical_addr(target, dest) :
+ kvm_apic_match_logical_addr(target, dest);
}
static int find_highest_bits(int *dat)
@@ -1888,6 +1959,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
return 0;
}
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /* do real check here */
+ return 1;
+}
+
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.timer_fired;
@@ -1918,6 +1995,7 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
long psr;
local_irq_save(psr);
r = kvm_insert_vmm_mapping(vcpu);
+ local_irq_restore(psr);
if (r)
goto fail;
@@ -1930,7 +2008,6 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
kvm_purge_vmm_mapping(vcpu);
r = 0;
fail:
- local_irq_restore(psr);
return r;
}
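Every direct ia64_getreg(_IA64_REG_AR_ITC) read in this file now goes through kvm_get_itc(), so guest time is kept a fixed offset away from whichever host clock is stable on the platform (the SN2 RTC, or AR.ITC elsewhere). The invariant the conversions preserve, condensed from the hunks above:

/* Setting a guest's ITC records an offset against the host clock... */
vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);

/* ...and every later read rebuilds guest time from that same offset. */
vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;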
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index a8ae52ed5635..e4b82319881d 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -21,6 +21,9 @@
#include <linux/kvm_host.h>
#include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/shub_mmr.h>
#include "vti.h"
#include "misc.h"
@@ -188,12 +191,35 @@ static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
return result;
}
-static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+/*
+ * On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
+ * RTC is used instead. This function patches the ratios from SAL
+ * to match the RTC before providing them to the guest.
+ */
+static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
{
+ struct pal_freq_ratio *ratio;
+ unsigned long sal_freq, sal_drift, factor;
+
+ result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &sal_freq, &sal_drift);
+ ratio = (struct pal_freq_ratio *)&result->v2;
+ factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
+ sn_rtc_cycles_per_second;
+
+ ratio->num = 3;
+ ratio->den = factor;
+}
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
struct ia64_pal_retval result;
PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+
+ if (vcpu->kvm->arch.is_sn2)
+ sn2_patch_itc_freq_ratios(&result);
+
return result;
}
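sn2_patch_itc_freq_ratios() pins the numerator at 3 and derives the denominator so that base_frequency * num / den lands on the RTC rate; the sn_rtc_cycles_per_second / 2 term rounds to the nearest integer. A worked example with assumed figures (illustration only, not measured hardware values):

/* Assume sal_freq = 100 MHz and sn_rtc_cycles_per_second = 6 MHz. */
factor = (100000000UL * 3 + 6000000UL / 2) / 6000000UL;	/* = 50 */

/* Guest-visible ITC rate: 100 MHz * 3 / 50 = 6 MHz, the RTC rate. */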
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index 6d6cbcb14893..ee541cebcd78 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -20,6 +20,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
+ int short_hand, int dest, int dest_mode);
+int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
+#define kvm_apic_present(x) (true)
#endif
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index 32254ce9a1bd..f793be3effff 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -11,6 +11,7 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
+#include <asm/kvm_host.h>
#include "vti.h"
#include "asm-offsets.h"
@@ -140,6 +141,35 @@ GLOBAL_ENTRY(kvm_asm_mov_from_ar)
;;
END(kvm_asm_mov_from_ar)
+/*
+ * Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
+ * clock as its source for emulating the ITC. This version will be
+ * copied on top of the original version if the host is determined to
+ * be an SN2.
+ */
+GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
+ add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+ movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
+
+ add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+ extr.u r17=r25,6,7
+ mov r24=b0
+ ;;
+ ld8 r18=[r18]
+ ld8 r19=[r19]
+ addl r20=@gprel(asm_mov_to_reg),gp
+ ;;
+ add r19=r19,r18
+ shladd r17=r17,4,r20
+ ;;
+ adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+ st8 [r16] = r19
+ mov b0=r17
+ br.sptk.few b0
+ ;;
+END(kvm_asm_mov_from_ar_sn2)
+
+
// mov r1=rr[r3]
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
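In C terms, the patched-in bundle sequence above fetches the vcpu's itc_offset, adds the RTC value read through the fixed translation installed by kvm_sn2_setup_mappings(), records the result in last_itc, and hands it to the target register. A rough paraphrase of the assembly (not the generated code, and not compilable outside the VMM):

/* Paraphrase of kvm_asm_mov_from_ar_sn2. */
u64 *rtc = (u64 *)(KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT));	/* mapped RTC */
u64 val  = vcpu->arch.itc_offset + *rtc;	/* emulated AR.ITC reading */
vcpu->arch.last_itc = val;			/* cache the reading       */
/* val is then delivered to the guest's target register via asm_mov_to_reg. */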
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index b1dc80952d91..a8f84da04b49 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -652,20 +652,25 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
unsigned long isr, unsigned long iim)
{
struct kvm_vcpu *v = current_vcpu;
+ long psr;
if (ia64_psr(regs)->cpl == 0) {
/* Allow hypercalls only when cpl = 0. */
if (iim == DOMN_PAL_REQUEST) {
+ local_irq_save(psr);
set_pal_call_data(v);
vmm_transition(v);
get_pal_call_result(v);
vcpu_increment_iip(v);
+ local_irq_restore(psr);
return;
} else if (iim == DOMN_SAL_REQUEST) {
+ local_irq_save(psr);
set_sal_call_data(v);
vmm_transition(v);
get_sal_call_result(v);
vcpu_increment_iip(v);
+ local_irq_restore(psr);
return;
}
}
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a18ee17b9192..a2c6c15e4761 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -788,13 +788,29 @@ void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
}
+/*
+ * The Altix RTC is mapped specially here for the vmm module
+ */
+#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
+static long kvm_get_itc(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
+ struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
+
+ if (kvm->arch.is_sn2)
+ return (*SN_RTC_BASE);
+ else
+#endif
+ return ia64_getreg(_IA64_REG_AR_ITC);
+}
+
/************************************************************************
* lsapic timer
***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
unsigned long guest_itc;
- guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+ guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
if (guest_itc >= VMX(vcpu, last_itc)) {
VMX(vcpu, last_itc) = guest_itc;
@@ -809,7 +825,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
struct kvm_vcpu *v;
struct kvm *kvm;
int i;
- long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+ long itc_offset = val - kvm_get_itc(vcpu);
unsigned long vitv = VCPU(vcpu, itv);
kvm = (struct kvm *)KVM_VM_BASE;
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index 9eee5c04bacc..f4b4c899bb6c 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -30,15 +30,19 @@ MODULE_AUTHOR("Intel");
MODULE_LICENSE("GPL");
extern char kvm_ia64_ivt;
+extern char kvm_asm_mov_from_ar;
+extern char kvm_asm_mov_from_ar_sn2;
extern fpswa_interface_t *vmm_fpswa_interface;
long vmm_sanity = 1;
struct kvm_vmm_info vmm_info = {
- .module = THIS_MODULE,
- .vmm_entry = vmm_entry,
- .tramp_entry = vmm_trampoline,
- .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
+ .module = THIS_MODULE,
+ .vmm_entry = vmm_entry,
+ .tramp_entry = vmm_trampoline,
+ .vmm_ivt = (unsigned long)&kvm_ia64_ivt,
+ .patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
+ .patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
};
static int __init kvm_vmm_init(void)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ef1a017a318..40920c630649 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15)ssm psr.i // restore psr.i
+ (p15)ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer
;;
KVM_SAVE_REST
@@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
;;
srlz.i
;;
- //(p15) ssm psr.i
+ (p15) ssm psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -1333,7 +1333,7 @@ hostret = r24
;;
(p7) srlz.i
;;
-//(p6) ssm psr.i
+(p6) ssm psr.i
;;
mov rp=rpsave
mov ar.pfs=pfssave
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 2c2501f13159..4290a429bf7c 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -254,7 +254,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"(p7) st8 [%2]=r9;;"
"ssm psr.ic;;"
"srlz.d;;"
- /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+ "ssm psr.i;;"
+ "srlz.d;;"
: "=r"(ret) : "r"(iha), "r"(pte):"memory");
return ret;
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 66fd705e82c0..764f26abac05 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -227,7 +227,7 @@ finish_up:
return new_irq_info;
}
-static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
nasid_t nasid;
@@ -239,6 +239,8 @@ static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
sn_irq_lh[irq], list)
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
+
+ return 0;
}
#ifdef CONFIG_SMP
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 81e428943d73..fbbfb9701201 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,7 +151,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
}
#ifdef CONFIG_SMP
-static void sn_set_msi_irq_affinity(unsigned int irq,
+static int sn_set_msi_irq_affinity(unsigned int irq,
const struct cpumask *cpu_mask)
{
struct msi_msg msg;
@@ -168,7 +168,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
cpu = cpumask_first(cpu_mask);
sn_irq_info = sn_msi_info[irq].sn_irq_info;
if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
- return;
+ return -1;
/*
* Release XIO resources for the old MSI PCI address
@@ -189,7 +189,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
sn_msi_info[irq].sn_irq_info = new_irq_info;
if (new_irq_info == NULL)
- return;
+ return -1;
/*
* Map the xio address into bus space
@@ -206,6 +206,8 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
write_msi_msg(irq, &msg);
cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+
+ return 0;
}
#endif /* CONFIG_SMP */