author		Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>	2007-07-17 21:22:48 +0900
committer	Tony Luck <tony.luck@intel.com>				2007-07-17 09:57:42 -0700
commit		cd378f18cf73d92bf0b6e1e6b5759b5dd729a9f2
tree		6960768a1ddecb74e6a5fa1dfc978e5df8635eb2 /arch/ia64
parent		4994be1b3fe9120c88022ff5c0c33f6312b17adb
[IA64] Support irq migration across domain
Add support for IRQ migration across vector domain.

Signed-off-by: Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com>
Signed-off-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
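Background for the diff below: previously, iosapic_set_affinity() and ia64_set_msi_irq_affinity() masked the requested CPU set against irq_to_domain(irq), so an IRQ could never leave the vector domain it was first bound to. The core of this patch is reassign_irq_vector(), which (under vector_lock) releases the old vector/domain binding and binds the IRQ to a free vector in the destination CPU's allocation domain. The following is a minimal user-space sketch of that reassignment idea, not kernel code; the names (NR_CPUS_SIM, NR_VECTORS_SIM, struct irq_cfg_sim, per-CPU bitmask domains) are simplified stand-ins for the kernel's types.

/* Simplified model of per-CPU vector domains: a vector is free in a
 * domain only if no CPU in that domain already uses it.  All names here
 * are hypothetical stand-ins, not the kernel's definitions. */
#include <stdio.h>

#define NR_CPUS_SIM       4
#define NR_VECTORS_SIM    16
#define VECTOR_UNASSIGNED (-1)

typedef unsigned int cpumask_sim;                /* one bit per CPU */

struct irq_cfg_sim {
	int vector;
	cpumask_sim domain;
};

/* vector_table[v] = set of CPUs on which vector v is already in use */
static cpumask_sim vector_table[NR_VECTORS_SIM];

static cpumask_sim vector_allocation_domain(int cpu)
{
	return 1u << cpu;                        /* per-CPU domains in this model */
}

static int find_unassigned_vector(cpumask_sim domain)
{
	for (int v = 0; v < NR_VECTORS_SIM; v++)
		if (!(vector_table[v] & domain))
			return v;
	return -1;
}

/* Mirrors the shape of __reassign_irq_vector(): release the old binding,
 * then bind the IRQ to a free vector in the destination CPU's domain. */
static int reassign_vector(struct irq_cfg_sim *cfg, int cpu)
{
	if (cfg->vector == VECTOR_UNASSIGNED)
		return -1;
	if (cfg->domain & (1u << cpu))
		return 0;                        /* target CPU already reachable */

	cpumask_sim domain = vector_allocation_domain(cpu);
	int vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -1;

	vector_table[cfg->vector] &= ~cfg->domain;   /* like __clear_irq_vector() */
	cfg->vector = vector;                        /* like __bind_irq_vector()  */
	cfg->domain = domain;
	vector_table[vector] |= domain;
	return 0;
}

int main(void)
{
	struct irq_cfg_sim cfg = { .vector = 3, .domain = 1u << 0 };
	vector_table[3] = cfg.domain;

	if (!reassign_vector(&cfg, 2))
		printf("irq now uses vector %d on domain 0x%x\n",
		       cfg.vector, cfg.domain);
	return 0;
}

In the real code the whole operation runs with vector_lock held and is refused for offline CPUs; the callers then reprogram the IOSAPIC RTE or MSI message with the (possibly new) vector, as the hunks below show.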
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/iosapic.c	20
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	42
-rw-r--r--	arch/ia64/kernel/msi_ia64.c	20
3 files changed, 70 insertions(+), 12 deletions(-)
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index e647254c270..c101c8bff27 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -354,11 +354,13 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
irq &= (~IA64_IRQ_REDIRECTED);
- /* IRQ migration across domain is not supported yet */
- cpus_and(mask, mask, irq_to_domain(irq));
+ cpus_and(mask, mask, cpu_online_map);
if (cpus_empty(mask))
return;
+ if (reassign_irq_vector(irq, first_cpu(mask)))
+ return;
+
dest = cpu_physical_id(first_cpu(mask));
if (list_empty(&iosapic_intr_info[irq].rtes))
@@ -376,6 +378,8 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
else
/* change delivery mode to fixed */
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
+ low32 &= IOSAPIC_VECTOR_MASK;
+ low32 |= irq_to_vector(irq);
iosapic_intr_info[irq].low32 = low32;
iosapic_intr_info[irq].dest = dest;
@@ -404,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq)
{
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
+ int do_unmask_irq = 0;
+
+ if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+ do_unmask_irq = 1;
+ mask_irq(irq);
+ }
- move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
iosapic_eoi(rte->iosapic->addr, vec);
+
+ if (unlikely(do_unmask_irq)) {
+ move_masked_irq(irq);
+ unmask_irq(irq);
+ }
}
#define iosapic_shutdown_level_irq mask_irq
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index a3667631ed8..22806b94025 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -172,15 +172,13 @@ int bind_irq_vector(int irq, int vector, cpumask_t domain)
return ret;
}
-static void clear_irq_vector(int irq)
+static void __clear_irq_vector(int irq)
{
- unsigned long flags;
int vector, cpu, pos;
cpumask_t mask;
cpumask_t domain;
struct irq_cfg *cfg = &irq_cfg[irq];
- spin_lock_irqsave(&vector_lock, flags);
BUG_ON((unsigned)irq >= NR_IRQS);
BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
vector = cfg->vector;
@@ -193,6 +191,14 @@ static void clear_irq_vector(int irq)
irq_status[irq] = IRQ_UNUSED;
pos = vector - IA64_FIRST_DEVICE_VECTOR;
cpus_andnot(vector_table[pos], vector_table[pos], domain);
+}
+
+static void clear_irq_vector(int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ __clear_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
}
@@ -275,6 +281,36 @@ void destroy_and_reserve_irq(unsigned int irq)
reserve_irq(irq);
}
+static int __reassign_irq_vector(int irq, int cpu)
+{
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ int vector;
+ cpumask_t domain;
+
+ if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+ return -EINVAL;
+ if (cpu_isset(cpu, cfg->domain))
+ return 0;
+ domain = vector_allocation_domain(cpu);
+ vector = find_unassigned_vector(domain);
+ if (vector < 0)
+ return -ENOSPC;
+ __clear_irq_vector(irq);
+ BUG_ON(__bind_irq_vector(irq, vector, domain));
+ return 0;
+}
+
+int reassign_irq_vector(int irq, int cpu)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ ret = __reassign_irq_vector(irq, cpu);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ return ret;
+}
+
/*
* Dynamic irq allocate and deallocation for MSI
*/
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 1d22670cc88..2fdbd5c3f21 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -13,6 +13,7 @@
#define MSI_DATA_VECTOR_SHIFT 0
#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK 0xffffff00
#define MSI_DATA_DELIVERY_SHIFT 8
#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,22 +51,29 @@ static struct irq_chip ia64_msi_chip;
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
struct msi_msg msg;
- u32 addr;
+ u32 addr, data;
+ int cpu = first_cpu(cpu_mask);
- /* IRQ migration across domain is not supported yet */
- cpus_and(cpu_mask, cpu_mask, irq_to_domain(irq));
- if (cpus_empty(cpu_mask))
+ if (!cpu_online(cpu))
+ return;
+
+ if (reassign_irq_vector(irq, cpu))
return;
read_msi_msg(irq, &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DESTID_MASK;
- addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
+ addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
msg.address_lo = addr;
+ data = msg.data;
+ data &= MSI_DATA_VECTOR_MASK;
+ data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+ msg.data = data;
+
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = cpu_mask;
+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
}
#endif /* CONFIG_SMP */
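Because moving an MSI interrupt into a different vector domain may change its vector, ia64_set_msi_irq_affinity() now rewrites both the destination field of msg.address_lo and the vector field of msg.data. The sketch below illustrates that mask-and-or pattern as plain user-space C; MSI_DATA_VECTOR_MASK and MSI_DATA_VECTOR follow the definitions in the hunk above, while struct msi_msg_sim and the address-field constants are simplified, hypothetical stand-ins for the kernel's layout.

/* Illustration of the msg.data / msg.address_lo rewrite performed by
 * ia64_set_msi_irq_affinity().  The address constants and msi_msg_sim
 * are stand-ins, not the kernel definitions. */
#include <stdio.h>

#define MSI_DATA_VECTOR_SHIFT   0
#define MSI_DATA_VECTOR(v)      (((unsigned int)(unsigned char)(v)) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK    0xffffff00u

/* Hypothetical layout: destination CPU id kept in bits 4..19. */
#define MSI_ADDR_DESTID_MASK    0xfff0000fu
#define MSI_ADDR_DESTID_CPU(id) (((unsigned int)(id)) << 4)

struct msi_msg_sim {
	unsigned int address_lo;
	unsigned int data;
};

static void retarget_msi(struct msi_msg_sim *msg, int dest_id, int vector)
{
	unsigned int addr = msg->address_lo;
	unsigned int data = msg->data;

	addr &= MSI_ADDR_DESTID_MASK;            /* clear old destination  */
	addr |= MSI_ADDR_DESTID_CPU(dest_id);    /* program new CPU id     */

	data &= MSI_DATA_VECTOR_MASK;            /* clear old vector       */
	data |= MSI_DATA_VECTOR(vector);         /* program the new vector */

	msg->address_lo = addr;
	msg->data = data;
}

int main(void)
{
	struct msi_msg_sim msg = { .address_lo = 0x00000010, .data = 0x00000030 };

	retarget_msi(&msg, /* dest_id */ 2, /* vector */ 0x41);
	printf("address_lo=0x%08x data=0x%08x\n", msg.address_lo, msg.data);
	return 0;
}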