author     Arnd Bergmann <arnd@arndb.de>          2006-01-05 14:05:29 +0000
committer  Paul Mackerras <paulus@samba.org>      2006-01-09 15:44:57 +1100
commit     2fb9d2063626374dd8a2514b3a730facac8235d8
tree       b410dcdbc5aee656c37951be36951130450549e7 /arch
parent     aeb013772a2cc85a8d0baffd64977d2888bc781d
[PATCH] spufs: set irq affinity for running threads
So far, all SPU-triggered interrupts have always ended up on
the first SMT thread, which is a bad solution.
This patch sets the interrupt affinity to the CPU that the
thread was last running on when it enters execution on an SPU.
This should result in a significant reduction in IPI calls and
better cache locality for SPE thread-specific data.
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
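
For illustration only, here is a minimal user-space sketch (not part of the patch) of the encoding the change below introduces: setup_iic() stores a per-CPU IIC target id of (nodeid << 4) | 0xe/0xf depending on the SMT thread, and spu_irq_setaffinity() replicates that id into the SPU interrupt routing word for the three interrupt classes. The helper names here are hypothetical stand-ins for the kernel functions in the diff.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-CPU target id computed in setup_iic():
 * node id in the high nibble, 0xe/0xf selecting the SMT thread. */
static uint8_t iic_target_id(int cpu)
{
	int nodeid = cpu / 2;	/* two SMT threads per node */
	return (nodeid << 4) | ((cpu & 1) ? 0xf : 0xe);
}

/* Stand-in for the route value built in spu_irq_setaffinity():
 * the same target id is used for interrupt classes 0, 1 and 2. */
static uint64_t spu_int_route(int cpu)
{
	uint64_t target = iic_target_id(cpu);
	return target << 48 | target << 32 | target << 16;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu %d: target 0x%02x route 0x%016llx\n", cpu,
		       iic_target_id(cpu),
		       (unsigned long long)spu_int_route(cpu));
	return 0;
}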
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/platforms/cell/interrupt.c   | 42
-rw-r--r--   arch/powerpc/platforms/cell/interrupt.h   |  1
-rw-r--r--   arch/powerpc/platforms/cell/spu_base.c    |  8
-rw-r--r--   arch/powerpc/platforms/cell/spufs/sched.c |  5
4 files changed, 40 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 7fbe78a9327d..63aa52acf441 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -23,6 +23,7 @@
 #include <linux/config.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
@@ -55,6 +56,7 @@ struct iic_regs {
 struct iic {
 	struct iic_regs __iomem *regs;
+	u8 target_id;
 };
 
 static DEFINE_PER_CPU(struct iic, iic);
@@ -172,12 +174,11 @@ int iic_get_irq(struct pt_regs *regs)
 	return irq;
 }
 
-static struct iic_regs __iomem *find_iic(int cpu)
+static int setup_iic(int cpu, struct iic *iic)
 {
 	struct device_node *np;
 	int nodeid = cpu / 2;
 	unsigned long regs;
-	struct iic_regs __iomem *iic_regs;
 
 	for (np = of_find_node_by_type(NULL, "cpu");
 	     np;
@@ -188,20 +189,23 @@ static struct iic_regs __iomem *find_iic(int cpu)
 	if (!np) {
 		printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
-		iic_regs = NULL;
-	} else {
-		regs = *(long *)get_property(np, "iic", NULL);
-
-		/* hack until we have decided on the devtree info */
-		regs += 0x400;
-		if (cpu & 1)
-			regs += 0x20;
-
-		printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
-		iic_regs = __ioremap(regs, sizeof(struct iic_regs),
-				_PAGE_NO_CACHE);
+		iic->regs = NULL;
+		iic->target_id = 0xff;
+		return -ENODEV;
 	}
-	return iic_regs;
+
+	regs = *(long *)get_property(np, "iic", NULL);
+
+	/* hack until we have decided on the devtree info */
+	regs += 0x400;
+	if (cpu & 1)
+		regs += 0x20;
+
+	printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
+	iic->regs = __ioremap(regs, sizeof(struct iic_regs),
+			_PAGE_NO_CACHE);
+	iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -227,6 +231,12 @@ void iic_cause_IPI(int cpu, int mesg)
 	out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
 }
 
+u8 iic_get_target_id(int cpu)
+{
+	return per_cpu(iic, cpu).target_id;
+}
+EXPORT_SYMBOL_GPL(iic_get_target_id);
+
 static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
 {
 	smp_message_recv(iic_irq_to_ipi(irq), regs);
@@ -276,7 +286,7 @@ void iic_init_IRQ(void)
 	irq_offset = 0;
 	for_each_cpu(cpu) {
 		iic = &per_cpu(iic, cpu);
-		iic->regs = find_iic(cpu);
+		setup_iic(cpu, iic);
 		if (iic->regs)
 			out_be64(&iic->regs->prio, 0xff);
 	}
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 37d58e6fd0c6..a14bd38791c0 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -54,6 +54,7 @@ extern void iic_setup_cpu(void);
 extern void iic_local_enable(void);
 extern void iic_local_disable(void);
+extern u8 iic_get_target_id(int cpu);
 
 extern void spider_init_IRQ(void);
 extern int spider_get_irq(unsigned long int_pending);
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7fe3fa3da0e9..d75ae03df686 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -507,6 +507,14 @@ int spu_irq_class_1_bottom(struct spu *spu)
 	return ret;
 }
 
+void spu_irq_setaffinity(struct spu *spu, int cpu)
+{
+	u64 target = iic_get_target_id(cpu);
+	u64 route = target << 48 | target << 32 | target << 16;
+	spu_int_route_set(spu, route);
+}
+EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
+
 static void __iomem * __init map_spe_prop(struct device_node *n,
 		const char *name)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c34198c29159..963182fbd1aa 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -357,6 +357,11 @@ int spu_activate(struct spu_context *ctx, u64 flags)
 	if (!spu)
 		return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
 	bind_context(spu, ctx);
+	/*
+	 * We're likely to wait for interrupts on the same
+	 * CPU that we are now on, so send them here.
+	 */
+	spu_irq_setaffinity(spu, raw_smp_processor_id());
 	put_active_spu(spu);
 	return 0;
 }