author    Geoff Levand <geoffrey.levand@am.sony.com>    2006-06-19 20:33:30 +0200
committer Paul Mackerras <paulus@samba.org>    2006-06-21 15:01:31 +1000
commit    a91942ae7ebd518006dcbeb2a1d7b147253c080e (patch)
tree      74a923f4847495b509089d92010c42c0cb123f4a /arch/powerpc/platforms
parent    540270d82db943855538cea5d0c790e7e669dda0 (diff)
[POWERPC] spufs: fix spu irq affinity setting
This changes the hypervisor abstraction of setting cpu affinity to a higher level to avoid platform dependent interrupt controller routines.

I replaced spu_priv1_ops:spu_int_route_set() with a new routine spu_priv1_ops:spu_cpu_affinity_set(). As a by-product, this change eliminated what looked like an existing bug in the set affinity code where spu_int_route_set() mistakenly called int_stat_get().

Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
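For reference, callers reach the new op through the generic spu_priv1_ops dispatch; the header side of that change lives outside the path-limited diffstat below. The following is only a sketch of the kind of static-inline wrapper the new op implies, assuming the same dispatch pattern used by the other int_* wrappers (it is not copied from this diff):

/* Sketch only: a wrapper, meant to sit in the spu_priv1 header next to
 * the other op wrappers, that forwards spu_cpu_affinity_set() to whatever
 * platform backend is installed in spu_priv1_ops.
 */
static inline void
spu_cpu_affinity_set(struct spu *spu, int cpu)
{
	spu_priv1_ops->cpu_affinity_set(spu, cpu);
}

With such a wrapper in place, common code like spufs can request affinity for a plain cpu number and never see the MMIO interrupt-routing details.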
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--   arch/powerpc/platforms/cell/spu_base.c         8
-rw-r--r--   arch/powerpc/platforms/cell/spu_priv1_mmio.c   8
-rw-r--r--   arch/powerpc/platforms/cell/spufs/sched.c      3
3 files changed, 8 insertions, 11 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index c3bb7299c4b5..8ca22394e4d3 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -522,14 +522,6 @@ int spu_irq_class_1_bottom(struct spu *spu)
 	return ret;
 }
 
-void spu_irq_setaffinity(struct spu *spu, int cpu)
-{
-	u64 target = iic_get_target_id(cpu);
-	u64 route = target << 48 | target << 32 | target << 16;
-	spu_int_route_set(spu, route);
-}
-EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
-
 static int __init find_spu_node_id(struct device_node *spe)
 {
 	unsigned int *id;
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index abe8a84925d2..71b69f0a1a48 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -24,6 +24,8 @@
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 
+#include "interrupt.h"
+
 static void int_mask_and(struct spu *spu, int class, u64 mask)
 {
 	u64 old_mask;
@@ -60,8 +62,10 @@ static u64 int_stat_get(struct spu *spu, int class)
 	return in_be64(&spu->priv1->int_stat_RW[class]);
 }
 
-static void int_route_set(struct spu *spu, u64 route)
+static void cpu_affinity_set(struct spu *spu, int cpu)
 {
+	u64 target = iic_get_target_id(cpu);
+	u64 route = target << 48 | target << 32 | target << 16;
 	out_be64(&spu->priv1->int_route_RW, route);
 }
 
@@ -138,7 +142,7 @@ const struct spu_priv1_ops spu_priv1_mmio_ops =
 	.int_mask_get = int_mask_get,
 	.int_stat_clear = int_stat_clear,
 	.int_stat_get = int_stat_get,
-	.int_route_set = int_route_set,
+	.cpu_affinity_set = cpu_affinity_set,
 	.mfc_dar_get = mfc_dar_get,
 	.mfc_dsisr_get = mfc_dsisr_get,
 	.mfc_dsisr_set = mfc_dsisr_set,
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index bf652cd77000..3dcc5d8d66b9 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -43,6 +43,7 @@
 #include <asm/mmu_context.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
+#include <asm/spu_priv1.h>
 #include "spufs.h"
 
 #define SPU_MIN_TIMESLICE (100 * HZ / 1000)
@@ -363,7 +364,7 @@ int spu_activate(struct spu_context *ctx, u64 flags)
 		 * We're likely to wait for interrupts on the same
 		 * CPU that we are now on, so send them here.
 		 */
-		spu_irq_setaffinity(spu, raw_smp_processor_id());
+		spu_cpu_affinity_set(spu, raw_smp_processor_id());
 		put_active_spu(spu);
 		return 0;
 	}
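
The commit message's point about avoiding platform dependent interrupt controller routines is that the op now takes a plain cpu number, so a platform that cannot program int_route_RW directly could still implement it. The following is purely an illustration under assumed names (hv_spu_set_affinity() is made up and does not exist in the tree), not code from this patch:

/* Illustration only: a hypothetical hypervisor-backed spu_priv1_ops
 * implementation of the new op. hv_spu_set_affinity() is a made-up
 * hypercall wrapper used here solely to show the shape of a backend
 * that never touches the IIC routing register itself.
 */
static void hv_cpu_affinity_set(struct spu *spu, int cpu)
{
	hv_spu_set_affinity(spu->number, cpu);
}

const struct spu_priv1_ops spu_priv1_hv_ops = {
	/* ... the remaining ops ... */
	.cpu_affinity_set = hv_cpu_affinity_set,
};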