From 30a6c337dcefa7583fe9289fedb28783af980c0c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:36:45 +0100 Subject: [POWERPC] spufs: remove SPU_CONTEXT_PREEMPT Remove the SPU_CONTEXT_PREEMPT define. It's unused and won't be used in this form after the scheduler rework. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/spufs.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 0941c56df9b..9b44abe921c 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -37,9 +37,6 @@ enum { }; struct spu_context_ops; - -#define SPU_CONTEXT_PREEMPT 0UL - struct spu_gang; struct spu_context { -- cgit v1.2.3 From 5cb23afc9e64841adb43d46160a5c63a80ebfd54 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:36:46 +0100 Subject: [POWERPC] spufs: remove empty last line in run.c Remove the empty last line in arch/powerpc/platforms/cell/spufs/run.c. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/run.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 1acc2ffef8c..51b78da2f0d 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -361,4 +361,3 @@ out: up(&ctx->run_sema); return ret; } - -- cgit v1.2.3 From aa56c16807ba7b8e801216cab012d2f498755ba5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:36:47 +0100 Subject: [POWERPC] spufs: remove superfluous SPU_STATE_SAVED assignments unbind_context already sets the context state to SPU_STATE_SAVED, thus the spu_deactivate callers don't need to do it again. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 4 +--- arch/powerpc/platforms/cell/spufs/sched.c | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 28c718ca3b5..dd89aa7c1f1 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -197,10 +197,8 @@ void spu_acquire_saved(struct spu_context *ctx) up_read(&ctx->state_sema); down_write(&ctx->state_sema); - if (ctx->state == SPU_STATE_RUNNABLE) { + if (ctx->state == SPU_STATE_RUNNABLE) spu_deactivate(ctx); - ctx->state = SPU_STATE_SAVED; - } downgrade_write(&ctx->state_sema); } diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index bd6fe4b7a84..6599cba9689 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -315,7 +315,6 @@ void spu_yield(struct spu_context *ctx) pr_debug("%s: yielding SPU %d NODE %d\n", __FUNCTION__, spu->number, spu->node); spu_deactivate(ctx); - ctx->state = SPU_STATE_SAVED; need_yield = 1; } else { spu->prio = MAX_PRIO; -- cgit v1.2.3 From 81998bafe299b8b675157f0a4dfe8dad43215da9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:36:48 +0100 Subject: [POWERPC] spufs: bind_context sets SPU_STATE_RUNNABLE Only bind_context/unbind_context change the spu context state. Thus we can move all assignments of SPU_STATE_RUNNABLE into bind_context, which parallels the unbind side as well.
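As a rough illustration of the invariant this patch establishes (a minimal, hypothetical C sketch with simplified stand-in types, not the actual spufs code), the context state is only ever written inside the bind/unbind pair, so every caller can rely on those two helpers doing the transition:

/* Hypothetical sketch, not the kernel code: ctx->state transitions
 * live only in bind_context/unbind_context. */
enum spu_state { SPU_STATE_RUNNABLE, SPU_STATE_SAVED };

struct spu;				/* physical SPU, opaque here */

struct spu_context {
	struct spu *spu;
	enum spu_state state;
};

static void bind_context(struct spu *spu, struct spu_context *ctx)
{
	ctx->spu = spu;
	/* ... load the saved state onto the physical spu ... */
	ctx->state = SPU_STATE_RUNNABLE;	/* the only RUNNABLE assignment */
}

static void unbind_context(struct spu *spu, struct spu_context *ctx)
{
	/* ... save the state away from the physical spu ... */
	ctx->spu = NULL;
	ctx->state = SPU_STATE_SAVED;		/* the only SAVED assignment */
}

With the transitions centralized like this, callers such as spu_deactivate no longer touch ctx->state themselves, matching what the previous patch already did for the SAVED side.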
Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 2 -- arch/powerpc/platforms/cell/spufs/sched.c | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index dd89aa7c1f1..ccffc449763 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -139,7 +139,6 @@ int spu_acquire_exclusive(struct spu_context *ctx) ret = spu_activate(ctx, 0); if (ret) goto out; - ctx->state = SPU_STATE_RUNNABLE; } else { /* We need to exclude userspace access to the context. */ spu_unmap_mappings(ctx); @@ -173,7 +172,6 @@ int spu_acquire_runnable(struct spu_context *ctx) ret = spu_activate(ctx, 0); if (ret) goto out; - ctx->state = SPU_STATE_RUNNABLE; } downgrade_write(&ctx->state_sema); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 6599cba9689..7e9657eb690 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -118,6 +118,8 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx) spu->timestamp = jiffies; spu_cpu_affinity_set(spu, raw_smp_processor_id()); spu_switch_notify(spu, ctx); + + ctx->state = SPU_STATE_RUNNABLE; } static inline void unbind_context(struct spu *spu, struct spu_context *ctx) -- cgit v1.2.3 From 202557d29eae528f464652e92085f3b19b05a0a7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:36:49 +0100 Subject: [POWERPC] spufs: sched.c cleanups Various cleanups to sched.c that don't change the global control flow: - add kerneldoc comments to various functions - add spu_ prefixes to various functions - add/remove the spu to/from the active list in spu_bind_context/spu_unbind_context as it's part of the logical operation - add a call to spu_remove_from_active_list to spu_unbind_context as it's logically part of the unbind operation Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 98 +++++++++++++++++++------------ 1 file changed, 61 insertions(+), 37 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 7e9657eb690..1d330f67f5a 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -68,6 +68,43 @@ static inline int node_allowed(int node) return 1; } +/** + * spu_add_to_active_list - add spu to active list + * @spu: spu to add to the active list + */ +static void spu_add_to_active_list(struct spu *spu) +{ + mutex_lock(&spu_prio->active_mutex[spu->node]); + list_add_tail(&spu->list, &spu_prio->active_list[spu->node]); + mutex_unlock(&spu_prio->active_mutex[spu->node]); +} + +/** + * spu_remove_from_active_list - remove spu from active list + * @spu: spu to remove from the active list + * + * This function removes an spu from the active list. If the spu was + * found on the active list the function returns 1, else it doesn't do + * anything and returns 0.
+ */ +static int spu_remove_from_active_list(struct spu *spu) +{ + int node = spu->node; + struct spu *tmp; + int rc = 0; + + mutex_lock(&spu_prio->active_mutex[node]); + list_for_each_entry(tmp, &spu_prio->active_list[node], list) { + if (tmp == spu) { + list_del_init(&spu->list); + rc = 1; + break; + } + } + mutex_unlock(&spu_prio->active_mutex[node]); + return rc; +} + static inline void mm_needs_global_tlbie(struct mm_struct *mm) { int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1; @@ -94,8 +131,12 @@ int spu_switch_event_unregister(struct notifier_block * n) return blocking_notifier_chain_unregister(&spu_switch_notifier, n); } - -static inline void bind_context(struct spu *spu, struct spu_context *ctx) +/** + * spu_bind_context - bind spu context to physical spu + * @spu: physical spu to bind to + * @ctx: context to bind + */ +static void spu_bind_context(struct spu *spu, struct spu_context *ctx) { pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid, spu->number, spu->node); @@ -118,14 +159,24 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx) spu->timestamp = jiffies; spu_cpu_affinity_set(spu, raw_smp_processor_id()); spu_switch_notify(spu, ctx); - + spu_add_to_active_list(spu); ctx->state = SPU_STATE_RUNNABLE; } -static inline void unbind_context(struct spu *spu, struct spu_context *ctx) +/** + * spu_unbind_context - unbind spu context from physical spu + * @spu: physical spu to unbind from + * @ctx: context to unbind + * + * If the spu was on the active list the function returns 1, else 0. + */ +static int spu_unbind_context(struct spu *spu, struct spu_context *ctx) { + int was_active = spu_remove_from_active_list(spu); + pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__, spu->pid, spu->number, spu->node); + spu_switch_notify(spu, NULL); spu_unmap_mappings(ctx); spu_save(&ctx->csa, spu); @@ -143,6 +194,8 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx) ctx->spu = NULL; spu->flags = 0; spu->ctx = NULL; + + return was_active; } static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait, @@ -199,33 +252,6 @@ static void spu_prio_wakeup(void) } } -static int get_active_spu(struct spu *spu) -{ - int node = spu->node; - struct spu *tmp; - int rc = 0; - - mutex_lock(&spu_prio->active_mutex[node]); - list_for_each_entry(tmp, &spu_prio->active_list[node], list) { - if (tmp == spu) { - list_del_init(&spu->list); - rc = 1; - break; - } - } - mutex_unlock(&spu_prio->active_mutex[node]); - return rc; -} - -static void put_active_spu(struct spu *spu) -{ - int node = spu->node; - - mutex_lock(&spu_prio->active_mutex[node]); - list_add_tail(&spu->list, &spu_prio->active_list[node]); - mutex_unlock(&spu_prio->active_mutex[node]); -} - static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags) { struct spu *spu = NULL; @@ -275,8 +301,7 @@ int spu_activate(struct spu_context *ctx, u64 flags) spu_prio_wakeup(); break; } - bind_context(spu, ctx); - put_active_spu(spu); + spu_bind_context(spu, ctx); break; } spu_prio_wait(ctx, flags); @@ -292,14 +317,13 @@ int spu_activate(struct spu_context *ctx, u64 flags) void spu_deactivate(struct spu_context *ctx) { struct spu *spu; - int needs_idle; + int was_active; spu = ctx->spu; if (!spu) return; - needs_idle = get_active_spu(spu); - unbind_context(spu, ctx); - if (needs_idle) { + was_active = spu_unbind_context(spu, ctx); + if (was_active) { spu_free(spu); spu_prio_wakeup(); } -- cgit v1.2.3 From 650f8b0291ecd0abdeadbd0ff3d70c3538e55405 Mon Sep 17 00:00:00 
2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:36:50 +0100 Subject: [POWERPC] spufs: simplify state_mutex The r/w semaphore to lock the spus was overkill and can be replaced with a mutex to make it faster, simpler and easier to debug. It also helps to allow making most spufs interruptible in future patches. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 33 +++++++++-------------------- arch/powerpc/platforms/cell/spufs/sched.c | 8 +++---- arch/powerpc/platforms/cell/spufs/spufs.h | 6 +++--- 3 files changed, 17 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index ccffc449763..c9aab9b1cd8 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) } spin_lock_init(&ctx->mmio_lock); kref_init(&ctx->kref); - init_rwsem(&ctx->state_sema); + mutex_init(&ctx->state_mutex); init_MUTEX(&ctx->run_sema); init_waitqueue_head(&ctx->ibox_wq); init_waitqueue_head(&ctx->wbox_wq); @@ -65,9 +65,9 @@ void destroy_spu_context(struct kref *kref) { struct spu_context *ctx; ctx = container_of(kref, struct spu_context, kref); - down_write(&ctx->state_sema); + mutex_lock(&ctx->state_mutex); spu_deactivate(ctx); - up_write(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); spu_fini_csa(&ctx->csa); if (ctx->gang) spu_gang_remove_ctx(ctx->gang, ctx); @@ -98,12 +98,12 @@ void spu_forget(struct spu_context *ctx) void spu_acquire(struct spu_context *ctx) { - down_read(&ctx->state_sema); + mutex_lock(&ctx->state_mutex); } void spu_release(struct spu_context *ctx) { - up_read(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); } void spu_unmap_mappings(struct spu_context *ctx) @@ -128,7 +128,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) { int ret = 0; - down_write(&ctx->state_sema); + mutex_lock(&ctx->state_mutex); /* ctx is about to be freed, can't acquire any more */ if (!ctx->owner) { ret = -EINVAL; @@ -146,7 +146,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) out: if (ret) - up_write(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); return ret; } @@ -154,14 +154,12 @@ int spu_acquire_runnable(struct spu_context *ctx) { int ret = 0; - down_read(&ctx->state_sema); + mutex_lock(&ctx->state_mutex); if (ctx->state == SPU_STATE_RUNNABLE) { ctx->spu->prio = current->prio; return 0; } - up_read(&ctx->state_sema); - down_write(&ctx->state_sema); /* ctx is about to be freed, can't acquire any more */ if (!ctx->owner) { ret = -EINVAL; @@ -174,29 +172,18 @@ int spu_acquire_runnable(struct spu_context *ctx) goto out; } - downgrade_write(&ctx->state_sema); /* On success, we return holding the lock */ - return ret; out: /* Release here, to simplify calling code. 
*/ - up_write(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); return ret; } void spu_acquire_saved(struct spu_context *ctx) { - down_read(&ctx->state_sema); - - if (ctx->state == SPU_STATE_SAVED) - return; - - up_read(&ctx->state_sema); - down_write(&ctx->state_sema); - + mutex_lock(&ctx->state_mutex); if (ctx->state == SPU_STATE_RUNNABLE) spu_deactivate(ctx); - - downgrade_write(&ctx->state_sema); } diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 1d330f67f5a..c61a34b1408 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -233,11 +233,11 @@ static void spu_prio_wait(struct spu_context *ctx, u64 flags) spu_add_wq(wq, &wait, prio); if (!signal_pending(current)) { - up_write(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__, current->pid, current->prio); schedule(); - down_write(&ctx->state_sema); + mutex_lock(&ctx->state_mutex); } spu_del_wq(wq, &wait, prio); @@ -334,7 +334,7 @@ void spu_yield(struct spu_context *ctx) struct spu *spu; int need_yield = 0; - if (down_write_trylock(&ctx->state_sema)) { + if (mutex_trylock(&ctx->state_mutex)) { if ((spu = ctx->spu) != NULL) { int best = sched_find_first_bit(spu_prio->bitmap); if (best < MAX_PRIO) { @@ -346,7 +346,7 @@ void spu_yield(struct spu_context *ctx) spu->prio = MAX_PRIO; } } - up_write(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); } if (unlikely(need_yield)) yield(); diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 9b44abe921c..de2401afb22 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -23,7 +23,7 @@ #define SPUFS_H #include <linux/kref.h> -#include <linux/rwsem.h> +#include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/fs.h> @@ -53,7 +53,7 @@ struct spu_context { u64 object_id; /* user space pointer for oprofile */ enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; - struct rw_semaphore state_sema; + struct mutex state_mutex; struct semaphore run_sema; struct mm_struct *owner; @@ -173,7 +173,7 @@ int spu_acquire_exclusive(struct spu_context *ctx); static inline void spu_release_exclusive(struct spu_context *ctx) { - up_write(&ctx->state_sema); + mutex_unlock(&ctx->state_mutex); } int spu_activate(struct spu_context *ctx, u64 flags); -- cgit v1.2.3 From 6a0641e51011def4e308fd07387047f5ee50647f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:21 +0100 Subject: [POWERPC] spufs: state_mutex cleanup Various cleanups in code surrounding the state semaphore: - inline spu_acquire/spu_release - cleanup spu_acquire_* and add kerneldoc comments to these functions - remove spu_release_exclusive and replace it with spu_release Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 97 ++++++++++++++++------------- arch/powerpc/platforms/cell/spufs/run.c | 2 +- arch/powerpc/platforms/cell/spufs/spufs.h | 18 +++--- 3 files changed, 64 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index c9aab9b1cd8..f2630dc0db8 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -96,16 +96,6 @@ void spu_forget(struct spu_context *ctx) spu_release(ctx); } -void spu_acquire(struct spu_context *ctx) -{ - mutex_lock(&ctx->state_mutex); -} - -void spu_release(struct spu_context *ctx) -{ -
mutex_unlock(&ctx->state_mutex); -} - void spu_unmap_mappings(struct spu_context *ctx) { if (ctx->local_store) @@ -124,66 +114,85 @@ void spu_unmap_mappings(struct spu_context *ctx) unmap_mapping_range(ctx->psmap, 0, 0x20000, 1); } +/** + * spu_acquire_exclusive - lock spu context and protect against userspace access + * @ctx: spu context to lock + * + * Note: + * Returns 0 and with the context locked on success + * Returns negative error and with the context _unlocked_ on failure. + */ int spu_acquire_exclusive(struct spu_context *ctx) { - int ret = 0; + int ret = -EINVAL; - mutex_lock(&ctx->state_mutex); - /* ctx is about to be freed, can't acquire any more */ - if (!ctx->owner) { - ret = -EINVAL; - goto out; - } + spu_acquire(ctx); + /* + * Context is about to be freed, so we can't acquire it anymore. + */ + if (!ctx->owner) + goto out_unlock; if (ctx->state == SPU_STATE_SAVED) { ret = spu_activate(ctx, 0); if (ret) - goto out; + goto out_unlock; } else { - /* We need to exclude userspace access to the context. */ + /* + * We need to exclude userspace access to the context. + * + * To protect against memory access we invalidate all ptes + * and make sure the pagefault handlers block on the mutex. + */ spu_unmap_mappings(ctx); } -out: - if (ret) - mutex_unlock(&ctx->state_mutex); + return 0; + + out_unlock: + spu_release(ctx); return ret; } +/** + * spu_acquire_runnable - lock spu context and make sure it is in runnable state + * @ctx: spu context to lock + * + * Note: + * Returns 0 and with the context locked on success + * Returns negative error and with the context _unlocked_ on failure. + */ int spu_acquire_runnable(struct spu_context *ctx) { - int ret = 0; - - mutex_lock(&ctx->state_mutex); - if (ctx->state == SPU_STATE_RUNNABLE) { - ctx->spu->prio = current->prio; - return 0; - } - - /* ctx is about to be freed, can't acquire any more */ - if (!ctx->owner) { - ret = -EINVAL; - goto out; - } + int ret = -EINVAL; + spu_acquire(ctx); if (ctx->state == SPU_STATE_SAVED) { + /* + * Context is about to be freed, so we can't acquire it anymore. + */ + if (!ctx->owner) + goto out_unlock; ret = spu_activate(ctx, 0); if (ret) - goto out; - } + goto out_unlock; } else + ctx->spu->prio = current->prio; - /* On success, we return holding the lock */ - return ret; out: /* Release here, to simplify calling code.
*/ - mutex_unlock(&ctx->state_mutex); + return 0; + out_unlock: + spu_release(ctx); return ret; } +/** + * spu_acquire_saved - lock spu context and make sure it is in saved state + * @ctx: spu context to lock + */ void spu_acquire_saved(struct spu_context *ctx) { - mutex_lock(&ctx->state_mutex); - if (ctx->state == SPU_STATE_RUNNABLE) + spu_acquire(ctx); + if (ctx->state != SPU_STATE_SAVED) spu_deactivate(ctx); } diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 51b78da2f0d..e1647311044 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -133,7 +133,7 @@ out_drop_priv: spu_mfc_sr1_set(ctx->spu, sr1); out_unlock: - spu_release_exclusive(ctx); + spu_release(ctx); out: return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index de2401afb22..fa07ec2e2c1 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -158,6 +158,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx); void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx); /* context management */ +static inline void spu_acquire(struct spu_context *ctx) +{ + mutex_lock(&ctx->state_mutex); +} + +static inline void spu_release(struct spu_context *ctx) +{ + mutex_unlock(&ctx->state_mutex); +} + struct spu_context * alloc_spu_context(struct spu_gang *gang); void destroy_spu_context(struct kref *kref); struct spu_context * get_spu_context(struct spu_context *ctx); @@ -165,17 +175,9 @@ int put_spu_context(struct spu_context *ctx); void spu_unmap_mappings(struct spu_context *ctx); void spu_forget(struct spu_context *ctx); -void spu_acquire(struct spu_context *ctx); -void spu_release(struct spu_context *ctx); int spu_acquire_runnable(struct spu_context *ctx); void spu_acquire_saved(struct spu_context *ctx); int spu_acquire_exclusive(struct spu_context *ctx); - -static inline void spu_release_exclusive(struct spu_context *ctx) -{ - mutex_unlock(&ctx->state_mutex); -} - int spu_activate(struct spu_context *ctx, u64 flags); void spu_deactivate(struct spu_context *ctx); void spu_yield(struct spu_context *ctx); -- cgit v1.2.3 From 8389998ae9ea2888c86c446f7911ddced50052a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:22 +0100 Subject: [POWERPC] spufs: move prio to spu_context It doesn't make any sense to have a priority field in the physical spu structure. Move it into the spu context instead.
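As a hedged sketch of the resulting pattern (simplified and partly hypothetical, not the complete kernel function), the creating task's priority is sampled into the context once at allocation time, so scheduling code never has to reach back into a possibly unrelated current task later:

/* Sketch: sample current->prio once at context creation and use the
 * cached value for all later scheduling decisions.  Error handling
 * and the other initialization are elided. */
#include <linux/slab.h>
#include <linux/sched.h>

struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	/* ... other fields set up here ... */
	ctx->prio = current->prio;	/* snapshot, not a live reference */
	return ctx;
}

The physical spu structure then no longer needs a prio field at all, which is exactly what the diff below removes (including the xmon dump of it).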
Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 4 ++-- arch/powerpc/platforms/cell/spufs/sched.c | 6 +----- arch/powerpc/platforms/cell/spufs/spufs.h | 3 +++ arch/powerpc/xmon/xmon.c | 1 - 4 files changed, 6 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index f2630dc0db8..88a88718630 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -53,6 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) ctx->owner = get_task_mm(current); if (gang) spu_gang_add_ctx(gang, ctx); + ctx->prio = current->prio; goto out; out_free: kfree(ctx); @@ -176,8 +177,7 @@ int spu_acquire_runnable(struct spu_context *ctx) ret = spu_activate(ctx, 0); if (ret) goto out_unlock; - } else - ctx->spu->prio = current->prio; + } return 0; diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index c61a34b1408..03b357ce398 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -145,7 +145,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) ctx->spu = spu; ctx->ops = &spu_hw_ops; spu->pid = current->pid; - spu->prio = current->prio; spu->mm = ctx->owner; mm_needs_global_tlbie(spu->mm); spu->ibox_callback = spufs_ibox_callback; @@ -189,7 +188,6 @@ static int spu_unbind_context(struct spu *spu, struct spu_context *ctx) spu->dma_callback = NULL; spu->mm = NULL; spu->pid = 0; - spu->prio = MAX_PRIO; ctx->ops = &spu_backing_ops; ctx->spu = NULL; spu->flags = 0; @@ -223,7 +221,7 @@ static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait, static void spu_prio_wait(struct spu_context *ctx, u64 flags) { - int prio = current->prio; + int prio = ctx->prio; wait_queue_head_t *wq = &spu_prio->waitq[prio]; DEFINE_WAIT(wait); @@ -342,8 +340,6 @@ void spu_yield(struct spu_context *ctx) __FUNCTION__, spu->number, spu->node); spu_deactivate(ctx); need_yield = 1; - } else { - spu->prio = MAX_PRIO; } } mutex_unlock(&ctx->state_mutex); diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index fa07ec2e2c1..b500e94188b 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -74,6 +74,9 @@ struct spu_context { struct list_head gang_list; struct spu_gang *gang; + + /* scheduler fields */ + int prio; }; struct spu_gang { diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 77540a2f770..0183e5fbaf4 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2811,7 +2811,6 @@ static void dump_spu_fields(struct spu *spu) DUMP_FIELD(spu, "0x%lx", irqs[2]); DUMP_FIELD(spu, "0x%x", slb_replace); DUMP_FIELD(spu, "%d", pid); - DUMP_FIELD(spu, "%d", prio); DUMP_FIELD(spu, "0x%p", mm); DUMP_FIELD(spu, "0x%p", ctx); DUMP_FIELD(spu, "0x%p", rq); -- cgit v1.2.3 From 079cdb61614c466c939ebf74c7ef6745667bc61e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:23 +0100 Subject: [POWERPC] spufs: runqueue simplification This is the biggest patch in this series, and it reworks the guts of the spu scheduler runqueue mechanism: - instead of embedding a waitqueue in the runqueue there is now a simple doubly-linked list, the actual wakeups happen by reusing the stop_wq in the spu context (maybe we should rename it one day) - spu_free and 
spu_prio_wakeup are merged into a single spu_reschedule function - various functionality is split out into small helpers, and kerneldoc comments are added in various places to document what's going on. - spu_activate is rewritten into a tight loop by removing test for various impossible conditions and using the infrastructure in this patch. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 159 +++++++++++++++++------------- arch/powerpc/platforms/cell/spufs/spufs.h | 1 + 2 files changed, 93 insertions(+), 67 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 03b357ce398..6f8e2257c5a 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -49,7 +49,8 @@ #define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1) struct spu_prio_array { unsigned long bitmap[SPU_BITMAP_SIZE]; - wait_queue_head_t waitq[MAX_PRIO]; + struct list_head runq[MAX_PRIO]; + spinlock_t runq_lock; struct list_head active_list[MAX_NUMNODES]; struct mutex active_mutex[MAX_NUMNODES]; }; @@ -196,61 +197,91 @@ static int spu_unbind_context(struct spu *spu, struct spu_context *ctx) return was_active; } -static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait, - int prio) +/** + * spu_add_to_rq - add a context to the runqueue + * @ctx: context to add + */ +static void spu_add_to_rq(struct spu_context *ctx) { - prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE); - set_bit(prio, spu_prio->bitmap); + spin_lock(&spu_prio->runq_lock); + list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); + set_bit(ctx->prio, spu_prio->bitmap); + spin_unlock(&spu_prio->runq_lock); } -static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait, - int prio) +/** + * spu_del_from_rq - remove a context from the runqueue + * @ctx: context to remove + */ +static void spu_del_from_rq(struct spu_context *ctx) { - u64 flags; - - __set_current_state(TASK_RUNNING); - - spin_lock_irqsave(&wq->lock, flags); + spin_lock(&spu_prio->runq_lock); + list_del_init(&ctx->rq); + if (list_empty(&spu_prio->runq[ctx->prio])) + clear_bit(ctx->prio, spu_prio->bitmap); + spin_unlock(&spu_prio->runq_lock); +} - remove_wait_queue_locked(wq, wait); - if (list_empty(&wq->task_list)) - clear_bit(prio, spu_prio->bitmap); +/** + * spu_grab_context - remove one context from the runqueue + * @prio: priority of the context to be removed + * + * This function removes one context from the runqueue for priority @prio. + * If there is more than one context with the given priority the first + * task on the runqueue will be taken. + * + * Returns the spu_context it just removed. + * + * Must be called with spu_prio->runq_lock held. 
+ */ +static struct spu_context *spu_grab_context(int prio) +{ + struct list_head *rq = &spu_prio->runq[prio]; - spin_unlock_irqrestore(&wq->lock, flags); + if (list_empty(rq)) + return NULL; + return list_entry(rq->next, struct spu_context, rq); } -static void spu_prio_wait(struct spu_context *ctx, u64 flags) +static void spu_prio_wait(struct spu_context *ctx) { - int prio = ctx->prio; - wait_queue_head_t *wq = &spu_prio->waitq[prio]; DEFINE_WAIT(wait); - if (ctx->spu) - return; - - spu_add_wq(wq, &wait, prio); + prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); if (!signal_pending(current)) { mutex_unlock(&ctx->state_mutex); - pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__, - current->pid, current->prio); schedule(); mutex_lock(&ctx->state_mutex); } - - spu_del_wq(wq, &wait, prio); + __set_current_state(TASK_RUNNING); + remove_wait_queue(&ctx->stop_wq, &wait); } -static void spu_prio_wakeup(void) +/** + * spu_reschedule - try to find a runnable context for a spu + * @spu: spu available + * + * This function is called whenever a spu becomes idle. It looks for the + * most suitable runnable spu context and schedules it for execution. + */ +static void spu_reschedule(struct spu *spu) { - int best = sched_find_first_bit(spu_prio->bitmap); + int best; + + spu_free(spu); + + spin_lock(&spu_prio->runq_lock); + best = sched_find_first_bit(spu_prio->bitmap); if (best < MAX_PRIO) { - wait_queue_head_t *wq = &spu_prio->waitq[best]; - wake_up_interruptible_nr(wq, 1); + struct spu_context *ctx = spu_grab_context(best); + if (ctx) + wake_up(&ctx->stop_wq); } + spin_unlock(&spu_prio->runq_lock); } -static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags) +static struct spu *spu_get_idle(struct spu_context *ctx) { struct spu *spu = NULL; int node = cpu_to_node(raw_smp_processor_id()); @@ -267,15 +298,6 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags) return spu; } -static inline struct spu *spu_get(struct spu_context *ctx, u64 flags) -{ - /* Future: spu_get_idle() if possible, - * otherwise try to preempt an active - * context. - */ - return spu_get_idle(ctx, flags); -} - /* The three externally callable interfaces * for the scheduler begin here. * @@ -284,32 +306,36 @@ static inline struct spu *spu_get(struct spu_context *ctx, u64 flags) * spu_yield - yield an SPU if others are waiting. */ +/** + * spu_activate - find a free spu for a context and execute it + * @ctx: spu context to schedule + * @flags: flags (currently ignored) + * + * Tries to find a free spu to run @ctx. If no free spu is available, + * add the context to the runqueue so it gets woken up once an spu + * is available.
+ */ int spu_activate(struct spu_context *ctx, u64 flags) { - struct spu *spu; - int ret = 0; - for (;;) { - if (ctx->spu) - return 0; - spu = spu_get(ctx, flags); - if (spu != NULL) { - if (ctx->spu != NULL) { - spu_free(spu); - spu_prio_wakeup(); - break; - } + if (ctx->spu) + return 0; + + do { + struct spu *spu; + + spu = spu_get_idle(ctx); + if (spu) { spu_bind_context(spu, ctx); - break; - } - spu_prio_wait(ctx, flags); - if (signal_pending(current)) { - ret = -ERESTARTSYS; - spu_prio_wakeup(); - break; + return 0; } - } - return ret; + + spu_add_to_rq(ctx); + spu_prio_wait(ctx); + spu_del_from_rq(ctx); + } while (!signal_pending(current)); + + return -ERESTARTSYS; } void spu_deactivate(struct spu_context *ctx) @@ -321,10 +347,8 @@ void spu_deactivate(struct spu_context *ctx) if (!spu) return; was_active = spu_unbind_context(spu, ctx); - if (was_active) { - spu_free(spu); - spu_prio_wakeup(); - } + if (was_active) + spu_reschedule(spu); } void spu_yield(struct spu_context *ctx) @@ -359,7 +383,7 @@ int __init spu_sched_init(void) return 1; } for (i = 0; i < MAX_PRIO; i++) { - init_waitqueue_head(&spu_prio->waitq[i]); + INIT_LIST_HEAD(&spu_prio->runq[i]); __clear_bit(i, spu_prio->bitmap); } __set_bit(MAX_PRIO, spu_prio->bitmap); @@ -367,6 +391,7 @@ int __init spu_sched_init(void) mutex_init(&spu_prio->active_mutex[i]); INIT_LIST_HEAD(&spu_prio->active_list[i]); } + spin_lock_init(&spu_prio->runq_lock); return 0; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index b500e94188b..7f5a4fc03c0 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -76,6 +76,7 @@ struct spu_context { struct spu_gang *gang; /* scheduler fields */ + struct list_head rq; int prio; }; -- cgit v1.2.3 From 26bec67386dbf6ef887254e815398842e182cdcd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:24 +0100 Subject: [POWERPC] spufs: optimize spu_run There is no need to directly wake up contexts in spu_activate when called from spu_run, so add a flag to suppress this wakeup. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 4 ++-- arch/powerpc/platforms/cell/spufs/file.c | 4 ++-- arch/powerpc/platforms/cell/spufs/run.c | 4 ++-- arch/powerpc/platforms/cell/spufs/sched.c | 10 ++++++---- arch/powerpc/platforms/cell/spufs/spufs.h | 13 +++++++++++-- 5 files changed, 23 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 88a88718630..056a8ad0238 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -163,7 +163,7 @@ int spu_acquire_exclusive(struct spu_context *ctx) * Returns 0 and with the context locked on success * Returns negative error and with the context _unlocked_ on failure.
*/ -int spu_acquire_runnable(struct spu_context *ctx) +int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags) { int ret = -EINVAL; @@ -174,7 +174,7 @@ int spu_acquire_runnable(struct spu_context *ctx) */ if (!ctx->owner) goto out_unlock; - ret = spu_activate(ctx, 0); + ret = spu_activate(ctx, flags); if (ret) goto out_unlock; } diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index a528020baa1..c729813043a 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -164,7 +164,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, /* error here usually means a signal.. we might want to test * the error code more precisely though */ - ret = spu_acquire_runnable(ctx); + ret = spu_acquire_runnable(ctx, 0); if (ret) return NOPFN_REFAULT; @@ -1306,7 +1306,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, if (ret) goto out; - spu_acquire_runnable(ctx); + spu_acquire_runnable(ctx, 0); if (file->f_flags & O_NONBLOCK) { ret = ctx->ops->send_mfc_command(ctx, &cmd); } else { diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index e1647311044..a973e79e9fd 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) int ret; unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; - ret = spu_acquire_runnable(ctx); + ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE); if (ret) return ret; @@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) spu_release(ctx); ret = spu_setup_isolated(ctx); if (!ret) - ret = spu_acquire_runnable(ctx); + ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE); } /* if userspace has set the runcntrl register (eg, to issue an diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 6f8e2257c5a..07d0d095c62 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -247,8 +247,8 @@ static void spu_prio_wait(struct spu_context *ctx) { DEFINE_WAIT(wait); + set_bit(SPU_SCHED_WAKE, &ctx->sched_flags); prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); - if (!signal_pending(current)) { mutex_unlock(&ctx->state_mutex); schedule(); @@ -256,6 +256,7 @@ static void spu_prio_wait(struct spu_context *ctx) } __set_current_state(TASK_RUNNING); remove_wait_queue(&ctx->stop_wq, &wait); + clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags); } /** @@ -275,7 +276,7 @@ static void spu_reschedule(struct spu *spu) best = sched_find_first_bit(spu_prio->bitmap); if (best < MAX_PRIO) { struct spu_context *ctx = spu_grab_context(best); - if (ctx) + if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags)) wake_up(&ctx->stop_wq); } spin_unlock(&spu_prio->runq_lock); @@ -315,7 +316,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx) * add the context to the runqueue so it gets woken up once an spu * is available. 
*/ -int spu_activate(struct spu_context *ctx, u64 flags) +int spu_activate(struct spu_context *ctx, unsigned long flags) { if (ctx->spu) @@ -331,7 +332,8 @@ int spu_activate(struct spu_context *ctx, u64 flags) } spu_add_to_rq(ctx); - spu_prio_wait(ctx); + if (!(flags & SPU_ACTIVATE_NOWAKE)) + spu_prio_wait(ctx); spu_del_from_rq(ctx); } while (!signal_pending(current)); diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 7f5a4fc03c0..421f59167c5 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -39,6 +39,11 @@ enum { struct spu_context_ops; struct spu_gang; +/* ctx->sched_flags */ +enum { + SPU_SCHED_WAKE = 0, +}; + struct spu_context { struct spu *spu; /* pointer to a physical SPU */ struct spu_state csa; /* SPU context save area. */ @@ -77,6 +82,7 @@ struct spu_context { /* scheduler fields */ struct list_head rq; + unsigned long sched_flags; int prio; }; @@ -179,10 +185,13 @@ int put_spu_context(struct spu_context *ctx); void spu_unmap_mappings(struct spu_context *ctx); void spu_forget(struct spu_context *ctx); -int spu_acquire_runnable(struct spu_context *ctx); +int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags); void spu_acquire_saved(struct spu_context *ctx); int spu_acquire_exclusive(struct spu_context *ctx); -int spu_activate(struct spu_context *ctx, u64 flags); +enum { + SPU_ACTIVATE_NOWAKE = 1, +}; +int spu_activate(struct spu_context *ctx, unsigned long flags); void spu_deactivate(struct spu_context *ctx); void spu_yield(struct spu_context *ctx); int __init spu_sched_init(void); -- cgit v1.2.3 From 678b2ff1e65ecccdb15cbfe97081572fc35944b7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:25 +0100 Subject: [POWERPC] spu sched: simplify spu_remove_from_active_list If we call spu_remove_from_active_list, the spu is always guaranteed to be on the active list and in runnable state, so we can simply do a list_del to remove it and unconditionally take the was_active codepath. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 44 +++++++++++-------------------- 1 file changed, 15 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 07d0d095c62..40202a752a7 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -83,27 +83,14 @@ static void spu_add_to_active_list(struct spu *spu) /** * spu_remove_from_active_list - remove spu from active list * @spu: spu to remove from the active list - * - * This function removes an spu from the active list. If the spu was - * found on the active list the function returns 1, else it doesn't do - * anything and returns 0.
*/ -static int spu_remove_from_active_list(struct spu *spu) +static void spu_remove_from_active_list(struct spu *spu) { int node = spu->node; - struct spu *tmp; - int rc = 0; mutex_lock(&spu_prio->active_mutex[node]); - list_for_each_entry(tmp, &spu_prio->active_list[node], list) { - if (tmp == spu) { - list_del_init(&spu->list); - rc = 1; - break; - } - } + list_del_init(&spu->list); mutex_unlock(&spu_prio->active_mutex[node]); - return rc; } static inline void mm_needs_global_tlbie(struct mm_struct *mm) @@ -167,16 +154,13 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) * spu_unbind_context - unbind spu context from physical spu * @spu: physical spu to unbind from * @ctx: context to unbind - * - * If the spu was on the active list the function returns 1, else 0. */ -static int spu_unbind_context(struct spu *spu, struct spu_context *ctx) +static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) { - int was_active = spu_remove_from_active_list(spu); - pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__, spu->pid, spu->number, spu->node); + spu_remove_from_active_list(spu); spu_switch_notify(spu, NULL); spu_unmap_mappings(ctx); spu_save(&ctx->csa, spu); @@ -193,8 +177,6 @@ static int spu_unbind_context(struct spu *spu, struct spu_context *ctx) ctx->spu = NULL; spu->flags = 0; spu->ctx = NULL; - - return was_active; } /** @@ -340,17 +322,21 @@ int spu_activate(struct spu_context *ctx, unsigned long flags) return -ERESTARTSYS; } +/** + * spu_deactivate - unbind a context from its physical spu + * @ctx: spu context to unbind + * + * Unbind @ctx from the physical spu it is running on and schedule + * the highest priority context to run on the freed physical spu. + */ void spu_deactivate(struct spu_context *ctx) { - struct spu *spu; - int was_active; + struct spu *spu = ctx->spu; - spu = ctx->spu; - if (!spu) - return; - was_active = spu_unbind_context(spu, ctx); - if (was_active) + if (spu) { + spu_unbind_context(spu, ctx); spu_reschedule(spu); + } } void spu_yield(struct spu_context *ctx) -- cgit v1.2.3 From ae7b4c5284d11d49ed9432c16505fcbeb8d3b8cf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:26 +0100 Subject: [POWERPC] spu sched: update some comments Give spu_yield a kerneldoc comment and remove the old comment documenting spu_activate, spu_deactivate and spu_yield as all of them now have descriptive kerneldoc comments of their own. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 40202a752a7..eb06a030ca0 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -281,14 +281,6 @@ static struct spu *spu_get_idle(struct spu_context *ctx) return spu; } -/* The three externally callable interfaces - * for the scheduler begin here. - * - * spu_activate - bind a context to SPU, waiting as needed. - * spu_deactivate - unbind a context from its SPU. - * spu_yield - yield an SPU if others are waiting.
- */ - /** + * spu_yield - yield a physical spu if others are waiting + * @ctx: spu context to yield + * + * Check if there is a higher priority context waiting and if yes + * unbind @ctx from the physical spu and schedule the highest + * priority context to run on the freed physical spu instead. + */ void spu_yield(struct spu_context *ctx) { struct spu *spu; -- cgit v1.2.3 From 52f04fcf66a5d5d90790d6cfde52e391ecf2b882 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:27 +0100 Subject: [POWERPC] spu sched: forced preemption at execution If we start a spu context with realtime priority we want it to run immediately and not wait until some other lower priority thread has finished. Try to find a suitable victim and use its spu in this case. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 1 + arch/powerpc/platforms/cell/spufs/sched.c | 74 +++++++++++++++++++++++++++++ arch/powerpc/platforms/cell/spufs/spufs.h | 1 + 3 files changed, 76 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 056a8ad0238..d581f4ec99b 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -53,6 +53,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) ctx->owner = get_task_mm(current); if (gang) spu_gang_add_ctx(gang, ctx); + ctx->rt_priority = current->rt_priority; ctx->prio = current->prio; goto out; out_free: diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index eb06a030ca0..814f65e025f 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -281,6 +281,74 @@ static struct spu *spu_get_idle(struct spu_context *ctx) return spu; } +/** + * find_victim - find a lower priority context to preempt + * @ctx: candidate context for running + * + * Returns the freed physical spu to run the new context on. + */ +static struct spu *find_victim(struct spu_context *ctx) +{ + struct spu_context *victim = NULL; + struct spu *spu; + int node, n; + + /* + * Look for a possible preemption candidate on the local node first. + * If there is no candidate look at the other nodes. This isn't + * exactly fair, but so far the whole spu scheduler tries to keep + * a strong node affinity. We might want to fine-tune this in + * the future. + */ + restart: + node = cpu_to_node(raw_smp_processor_id()); + for (n = 0; n < MAX_NUMNODES; n++, node++) { + node = (node < MAX_NUMNODES) ? node : 0; + if (!node_allowed(node)) + continue; + + mutex_lock(&spu_prio->active_mutex[node]); + list_for_each_entry(spu, &spu_prio->active_list[node], list) { + struct spu_context *tmp = spu->ctx; + + if (tmp->rt_priority < ctx->rt_priority && + (!victim || tmp->rt_priority < victim->rt_priority)) + victim = spu->ctx; + } + mutex_unlock(&spu_prio->active_mutex[node]); + + if (victim) { + /* + * This nests ctx->state_mutex, but we always lock + * higher priority contexts before lower priority + * ones, so this is safe until we introduce + * priority inheritance schemes.
+ */ + if (!mutex_trylock(&victim->state_mutex)) { + victim = NULL; + goto restart; + } + + spu = victim->spu; + if (!spu) { + /* + * This race can happen because we've dropped + * the active list mutex. Not a problem, just + * restart the search. + */ + mutex_unlock(&victim->state_mutex); + victim = NULL; + goto restart; + } + spu_unbind_context(spu, victim); + mutex_unlock(&victim->state_mutex); + return spu; + } + } + + return NULL; +} + /** * spu_activate - find a free spu for a context and execute it * @ctx: spu context to schedule @@ -300,6 +368,12 @@ int spu_activate(struct spu_context *ctx, unsigned long flags) struct spu *spu; spu = spu_get_idle(ctx); + /* + * If this is a realtime thread we try to get it running by + * preempting a lower priority thread. + */ + if (!spu && ctx->rt_priority) + spu = find_victim(ctx); if (spu) { spu_bind_context(spu, ctx); return 0; diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 421f59167c5..85b182d1646 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -83,6 +83,7 @@ struct spu_context { /* scheduler fields */ struct list_head rq; unsigned long sched_flags; + unsigned long rt_priority; int prio; }; -- cgit v1.2.3 From 72cb360839f88c02ccf38f1df214316e05886ff3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:28 +0100 Subject: [POWERPC] spu sched: use DECLARE_BITMAP Use DECLARE_BITMAP in the spu scheduler instead of reimplementing it. Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/sched.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 814f65e025f..ba4b01e01ac 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -46,9 +46,8 @@ #define SPU_MIN_TIMESLICE (100 * HZ / 1000) -#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1) struct spu_prio_array { - unsigned long bitmap[SPU_BITMAP_SIZE]; + DECLARE_BITMAP(bitmap, MAX_PRIO); struct list_head runq[MAX_PRIO]; spinlock_t runq_lock; struct list_head active_list[MAX_NUMNODES]; -- cgit v1.2.3 From 2eb1b12049844a8ebc670e0e4fc908bc3f8933d3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Feb 2007 21:54:29 +0100 Subject: [POWERPC] spu sched: static timeslicing for SCHED_RR contexts For SCHED_RR tasks we can do some really trivial timeslicing. Basically we fire up a timer for every scheduler tick that searches for a higher or same priority thread on the runqueue, and if there is one, context switches to it. Because we can't lock spus from timer context we actually run this from a delayed workqueue instead of a timer. A nice optimization would be to skip the actual priority bitmap search when there are fewer contexts than physical spus available. To implement this I need a so far unpublished patch from Andre, and it will be added after we have that patch in. Note that right now we only do the time slicing for SCHED_RR tasks. The code would work for SCHED_OTHER tasks as well, but their prio value is derived from the one the PPU thread has at time of spu_run, and using this for spu scheduling decisions would make the code very unfair.
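To make the timer-context point concrete, here is a hedged, self-contained sketch of the delayed-workqueue pattern with hypothetical names (tick_fn, example_lock), not the spufs code itself: a work item runs in process context, where taking a mutex is legal, while a classic timer callback runs in softirq context, where it is not.

/* Rearm-per-timeslice pattern: a delayed work item that may sleep.
 * Hypothetical example, not the spufs implementation below. */
#include <linux/workqueue.h>
#include <linux/mutex.h>

static void tick_fn(struct work_struct *work);

static DECLARE_DELAYED_WORK(tick_work, tick_fn);
static DEFINE_MUTEX(example_lock);

static void tick_fn(struct work_struct *work)
{
	mutex_lock(&example_lock);	/* allowed here: process context */
	/* ... decide whether the running context should be preempted ... */
	mutex_unlock(&example_lock);

	/* rearm for the next timeslice */
	schedule_delayed_work(&tick_work, HZ);
}

The spu_start_tick/spu_stop_tick helpers in the diff below play the queue/cancel role of this rearm loop, on a private workqueue instead of the shared one.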
SCHED_OTHER support will be enabled once the spu scheduler knows how to calculate spu_context.prio (very soon). Signed-off-by: Christoph Hellwig Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/context.c | 2 ++ arch/powerpc/platforms/cell/spufs/run.c | 9 ++++-- arch/powerpc/platforms/cell/spufs/sched.c | 43 ++++++++++++++++++++++++++++- arch/powerpc/platforms/cell/spufs/spufs.h | 5 ++++ 4 files changed, 56 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index d581f4ec99b..04ad2e364e9 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -54,7 +54,9 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) if (gang) spu_gang_add_ctx(gang, ctx); ctx->rt_priority = current->rt_priority; + ctx->policy = current->policy; ctx->prio = current->prio; + INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick); goto out; out_free: kfree(ctx); diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index a973e79e9fd..353a8fa07ab 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c @@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc) (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); if (runcntl == 0) runcntl = SPU_RUNCNTL_RUNNABLE; - } else + } else { + spu_start_tick(ctx); ctx->ops->npc_write(ctx, *npc); + } ctx->ops->runcntl_write(ctx, runcntl); return ret; @@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc, { int ret = 0; + spu_stop_tick(ctx); *status = ctx->ops->status_read(ctx); *npc = ctx->ops->npc_read(ctx); spu_release(ctx); @@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx, } if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { ret = spu_reacquire_runnable(ctx, npc, &status); - if (ret) + if (ret) { + spu_stop_tick(ctx); goto out2; + } continue; } ret = spu_process_events(ctx); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index ba4b01e01ac..2f25e68b4ba 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -44,7 +44,7 @@ #include <asm/spu_priv1.h> #include "spufs.h" -#define SPU_MIN_TIMESLICE (100 * HZ / 1000) +#define SPU_TIMESLICE (HZ) struct spu_prio_array { DECLARE_BITMAP(bitmap, MAX_PRIO); @@ -55,6 +55,7 @@ struct spu_prio_array { }; static struct spu_prio_array *spu_prio; +static struct workqueue_struct *spu_sched_wq; static inline int node_allowed(int node) { @@ -68,6 +69,40 @@ static inline int node_allowed(int node) return 1; } +void spu_start_tick(struct spu_context *ctx) +{ + if (ctx->policy == SCHED_RR) + queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE); +} + +void spu_stop_tick(struct spu_context *ctx) +{ + if (ctx->policy == SCHED_RR) + cancel_delayed_work(&ctx->sched_work); +} + +void spu_sched_tick(struct work_struct *work) +{ + struct spu_context *ctx = + container_of(work, struct spu_context, sched_work.work); + struct spu *spu; + int rearm = 1; + + mutex_lock(&ctx->state_mutex); + spu = ctx->spu; + if (spu) { + int best = sched_find_first_bit(spu_prio->bitmap); + if (best <= ctx->prio) { + spu_deactivate(ctx); + rearm = 0; + } + } + mutex_unlock(&ctx->state_mutex); + + if (rearm) + spu_start_tick(ctx); +} + /** * spu_add_to_active_list - add spu to active list * @spu: spu to add to the active list @@ -437,10 +472,15 @@ int __init spu_sched_init(void) { int i; + spu_sched_wq = create_singlethread_workqueue("spusched"); + if (!spu_sched_wq) + return 1; + spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL); if (!spu_prio) { printk(KERN_WARNING "%s: Unable to allocate priority queue.\n", __FUNCTION__); + destroy_workqueue(spu_sched_wq); return 1; } for (i = 0; i < MAX_PRIO; i++) { @@ -471,4 +511,5 @@ void __exit spu_sched_exit(void) mutex_unlock(&spu_prio->active_mutex[node]); } kfree(spu_prio); + destroy_workqueue(spu_sched_wq); } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 85b182d1646..0c437891dfd 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -82,8 +82,10 @@ struct spu_context { /* scheduler fields */ struct list_head rq; + struct delayed_work sched_work; unsigned long sched_flags; unsigned long rt_priority; + int policy; int prio; }; @@ -195,6 +197,9 @@ enum { int spu_activate(struct spu_context *ctx, unsigned long flags); void spu_deactivate(struct spu_context *ctx); void spu_yield(struct spu_context *ctx); +void spu_start_tick(struct spu_context *ctx); +void spu_stop_tick(struct spu_context *ctx); +void spu_sched_tick(struct work_struct *work); int __init spu_sched_init(void); void __exit spu_sched_exit(void); -- cgit v1.2.3 From 128b8546a83a9e37448bc126e1045dc1db291165 Mon Sep 17 00:00:00 2001 From: Masato Noguchi Date: Tue, 13 Feb 2007 21:54:30 +0100 Subject: [POWERPC] spufs: avoid accessing kernel memory through mmapped /mem node I found an exploit in the current kernel. Currently, there is no range check when mmapping the "/mem" node in spufs, so an application can access privileged memory regions. In case this kernel is already running on a public server, I am sending this information only here. If there are such servers anywhere, please update them ASAP. Signed-off-by: Masato Noguchi Signed-off-by: Arnd Bergmann --- arch/powerpc/platforms/cell/spufs/file.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index c729813043a..b00653d69c0 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma, offset += vma->vm_pgoff << PAGE_SHIFT; + if (offset >= LS_SIZE) + return NOPFN_SIGBUS; + spu_acquire(ctx); if (ctx->state == SPU_STATE_SAVED) { -- cgit v1.2.3 From bcb63e25ed3c56ee40cca4d18fbaac1d2a40c1d6 Mon Sep 17 00:00:00 2001 From: Carl Love Date: Tue, 13 Feb 2007 22:02:02 +0100 Subject: [POWERPC] cell: PPU Oprofile cleanup patch This is a cleanup patch that includes the following changes: -Some comments were added to clarify the code based on feedback from the community. -The write_pm_cntrl() and set_count_mode() functions were passed a structure element of a global variable. The argument was removed, so the functions now just operate on the global directly.
-The set_pm_event() function call in the cell_virtual_cntr() routine was moved to a for-loop before the for_each_cpu loop Signed-off-by: Carl Love Signed-off-by: Maynard Johnson Signed-off-by: Arnd Bergmann --- arch/powerpc/oprofile/op_model_cell.c | 104 ++++++++++++++++++++-------------- arch/powerpc/platforms/cell/pmu.c | 14 ++--- 2 files changed, 67 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 2eb15f38810..8d4a9586464 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -41,8 +41,12 @@ #define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */ #define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */ -#define NUM_THREADS 2 -#define VIRT_CNTR_SW_TIME_NS 100000000 // 0.5 seconds +#define NUM_THREADS 2 /* number of physical threads in + * physical processor + */ +#define NUM_TRACE_BUS_WORDS 4 +#define NUM_INPUT_BUS_WORDS 2 + struct pmc_cntrl_data { unsigned long vcntr; @@ -93,7 +97,6 @@ static struct { u32 pm07_cntrl[NR_PHYS_CTRS]; } pm_regs; - #define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12) #define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4) #define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8) @@ -101,7 +104,6 @@ static struct { #define GET_COUNT_CYCLES(x) (x & 0x00000001) #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2) - static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values); static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS]; @@ -129,8 +131,8 @@ static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED; static u32 ctr_enabled; -static unsigned char trace_bus[4]; -static unsigned char input_bus[2]; +static unsigned char trace_bus[NUM_TRACE_BUS_WORDS]; +static unsigned char input_bus[NUM_INPUT_BUS_WORDS]; /* * Firmware interface functions @@ -183,7 +185,8 @@ static void pm_rtas_activate_signals(u32 node, u32 count) for (j = 0; j < count; j++) { /* fw expects physical cpu # */ pm_signal_local[j].cpu = node; - pm_signal_local[j].signal_group = pm_signal[j].signal_group; + pm_signal_local[j].signal_group + = pm_signal[j].signal_group; pm_signal_local[j].bus_word = pm_signal[j].bus_word; pm_signal_local[j].sub_unit = pm_signal[j].sub_unit; pm_signal_local[j].bit = pm_signal[j].bit; @@ -232,13 +235,21 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) p->signal_group = event / 100; p->bus_word = bus_word; - p->sub_unit = unit_mask & 0x0000f000; + p->sub_unit = (unit_mask & 0x0000f000) >> 12; pm_regs.pm07_cntrl[ctr] = 0; pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles); pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity); pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control); + /* Some of the islands signal selection is based on 64 bit words. + * The debug bus words are 32 bits, the input words to the performance + * counters are defined as 32 bits. Need to convert the 64 bit island + * specification to the appropriate 32 input bit and bus word for the + * performance counter event selection. See the CELL Performance + * monitoring signals manual and the Perf cntr hardware descriptions + * for the details. 
+ */ if (input_control == 0) { if (signal_bit > 31) { signal_bit -= 32; if (bus_word == 0x3) bus_word = 0x2; else if (bus_word == 0xc) bus_word = 0x8; } p->bit = signal_bit; } - for (i = 0; i < 4; i++) { + for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) { if (bus_word & (1 << i)) { pm_regs.debug_bus_control |= (bus_type << (31 - (2 * i) + 1)); - for (j = 0; j < 2; j++) { + for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) { if (input_bus[j] == 0xff) { input_bus[j] = i; pm_regs.group_control |= @@ -278,52 +289,58 @@ out: ; } -static void write_pm_cntrl(int cpu, struct pm_cntrl *pm_cntrl) +static void write_pm_cntrl(int cpu) { - /* Oprofile will use 32 bit counters, set bits 7:10 to 0 */ + /* Oprofile will use 32 bit counters, set bits 7:10 to 0 + * pm_regs.pm_cntrl is a global + */ + u32 val = 0; - if (pm_cntrl->enable == 1) + if (pm_regs.pm_cntrl.enable == 1) val |= CBE_PM_ENABLE_PERF_MON; - if (pm_cntrl->stop_at_max == 1) + if (pm_regs.pm_cntrl.stop_at_max == 1) val |= CBE_PM_STOP_AT_MAX; - if (pm_cntrl->trace_mode == 1) - val |= CBE_PM_TRACE_MODE_SET(pm_cntrl->trace_mode); + if (pm_regs.pm_cntrl.trace_mode == 1) + val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode); - if (pm_cntrl->freeze == 1) + if (pm_regs.pm_cntrl.freeze == 1) val |= CBE_PM_FREEZE_ALL_CTRS; /* Routine set_count_mode must be called previously to set * the count mode based on the user selection of user and kernel. */ - val |= CBE_PM_COUNT_MODE_SET(pm_cntrl->count_mode); + val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode); cbe_write_pm(cpu, pm_control, val); } static inline void -set_count_mode(u32 kernel, u32 user, struct pm_cntrl *pm_cntrl) +set_count_mode(u32 kernel, u32 user) { /* The user must specify user and kernel if they want them. If - * neither is specified, OProfile will count in hypervisor mode + * neither is specified, OProfile will count in hypervisor mode. + * pm_regs.pm_cntrl is a global */ if (kernel) { if (user) - pm_cntrl->count_mode = CBE_COUNT_ALL_MODES; + pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES; else - pm_cntrl->count_mode = CBE_COUNT_SUPERVISOR_MODE; + pm_regs.pm_cntrl.count_mode = + CBE_COUNT_SUPERVISOR_MODE; } else { if (user) - pm_cntrl->count_mode = CBE_COUNT_PROBLEM_MODE; + pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE; else - pm_cntrl->count_mode = CBE_COUNT_HYPERVISOR_MODE; + pm_regs.pm_cntrl.count_mode = + CBE_COUNT_HYPERVISOR_MODE; } } static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl) { - pm07_cntrl[ctr] |= PM07_CTR_ENABLE(1); + pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE; cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]); } @@ -365,6 +382,14 @@ static void cell_virtual_cntr(unsigned long data) hdw_thread = 1 ^ hdw_thread; next_hdw_thread = hdw_thread; + for (i = 0; i < num_counters; i++) + /* There are some per thread events. Must do the + * set event, for the thread that is being started + */ + set_pm_event(i, + pmc_cntrl[next_hdw_thread][i].evnts, + pmc_cntrl[next_hdw_thread][i].masks); + /* The following is done only once per each node, but * we need cpu #, not node #, to pass to the cbe_xxx functions. */ @@ -385,12 +410,13 @@ static void cell_virtual_cntr(unsigned long data) == 0xFFFFFFFF) /* If the cntr value is 0xffffffff, we must * reset that to 0xfffffff0 when the current - * thread is restarted. This will generate a new - * interrupt and make sure that we never restore - * the counters to the max value. If the counters - * were restored to the max value, they do not - * increment and no interrupts are generated.
Hence - * no more samples will be collected on that cpu. + * thread is restarted. This will generate a + * new interrupt and make sure that we never + * restore the counters to the max value. If + * the counters were restored to the max value, + * they do not increment and no interrupts are + * generated. Hence no more samples will be + * collected on that cpu. */ cbe_write_ctr(cpu, i, 0xFFFFFFF0); else @@ -410,9 +436,6 @@ static void cell_virtual_cntr(unsigned long data) * Must do the set event, enable_cntr * for each cpu. */ - set_pm_event(i, - pmc_cntrl[next_hdw_thread][i].evnts, - pmc_cntrl[next_hdw_thread][i].masks); enable_ctr(cpu, i, pm_regs.pm07_cntrl); } else { @@ -465,8 +488,7 @@ cell_reg_setup(struct op_counter_config *ctr, pm_regs.pm_cntrl.trace_mode = 0; pm_regs.pm_cntrl.freeze = 1; - set_count_mode(sys->enable_kernel, sys->enable_user, - &pm_regs.pm_cntrl); + set_count_mode(sys->enable_kernel, sys->enable_user); /* Setup the thread 0 events */ for (i = 0; i < num_ctrs; ++i) { @@ -498,10 +520,10 @@ cell_reg_setup(struct op_counter_config *ctr, pmc_cntrl[1][i].vcntr = i; } - for (i = 0; i < 4; i++) + for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) trace_bus[i] = 0xff; - for (i = 0; i < 2; i++) + for (i = 0; i < NUM_INPUT_BUS_WORDS; i++) input_bus[i] = 0xff; /* Our counters count up, and "count" refers to @@ -560,7 +582,7 @@ static void cell_cpu_setup(struct op_counter_config *cntr) cbe_write_pm(cpu, pm_start_stop, 0); cbe_write_pm(cpu, group_control, pm_regs.group_control); cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control); - write_pm_cntrl(cpu, &pm_regs.pm_cntrl); + write_pm_cntrl(cpu); for (i = 0; i < num_counters; ++i) { if (ctr_enabled & (1 << i)) { @@ -602,7 +624,7 @@ static void cell_global_start(struct op_counter_config *ctr) } } - cbe_clear_pm_interrupts(cpu); + cbe_get_and_clear_pm_interrupts(cpu); cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask); cbe_enable_pm(cpu); } @@ -672,7 +694,7 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr) cbe_disable_pm(cpu); - interrupt_mask = cbe_clear_pm_interrupts(cpu); + interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu); /* If the interrupt mask has been cleared, then the virt cntr * has cleared the interrupt. When the thread that generated diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c index d04ae1671e6..66ca4b5a1db 100644 --- a/arch/powerpc/platforms/cell/pmu.c +++ b/arch/powerpc/platforms/cell/pmu.c @@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer); * Enabling/disabling interrupts for the entire performance monitoring unit. */ -u32 cbe_query_pm_interrupts(u32 cpu) -{ - return cbe_read_pm(cpu, pm_status); -} -EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts); - -u32 cbe_clear_pm_interrupts(u32 cpu) +u32 cbe_get_and_clear_pm_interrupts(u32 cpu) { /* Reading pm_status clears the interrupt bits. 
*/ - return cbe_query_pm_interrupts(cpu); + return cbe_read_pm(cpu, pm_status); } -EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts); +EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts); void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) { @@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts); void cbe_disable_pm_interrupts(u32 cpu) { - cbe_clear_pm_interrupts(cpu); + cbe_get_and_clear_pm_interrupts(cpu); cbe_write_pm(cpu, pm_status, 0); } EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts); -- cgit v1.2.3
From c7eb734766217b9ddac217cbccae3aedcfa67520 Mon Sep 17 00:00:00 2001 From: Maynard Johnson Date: Tue, 13 Feb 2007 22:02:03 +0100 Subject: [POWERPC] cell: pm_rtas_activate_signals routine cleanup
The code was setting up the debug bus for group 21 when profiling the PPU CYCLES event. The debug bus is not actually used by the hardware performance counters when counting PPU CYCLES, and setting it up for PPU CYCLES causes signal routing conflicts on the debug bus when profiling PPU cycles together with another PPU event. This patch fixes the code to set up the debug bus to route the performance signals only for the non-PPU-CYCLES events.
Signed-off-by: Maynard Johnson Signed-off-by: Carl Love Signed-off-by: Arnd Bergmann --- arch/powerpc/oprofile/op_model_cell.c | 49 +++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 8d4a9586464..e08e1d7b3dc 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -39,6 +39,9 @@ #include "../platforms/cell/interrupt.h" #define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */ +#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying + * PPU_CYCLES event + */ #define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */ #define NUM_THREADS 2 /* number of physical threads in @@ -62,7 +65,7 @@ struct pmc_cntrl_data { struct pm_signal { u16 cpu; /* Processor to modify */ u16 sub_unit; /* hw subunit this applies to (if applicable) */ - u16 signal_group; /* Signal Group to Enable/Disable */ + short int signal_group; /* Signal Group to Enable/Disable */ u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event * Bus Word(s) (bitmask) */ @@ -179,26 +182,40 @@ static void pm_rtas_reset_signals(u32 node) static void pm_rtas_activate_signals(u32 node, u32 count) { int ret; - int j; + int i, j; struct pm_signal pm_signal_local[NR_PHYS_CTRS]; + /* There is no debug setup required for the cycles event. + * Note that only events in the same group can be used. + * Otherwise, there will be conflicts in correctly routing + * the signals on the debug bus. It is the responsibility + * of the OProfile user tool to check that the events are in + * the same group.
+ */ + i = 0; for (j = 0; j < count; j++) { - /* fw expects physical cpu # */ - pm_signal_local[j].cpu = node; - pm_signal_local[j].signal_group - = pm_signal[j].signal_group; - pm_signal_local[j].bus_word = pm_signal[j].bus_word; - pm_signal_local[j].sub_unit = pm_signal[j].sub_unit; - pm_signal_local[j].bit = pm_signal[j].bit; + if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) { + + /* fw expects physical cpu # */ + pm_signal_local[i].cpu = node; + pm_signal_local[i].signal_group + = pm_signal[j].signal_group; + pm_signal_local[i].bus_word = pm_signal[j].bus_word; + pm_signal_local[i].sub_unit = pm_signal[j].sub_unit; + pm_signal_local[i].bit = pm_signal[j].bit; + i++; + } } - ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE, - pm_signal_local, - count * sizeof(struct pm_signal)); + if (i != 0) { + ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE, + pm_signal_local, + i * sizeof(struct pm_signal)); - if (ret) - printk(KERN_WARNING "%s: rtas returned: %d\n", - __FUNCTION__, ret); + if (ret) + printk(KERN_WARNING "%s: rtas returned: %d\n", + __FUNCTION__, ret); + } } /* @@ -215,7 +232,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask) /* Special Event: Count all cpu cycles */ pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES; p = &(pm_signal[ctr]); - p->signal_group = 21; + p->signal_group = PPU_CYCLES_GRP_NUM; p->bus_word = 1; p->sub_unit = 0; p->bit = 0; -- cgit v1.2.3
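[Editorial aside: the loop added in the last patch filters out PPU_CYCLES-group entries while compacting the survivors into a local array, then sizes the RTAS call by the number actually kept. Below is a minimal user-space sketch of that filter-and-compact pattern, with a hypothetical struct standing in for struct pm_signal; it is not the kernel code.]

#include <stdio.h>

#define SKIP_GROUP 1	/* stand-in for PPU_CYCLES_GRP_NUM */
#define MAX_ENTRIES 8	/* stand-in for NR_PHYS_CTRS */

struct entry {
	int signal_group;
};

/* Copy every entry whose group is not SKIP_GROUP into dst, compacting
 * as we go, and return how many entries were kept. dst must hold at
 * least MAX_ENTRIES elements. This mirrors the shape of the rewritten
 * pm_rtas_activate_signals() loop. */
static int compact_signals(const struct entry *src, int count,
			   struct entry *dst)
{
	int i = 0, j;

	for (j = 0; j < count; j++)
		if (src[j].signal_group != SKIP_GROUP)
			dst[i++] = src[j];

	return i;
}

int main(void)
{
	struct entry in[] = { {1}, {2}, {1}, {3} };
	struct entry out[MAX_ENTRIES];
	int kept = compact_signals(in, 4, out);

	/* prints "kept 2 entries: groups 2 3" */
	printf("kept %d entries: groups %d %d\n", kept,
	       out[0].signal_group, out[1].signal_group);
	return 0;
}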