author     Jeremy Kerr <jeremy@au1.ibm.com>       2006-10-24 18:31:19 +0200
committer  Paul Mackerras <paulus@samba.org>      2006-10-25 14:20:21 +1000
commit     099814bb1f9bd9081d7c85867f8eb8c049abc1b9 (patch)
tree       d8a7a0a064bb922b90bf0a5c03f9864783c629df
parent     0afacde3df4c9980f505d9afd7cb0058389732ca (diff)
[POWERPC] spufs: Add isolated-mode SPE recycling support
When in isolated mode, SPEs have access to an area of persistent
storage, which is per-SPE. In order for isolated-mode apps to
communicate arbitrary data through this storage, we need to ensure that
isolated physical SPEs can be reused for subsequent applications.
Add a file ("recycle") in each SPE thread directory to enable isolated-mode
recycling. Writing to this file causes the kernel to reload the
isolated-mode loader kernel, allowing a new app to be run on the same
physical SPE.
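
For illustration only, here is a minimal userspace sketch of the interface
this patch adds. The spufs mount point (/spu) and the context directory name
(myctx) are assumptions, not part of the patch; the behaviour it relies on is
taken from spufs_recycle_write() in the diff below, which accepts any
non-empty write from a context created with SPU_CREATE_ISOLATE.

/* Hypothetical example: ask the kernel to recycle an isolated SPE context.
 * The path below is illustrative; only the "recycle" file name comes from
 * this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/spu/myctx/recycle";        /* assumed path */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open recycle");
                return 1;
        }

        /* Any non-empty write requests a reload of the isolated-mode
         * loader, so a new app can run on the same physical SPE. */
        if (write(fd, "1", 1) != 1) {
                perror("write recycle");
                close(fd);
                return 1;
        }

        close(fd);
        return 0;
}

On success the write returns the number of bytes written; contexts not
created with SPU_CREATE_ISOLATE, or empty writes, get -EINVAL, as in
spufs_recycle_write() below.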
This requires a new spu_acquire_exclusive() function to enforce exclusive
access to the SPE while the loader is initialised.
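
As a sketch (not part of the patch), the acquire/release pairing that the
loader setup relies on looks like the following. The function name
example_reload_loader is illustrative; the locking behaviour it assumes is
taken from the spu_acquire_exclusive() and spu_release_exclusive()
definitions in the diff below.

/* Illustrative sketch, assuming the spufs-internal declarations added to
 * spufs.h by this patch.  On success, spu_acquire_exclusive() returns with
 * ctx->state_sema held for writing; spu_release_exclusive() drops it. */
static int example_reload_loader(struct spu_context *ctx)
{
        int ret;

        ret = spu_acquire_exclusive(ctx);
        if (ret)
                return ret;     /* semaphore already released on error */

        /* ... DMA the isolated-mode loader into the SPE here ... */

        spu_release_exclusive(ctx);
        return 0;
}

Note that spu_acquire_exclusive() releases ctx->state_sema itself on
failure, so the caller only releases on the success path.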
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c | 27
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c    | 32
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c   | 23
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h   |  7
4 files changed, 81 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 034cf6af53a..48eb050bcf4 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -120,6 +120,33 @@ void spu_unmap_mappings(struct spu_context *ctx)
         unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
 }
 
+int spu_acquire_exclusive(struct spu_context *ctx)
+{
+        int ret = 0;
+
+        down_write(&ctx->state_sema);
+        /* ctx is about to be freed, can't acquire any more */
+        if (!ctx->owner) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        if (ctx->state == SPU_STATE_SAVED) {
+                ret = spu_activate(ctx, 0);
+                if (ret)
+                        goto out;
+                ctx->state = SPU_STATE_RUNNABLE;
+        } else {
+                /* We need to exclude userspace access to the context. */
+                spu_unmap_mappings(ctx);
+        }
+
+out:
+        if (ret)
+                up_write(&ctx->state_sema);
+        return ret;
+}
+
 int spu_acquire_runnable(struct spu_context *ctx)
 {
         int ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 8ca330671ad..5b8ba6c3aa3 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1343,6 +1343,37 @@ static struct file_operations spufs_mfc_fops = {
         .mmap    = spufs_mfc_mmap,
 };
 
+
+static int spufs_recycle_open(struct inode *inode, struct file *file)
+{
+        file->private_data = SPUFS_I(inode)->i_ctx;
+        return nonseekable_open(inode, file);
+}
+
+static ssize_t spufs_recycle_write(struct file *file,
+                const char __user *buffer, size_t size, loff_t *pos)
+{
+        struct spu_context *ctx = file->private_data;
+        int ret;
+
+        if (!(ctx->flags & SPU_CREATE_ISOLATE))
+                return -EINVAL;
+
+        if (size < 1)
+                return -EINVAL;
+
+        ret = spu_recycle_isolated(ctx);
+
+        if (ret)
+                return ret;
+        return size;
+}
+
+static struct file_operations spufs_recycle_fops = {
+        .open    = spufs_recycle_open,
+        .write   = spufs_recycle_write,
+};
+
 static void spufs_npc_set(void *data, u64 val)
 {
         struct spu_context *ctx = data;
@@ -1551,5 +1582,6 @@ struct tree_descr spufs_dir_nosched_contents[] = {
         { "psmap", &spufs_psmap_fops, 0666, },
         { "phys-id", &spufs_id_ops, 0666, },
         { "object-id", &spufs_object_id_ops, 0666, },
+        { "recycle", &spufs_recycle_fops, 0222, },
         {},
 };
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index c8751936672..9e457be140e 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -248,7 +248,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
         if (!isolated_loader)
                 return -ENODEV;
 
-        if ((ret = spu_acquire_runnable(ctx)) != 0)
+        if ((ret = spu_acquire_exclusive(ctx)) != 0)
                 return ret;
 
         mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
@@ -314,10 +314,16 @@ out_drop_priv:
         spu_mfc_sr1_set(ctx->spu, sr1);
 
 out_unlock:
-        up_write(&ctx->state_sema);
+        spu_release_exclusive(ctx);
         return ret;
 }
 
+int spu_recycle_isolated(struct spu_context *ctx)
+{
+        ctx->ops->runcntl_stop(ctx);
+        return spu_setup_isolated(ctx);
+}
+
 static int
 spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
                 int mode)
@@ -341,12 +347,6 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
                 goto out_iput;
 
         ctx->flags = flags;
-        if (flags & SPU_CREATE_ISOLATE) {
-                ret = spu_setup_isolated(ctx);
-                if (ret)
-                        goto out_iput;
-        }
-
         inode->i_op = &spufs_dir_inode_operations;
         inode->i_fop = &simple_dir_operations;
         if (flags & SPU_CREATE_NOSCHED)
@@ -432,6 +432,13 @@ static int spufs_create_context(struct inode *inode,
 out_unlock:
         mutex_unlock(&inode->i_mutex);
 out:
+        if (ret >= 0 && (flags & SPU_CREATE_ISOLATE)) {
+                int setup_err = spu_setup_isolated(
+                                SPUFS_I(dentry->d_inode)->i_ctx);
+                if (setup_err)
+                        ret = setup_err;
+        }
+
         dput(dentry);
         return ret;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index b17b809ecd7..f438f0b8525 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -163,6 +163,12 @@ void spu_acquire(struct spu_context *ctx);
 void spu_release(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx);
 void spu_acquire_saved(struct spu_context *ctx);
+int spu_acquire_exclusive(struct spu_context *ctx);
+
+static inline void spu_release_exclusive(struct spu_context *ctx)
+{
+        up_write(&ctx->state_sema);
+}
 
 int spu_activate(struct spu_context *ctx, u64 flags);
 void spu_deactivate(struct spu_context *ctx);
@@ -170,6 +176,7 @@ void spu_yield(struct spu_context *ctx);
 
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
+int spu_recycle_isolated(struct spu_context *ctx);
 
 /*
  * spufs_wait
  *      Same as wait_event_interruptible(), except that here