diff options
author | Al Viro <viro@zeniv.linux.org.uk> | 2012-03-10 23:10:35 -0500 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2012-03-20 21:29:38 -0400 |
commit | 2dd542b7aeb1c222273cf0593a718d9b44998d9f (patch) | |
tree | aea852e444475a669e1cb21a328b980ba19b9ecb | |
parent | e23754f880f10124f0a2848f9d17e361a295378e (diff) | |
download | linux-3.10-2dd542b7aeb1c222273cf0593a718d9b44998d9f.tar.gz linux-3.10-2dd542b7aeb1c222273cf0593a718d9b44998d9f.tar.bz2 linux-3.10-2dd542b7aeb1c222273cf0593a718d9b44998d9f.zip |
aio: aio_nr decrements don't need to be delayed
we can do that right in __put_ioctx(); as a result, the loop
in ioctx_alloc() can be killed.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r-- | fs/aio.c | 42 |
1 file changed, 14 insertions(+), 28 deletions(-)
@@ -199,16 +199,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 static void ctx_rcu_free(struct rcu_head *head)
 {
 	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
-	unsigned nr_events = ctx->max_reqs;
-
 	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
 }
 
 /* __put_ioctx
@@ -217,6 +208,7 @@ static void ctx_rcu_free(struct rcu_head *head)
  */
 static void __put_ioctx(struct kioctx *ctx)
 {
+	unsigned nr_events = ctx->max_reqs;
 	BUG_ON(ctx->reqs_active);
 
 	cancel_delayed_work(&ctx->wq);
@@ -224,6 +216,12 @@ static void __put_ioctx(struct kioctx *ctx)
 	aio_free_ring(ctx);
 	mmdrop(ctx->mm);
 	ctx->mm = NULL;
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
 	pr_debug("__put_ioctx: freeing %p\n", ctx);
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
@@ -247,7 +245,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
 	struct mm_struct *mm;
 	struct kioctx *ctx;
-	int did_sync = 0;
 	int err = -ENOMEM;
 
 	/* Prevent overflows */
@@ -257,7 +254,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if ((unsigned long)nr_events > aio_max_nr)
+	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
@@ -281,25 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		goto out_freectx;
 
 	/* limit the number of system wide aios */
-	do {
-		spin_lock_bh(&aio_nr_lock);
-		if (aio_nr + nr_events > aio_max_nr ||
-		    aio_nr + nr_events < aio_nr)
-			ctx->max_reqs = 0;
-		else
-			aio_nr += ctx->max_reqs;
+	spin_lock_bh(&aio_nr_lock);
+	if (aio_nr + nr_events > aio_max_nr ||
+	    aio_nr + nr_events < aio_nr) {
 		spin_unlock_bh(&aio_nr_lock);
-		if (ctx->max_reqs || did_sync)
-			break;
-
-		/* wait for rcu callbacks to have completed before giving up */
-		synchronize_rcu();
-		did_sync = 1;
-		ctx->max_reqs = nr_events;
-	} while (1);
-
-	if (ctx->max_reqs == 0)
 		goto out_cleanup;
+	}
+	aio_nr += ctx->max_reqs;
+	spin_unlock_bh(&aio_nr_lock);
 
 	/* now link into global list. */
 	spin_lock(&mm->ioctx_lock);