author    | Chanho Park <chanho61.park@samsung.com> | 2014-09-05 20:35:53 +0900
committer | Chanho Park <chanho61.park@samsung.com> | 2014-09-05 20:35:53 +0900
commit    | 16b1353a36171ae06d63fd309f4772dbfb1da113 (patch)
tree      | cf6c297ee81aba0d9b47f23d78a889667e7bce48 /aio-posix.c
parent    | a15119db2ff5c2fdfdeb913b297bf8aa3399132e (diff)
Imported Upstream version 2.1.0 (tag: upstream/2.1.0)
Diffstat (limited to 'aio-posix.c')
-rw-r--r-- | aio-posix.c | 38
1 file changed, 35 insertions(+), 3 deletions(-)
```diff
diff --git a/aio-posix.c b/aio-posix.c
index f921d4f53..2eada2e04 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -125,7 +125,7 @@ static bool aio_dispatch(AioContext *ctx)
     bool progress = false;
 
     /*
-     * We have to walk very carefully in case qemu_aio_set_fd_handler is
+     * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
      */
     node = QLIST_FIRST(&ctx->aio_handlers);
@@ -175,27 +175,56 @@ static bool aio_dispatch(AioContext *ctx)
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
+    bool was_dispatching;
     int ret;
     bool progress;
 
+    was_dispatching = ctx->dispatching;
     progress = false;
 
+    /* aio_notify can avoid the expensive event_notifier_set if
+     * everything (file descriptors, bottom halves, timers) will
+     * be re-evaluated before the next blocking poll().  This happens
+     * in two cases:
+     *
+     * 1) when aio_poll is called with blocking == false
+     *
+     * 2) when we are called after poll().  If we are called before
+     *    poll(), bottom halves will not be re-evaluated and we need
+     *    aio_notify() if blocking == true.
+     *
+     * The first aio_dispatch() only does something when AioContext is
+     * running as a GSource, and in that case aio_poll is used only
+     * with blocking == false, so this optimization is already quite
+     * effective.  However, the code is ugly and should be restructured
+     * to have a single aio_dispatch() call.  To do this, we need to
+     * reorganize aio_poll into a prepare/poll/dispatch model like
+     * glib's.
+     *
+     * If we're in a nested event loop, ctx->dispatching might be true.
+     * In that case we can restore it just before returning, but we
+     * have to clear it now.
+     */
+    aio_set_dispatching(ctx, !blocking);
+
     /*
      * If there are callbacks left that have been queued, we need to call them.
      * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for qemu_aio_wait loops).
+     * does not need a complete flush (as is the case for aio_poll loops).
      */
     if (aio_bh_poll(ctx)) {
         blocking = false;
         progress = true;
     }
 
+    /* Re-evaluate condition (1) above. */
+    aio_set_dispatching(ctx, !blocking);
     if (aio_dispatch(ctx)) {
         progress = true;
     }
 
     if (progress && !blocking) {
-        return true;
+        goto out;
     }
 
     ctx->walking_handlers++;
@@ -234,9 +263,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     /* Run dispatch even if there were no readable fds to run timers */
+    aio_set_dispatching(ctx, true);
     if (aio_dispatch(ctx)) {
         progress = true;
     }
 
+out:
+    aio_set_dispatching(ctx, was_dispatching);
     return progress;
 }
```
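The consumer side of this optimization — the code that reads `ctx->dispatching` to decide whether `event_notifier_set()` can be skipped — lives in `aio_notify()`/`aio_set_dispatching()` outside this file, so it does not appear in a diff limited to aio-posix.c. The following is a minimal standalone sketch of the pattern the new comment describes, assuming hypothetical names (`Ctx`, `notify`, `set_dispatching`, `kicks`) and C11 atomics in place of QEMU's own types and barriers:

```c
/* Standalone sketch, NOT QEMU code: models the "skip the wakeup while
 * dispatching" idea from the comment in the patch.  Ctx, notify() and
 * set_dispatching() are hypothetical stand-ins for AioContext,
 * aio_notify() and aio_set_dispatching(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct Ctx {
    atomic_bool dispatching;  /* will all sources be re-checked anyway? */
    atomic_int  kicks;        /* stands in for the EventNotifier write */
} Ctx;

static void set_dispatching(Ctx *ctx, bool dispatching)
{
    /* seq_cst ordering stands in for whatever barrier pairing the real
     * implementation needs so a concurrent notify() is never missed. */
    atomic_store(&ctx->dispatching, dispatching);
}

static void notify(Ctx *ctx)
{
    /* If the loop is dispatching (or about to do a non-blocking poll),
     * it will re-evaluate fds, bottom halves and timers anyway, so the
     * expensive wakeup write can be elided. */
    if (!atomic_load(&ctx->dispatching)) {
        atomic_fetch_add(&ctx->kicks, 1);
    }
}

int main(void)
{
    Ctx ctx = { .dispatching = false, .kicks = 0 };

    notify(&ctx);                  /* loop may be blocked: must kick */
    set_dispatching(&ctx, true);   /* aio_poll enters its dispatch phase */
    notify(&ctx);                  /* redundant kick elided */
    set_dispatching(&ctx, false);  /* back to "may block in poll()" */

    printf("kicks issued: %d\n", atomic_load(&ctx.kicks));  /* prints 1 */
    return 0;
}
```

The `was_dispatching` save/restore in the patch matches the nested-event-loop note in its comment: `aio_poll` can be re-entered from a callback, and an inner call must not leave `ctx->dispatching` cleared when it returns into an outer dispatch phase, or wakeups aimed at the outer loop could be suppressed.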