author     Paolo Bonzini <pbonzini@redhat.com>   2016-10-03 18:14:15 +0200
committer  Kevin Wolf <kwolf@redhat.com>         2016-10-07 13:34:07 +0200
commit     5b8bb3595a2941e9408021f1080e60ce86d677d2
tree       6703cf075db4beb6f35110568491136f7b90ef4e
parent     818bbc86c9f9c47f67d11c0a068116c4333fd0ba
async: add aio_bh_schedule_oneshot
qemu_bh_delete is already clearing bh->scheduled at the same time as it's
setting bh->deleted. Since it's not using any memory barriers, there is no
synchronization going on for bh->deleted, and this makes the bh->deleted
checks superfluous in aio_compute_timeout, aio_bh_poll and aio_ctx_check.

Just remove them, and put the (bh->scheduled && bh->deleted) combo to work
in a new function, aio_bh_schedule_oneshot. The new function removes the
need to save the QEMUBH pointer between the creation and the execution of
the bottom half.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
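As a quick illustration of the calling convention this enables (a hedged
sketch, not part of the patch: MyState, my_cb and the surrounding code are
invented; only aio_bh_new, qemu_bh_schedule, qemu_bh_delete and
aio_bh_schedule_oneshot are the real API):

    /* Before: save the pointer, schedule, and self-delete in the callback. */
    static void my_cb(void *opaque)
    {
        MyState *s = opaque;

        qemu_bh_delete(s->bh);          /* one-shot behavior done by hand */
        s->bh = NULL;
        /* ... actual work ... */
    }

    /* at the call site: */
    s->bh = aio_bh_new(ctx, my_cb, s);
    qemu_bh_schedule(s->bh);

    /* After: fire and forget; the bottom half frees itself once it has run. */
    aio_bh_schedule_oneshot(ctx, my_cb, s);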
-rw-r--r--  async.c              | 27
-rw-r--r--  include/block/aio.h  |  6
2 files changed, 29 insertions(+), 4 deletions(-)
diff --git a/async.c b/async.c
index 3bca9b0adb..f30d011ebc 100644
--- a/async.c
+++ b/async.c
@@ -44,6 +44,25 @@ struct QEMUBH {
     bool deleted;
 };
 
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
+{
+    QEMUBH *bh;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
+    qemu_mutex_lock(&ctx->bh_lock);
+    bh->next = ctx->first_bh;
+    bh->scheduled = 1;
+    bh->deleted = 1;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
+    ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
+}
+
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
@@ -86,7 +105,7 @@ int aio_bh_poll(AioContext *ctx)
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
+        if (atomic_xchg(&bh->scheduled, 0)) {
             /* Idle BHs and the notify BH don't count as progress */
             if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
@@ -104,7 +123,7 @@ int aio_bh_poll(AioContext *ctx)
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
-            if (bh->deleted) {
+            if (bh->deleted && !bh->scheduled) {
                 *bhp = bh->next;
                 g_free(bh);
             } else {
@@ -168,7 +187,7 @@ aio_compute_timeout(AioContext *ctx)
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
@@ -216,7 +235,7 @@ aio_ctx_check(GSource *source)
     aio_notify_accept(ctx);
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             return true;
         }
     }
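
The trick above is compact enough to be easy to miss: a oneshot BH is born
with both scheduled and deleted set, the dispatch loop consumes scheduled,
and the sweep then sees deleted && !scheduled and frees it. Below is a toy,
single-threaded model of just that flag protocol (the Toy* names and all of
the code are invented for illustration; the real code additionally uses
bh_lock, atomic_xchg and memory barriers for cross-thread safety):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef void ToyBHFunc(void *opaque);

    typedef struct ToyBH {
        struct ToyBH *next;
        ToyBHFunc *cb;
        void *opaque;
        bool scheduled;
        bool deleted;
    } ToyBH;

    static ToyBH *first_bh;

    /* Mirrors aio_bh_schedule_oneshot: born scheduled *and* deleted. */
    static void toy_bh_schedule_oneshot(ToyBHFunc *cb, void *opaque)
    {
        ToyBH *bh = calloc(1, sizeof(*bh));
        bh->cb = cb;
        bh->opaque = opaque;
        bh->scheduled = true;
        bh->deleted = true;
        bh->next = first_bh;
        first_bh = bh;
    }

    /* Mirrors aio_bh_poll: run scheduled BHs, then sweep the deleted ones. */
    static int toy_bh_poll(void)
    {
        int ret = 0;

        for (ToyBH *bh = first_bh; bh; bh = bh->next) {
            if (bh->scheduled) {            /* atomic_xchg in the real code */
                bh->scheduled = false;
                bh->cb(bh->opaque);
                ret = 1;
            }
        }

        /* A oneshot BH now satisfies deleted && !scheduled and is reclaimed;
         * this is exactly why the sweep condition gained the !scheduled term.
         */
        for (ToyBH **bhp = &first_bh; *bhp; ) {
            ToyBH *bh = *bhp;
            if (bh->deleted && !bh->scheduled) {
                *bhp = bh->next;
                free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        return ret;
    }

    static void hello(void *opaque)
    {
        printf("ran once: %s\n", (const char *)opaque);
    }

    int main(void)
    {
        toy_bh_schedule_oneshot(hello, "first poll");
        toy_bh_poll();      /* runs the callback and frees the BH */
        toy_bh_poll();      /* nothing left: prints nothing */
        return 0;
    }

Running it prints "ran once: first poll" exactly once; the second poll finds
no scheduled work because the sweep already reclaimed the bottom half.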
diff --git a/include/block/aio.h b/include/block/aio.h
index 173c1ed404..b9fe2cb37e 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -181,6 +181,12 @@ void aio_context_acquire(AioContext *ctx);
 void aio_context_release(AioContext *ctx);
 
 /**
+ * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
+ * only once and as soon as possible.
+ */
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
+
+/**
  * aio_bh_new: Allocate a new bottom half structure.
  *
  * Bottom halves are lightweight callbacks whose invocation is guaranteed