author     Kevin Wolf <kwolf@redhat.com>   2011-06-07 15:04:32 +0200
committer  Kevin Wolf <kwolf@redhat.com>   2011-06-14 17:03:25 +0200
commit     42496d6240bfedc7ac6d92f04f92cff6c2e9f226
tree       6cdc93f0d670c8e6c62fe6479d6ee94531c478af /block
parent     7bf4162a801761792834e9e7da4b80b089434ce7
qcow2: Avoid direct AIO callback
bdrv_aio_* must not call the callback before returning to its caller. In qcow2,
this could happen in some error cases. This patch starts the real request
processing in a BH to avoid this situation.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
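
For context: the bdrv_aio_* contract matters because a caller typically stores the returned BlockDriverAIOCB and expects the completion callback to run only after that assignment has happened. The sketch below is hypothetical (the TrackedRequest type and function names are illustrative, not from QEMU) and assumes the 2011-era block-layer declarations from block.h; it shows the kind of caller pattern that breaks when the callback fires synchronously:

/* Hypothetical caller sketch (not QEMU code); assumes the 2011-era
 * block-layer declarations from block.h: BlockDriverState,
 * BlockDriverAIOCB, QEMUIOVector and bdrv_aio_readv(). */
#include "block.h"

typedef struct TrackedRequest {
    BlockDriverAIOCB *aiocb;   /* non-NULL while the request is in flight */
} TrackedRequest;

static void tracked_request_cb(void *opaque, int ret)
{
    TrackedRequest *req = opaque;
    req->aiocb = NULL;         /* mark the request as completed */
}

static void tracked_request_submit(BlockDriverState *bs, int64_t sector_num,
                                   QEMUIOVector *qiov, int nb_sectors,
                                   TrackedRequest *req)
{
    /* If bdrv_aio_readv() called tracked_request_cb() before returning
     * (as qcow2 could do in some error cases prior to this patch), the
     * assignment below would overwrite the NULL written by the callback
     * and the request would look in-flight forever. */
    req->aiocb = bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
                                tracked_request_cb, req);
}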
Diffstat (limited to 'block')
-rw-r--r--  block/qcow2.c | 39
1 file changed, 30 insertions, 9 deletions
diff --git a/block/qcow2.c b/block/qcow2.c
index 8451ded9a3..2c51e7ccbd 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -378,6 +378,7 @@ typedef struct QCowAIOCB {
     uint64_t bytes_done;
     uint64_t cluster_offset;
     uint8_t *cluster_data;
+    bool is_write;
     BlockDriverAIOCB *hd_aiocb;
     QEMUIOVector hd_qiov;
     QEMUBH *bh;
@@ -399,12 +400,19 @@ static AIOPool qcow2_aio_pool = {
 };
 
 static void qcow2_aio_read_cb(void *opaque, int ret);
-static void qcow2_aio_read_bh(void *opaque)
+static void qcow2_aio_write_cb(void *opaque, int ret);
+
+static void qcow2_aio_rw_bh(void *opaque)
 {
     QCowAIOCB *acb = opaque;
     qemu_bh_delete(acb->bh);
     acb->bh = NULL;
-    qcow2_aio_read_cb(opaque, 0);
+
+    if (acb->is_write) {
+        qcow2_aio_write_cb(opaque, 0);
+    } else {
+        qcow2_aio_read_cb(opaque, 0);
+    }
 }
 
 static int qcow2_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
@@ -493,14 +501,14 @@ static void qcow2_aio_read_cb(void *opaque, int ret)
                     goto done;
                 }
             } else {
-                ret = qcow2_schedule_bh(qcow2_aio_read_bh, acb);
+                ret = qcow2_schedule_bh(qcow2_aio_rw_bh, acb);
                 if (ret < 0)
                     goto done;
             }
         } else {
             /* Note: in this case, no need to wait */
             qemu_iovec_memset(&acb->hd_qiov, 0, 512 * acb->cur_nr_sectors);
-            ret = qcow2_schedule_bh(qcow2_aio_read_bh, acb);
+            ret = qcow2_schedule_bh(qcow2_aio_rw_bh, acb);
             if (ret < 0)
                 goto done;
         }
@@ -515,7 +523,7 @@ static void qcow2_aio_read_cb(void *opaque, int ret)
             s->cluster_cache + index_in_cluster * 512,
             512 * acb->cur_nr_sectors);
 
-        ret = qcow2_schedule_bh(qcow2_aio_read_bh, acb);
+        ret = qcow2_schedule_bh(qcow2_aio_rw_bh, acb);
         if (ret < 0)
             goto done;
     } else {
@@ -572,6 +580,7 @@ static QCowAIOCB *qcow2_aio_setup(BlockDriverState *bs, int64_t sector_num,
     acb->hd_aiocb = NULL;
     acb->sector_num = sector_num;
     acb->qiov = qiov;
+    acb->is_write = is_write;
 
     qemu_iovec_init(&acb->hd_qiov, qiov->niov);
 
@@ -591,17 +600,22 @@ static BlockDriverAIOCB *qcow2_aio_readv(BlockDriverState *bs,
                                          void *opaque)
 {
     QCowAIOCB *acb;
+    int ret;
 
     acb = qcow2_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
     if (!acb)
         return NULL;
 
-    qcow2_aio_read_cb(acb, 0);
+    ret = qcow2_schedule_bh(qcow2_aio_rw_bh, acb);
+    if (ret < 0) {
+        qemu_iovec_destroy(&acb->hd_qiov);
+        qemu_aio_release(acb);
+        return NULL;
+    }
+
     return &acb->common;
 }
 
-static void qcow2_aio_write_cb(void *opaque, int ret);
-
 static void run_dependent_requests(QCowL2Meta *m)
 {
     QCowAIOCB *req;
@@ -724,6 +738,7 @@ static BlockDriverAIOCB *qcow2_aio_writev(BlockDriverState *bs,
 {
     BDRVQcowState *s = bs->opaque;
     QCowAIOCB *acb;
+    int ret;
 
     s->cluster_cache_offset = -1; /* disable compressed cache */
 
@@ -731,7 +746,13 @@ static BlockDriverAIOCB *qcow2_aio_writev(BlockDriverState *bs,
     if (!acb)
         return NULL;
 
-    qcow2_aio_write_cb(acb, 0);
+    ret = qcow2_schedule_bh(qcow2_aio_rw_bh, acb);
+    if (ret < 0) {
+        qemu_iovec_destroy(&acb->hd_qiov);
+        qemu_aio_release(acb);
+        return NULL;
+    }
+
     return &acb->common;
 }
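
Note: the qcow2_schedule_bh() helper that the new qcow2_aio_readv()/qcow2_aio_writev() paths rely on predates this patch and only appears above as context. The following is a rough sketch, reconstructed rather than taken from this diff, of how such a helper would be built on the QEMU bottom-half API (qemu_bh_new(), qemu_bh_schedule()):

/* Rough sketch of the pre-existing qcow2_schedule_bh() helper, which is
 * not shown in this diff; reconstructed under the assumption that it uses
 * the QEMU bottom-half API (qemu_bh_new(), qemu_bh_schedule(),
 * qemu_bh_delete()) and the QCowAIOCB 'bh' field visible above. */
static int qcow2_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
{
    if (acb->bh) {
        return -EIO;           /* a BH is already pending for this request */
    }

    acb->bh = qemu_bh_new(cb, acb);
    if (!acb->bh) {
        return -EIO;
    }

    /* The BH runs from the main loop only after the current call chain has
     * unwound, so qcow2_aio_rw_bh() -- and with it the guest-visible
     * completion callback -- can no longer fire before qcow2_aio_readv()
     * or qcow2_aio_writev() has returned its BlockDriverAIOCB. */
    qemu_bh_schedule(acb->bh);

    return 0;
}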