author    | aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162> | 2009-01-22 16:59:24 +0000
committer | aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162> | 2009-01-22 16:59:24 +0000
commit    | 51b6c751e4f0d777f197395f1824e08f93da0554
tree      | e1fff08e9e152c580dce839151e376395ea3715f /block.c
parent    | 31f78c38f47b27adbde40a485df16ac0ff161a6a
Vectored block device API (Avi Kivity)
Most devices that are capable of DMA are also capable of scatter-gather.
With the memory mapping API, this means that the device code needs to be
able to access discontiguous host memory regions.
For block devices, this translates to vectored I/O. This patch implements
an asynchronous vectored interface for the QEMU block devices. At the moment
all I/O is bounced and submitted through the non-vectored API; in the future
we will convert block devices to natively support vectored I/O wherever
possible.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6397 c046a42c-6fe2-441c-8c8c-71466251a162
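For context only (not part of this commit), the sketch below shows how a caller might drive the new vectored read entry point: build a QEMUIOVector describing two discontiguous buffers, then submit a single bdrv_aio_readv. The header names, the qemu_iovec_init/qemu_iovec_add/qemu_iovec_destroy helpers, and the 8-sector request size are assumptions of this illustration, not something the patch defines; error handling is omitted.

```c
/* Hypothetical caller sketch -- not part of the patch. Assumes the
 * QEMUIOVector helpers from qemu-common.h and the block API from block.h. */
#include "qemu-common.h"
#include "block.h"

typedef struct ReadState {
    QEMUIOVector qiov;          /* describes the caller's scatter list */
} ReadState;

static void read_done(void *opaque, int ret)
{
    ReadState *rs = opaque;

    /* With the current bounce implementation, the data has already been
     * copied back into the caller's buffers before this callback runs. */
    qemu_iovec_destroy(&rs->qiov);
    qemu_free(rs);
}

/* Read 8 sectors starting at sector_num into two discontiguous buffers. */
static void submit_vectored_read(BlockDriverState *bs, int64_t sector_num,
                                 uint8_t *buf0, uint8_t *buf1)
{
    ReadState *rs = qemu_mallocz(sizeof(*rs));

    qemu_iovec_init(&rs->qiov, 2);
    qemu_iovec_add(&rs->qiov, buf0, 4 * 512);   /* sectors 0-3 */
    qemu_iovec_add(&rs->qiov, buf1, 4 * 512);   /* sectors 4-7 */

    bdrv_aio_readv(bs, sector_num, &rs->qiov, 8, read_done, rs);
}
```

Internally such a request is still bounced: bdrv_aio_rw_vector allocates a contiguous 512-byte-aligned buffer, submits it through bdrv_aio_read, and scatters the result back into the iovec in bdrv_aio_rw_vector_cb.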
Diffstat (limited to 'block.c')
-rw-r--r-- | block.c | 68
1 file changed, 68 insertions(+), 0 deletions(-)
```diff
@@ -1246,6 +1246,69 @@ char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
 /**************************************************************/
 /* async I/Os */
 
+typedef struct VectorTranslationState {
+    QEMUIOVector *iov;
+    uint8_t *bounce;
+    int is_write;
+    BlockDriverAIOCB *aiocb;
+    BlockDriverAIOCB *this_aiocb;
+} VectorTranslationState;
+
+static void bdrv_aio_rw_vector_cb(void *opaque, int ret)
+{
+    VectorTranslationState *s = opaque;
+
+    if (!s->is_write) {
+        qemu_iovec_from_buffer(s->iov, s->bounce);
+    }
+    qemu_free(s->bounce);
+    s->this_aiocb->cb(s->this_aiocb->opaque, ret);
+    qemu_aio_release(s->this_aiocb);
+}
+
+static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
+                                            int64_t sector_num,
+                                            QEMUIOVector *iov,
+                                            int nb_sectors,
+                                            BlockDriverCompletionFunc *cb,
+                                            void *opaque,
+                                            int is_write)
+
+{
+    VectorTranslationState *s = qemu_mallocz(sizeof(*s));
+    BlockDriverAIOCB *aiocb = qemu_aio_get(bs, cb, opaque);
+
+    s->this_aiocb = aiocb;
+    s->iov = iov;
+    s->bounce = qemu_memalign(512, nb_sectors * 512);
+    s->is_write = is_write;
+    if (is_write) {
+        qemu_iovec_to_buffer(s->iov, s->bounce);
+        s->aiocb = bdrv_aio_write(bs, sector_num, s->bounce, nb_sectors,
+                                  bdrv_aio_rw_vector_cb, s);
+    } else {
+        s->aiocb = bdrv_aio_read(bs, sector_num, s->bounce, nb_sectors,
+                                 bdrv_aio_rw_vector_cb, s);
+    }
+    return aiocb;
+}
+
+BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
+                                 QEMUIOVector *iov, int nb_sectors,
+                                 BlockDriverCompletionFunc *cb, void *opaque)
+{
+    return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
+                              cb, opaque, 0);
+}
+
+BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
+                                  QEMUIOVector *iov, int nb_sectors,
+                                  BlockDriverCompletionFunc *cb, void *opaque)
+{
+    return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
+                              cb, opaque, 1);
+}
+
 BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num,
                                 uint8_t *buf, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque)
@@ -1294,6 +1357,11 @@ void bdrv_aio_cancel(BlockDriverAIOCB *acb)
 {
     BlockDriver *drv = acb->bs->drv;
 
+    if (acb->cb == bdrv_aio_rw_vector_cb) {
+        VectorTranslationState *s = acb->opaque;
+        acb = s->aiocb;
+    }
+
     drv->bdrv_aio_cancel(acb);
 }
```