author     Kevin Wolf <kwolf@redhat.com>    2011-09-19 11:26:48 +0200
committer  Kevin Wolf <kwolf@redhat.com>    2011-12-05 14:49:40 +0100
commit     1b9f1491f82de0c8f9b09d1a06ac33304449a634 (patch)
tree       13b07e148453376ef976a01160c049b7b974ebde /block/qcow2-cluster.c
parent     1c8a881daaca6fe0646a425b0970fb3ad25f6732 (diff)
qcow2: Unlock during COW
Unlocking during COW allows for more parallelism. One change it requires is
that buffers are dynamically allocated instead of just using a per-image
buffer.

While touching the code, drop the synchronous qcow2_read() function and
replace it by a bdrv_read() call.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
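The shape of the change is easiest to see outside of QEMU's coroutine and block
layers. The sketch below is not QEMU code: a plain pthread mutex stands in for
qemu_co_mutex_lock()/qemu_co_mutex_unlock(), two memcpy() calls stand in for the
bdrv_read()/bdrv_write() pair, and a per-request malloc'd buffer replaces the
shared s->cluster_data; struct image, cow_copy() and link_cluster() are made-up
names. The point it illustrates is the one in the diff: the lock protecting the
image is dropped only around the slow copy, which is safe precisely because the
copy no longer touches any shared per-image buffer.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SECTOR_SIZE 512

struct image {
    pthread_mutex_t lock;    /* protects the image metadata */
    uint8_t *data;           /* backing storage, sectors laid out back to back */
    uint64_t total_sectors;
};

/*
 * Copy n sectors from src_sect to dst_sect.  Runs WITHOUT the lock held,
 * so other requests can make progress while the copy is in flight; each
 * call uses its own heap buffer instead of a shared per-image one.
 */
static int cow_copy(struct image *img, uint64_t src_sect,
                    uint64_t dst_sect, int n)
{
    if (n <= 0) {
        return 0;
    }

    uint8_t *buf = malloc((size_t)n * SECTOR_SIZE);
    if (!buf) {
        return -1;
    }

    memcpy(buf, img->data + src_sect * SECTOR_SIZE, (size_t)n * SECTOR_SIZE);
    memcpy(img->data + dst_sect * SECTOR_SIZE, buf, (size_t)n * SECTOR_SIZE);

    free(buf);    /* freed on every exit path, like the goto-out cleanup below */
    return 0;
}

/* Caller holds img->lock; release it only around the slow copy. */
static int link_cluster(struct image *img, uint64_t src_sect,
                        uint64_t dst_sect, int n)
{
    pthread_mutex_unlock(&img->lock);
    int ret = cow_copy(img, src_sect, dst_sect, n);
    pthread_mutex_lock(&img->lock);
    return ret;
}

int main(void)
{
    struct image img = { .total_sectors = 16 };
    pthread_mutex_init(&img.lock, NULL);
    img.data = calloc(img.total_sectors, SECTOR_SIZE);
    if (!img.data) {
        return 1;
    }

    pthread_mutex_lock(&img.lock);
    int ret = link_cluster(&img, 0, 8, 4);   /* COW 4 sectors: 0..3 -> 8..11 */
    pthread_mutex_unlock(&img.lock);

    free(img.data);
    return ret < 0;
}

The same reasoning explains why the two changes in the patch come together:
dropping the lock during the copy is only correct once each COW request owns
its buffer, which is why s->cluster_data gives way to qemu_blockalign() and
qemu_vfree() in copy_sectors().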
Diffstat (limited to 'block/qcow2-cluster.c')
-rw-r--r--  block/qcow2-cluster.c | 104
1 file changed, 35 insertions(+), 69 deletions(-)
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index f4e049fa90..0e337075b3 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -289,89 +289,51 @@ void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
}
}
-
-static int qcow2_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- BDRVQcowState *s = bs->opaque;
- int ret, index_in_cluster, n, n1;
- uint64_t cluster_offset;
- struct iovec iov;
- QEMUIOVector qiov;
-
- while (nb_sectors > 0) {
- n = nb_sectors;
-
- ret = qcow2_get_cluster_offset(bs, sector_num << 9, &n,
- &cluster_offset);
- if (ret < 0) {
- return ret;
- }
-
- index_in_cluster = sector_num & (s->cluster_sectors - 1);
- if (!cluster_offset) {
- if (bs->backing_hd) {
- /* read from the base image */
- iov.iov_base = buf;
- iov.iov_len = n * 512;
- qemu_iovec_init_external(&qiov, &iov, 1);
-
- n1 = qcow2_backing_read1(bs->backing_hd, &qiov, sector_num, n);
- if (n1 > 0) {
- BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING);
- ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
- if (ret < 0)
- return -1;
- }
- } else {
- memset(buf, 0, 512 * n);
- }
- } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
- if (qcow2_decompress_cluster(bs, cluster_offset) < 0)
- return -1;
- memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
- } else {
- BLKDBG_EVENT(bs->file, BLKDBG_READ);
- ret = bdrv_pread(bs->file, cluster_offset + index_in_cluster * 512, buf, n * 512);
- if (ret != n * 512)
- return -1;
- if (s->crypt_method) {
- qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0,
- &s->aes_decrypt_key);
- }
- }
- nb_sectors -= n;
- sector_num += n;
- buf += n * 512;
- }
- return 0;
-}
-
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
uint64_t cluster_offset, int n_start, int n_end)
{
BDRVQcowState *s = bs->opaque;
int n, ret;
+ void *buf;
+
+ /*
+ * If this is the last cluster and it is only partially used, we must only
+ * copy until the end of the image, or bdrv_check_request will fail for the
+ * bdrv_read/write calls below.
+ */
+ if (start_sect + n_end > bs->total_sectors) {
+ n_end = bs->total_sectors - start_sect;
+ }
n = n_end - n_start;
- if (n <= 0)
+ if (n <= 0) {
return 0;
+ }
+
+ buf = qemu_blockalign(bs, n * BDRV_SECTOR_SIZE);
+
BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
- ret = qcow2_read(bs, start_sect + n_start, s->cluster_data, n);
- if (ret < 0)
- return ret;
+ ret = bdrv_read(bs, start_sect + n_start, buf, n);
+ if (ret < 0) {
+ goto out;
+ }
+
if (s->crypt_method) {
qcow2_encrypt_sectors(s, start_sect + n_start,
- s->cluster_data,
- s->cluster_data, n, 1,
+ buf, buf, n, 1,
&s->aes_encrypt_key);
}
+
BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
- ret = bdrv_write(bs->file, (cluster_offset >> 9) + n_start,
- s->cluster_data, n);
- if (ret < 0)
- return ret;
- return 0;
+ ret = bdrv_write(bs->file, (cluster_offset >> 9) + n_start, buf, n);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = 0;
+out:
+ qemu_vfree(buf);
+ return ret;
}
@@ -620,7 +582,9 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
if (m->n_start) {
cow = true;
+ qemu_co_mutex_unlock(&s->lock);
ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
+ qemu_co_mutex_lock(&s->lock);
if (ret < 0)
goto err;
}
@@ -628,8 +592,10 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
if (m->nb_available & (s->cluster_sectors - 1)) {
uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
cow = true;
+ qemu_co_mutex_unlock(&s->lock);
ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
m->nb_available - end, s->cluster_sectors);
+ qemu_co_mutex_lock(&s->lock);
if (ret < 0)
goto err;
}