author    Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>  2011-11-23 11:47:56 +0000
committer Kevin Wolf <kwolf@redhat.com>                  2011-12-05 14:51:38 +0100
commit    d83947ac6d2a58fdaf128fbe1b22bc9639e79fe5 (patch)
tree      99040eddc127c7d1967f7e6e2709b31afa8c1a56
parent    f4658285f99473367dbbc34ce6970ec4637c2388 (diff)
block: request overlap detection
Detect overlapping requests and remember to align to cluster boundaries
if the image format uses them. This assumes that allocating I/O is
performed at cluster granularity, which is true for qcow2, qed, etc.

Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
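To see what the rounding does, here is a minimal standalone sketch of the same
arithmetic (not QEMU code: SECTOR_SIZE, align_down()/align_up(), and the 64 KiB
cluster size are stand-ins for BDRV_SECTOR_SIZE, QEMU_ALIGN_DOWN()/
QEMU_ALIGN_UP(), and a qcow2-style cluster size):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SECTOR_SIZE 512   /* assumption: stand-in for QEMU's BDRV_SECTOR_SIZE */

/* Stand-ins for QEMU_ALIGN_DOWN()/QEMU_ALIGN_UP() */
static int64_t align_down(int64_t x, int64_t c) { return x / c * c; }
static int64_t align_up(int64_t x, int64_t c) { return align_down(x + c - 1, c); }

int main(void)
{
    int64_t cluster_size = 65536;            /* assumed 64 KiB clusters */
    int64_t c = cluster_size / SECTOR_SIZE;  /* 128 sectors per cluster */
    int64_t sector_num = 200;                /* sample request */
    int nb_sectors = 10;

    /* Same arithmetic as round_to_clusters() in the patch below */
    int64_t cluster_sector_num = align_down(sector_num, c);
    int64_t cluster_nb_sectors = align_up(sector_num - cluster_sector_num +
                                          nb_sectors, c);

    printf("request [%" PRId64 ", %" PRId64 ")\n",
           sector_num, sector_num + nb_sectors);
    printf("rounded [%" PRId64 ", %" PRId64 ")\n",
           cluster_sector_num, cluster_sector_num + cluster_nb_sectors);
    return 0;
}

For this 10-sector request starting at sector 200, the range is widened to
[128, 256), i.e. exactly the one cluster the request touches. Serializing on
the widened range is what keeps two allocating writes from racing within the
same cluster.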
-rw-r--r--  block.c  45
1 file changed, 43 insertions(+), 2 deletions(-)
diff --git a/block.c b/block.c
index 1b3c809ed4..ca0e8ece1e 100644
--- a/block.c
+++ b/block.c
@@ -1133,21 +1133,62 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
 }
 
+/**
+ * Round a region to cluster boundaries
+ */
+static void round_to_clusters(BlockDriverState *bs,
+                              int64_t sector_num, int nb_sectors,
+                              int64_t *cluster_sector_num,
+                              int *cluster_nb_sectors)
+{
+    BlockDriverInfo bdi;
+
+    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
+        *cluster_sector_num = sector_num;
+        *cluster_nb_sectors = nb_sectors;
+    } else {
+        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
+        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
+        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
+                                            nb_sectors, c);
+    }
+}
+
 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                      int64_t sector_num, int nb_sectors) {
-    return false; /* not yet implemented */
+    /*        aaaa   bbbb */
+    if (sector_num >= req->sector_num + req->nb_sectors) {
+        return false;
+    }
+    /* bbbb   aaaa        */
+    if (req->sector_num >= sector_num + nb_sectors) {
+        return false;
+    }
+    return true;
 }
 
 static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors)
 {
     BdrvTrackedRequest *req;
+    int64_t cluster_sector_num;
+    int cluster_nb_sectors;
     bool retry;
 
+    /* If we touch the same cluster it counts as an overlap.  This guarantees
+     * that allocating writes will be serialized and not race with each other
+     * for the same cluster.  For example, in copy-on-read it ensures that the
+     * CoR read and write operations are atomic and guest writes cannot
+     * interleave between them.
+     */
+    round_to_clusters(bs, sector_num, nb_sectors,
+                      &cluster_sector_num, &cluster_nb_sectors);
+
     do {
         retry = false;
         QLIST_FOREACH(req, &bs->tracked_requests, list) {
-            if (tracked_request_overlaps(req, sector_num, nb_sectors)) {
+            if (tracked_request_overlaps(req, cluster_sector_num,
+                                         cluster_nb_sectors)) {
                 qemu_co_queue_wait(&req->wait_queue);
                 retry = true;
                 break;