author    Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>  2018-07-09 19:37:18 +0300
committer Kevin Wolf <kwolf@redhat.com>  2018-07-10 13:10:25 +0200
commit    09d2f948462f4979d18f573a0734d1daae8e67a9 (patch)
tree      be170c2bba1a5ea8561ab0708554203443bb6f0d /block/io.c
parent    67b51fb998c697afb5d744066fcbde53e04fe941 (diff)
block: add BDRV_REQ_SERIALISING flag
Serialising writes are needed for the copy-on-write operations of backup(sync=none) in the image fleecing scheme.

This requires changing the assert in bdrv_aligned_pwritev() that was added in 28de2dcd88de. The assert may now fail, because the call to wait_serialising_requests() there may be the first such call for a request that has the serialising flag set. This happens when the request is aligned (otherwise the serialising flag is already set before bdrv_aligned_pwritev() is called, and all intersecting requests have already been waited for). For aligned requests, however, there is no previously read data that could become stale, so the assert can simply be relaxed to ignore aligned requests.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
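For illustration, here is a minimal standalone sketch of the relaxed assertion, compilable outside QEMU. ToyTrackedRequest is a hypothetical stand-in carrying only the BdrvTrackedRequest fields the check actually reads; the predicate body mirrors the one added by the patch below.

/* Standalone sketch of the relaxed assert; ToyTrackedRequest is a
 * hypothetical reduction of BdrvTrackedRequest to the fields used here. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct ToyTrackedRequest {
    int64_t offset;          /* request range */
    uint64_t bytes;
    bool serialising;
    int64_t overlap_offset;  /* range serialised against other requests */
    uint64_t overlap_bytes;
} ToyTrackedRequest;

/* Same predicate the patch adds: for a serialising request, the overlap
 * range matching the request range means the request is aligned. */
static bool is_request_serialising_and_aligned(ToyTrackedRequest *req)
{
    return req->serialising && (req->offset == req->overlap_offset) &&
           (req->bytes == req->overlap_bytes);
}

int main(void)
{
    /* An aligned write that got its serialising flag only inside
     * bdrv_aligned_pwritev(), as happens with BDRV_REQ_SERIALISING. */
    ToyTrackedRequest req = {
        .offset = 65536, .bytes = 65536, .serialising = true,
        .overlap_offset = 65536, .overlap_bytes = 65536,
    };
    bool waited = true;  /* wait_serialising_requests() had to wait */

    /* The relaxed assert from bdrv_aligned_pwritev(): waiting is fine
     * for an aligned serialising request, since no data was read into
     * a bounce buffer that could have gone stale while waiting. */
    assert(!waited || !req.serialising ||
           is_request_serialising_and_aligned(&req));
    printf("aligned serialising request passes the relaxed assert\n");
    return 0;
}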
Diffstat (limited to 'block/io.c')
-rw-r--r--  block/io.c  28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/block/io.c b/block/io.c
index 75ab26f..6be9c40 100644
--- a/block/io.c
+++ b/block/io.c
@@ -637,6 +637,18 @@ static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
+static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
+{
+ /*
+ * If the request is serialising, overlap_offset and overlap_bytes are set,
+ * so we can check if the request is aligned. Otherwise, don't care and
+ * return false.
+ */
+
+ return req->serialising && (req->offset == req->overlap_offset) &&
+ (req->bytes == req->overlap_bytes);
+}
+
/**
* Round a region to cluster boundaries
*/
@@ -1311,6 +1323,9 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
mark_request_serialising(req, bdrv_get_cluster_size(bs));
}
+ /* BDRV_REQ_SERIALISING is only for write operation */
+ assert(!(flags & BDRV_REQ_SERIALISING));
+
if (!(flags & BDRV_REQ_NO_SERIALISING)) {
wait_serialising_requests(req);
}
@@ -1594,8 +1609,14 @@ static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
/* BDRV_REQ_NO_SERIALISING is only for read operation */
assert(!(flags & BDRV_REQ_NO_SERIALISING));
+
+ if (flags & BDRV_REQ_SERIALISING) {
+ mark_request_serialising(req, bdrv_get_cluster_size(bs));
+ }
+
waited = wait_serialising_requests(req);
- assert(!waited || !req->serialising);
+ assert(!waited || !req->serialising ||
+ is_request_serialising_and_aligned(req));
assert(req->overlap_offset <= offset);
assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
if (flags & BDRV_REQ_WRITE_UNCHANGED) {
@@ -2949,6 +2970,8 @@ static int coroutine_fn bdrv_co_copy_range_internal(
tracked_request_begin(&req, src->bs, src_offset, bytes,
BDRV_TRACKED_READ);
+ /* BDRV_REQ_SERIALISING is only for write operation */
+ assert(!(read_flags & BDRV_REQ_SERIALISING));
if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
wait_serialising_requests(&req);
}
@@ -2968,6 +2991,9 @@ static int coroutine_fn bdrv_co_copy_range_internal(
/* BDRV_REQ_NO_SERIALISING is only for read operation */
assert(!(write_flags & BDRV_REQ_NO_SERIALISING));
+ if (write_flags & BDRV_REQ_SERIALISING) {
+ mark_request_serialising(&req, bdrv_get_cluster_size(dst->bs));
+ }
wait_serialising_requests(&req);
ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,