author     Max Reitz <mreitz@redhat.com>   2019-11-01 16:25:08 +0100
committer  Max Reitz <mreitz@redhat.com>   2019-11-04 09:29:15 +0100
commit     304d9d7f034ff7f5e1e66a65b7f720f63a72c57e (patch)
tree       ce4d5e8d16bac1271fc2c543f217760f989c7b66 /block
parent     dcfbece68441bfbe6803de60ab5a8045196094b4 (diff)
block: Make wait/mark serialising requests public
Make both bdrv_mark_request_serialising() and
bdrv_wait_serialising_requests() public so they can be used from
block drivers.

Cc: qemu-stable@nongnu.org
Signed-off-by: Max Reitz <mreitz@redhat.com>
Message-id: 20191101152510.11719-2-mreitz@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
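For orientation, here is a minimal sketch (not part of this patch) of how a block driver could call the two helpers once they are public. Only the two bdrv_* calls and their signatures come from this patch; the driver name, its state struct, and the surrounding request handling are hypothetical and assume the caller already runs in a coroutine with a tracked request set up by the block layer:

/* Hypothetical block-driver helper.  Assumes @req has already been
 * registered through the block layer's request tracking (as done for
 * regular reads/writes in block/io.c) and that the driver keeps its
 * cluster size in its state struct. */
static int coroutine_fn hypo_co_serialised_update(BlockDriverState *bs,
                                                  BdrvTrackedRequest *req)
{
    BDRVHypoState *s = bs->opaque;   /* hypothetical driver state */

    /* Widen the request to cluster granularity and mark it serialising
     * so overlapping requests conflict with it. */
    bdrv_mark_request_serialising(req, s->cluster_size);

    /* Wait until already-running overlapping requests have drained. */
    bdrv_wait_serialising_requests(req);

    /* ... perform the read-modify-write on the now-exclusive range ... */
    return 0;
}

This mirrors the pattern used internally in block/io.c (mark, then wait, then do the serialised I/O), which is what making the functions public exposes to drivers.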
Diffstat (limited to 'block')
-rw-r--r--  block/io.c  |  24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
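Note that the matching prototype declarations are not part of this view, which is limited to 'block'; they would presumably live in a block-layer internal header. Based on the definitions in the diff below, they would read:

void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align);
bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self);

(The signatures are taken from the function definitions in block/io.c below; the exact header location is an assumption.)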
diff --git a/block/io.c b/block/io.c
index 02659f9..039c0d4 100644
--- a/block/io.c
+++ b/block/io.c
@@ -715,7 +715,7 @@ static void tracked_request_begin(BdrvTrackedRequest *req,
qemu_co_mutex_unlock(&bs->reqs_lock);
}
-static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
+void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
int64_t overlap_offset = req->offset & ~(align - 1);
uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
@@ -805,7 +805,7 @@ void bdrv_dec_in_flight(BlockDriverState *bs)
bdrv_wakeup(bs);
}
-static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
+bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
BlockDriverState *bs = self->bs;
BdrvTrackedRequest *req;
@@ -1437,14 +1437,14 @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
* with each other for the same cluster. For example, in copy-on-read
* it ensures that the CoR read and write operations are atomic and
* guest writes cannot interleave between them. */
- mark_request_serialising(req, bdrv_get_cluster_size(bs));
+ bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
}
/* BDRV_REQ_SERIALISING is only for write operation */
assert(!(flags & BDRV_REQ_SERIALISING));
if (!(flags & BDRV_REQ_NO_SERIALISING)) {
- wait_serialising_requests(req);
+ bdrv_wait_serialising_requests(req);
}
if (flags & BDRV_REQ_COPY_ON_READ) {
@@ -1841,10 +1841,10 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
assert(!(flags & ~BDRV_REQ_MASK));
if (flags & BDRV_REQ_SERIALISING) {
- mark_request_serialising(req, bdrv_get_cluster_size(bs));
+ bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
}
- waited = wait_serialising_requests(req);
+ waited = bdrv_wait_serialising_requests(req);
assert(!waited || !req->serialising ||
is_request_serialising_and_aligned(req));
@@ -2008,8 +2008,8 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
padding = bdrv_init_padding(bs, offset, bytes, &pad);
if (padding) {
- mark_request_serialising(req, align);
- wait_serialising_requests(req);
+ bdrv_mark_request_serialising(req, align);
+ bdrv_wait_serialising_requests(req);
bdrv_padding_rmw_read(child, req, &pad, true);
@@ -2111,8 +2111,8 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
}
if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
- mark_request_serialising(&req, align);
- wait_serialising_requests(&req);
+ bdrv_mark_request_serialising(&req, align);
+ bdrv_wait_serialising_requests(&req);
bdrv_padding_rmw_read(child, &req, &pad, false);
}
@@ -3205,7 +3205,7 @@ static int coroutine_fn bdrv_co_copy_range_internal(
/* BDRV_REQ_SERIALISING is only for write operation */
assert(!(read_flags & BDRV_REQ_SERIALISING));
if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
- wait_serialising_requests(&req);
+ bdrv_wait_serialising_requests(&req);
}
ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
@@ -3336,7 +3336,7 @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
* new area, we need to make sure that no write requests are made to it
* concurrently or they might be overwritten by preallocation. */
if (new_bytes) {
- mark_request_serialising(&req, 1);
+ bdrv_mark_request_serialising(&req, 1);
}
if (bs->read_only) {
error_setg(errp, "Image is read-only");