author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>  2019-10-01 16:14:05 +0300
committer  Max Reitz <mreitz@redhat.com>                            2019-10-10 10:56:18 +0200
commit     a6ffe1998cfe1fd3cd83de0a7d1dd16eb514f987 (patch)
tree       2340299ddbb9c799fa5a9d9f0d63bd89551182a2 /block
parent     f2d86ade4da71d1f32ffc5977ea5417c20996919 (diff)
block/backup: move in-flight requests handling from backup to block-copy
Move synchronization mechanism to block-copy, to be able to use one
block-copy instance from backup job and backup-top filter in parallel.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-id: 20191001131409.14202-2-vsementsov@virtuozzo.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c      52
-rw-r--r--  block/block-copy.c  43
2 files changed, 43 insertions(+), 52 deletions(-)
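
Note: the moved synchronization code relies on a BlockCopyInFlightReq type and an inflight_reqs list inside BlockCopyState. Those declarations live in the block-copy header (presumably include/block/block-copy.h), which is outside the 'block' directory and therefore not part of this diffstat. A minimal sketch of what they look like, inferred only from the fields the new block-copy.c code uses; member order and any other members are assumptions:

/* Sketch only: inferred from the fields referenced in the diff below.
 * The real definitions are in the block-copy header, not shown here. */
typedef struct BlockCopyInFlightReq {
    int64_t start_byte;
    int64_t end_byte;
    QLIST_ENTRY(BlockCopyInFlightReq) list;
    CoQueue wait_queue; /* coroutines blocked on this in-flight request */
} BlockCopyInFlightReq;

typedef struct BlockCopyState {
    /* ... other members elided ... */
    QLIST_HEAD(, BlockCopyInFlightReq) inflight_reqs;
} BlockCopyState;

With the request list owned by BlockCopyState rather than the backup job, overlapping block_copy() calls serialize against each other inside block-copy itself, which is what allows a single instance to serve both the backup job and the backup-top filter, as the commit message states.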
diff --git a/block/backup.c b/block/backup.c
index 4613b8c..d918836 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -29,13 +29,6 @@
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
-typedef struct CowRequest {
- int64_t start_byte;
- int64_t end_byte;
- QLIST_ENTRY(CowRequest) list;
- CoQueue wait_queue; /* coroutines blocked on this request */
-} CowRequest;
-
typedef struct BackupBlockJob {
BlockJob common;
BlockDriverState *source_bs;
@@ -51,50 +44,12 @@ typedef struct BackupBlockJob {
uint64_t bytes_read;
int64_t cluster_size;
NotifierWithReturn before_write;
- QLIST_HEAD(, CowRequest) inflight_reqs;
BlockCopyState *bcs;
} BackupBlockJob;
static const BlockJobDriver backup_job_driver;
-/* See if in-flight requests overlap and wait for them to complete */
-static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
- int64_t start,
- int64_t end)
-{
- CowRequest *req;
- bool retry;
-
- do {
- retry = false;
- QLIST_FOREACH(req, &job->inflight_reqs, list) {
- if (end > req->start_byte && start < req->end_byte) {
- qemu_co_queue_wait(&req->wait_queue, NULL);
- retry = true;
- break;
- }
- }
- } while (retry);
-}
-
-/* Keep track of an in-flight request */
-static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
- int64_t start, int64_t end)
-{
- req->start_byte = start;
- req->end_byte = end;
- qemu_co_queue_init(&req->wait_queue);
- QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
-}
-
-/* Forget about a completed request */
-static void cow_request_end(CowRequest *req)
-{
- QLIST_REMOVE(req, list);
- qemu_co_queue_restart_all(&req->wait_queue);
-}
-
static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
{
BackupBlockJob *s = opaque;
@@ -116,7 +71,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
bool *error_is_read,
bool is_write_notifier)
{
- CowRequest cow_request;
int ret = 0;
int64_t start, end; /* bytes */
@@ -127,14 +81,9 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
trace_backup_do_cow_enter(job, start, offset, bytes);
- wait_for_overlapping_requests(job, start, end);
- cow_request_begin(&cow_request, job, start, end);
-
ret = block_copy(job->bcs, start, end - start, error_is_read,
is_write_notifier);
- cow_request_end(&cow_request);
-
trace_backup_do_cow_return(job, offset, bytes, ret);
qemu_co_rwlock_unlock(&job->flush_rwlock);
@@ -316,7 +265,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
int ret = 0;
- QLIST_INIT(&s->inflight_reqs);
qemu_co_rwlock_init(&s->flush_rwlock);
backup_init_copy_bitmap(s);
diff --git a/block/block-copy.c b/block/block-copy.c
index 3fc9152..61e5ea5 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -19,6 +19,41 @@
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
+static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
+ int64_t start,
+ int64_t end)
+{
+ BlockCopyInFlightReq *req;
+ bool waited;
+
+ do {
+ waited = false;
+ QLIST_FOREACH(req, &s->inflight_reqs, list) {
+ if (end > req->start_byte && start < req->end_byte) {
+ qemu_co_queue_wait(&req->wait_queue, NULL);
+ waited = true;
+ break;
+ }
+ }
+ } while (waited);
+}
+
+static void block_copy_inflight_req_begin(BlockCopyState *s,
+ BlockCopyInFlightReq *req,
+ int64_t start, int64_t end)
+{
+ req->start_byte = start;
+ req->end_byte = end;
+ qemu_co_queue_init(&req->wait_queue);
+ QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
+}
+
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
+{
+ QLIST_REMOVE(req, list);
+ qemu_co_queue_restart_all(&req->wait_queue);
+}
+
void block_copy_state_free(BlockCopyState *s)
{
if (!s) {
@@ -79,6 +114,8 @@ BlockCopyState *block_copy_state_new(
s->use_copy_range =
!(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
+ QLIST_INIT(&s->inflight_reqs);
+
/*
* We just allow aio context change on our block backends. block_copy() user
* (now it's only backup) is responsible for source and target being in same
@@ -266,6 +303,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
int64_t end = bytes + start; /* bytes */
void *bounce_buffer = NULL;
int64_t status_bytes;
+ BlockCopyInFlightReq req;
/*
* block_copy() user is responsible for keeping source and target in same
@@ -276,6 +314,9 @@ int coroutine_fn block_copy(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
assert(QEMU_IS_ALIGNED(end, s->cluster_size));
+ block_copy_wait_inflight_reqs(s, start, bytes);
+ block_copy_inflight_req_begin(s, &req, start, end);
+
while (start < end) {
int64_t dirty_end;
@@ -329,5 +370,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
qemu_vfree(bounce_buffer);
}
+ block_copy_inflight_req_end(&req);
+
return ret;
}