Diffstat (limited to 'block'):
 block/backup.c | 22
 block/io.c     | 12
 block/mirror.c | 43
 3 files changed, 58 insertions(+), 19 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index feeb9f8..581269b 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -246,12 +246,20 @@ static void backup_abort(BlockJob *job)
}
}
+static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
+{
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+ blk_set_aio_context(s->target, aio_context);
+}
+
static const BlockJobDriver backup_job_driver = {
- .instance_size = sizeof(BackupBlockJob),
- .job_type = BLOCK_JOB_TYPE_BACKUP,
- .set_speed = backup_set_speed,
- .commit = backup_commit,
- .abort = backup_abort,
+ .instance_size = sizeof(BackupBlockJob),
+ .job_type = BLOCK_JOB_TYPE_BACKUP,
+ .set_speed = backup_set_speed,
+ .commit = backup_commit,
+ .abort = backup_abort,
+ .attached_aio_context = backup_attached_aio_context,
};
static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -392,9 +400,7 @@ static void coroutine_fn backup_run(void *opaque)
while (!block_job_is_cancelled(&job->common)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
- job->common.busy = false;
- qemu_coroutine_yield();
- job->common.busy = true;
+ block_job_yield(&job->common);
}
} else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
ret = backup_run_incremental(job);
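
The backup.c hunks do two things: the new .attached_aio_context callback moves the
target BlockBackend along whenever the job's AioContext changes, and the open-coded
busy-flag dance around qemu_coroutine_yield() is replaced by block_job_yield(), which
also cooperates with the pause machinery introduced elsewhere in this series. Roughly,
block_job_yield() folds the three removed lines into a helper along these lines (a
sketch reconstructed from the lines deleted above, not the exact blockjob.c code):

    /* Sketch only: approximates block_job_yield(), based on the removed
     * busy-flag sequence plus the pause-point behaviour of this series. */
    void coroutine_fn block_job_yield(BlockJob *job)
    {
        assert(job->busy);

        /* Don't go to sleep if the job was already cancelled. */
        if (block_job_is_cancelled(job)) {
            return;
        }

        job->busy = false;
        qemu_coroutine_yield();      /* woken by block_job_enter() or a resume */
        job->busy = true;

        /* Honour any pause request that arrived while we were yielded. */
        block_job_pause_point(job);
    }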
diff --git a/block/io.c b/block/io.c
index ebdb9d8..7cf3645 100644
--- a/block/io.c
+++ b/block/io.c
@@ -2347,9 +2347,13 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
return 0;
}
- tracked_request_begin(&req, bs, sector_num, nb_sectors,
- BDRV_TRACKED_DISCARD);
- bdrv_set_dirty(bs, sector_num, nb_sectors);
+ tracked_request_begin(&req, bs, sector_num << BDRV_SECTOR_BITS,
+ nb_sectors << BDRV_SECTOR_BITS, BDRV_TRACKED_DISCARD);
+
+ ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
+ if (ret < 0) {
+ goto out;
+ }
max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
while (nb_sectors > 0) {
@@ -2398,6 +2402,8 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
}
ret = 0;
out:
+ bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
+ req.bytes >> BDRV_SECTOR_BITS);
tracked_request_end(&req);
return ret;
}
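
With this change a discard is tracked in bytes and pushed through the same
before_write notifier list as guest writes, so jobs that hook guest I/O (backup's
copy-on-write notifier, for example) get a chance to act before the data goes away;
the dirty-bitmap update moves to the out: label and is derived from the tracked
request. A notifier on that list has roughly this shape (a hedged sketch:
my_before_write_notify and its body are illustrative, only the types, the
BDRV_TRACKED_DISCARD marker and bdrv_add_before_write_notifier() come from the
block layer):

    /* Illustrative notifier: after this patch it also fires for discards,
     * with req->type == BDRV_TRACKED_DISCARD and req->offset / req->bytes
     * expressed in bytes rather than sectors. */
    static int coroutine_fn my_before_write_notify(NotifierWithReturn *notifier,
                                                   void *opaque)
    {
        BdrvTrackedRequest *req = opaque;
        int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
        int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

        /* ... copy out the affected sectors before the guest request runs ... */
        (void)sector_num;
        (void)nb_sectors;

        return 0;    /* a negative errno here fails the guest request */
    }

The notifier would be registered with bdrv_add_before_write_notifier(), pointing its
notify field at the callback above.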
diff --git a/block/mirror.c b/block/mirror.c
index 075384a..a04ed9c 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -331,6 +331,8 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
mirror_wait_for_io(s);
}
+ block_job_pause_point(&s->common);
+
/* Find the number of consecutive dirty chunks following the first dirty
* one, and wait for in flight requests in them. */
while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
@@ -582,6 +584,8 @@ static void coroutine_fn mirror_run(void *opaque)
if (now - last_pause_ns > SLICE_TIME) {
last_pause_ns = now;
block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
+ } else {
+ block_job_pause_point(&s->common);
}
if (block_job_is_cancelled(&s->common)) {
@@ -613,6 +617,8 @@ static void coroutine_fn mirror_run(void *opaque)
goto immediate_exit;
}
+ block_job_pause_point(&s->common);
+
cnt = bdrv_get_dirty_count(s->dirty_bitmap);
/* s->common.offset contains the number of bytes already processed so
* far, cnt is the number of dirty sectors remaining and
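
The three block_job_pause_point() calls added above mark the places where the mirror
coroutine may legitimately stop: between chunk submissions in mirror_iteration() and
at the top of each mirror_run() pass when the loop is not about to sleep anyway. Any
long-running job loop is expected to follow the same discipline, roughly like this
(a sketch; do_one_chunk() and delay_ns are stand-ins for the job's real work unit and
throttling delay):

    /* Sketch of the pause-point discipline for a long-running job loop.
     * do_one_chunk() is hypothetical; the block_job_* calls are the ones
     * used in the hunks above. */
    while (!block_job_is_cancelled(&s->common)) {
        block_job_pause_point(&s->common);  /* may yield here until resumed */
        do_one_chunk(s);                    /* hypothetical unit of copying */
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
    }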
@@ -795,18 +801,39 @@ static void mirror_complete(BlockJob *job, Error **errp)
block_job_enter(&s->common);
}
+/* There is no matching mirror_resume() because mirror_run() will begin
+ * iterating again when the job is resumed.
+ */
+static void coroutine_fn mirror_pause(BlockJob *job)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+
+ mirror_drain(s);
+}
+
+static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+
+ blk_set_aio_context(s->target, new_context);
+}
+
static const BlockJobDriver mirror_job_driver = {
- .instance_size = sizeof(MirrorBlockJob),
- .job_type = BLOCK_JOB_TYPE_MIRROR,
- .set_speed = mirror_set_speed,
- .complete = mirror_complete,
+ .instance_size = sizeof(MirrorBlockJob),
+ .job_type = BLOCK_JOB_TYPE_MIRROR,
+ .set_speed = mirror_set_speed,
+ .complete = mirror_complete,
+ .pause = mirror_pause,
+ .attached_aio_context = mirror_attached_aio_context,
};
static const BlockJobDriver commit_active_job_driver = {
- .instance_size = sizeof(MirrorBlockJob),
- .job_type = BLOCK_JOB_TYPE_COMMIT,
- .set_speed = mirror_set_speed,
- .complete = mirror_complete,
+ .instance_size = sizeof(MirrorBlockJob),
+ .job_type = BLOCK_JOB_TYPE_COMMIT,
+ .set_speed = mirror_set_speed,
+ .complete = mirror_complete,
+ .pause = mirror_pause,
+ .attached_aio_context = mirror_attached_aio_context,
};
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
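
Taken together, the new driver callbacks form a small contract: .pause gives the
driver a chance to quiesce in-flight I/O before the coroutine sits paused (mirror does
this with mirror_drain()), while .attached_aio_context must move any extra
BlockBackends the job owns, here the target, into the new context. How the generic
layer is expected to drive the pair around an AioContext switch is sketched below
(job_follow_aio_context is illustrative, not a QEMU function; block_job_pause(),
block_job_resume() and the driver fields are real):

    /* Hedged sketch of the intended call sequence around an AioContext switch. */
    static void job_follow_aio_context(BlockJob *job, AioContext *new_context)
    {
        /* Ask the job to pause; at its next pause point the driver's .pause
         * callback (mirror_drain() for mirror) quiesces in-flight I/O.  The
         * real code also waits until the pause has actually taken effect. */
        block_job_pause(job);

        /* ... the node is detached from the old context and attached to the
         * new one by the caller ... */

        if (job->driver->attached_aio_context) {
            job->driver->attached_aio_context(job, new_context);
        }

        block_job_resume(job);
    }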