author     Stefan Hajnoczi <stefanha@redhat.com>  2023-12-05 13:20:03 -0500
committer  Kevin Wolf <kwolf@redhat.com>          2023-12-21 22:49:27 +0100
commit     b49f4755c7fa35ea6e17e5b52c1cdaef6b4aa21c (patch)
tree       42f28762584421da298ba15cb21d027e002476bd /migration
parent     6bc30f19498547fac9cef98316a65cf6c1f14205 (diff)
block: remove AioContext locking
This is the big patch that removes aio_context_acquire()/aio_context_release()
from the block layer and affected block layer users.

There isn't a clean way to split this patch and the reviewers are likely the
same group of people, so I decided to do it in one patch.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-ID: <20231205182011.1976568-7-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'migration')
-rw-r--r--  migration/block.c                34
-rw-r--r--  migration/migration-hmp-cmds.c    3
-rw-r--r--  migration/savevm.c               22
3 files changed, 9 insertions, 50 deletions
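In practice the patch collapses the old two-level pattern (BQL plus per-device
AioContext lock) down to the BQL alone. A minimal before/after sketch of that
pattern, modeled on the mig_save_device_bulk() hunks below; the variable names
(blk, offset, qiov, read_cb, opaque) are illustrative, not from the patch:

    /* Before: callers outside an IOThread took both the BQL and the
     * BlockBackend's AioContext before issuing AIO. */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(blk));
    blk_aio_preadv(blk, offset, &qiov, 0, read_cb, opaque);
    aio_context_release(blk_get_aio_context(blk));
    qemu_mutex_unlock_iothread();

    /* After: the block layer is thread-safe, so the BQL alone suffices;
     * the request runs in the main loop AioContext. */
    qemu_mutex_lock_iothread();
    blk_aio_preadv(blk, offset, &qiov, 0, read_cb, opaque);
    qemu_mutex_unlock_iothread();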
diff --git a/migration/block.c b/migration/block.c
index a15f9bd..6ec6a1d 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -66,7 +66,7 @@ typedef struct BlkMigDevState {
/* Protected by block migration lock. */
int64_t completed_sectors;
- /* During migration this is protected by iothread lock / AioContext.
+ /* During migration this is protected by bdrv_dirty_bitmap_lock().
* Allocation and free happen during setup and cleanup respectively.
*/
BdrvDirtyBitmap *dirty_bitmap;
@@ -101,7 +101,7 @@ typedef struct BlkMigState {
int prev_progress;
int bulk_completed;
- /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
+ /* Lock must be taken _inside_ the iothread lock. */
QemuMutex lock;
} BlkMigState;
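The updated comment keeps the ordering rule: block_mig_state.lock, taken via
blk_mig_lock()/blk_mig_unlock(), nests inside the BQL and never the other way
around. A minimal sketch of the allowed nesting, borrowing the submitted
counter from the hunk further down:

    qemu_mutex_lock_iothread();     /* outer lock: the BQL (iothread lock) */
    blk_mig_lock();                 /* inner lock: block_mig_state.lock */
    block_mig_state.submitted++;
    blk_mig_unlock();
    qemu_mutex_unlock_iothread();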
@@ -270,7 +270,6 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
if (bmds->shared_base) {
qemu_mutex_lock_iothread();
- aio_context_acquire(blk_get_aio_context(bb));
/* Skip unallocated sectors; intentionally treats failure or
* partial sector as an allocated sector */
while (cur_sector < total_sectors &&
@@ -281,7 +280,6 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
}
cur_sector += count >> BDRV_SECTOR_BITS;
}
- aio_context_release(blk_get_aio_context(bb));
qemu_mutex_unlock_iothread();
}
@@ -313,21 +311,16 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
block_mig_state.submitted++;
blk_mig_unlock();
- /* We do not know if bs is under the main thread (and thus does
- * not acquire the AioContext when doing AIO) or rather under
- * dataplane. Thus acquire both the iothread mutex and the
- * AioContext.
- *
- * This is ugly and will disappear when we make bdrv_* thread-safe,
- * without the need to acquire the AioContext.
+ /*
+ * The migration thread does not have an AioContext. Lock the BQL so that
+ * I/O runs in the main loop AioContext (see
+ * qemu_get_current_aio_context()).
*/
qemu_mutex_lock_iothread();
- aio_context_acquire(blk_get_aio_context(bmds->blk));
bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
nr_sectors * BDRV_SECTOR_SIZE);
blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
0, blk_mig_read_cb, blk);
- aio_context_release(blk_get_aio_context(bmds->blk));
qemu_mutex_unlock_iothread();
bmds->cur_sector = cur_sector + nr_sectors;
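The new comment leans on qemu_get_current_aio_context()'s contract: a thread
that holds the BQL but owns no AioContext of its own is treated as running in
the main loop. A sketch of how the submission path above could assert that,
assuming qemu_get_aio_context() still returns the main loop context; the
assert is an illustration, not part of the patch:

    qemu_mutex_lock_iothread();
    /* With the BQL held and no thread-local AioContext, AIO submitted
     * here completes in the main loop. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE,
                                &blk->qiov, 0, blk_mig_read_cb, blk);
    qemu_mutex_unlock_iothread();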
@@ -512,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void)
}
}
-/* Called with iothread lock and AioContext taken. */
+/* Called with iothread lock taken. */
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
int is_async)
@@ -606,9 +599,7 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
int ret = 1;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
- aio_context_acquire(blk_get_aio_context(bmds->blk));
ret = mig_save_device_dirty(f, bmds, is_async);
- aio_context_release(blk_get_aio_context(bmds->blk));
if (ret <= 0) {
break;
}
@@ -666,9 +657,9 @@ static int64_t get_remaining_dirty(void)
int64_t dirty = 0;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
- aio_context_acquire(blk_get_aio_context(bmds->blk));
+ bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
- aio_context_release(blk_get_aio_context(bmds->blk));
+ bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
}
return dirty;
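With the AioContext lock gone, the dirty bitmap's own mutex is what serializes
readers against the reset path. A short sketch of both sides, assuming (per
the updated BlkMigDevState comment) that bdrv_reset_dirty_bitmap() takes the
bitmap mutex internally:

    /* Reader (get_remaining_dirty() above): explicit lock/unlock. */
    bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
    dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
    bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

    /* Writer (mig_save_device_bulk() above): the wrapper locks for us. */
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap,
                            cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);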
@@ -681,7 +672,6 @@ static void block_migration_cleanup_bmds(void)
{
BlkMigDevState *bmds;
BlockDriverState *bs;
- AioContext *ctx;
unset_dirty_tracking();
@@ -693,13 +683,7 @@ static void block_migration_cleanup_bmds(void)
bdrv_op_unblock_all(bs, bmds->blocker);
}
error_free(bmds->blocker);
-
- /* Save ctx, because bmds->blk can disappear during blk_unref. */
- ctx = blk_get_aio_context(bmds->blk);
- aio_context_acquire(ctx);
blk_unref(bmds->blk);
- aio_context_release(ctx);
-
g_free(bmds->blk_name);
g_free(bmds->aio_bitmap);
g_free(bmds);
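The cleanup hunk above drops the save-the-context dance around blk_unref(). A
sketch of the simplified teardown; under the new rules the BQL is the only
lock the caller still needs:

    /* blk_unref() may free the BlockBackend, so nothing may touch
     * bmds->blk afterwards; no AioContext has to be saved first. */
    blk_unref(bmds->blk);
    g_free(bmds->blk_name);
    g_free(bmds->aio_bitmap);
    g_free(bmds);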
diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c
index 86ae832..99710c8 100644
--- a/migration/migration-hmp-cmds.c
+++ b/migration/migration-hmp-cmds.c
@@ -852,14 +852,11 @@ static void vm_completion(ReadLineState *rs, const char *str)
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
SnapshotInfoList *snapshots, *snapshot;
- AioContext *ctx = bdrv_get_aio_context(bs);
bool ok = false;
- aio_context_acquire(ctx);
if (bdrv_can_snapshot(bs)) {
ok = bdrv_query_snapshot_info_list(bs, &snapshots, NULL) == 0;
}
- aio_context_release(ctx);
if (!ok) {
continue;
}
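The HMP completion helper walks every BlockDriverState; without per-context
locking the loop body shrinks to the two query calls. A sketch of the
resulting loop, assuming the usual bdrv_first()/bdrv_next() iterator
(BdrvNextIterator) shown in the context above:

    BlockDriverState *bs;
    BdrvNextIterator it;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        SnapshotInfoList *snapshots;
        bool ok = false;

        /* Both calls now rely on the block layer's own thread safety;
         * the caller only needs the BQL. */
        if (bdrv_can_snapshot(bs)) {
            ok = bdrv_query_snapshot_info_list(bs, &snapshots, NULL) == 0;
        }
        if (!ok) {
            continue;
        }
        /* ... offer each snapshot name as a completion candidate ... */
    }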
diff --git a/migration/savevm.c b/migration/savevm.c
index eec5503..1b9ab7b 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -3049,7 +3049,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
int saved_vm_running;
uint64_t vm_state_size;
g_autoptr(GDateTime) now = g_date_time_new_now_local();
- AioContext *aio_context;
GLOBAL_STATE_CODE();
@@ -3092,7 +3091,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
if (bs == NULL) {
return false;
}
- aio_context = bdrv_get_aio_context(bs);
saved_vm_running = runstate_is_running();
@@ -3101,8 +3099,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
bdrv_drain_all_begin();
- aio_context_acquire(aio_context);
-
memset(sn, 0, sizeof(*sn));
/* fill auxiliary fields */
@@ -3139,14 +3135,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
goto the_end;
}
- /* The bdrv_all_create_snapshot() call that follows acquires the AioContext
- * for itself. BDRV_POLL_WHILE() does not support nested locking because
- * it only releases the lock once. Therefore synchronous I/O will deadlock
- * unless we release the AioContext before bdrv_all_create_snapshot().
- */
- aio_context_release(aio_context);
- aio_context = NULL;
-
ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
has_devices, devices, errp);
if (ret < 0) {
@@ -3157,10 +3145,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
ret = 0;
the_end:
- if (aio_context) {
- aio_context_release(aio_context);
- }
-
bdrv_drain_all_end();
if (saved_vm_running) {
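The removed comment documented a now-dead constraint: BDRV_POLL_WHILE()
released the AioContext lock exactly once, so holding it across
bdrv_all_create_snapshot()'s synchronous I/O would deadlock. With the lock
gone, save_snapshot() keeps a single drained section around the whole
operation. A condensed sketch of the resulting shape (error handling elided):

    bdrv_drain_all_begin();

    /* ... write VM state to the snapshot device ... */

    /* No lock to drop first: synchronous I/O inside this call is safe. */
    ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
                                   has_devices, devices, errp);

    bdrv_drain_all_end();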
@@ -3258,7 +3242,6 @@ bool load_snapshot(const char *name, const char *vmstate,
QEMUSnapshotInfo sn;
QEMUFile *f;
int ret;
- AioContext *aio_context;
MigrationIncomingState *mis = migration_incoming_get_current();
if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
@@ -3278,12 +3261,9 @@ bool load_snapshot(const char *name, const char *vmstate,
if (!bs_vm_state) {
return false;
}
- aio_context = bdrv_get_aio_context(bs_vm_state);
/* Don't even try to load empty VM states */
- aio_context_acquire(aio_context);
ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
- aio_context_release(aio_context);
if (ret < 0) {
return false;
} else if (sn.vm_state_size == 0) {
@@ -3320,10 +3300,8 @@ bool load_snapshot(const char *name, const char *vmstate,
ret = -EINVAL;
goto err_drain;
}
- aio_context_acquire(aio_context);
ret = qemu_loadvm_state(f);
migration_incoming_state_destroy();
- aio_context_release(aio_context);
bdrv_drain_all_end();
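Both entry points already run in the main thread under the BQL (note the
GLOBAL_STATE_CODE() marker in save_snapshot() above); after this patch that is
the only locking precondition left. A minimal caller sketch, assuming
load_snapshot()'s parameter list matches the hunks above (name, vmstate,
has_devices, devices, errp); "snap0" is a made-up snapshot tag:

    Error *err = NULL;

    /* Main thread, BQL held; the block layer's internal thread safety
     * covers everything load_snapshot() does with the disks. */
    if (!load_snapshot("snap0", NULL, false, NULL, &err)) {
        error_report_err(err);
    }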