author     Kevin Wolf <kwolf@redhat.com>    2025-06-25 10:50:19 +0200
committer  Kevin Wolf <kwolf@redhat.com>    2025-07-14 17:12:35 +0200
commit     d402da1360c2240e81f0e5fc80ddbfc6238e0da8
tree       b7f914e65b8e846d221abc96d00cf815d42621c5
parent     430e2be81e0970ee06c1c956f7698262b2ec514f
file-posix: Fix aio=threads performance regression after enabling FUA
For aio=threads, we currently don't implement REQ_FUA in any useful way,
but just issue a separate raw_co_flush_to_disk() call. This changes
behaviour compared to the old state, which used bdrv_co_flush() with its
optimisations. As a quick fix, call bdrv_co_flush() again, like before.
Eventually, we can use pwritev2() to make use of RWF_DSYNC if available,
but we'll still have to keep this code path as a fallback, so this fix
is required either way.
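
As a rough illustration of that future direction only (this is not part of
the patch; the helper name and the exact errno handling are assumptions),
a FUA write via pwritev2() with RWF_DSYNC could look something like:

    /* Hypothetical sketch: pwritev2() + RWF_DSYNC as a FUA-style write. */
    #define _GNU_SOURCE
    #include <errno.h>
    #include <sys/uio.h>

    static ssize_t fua_pwritev(int fd, const struct iovec *iov, int iovcnt,
                               off_t offset)
    {
    #ifdef RWF_DSYNC
        /* Write the data and make it durable in a single syscall. */
        ssize_t ret = pwritev2(fd, iov, iovcnt, offset, RWF_DSYNC);
        if (ret >= 0 || (errno != EOPNOTSUPP && errno != ENOSYS)) {
            return ret;
        }
    #endif
        /*
         * Fallback: plain write; the caller still has to flush separately,
         * which is why the bdrv_co_flush() path has to stay as a fallback.
         */
        return pwritev(fd, iov, iovcnt, offset);
    }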
While the fix itself is a one-liner, some new graph locking annotations
are needed to convince TSA that the locking is correct.
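
For reference, GRAPH_RDLOCK builds on clang's Thread Safety Analysis
(-Wthread-safety). The stand-alone sketch below shows the general mechanism
using raw clang attributes; QEMU wraps these in its own macros, so the
names and spellings here are illustrative assumptions, not QEMU code:

    /* A struct acting as the "capability" (lock) that TSA tracks. */
    struct __attribute__((capability("graph"))) GraphLock { int unused; };

    extern struct GraphLock graph_lock;

    void graph_rdlock(void)
        __attribute__((acquire_shared_capability(graph_lock)));
    void graph_rdunlock(void)
        __attribute__((release_shared_capability(graph_lock)));

    /* Analogue of a GRAPH_RDLOCK-annotated function such as raw_co_prw(). */
    void do_io(void)
        __attribute__((requires_shared_capability(graph_lock)));

    void caller(void)
    {
        graph_rdlock();
        do_io();          /* OK: reader lock held; otherwise TSA warns */
        graph_rdunlock();
    }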
Cc: qemu-stable@nongnu.org
Fixes: 984a32f17e8d ("file-posix: Support FUA writes")
Buglink: https://issues.redhat.com/browse/RHEL-96854
Reported-by: Tingting Mao <timao@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250625085019.27735-1-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
-rw-r--r--   block/file-posix.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/block/file-posix.c b/block/file-posix.c
index 9b5f08c..8c73867 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -2564,9 +2564,9 @@ static inline bool raw_check_linux_aio(BDRVRawState *s)
 }
 #endif
 
-static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
-                                   uint64_t bytes, QEMUIOVector *qiov, int type,
-                                   int flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, uint64_t bytes,
+           QEMUIOVector *qiov, int type, int flags)
 {
     BDRVRawState *s = bs->opaque;
     RawPosixAIOData acb;
@@ -2625,7 +2625,7 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
     ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
         /* TODO Use pwritev2() instead if it's available */
-        ret = raw_co_flush_to_disk(bs);
+        ret = bdrv_co_flush(bs);
     }
     goto out; /* Avoid the compiler err of unused label */
 
@@ -2660,16 +2660,16 @@ out:
     return ret;
 }
 
-static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
-                                      int64_t bytes, QEMUIOVector *qiov,
-                                      BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+              QEMUIOVector *qiov, BdrvRequestFlags flags)
 {
     return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ, flags);
 }
 
-static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
-                                       int64_t bytes, QEMUIOVector *qiov,
-                                       BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+               QEMUIOVector *qiov, BdrvRequestFlags flags)
 {
     return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE, flags);
 }
@@ -3606,10 +3606,11 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
 #endif
 
 #if defined(CONFIG_BLKZONED)
-static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
-                                           int64_t *offset,
-                                           QEMUIOVector *qiov,
-                                           BdrvRequestFlags flags) {
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_zone_append(BlockDriverState *bs,
+                   int64_t *offset,
+                   QEMUIOVector *qiov,
+                   BdrvRequestFlags flags) {
     assert(flags == 0);
     int64_t zone_size_mask = bs->bl.zone_size - 1;
     int64_t iov_len = 0;