commit e018ccb3fbfa1d446ca7b49266c8a80dce40612d
tree   335595d6245022743679088f2de7b0d6d5c8a6d6
parent c6a2225a5a5e272edc0304845e32ea436ea1043a
parent a6b257a08e3d72219f03e461a52152672fec0612
author    Peter Maydell <peter.maydell@linaro.org>  2019-08-16 16:43:46 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2019-08-16 16:43:46 +0100
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block layer patches:

- file-posix: Fix O_DIRECT alignment detection
- Fixes for concurrent block jobs
- block-backend: Queue requests while drained (fix IDE vs. job crashes)
- qemu-img convert: Deprecate using -n and -o together
- iotests: Migration tests with filter nodes
- iotests: More media change tests

# gpg: Signature made Fri 16 Aug 2019 10:29:18 BST
# gpg:                using RSA key 7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream:
  file-posix: Handle undetectable alignment
  qemu-img convert: Deprecate using -n and -o together
  block-backend: Queue requests while drained
  mirror: Keep mirror_top_bs drained after dropping permissions
  block: Remove blk_pread_unthrottled()
  iotests: Add test for concurrent stream/commit
  tests: Test mid-drain bdrv_replace_child_noperm()
  tests: Test polling in bdrv_drop_intermediate()
  block: Reduce (un)drains when replacing a child
  block: Keep subtree drained in drop_intermediate
  block: Simplify bdrv_filter_default_perms()
  iotests: Test migration with all kinds of filter nodes
  iotests: Move migration helpers to iotests.py
  iotests/118: Add -blockdev based tests
  iotests/118: Create test classes dynamically
  iotests/118: Test media change for scsi-cd

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c          1
-rw-r--r--  block/block-backend.c  69
-rw-r--r--  block/commit.c          2
-rw-r--r--  block/file-posix.c     36
-rw-r--r--  block/mirror.c          7
5 files changed, 84 insertions(+), 31 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index b26c22c..4743c8f 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -644,6 +644,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto error;
}
+ blk_set_disable_request_queuing(job->target, true);
job->on_source_error = on_source_error;
job->on_target_error = on_target_error;
diff --git a/block/block-backend.c b/block/block-backend.c
index 84e76bf..1c605d5 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -81,6 +81,9 @@ struct BlockBackend {
QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
int quiesce_counter;
+ CoQueue queued_requests;
+ bool disable_request_queuing;
+
VMChangeStateEntry *vmsh;
bool force_allow_inactivate;
@@ -341,6 +344,7 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
block_acct_init(&blk->stats);
+ qemu_co_queue_init(&blk->queued_requests);
notifier_list_init(&blk->remove_bs_notifiers);
notifier_list_init(&blk->insert_bs_notifiers);
QLIST_INIT(&blk->aio_notifiers);
@@ -1098,6 +1102,11 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
blk->allow_aio_context_change = allow;
}
+void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
+{
+ blk->disable_request_queuing = disable;
+}
+
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
{
@@ -1129,13 +1138,24 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
return 0;
}
+static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
+{
+ if (blk->quiesce_counter && !blk->disable_request_queuing) {
+ qemu_co_queue_wait(&blk->queued_requests, NULL);
+ }
+}
+
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret;
- BlockDriverState *bs = blk_bs(blk);
+ BlockDriverState *bs;
+
+ blk_wait_while_drained(blk);
+ /* Call blk_bs() only after waiting, the graph may have changed */
+ bs = blk_bs(blk);
trace_blk_co_preadv(blk, bs, offset, bytes, flags);
ret = blk_check_byte_request(blk, offset, bytes);
@@ -1161,8 +1181,12 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
BdrvRequestFlags flags)
{
int ret;
- BlockDriverState *bs = blk_bs(blk);
+ BlockDriverState *bs;
+
+ blk_wait_while_drained(blk);
+ /* Call blk_bs() only after waiting, the graph may have changed */
+ bs = blk_bs(blk);
trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
ret = blk_check_byte_request(blk, offset, bytes);
@@ -1239,22 +1263,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
return rwco.ret;
}
-int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
- int count)
-{
- int ret;
-
- ret = blk_check_byte_request(blk, offset, count);
- if (ret < 0) {
- return ret;
- }
-
- blk_root_drained_begin(blk->root);
- ret = blk_pread(blk, offset, buf, count);
- blk_root_drained_end(blk->root, NULL);
- return ret;
-}
-
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int bytes, BdrvRequestFlags flags)
{
@@ -1367,6 +1375,12 @@ static void blk_aio_read_entry(void *opaque)
BlkRwCo *rwco = &acb->rwco;
QEMUIOVector *qiov = rwco->iobuf;
+ if (rwco->blk->quiesce_counter) {
+ blk_dec_in_flight(rwco->blk);
+ blk_wait_while_drained(rwco->blk);
+ blk_inc_in_flight(rwco->blk);
+ }
+
assert(qiov->size == acb->bytes);
rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
@@ -1379,6 +1393,12 @@ static void blk_aio_write_entry(void *opaque)
BlkRwCo *rwco = &acb->rwco;
QEMUIOVector *qiov = rwco->iobuf;
+ if (rwco->blk->quiesce_counter) {
+ blk_dec_in_flight(rwco->blk);
+ blk_wait_while_drained(rwco->blk);
+ blk_inc_in_flight(rwco->blk);
+ }
+
assert(!qiov || qiov->size == acb->bytes);
rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
qiov, rwco->flags);
@@ -1500,6 +1520,8 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
+ blk_wait_while_drained(blk);
+
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -1540,7 +1562,11 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
- int ret = blk_check_byte_request(blk, offset, bytes);
+ int ret;
+
+ blk_wait_while_drained(blk);
+
+ ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
@@ -1550,6 +1576,8 @@ int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
int blk_co_flush(BlockBackend *blk)
{
+ blk_wait_while_drained(blk);
+
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
@@ -2250,6 +2278,9 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
+ while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+ /* Resume all queued requests */
+ }
}
}
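
Taken together, the block-backend changes implement a simple gate: while
quiesce_counter is non-zero, incoming requests park on queued_requests, and
blk_root_drained_end() wakes them all once the drained section ends. A minimal
standalone analogue of the pattern, sketched with POSIX threads instead of
QEMU's coroutines (all names below are illustrative, not QEMU API):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for BlockBackend's quiesce_counter and
 * queued_requests; a condition variable plays the role of the CoQueue. */
typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  resume;
    int             quiesce_counter;
    int             disable_request_queuing;
} Gate;

static Gate g = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };

/* Analogue of blk_wait_while_drained(): park until the drain ends. */
static void gate_wait_while_drained(Gate *gt)
{
    pthread_mutex_lock(&gt->lock);
    while (gt->quiesce_counter && !gt->disable_request_queuing) {
        pthread_cond_wait(&gt->resume, &gt->lock);
    }
    pthread_mutex_unlock(&gt->lock);
}

/* Analogues of blk_root_drained_begin()/blk_root_drained_end(). */
static void gate_drained_begin(Gate *gt)
{
    pthread_mutex_lock(&gt->lock);
    gt->quiesce_counter++;
    pthread_mutex_unlock(&gt->lock);
}

static void gate_drained_end(Gate *gt)
{
    pthread_mutex_lock(&gt->lock);
    if (--gt->quiesce_counter == 0) {
        pthread_cond_broadcast(&gt->resume);  /* resume all queued requests */
    }
    pthread_mutex_unlock(&gt->lock);
}

static void *request(void *arg)
{
    gate_wait_while_drained(&g);
    puts("request executed after drained section ended");
    return NULL;
}

int main(void)
{
    pthread_t t;
    gate_drained_begin(&g);            /* the request below gets queued */
    pthread_create(&t, NULL, request, NULL);
    gate_drained_end(&g);              /* releases the queued request */
    pthread_join(t, NULL);
    return 0;
}

Note the extra step the real patch needs in blk_aio_read_entry() and
blk_aio_write_entry(): the request's in_flight reference is dropped before
parking and re-taken afterwards, since bdrv_drained_begin() waits for
in-flight requests and a parked request would otherwise deadlock the drain.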
diff --git a/block/commit.c b/block/commit.c
index 2c5a6d4..408ae15 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -350,6 +350,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto fail;
}
+ blk_set_disable_request_queuing(s->base, true);
s->base_bs = base;
/* Required permissions are already taken with block_job_add_bdrv() */
@@ -358,6 +359,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
if (ret < 0) {
goto fail;
}
+ blk_set_disable_request_queuing(s->top, true);
s->backing_file_str = g_strdup(backing_file_str);
s->on_error = on_error;
diff --git a/block/file-posix.c b/block/file-posix.c
index 4479cc7..b8b4dad 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -323,6 +323,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
BDRVRawState *s = bs->opaque;
char *buf;
size_t max_align = MAX(MAX_BLOCKSIZE, getpagesize());
+ size_t alignments[] = {1, 512, 1024, 2048, 4096};
/* For SCSI generic devices the alignment is not really used.
With buffered I/O, we don't have any restrictions. */
@@ -349,25 +350,38 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
}
#endif
- /* If we could not get the sizes so far, we can only guess them */
- if (!s->buf_align) {
+ /*
+ * If we could not get the sizes so far, we can only guess them. First try
+ * to detect request alignment, since it is more likely to succeed. Then
+ * try to detect buf_align, which cannot be detected in some cases (e.g.
+ * Gluster). If buf_align cannot be detected, we fall back to the value of
+ * request_alignment.
+ */
+
+ if (!bs->bl.request_alignment) {
+ int i;
size_t align;
- buf = qemu_memalign(max_align, 2 * max_align);
- for (align = 512; align <= max_align; align <<= 1) {
- if (raw_is_io_aligned(fd, buf + align, max_align)) {
- s->buf_align = align;
+ buf = qemu_memalign(max_align, max_align);
+ for (i = 0; i < ARRAY_SIZE(alignments); i++) {
+ align = alignments[i];
+ if (raw_is_io_aligned(fd, buf, align)) {
+ /* Fallback to safe value. */
+ bs->bl.request_alignment = (align != 1) ? align : max_align;
break;
}
}
qemu_vfree(buf);
}
- if (!bs->bl.request_alignment) {
+ if (!s->buf_align) {
+ int i;
size_t align;
- buf = qemu_memalign(s->buf_align, max_align);
- for (align = 512; align <= max_align; align <<= 1) {
- if (raw_is_io_aligned(fd, buf, align)) {
- bs->bl.request_alignment = align;
+ buf = qemu_memalign(max_align, 2 * max_align);
+ for (i = 0; i < ARRAY_SIZE(alignments); i++) {
+ align = alignments[i];
+ if (raw_is_io_aligned(fd, buf + align, max_align)) {
+ /* Fallback to request_alignment. */
+ s->buf_align = (align != 1) ? align : bs->bl.request_alignment;
break;
}
}
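
The new detection strategy reads at each candidate size from the alignments[]
table and takes the first O_DIRECT read that succeeds; if only the probe with
alignment 1 works, nothing real was detected and the code falls back to a safe
value instead of misreporting an alignment of 1. A rough standalone sketch of
the request_alignment probe (Linux-only; the file path is hypothetical, and
QEMU's raw_is_io_aligned() is more careful about which errno values count as
an alignment failure):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Crude probe: does an O_DIRECT read of len bytes at offset 0 succeed? */
static int io_aligned(int fd, void *buf, size_t len)
{
    return pread(fd, buf, len, 0) >= 0;
}

int main(void)
{
    size_t alignments[] = {1, 512, 1024, 2048, 4096};
    size_t max_align = 4096;    /* stand-in for MAX(MAX_BLOCKSIZE, page size) */
    size_t request_alignment = 0;
    void *buf;
    size_t i;

    int fd = open("/tmp/probe-me", O_RDONLY | O_DIRECT);   /* hypothetical */
    if (fd < 0 || posix_memalign(&buf, max_align, max_align) != 0) {
        perror("setup");
        return 1;
    }
    /* Buffer alignment is held at max_align; only the length varies. */
    for (i = 0; i < sizeof(alignments) / sizeof(alignments[0]); i++) {
        if (io_aligned(fd, buf, alignments[i])) {
            request_alignment = alignments[i] != 1 ? alignments[i] : max_align;
            break;
        }
    }
    printf("request_alignment = %zu\n", request_alignment);
    free(buf);
    close(fd);
    return 0;
}

The buf_align probe at the end of the hunk is the mirror image of this loop:
it keeps the read length fixed at max_align and varies the buffer offset
instead, which is why it allocates 2 * max_align and reads into buf + align.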
diff --git a/block/mirror.c b/block/mirror.c
index 9f5c59e..9b36391 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -656,7 +656,10 @@ static int mirror_exit_common(Job *job)
s->target = NULL;
/* We don't access the source any more. Dropping any WRITE/RESIZE is
- * required before it could become a backing file of target_bs. */
+ * required before it could become a backing file of target_bs. Not having
+ * these permissions any more means that we can't allow any new requests on
+ * mirror_top_bs from now on, so keep it drained. */
+ bdrv_drained_begin(mirror_top_bs);
bs_opaque->stop = true;
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
&error_abort);
@@ -724,6 +727,7 @@ static int mirror_exit_common(Job *job)
bs_opaque->job = NULL;
bdrv_drained_end(src);
+ bdrv_drained_end(mirror_top_bs);
s->in_drain = false;
bdrv_unref(mirror_top_bs);
bdrv_unref(src);
@@ -1632,6 +1636,7 @@ static BlockJob *mirror_start_job(
blk_set_force_allow_inactivate(s->target);
}
blk_set_allow_aio_context_change(s->target, true);
+ blk_set_disable_request_queuing(s->target, true);
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
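
For mirror, the fix is purely one of ordering. Reconstructed from the hunks
above, mirror_exit_common() now brackets the permission drop in a drained
section (the function names are the ones from this diff; the surrounding job
teardown is elided, so this is an illustrative outline, not compilable on its
own):

/* No new requests can reach mirror_top_bs from here on. */
bdrv_drained_begin(mirror_top_bs);

/* Dropping WRITE/RESIZE: after this, a request arriving through
 * mirror_top_bs could no longer be allowed, hence the drain above. */
bs_opaque->stop = true;
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                         &error_abort);

/* ... graph manipulation and job teardown happen while still drained ... */

bdrv_drained_end(src);
bdrv_drained_end(mirror_top_bs);   /* only once teardown is complete */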