From 3202d8e4047d5f4def96b5404633307c86e8db95 Mon Sep 17 00:00:00 2001 From: Michael Tokarev Date: Fri, 14 Jul 2023 14:06:05 +0300 Subject: block: spelling fixes Signed-off-by: Michael Tokarev Reviewed-by: Eric Blake --- block/block-copy.c | 4 ++-- block/export/vduse-blk.c | 2 +- block/export/vhost-user-blk-server.c | 2 +- block/export/vhost-user-blk-server.h | 2 +- block/file-posix.c | 8 ++++---- block/graph-lock.c | 2 +- block/io.c | 2 +- block/linux-aio.c | 2 +- block/mirror.c | 2 +- block/qcow2-refcount.c | 2 +- block/vhdx.c | 2 +- block/vhdx.h | 4 ++-- 12 files changed, 17 insertions(+), 17 deletions(-) (limited to 'block') diff --git a/block/block-copy.c b/block/block-copy.c index e13d7bc..1c60368 100644 --- a/block/block-copy.c +++ b/block/block-copy.c @@ -67,7 +67,7 @@ typedef struct BlockCopyCallState { QLIST_ENTRY(BlockCopyCallState) list; /* - * Fields that report information about return values and erros. + * Fields that report information about return values and errors. * Protected by lock in BlockCopyState. */ bool error_is_read; @@ -462,7 +462,7 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool, * Do copy of cluster-aligned chunk. Requested region is allowed to exceed * s->len only to cover last cluster when s->len is not aligned to clusters. * - * No sync here: nor bitmap neighter intersecting requests handling, only copy. + * No sync here: neither bitmap nor intersecting requests handling, only copy. * * @method is an in-out argument, so that copy_range can be either extended to * a full-size buffer or disabled if the copy_range attempt fails. 
diff --git a/block/export/vduse-blk.c b/block/export/vduse-blk.c index 83b0554..172f73c 100644 --- a/block/export/vduse-blk.c +++ b/block/export/vduse-blk.c @@ -138,7 +138,7 @@ static void vduse_blk_enable_queue(VduseDev *dev, VduseVirtq *vq) aio_set_fd_handler(vblk_exp->export.ctx, vduse_queue_get_fd(vq), on_vduse_vq_kick, NULL, NULL, NULL, vq); - /* Make sure we don't miss any kick afer reconnecting */ + /* Make sure we don't miss any kick after reconnecting */ eventfd_write(vduse_queue_get_fd(vq), 1); } diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index f7b5073..fe2cee3 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -1,5 +1,5 @@ /* - * Sharing QEMU block devices via vhost-user protocal + * Sharing QEMU block devices via vhost-user protocol * * Parts of the code based on nbd/server.c. * diff --git a/block/export/vhost-user-blk-server.h b/block/export/vhost-user-blk-server.h index fcf46fc..77fb5c0 100644 --- a/block/export/vhost-user-blk-server.h +++ b/block/export/vhost-user-blk-server.h @@ -1,5 +1,5 @@ /* - * Sharing QEMU block devices via vhost-user protocal + * Sharing QEMU block devices via vhost-user protocol * * Copyright (c) Coiby Xu . * Copyright (c) 2020 Red Hat, Inc. diff --git a/block/file-posix.c b/block/file-posix.c index b16e9c2..4757914 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -1159,9 +1159,9 @@ static int raw_reopen_prepare(BDRVReopenState *state, * As part of reopen prepare we also want to create new fd by * raw_reconfigure_getfd(). But it wants updated "perm", when in * bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to - * permission update. Happily, permission update is always a part (a seprate - * stage) of bdrv_reopen_multiple() so we can rely on this fact and - * reconfigure fd in raw_check_perm(). + * permission update. 
Happily, permission update is always a part + * (a separate stage) of bdrv_reopen_multiple() so we can rely on this + * fact and reconfigure fd in raw_check_perm(). */ s->reopen_state = state; @@ -3374,7 +3374,7 @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret) * of an array of zone descriptors. * zones is an array of zone descriptors to hold zone information on reply; * offset can be any byte within the entire size of the device; - * nr_zones is the maxium number of sectors the command should operate on. + * nr_zones is the maximum number of sectors the command should operate on. */ #if defined(CONFIG_BLKZONED) static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset, diff --git a/block/graph-lock.c b/block/graph-lock.c index 5e66f01..f357a2c 100644 --- a/block/graph-lock.c +++ b/block/graph-lock.c @@ -95,7 +95,7 @@ static uint32_t reader_count(void) QEMU_LOCK_GUARD(&aio_context_list_lock); - /* rd can temporarly be negative, but the total will *always* be >= 0 */ + /* rd can temporarily be negative, but the total will *always* be >= 0 */ rd = orphaned_reader_count; QTAILQ_FOREACH(brdv_graph, &aio_context_list, next_aio) { rd += qatomic_read(&brdv_graph->reader_count); diff --git a/block/io.c b/block/io.c index 76e7df1..19edab5 100644 --- a/block/io.c +++ b/block/io.c @@ -342,7 +342,7 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, * timer callback), it is a bug in the caller that should be fixed. 
*/ assert(data.done); - /* Reaquire the AioContext of bs if we dropped it */ + /* Reacquire the AioContext of bs if we dropped it */ if (ctx != co_ctx) { aio_context_acquire(ctx); } diff --git a/block/linux-aio.c b/block/linux-aio.c index 561c71a..1a51503 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -227,7 +227,7 @@ static void qemu_laio_process_completions(LinuxAioState *s) /* If we are nested we have to notify the level above that we are done * by setting event_max to zero, upper level will then jump out of it's - * own `for` loop. If we are the last all counters droped to zero. */ + * own `for` loop. If we are the last all counters dropped to zero. */ s->event_max = 0; s->event_idx = 0; } diff --git a/block/mirror.c b/block/mirror.c index e213a89..aae4beb 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -502,7 +502,7 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s) job_pause_point(&s->common.job); - /* Find the number of consective dirty chunks following the first dirty + /* Find the number of consecutive dirty chunks following the first dirty * one, and wait for in flight requests in them. */ bdrv_dirty_bitmap_lock(s->dirty_bitmap); while (nb_chunks * s->granularity < s->buf_size) { diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c index 5095e99..996d121 100644 --- a/block/qcow2-refcount.c +++ b/block/qcow2-refcount.c @@ -2645,7 +2645,7 @@ rebuild_refcount_structure(BlockDriverState *bs, BdrvCheckResult *res, * repeat all this until the reftable stops growing. * * (This loop will terminate, because with every cluster the - * reftable grows, it can accomodate a multitude of more refcounts, + * reftable grows, it can accommodate a multitude of more refcounts, * so that at some point this must be able to cover the reftable * and all refblocks describing it.) 
* diff --git a/block/vhdx.c b/block/vhdx.c index f2c3a80..a67edcc 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -1077,7 +1077,7 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, goto fail; } - /* endian convert populated BAT field entires */ + /* endian convert populated BAT field entries */ for (i = 0; i < s->bat_entries; i++) { s->bat[i] = le64_to_cpu(s->bat[i]); } diff --git a/block/vhdx.h b/block/vhdx.h index 7db746c..455a627 100644 --- a/block/vhdx.h +++ b/block/vhdx.h @@ -212,7 +212,7 @@ typedef struct QEMU_PACKED VHDXLogDataSector { uint32_t sequence_high; /* 4 MSB of 8 byte sequence_number */ uint8_t data[4084]; /* raw data, bytes 8-4091 (inclusive). see the data descriptor field for the - other mising bytes */ + other missing bytes */ uint32_t sequence_low; /* 4 LSB of 8 byte sequence_number */ } VHDXLogDataSector; @@ -257,7 +257,7 @@ typedef struct QEMU_PACKED VHDXMetadataTableHeader { #define VHDX_META_FLAGS_IS_USER 0x01 /* max 1024 entries */ #define VHDX_META_FLAGS_IS_VIRTUAL_DISK 0x02 /* virtual disk metadata if set, - otherwise file metdata */ + otherwise file metadata */ #define VHDX_META_FLAGS_IS_REQUIRED 0x04 /* parse must understand this entry to open the file */ typedef struct QEMU_PACKED VHDXMetadataTableEntry { -- cgit v1.1