author     Kevin Wolf <kwolf@redhat.com>  2010-03-15 17:38:05 +0100
committer  Kevin Wolf <kwolf@redhat.com>  2010-04-23 16:08:46 +0200
commit     8252278afb4b646a5a21cf8c30bb0a0066825078 (patch)
tree       a301fbdd8a2d48e6b077ed7a2487c922440a6218 /block
parent     8b9b0cc2fd1b866c0ce6c7f7385d840aad8b4c2c (diff)
qcow2: Trigger blkdebug events

This adds blkdebug events to qcow2 to allow injecting I/O errors in
specific places.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
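(Usage sketch, not part of the patch: together with the blkdebug driver from the parent commit, each event name added below marks a point where an error can be injected. Assuming the blkdebug rule-file syntax of that driver, a rule file such as

    [inject-error]
    event = "l2_load"
    errno = "5"
    once = "on"

attached with -drive file=blkdebug:blkdebug.conf:test.qcow2 would fail the first qcow2 L2 table load with EIO; the file names here are placeholders.)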
Diffstat (limited to 'block')
-rw-r--r--  block/blkdebug.c        42
-rw-r--r--  block/qcow2-cluster.c   15
-rw-r--r--  block/qcow2-refcount.c  18
-rw-r--r--  block/qcow2.c            6
4 files changed, 81 insertions(+), 0 deletions(-)
diff --git a/block/blkdebug.c b/block/blkdebug.c
index b813bfa..643c397 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -139,6 +139,48 @@ static QemuOptsList *config_groups[] = {
};
static const char *event_names[BLKDBG_EVENT_MAX] = {
+ [BLKDBG_L1_UPDATE] = "l1_update",
+ [BLKDBG_L1_GROW_ALLOC_TABLE] = "l1_grow.alloc_table",
+ [BLKDBG_L1_GROW_WRITE_TABLE] = "l1_grow.write_table",
+ [BLKDBG_L1_GROW_ACTIVATE_TABLE] = "l1_grow.activate_table",
+
+ [BLKDBG_L2_LOAD] = "l2_load",
+ [BLKDBG_L2_UPDATE] = "l2_update",
+ [BLKDBG_L2_UPDATE_COMPRESSED] = "l2_update_compressed",
+ [BLKDBG_L2_ALLOC_COW_READ] = "l2_alloc.cow_read",
+ [BLKDBG_L2_ALLOC_WRITE] = "l2_alloc.write",
+
+ [BLKDBG_READ] = "read",
+ [BLKDBG_READ_AIO] = "read_aio",
+ [BLKDBG_READ_BACKING] = "read_backing",
+ [BLKDBG_READ_BACKING_AIO] = "read_backing_aio",
+ [BLKDBG_READ_COMPRESSED] = "read_compressed",
+
+ [BLKDBG_WRITE_AIO] = "write_aio",
+ [BLKDBG_WRITE_COMPRESSED] = "write_compressed",
+
+ [BLKDBG_VMSTATE_LOAD] = "vmstate_load",
+ [BLKDBG_VMSTATE_SAVE] = "vmstate_save",
+
+ [BLKDBG_COW_READ] = "cow_read",
+ [BLKDBG_COW_WRITE] = "cow_write",
+
+ [BLKDBG_REFTABLE_LOAD] = "reftable_load",
+ [BLKDBG_REFTABLE_GROW] = "reftable_grow",
+
+ [BLKDBG_REFBLOCK_LOAD] = "refblock_load",
+ [BLKDBG_REFBLOCK_UPDATE] = "refblock_update",
+ [BLKDBG_REFBLOCK_UPDATE_PART] = "refblock_update_part",
+ [BLKDBG_REFBLOCK_ALLOC] = "refblock_alloc",
+ [BLKDBG_REFBLOCK_ALLOC_HOOKUP] = "refblock_alloc.hookup",
+ [BLKDBG_REFBLOCK_ALLOC_WRITE] = "refblock_alloc.write",
+ [BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS] = "refblock_alloc.write_blocks",
+ [BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE] = "refblock_alloc.write_table",
+ [BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE] = "refblock_alloc.switch_table",
+
+ [BLKDBG_CLUSTER_ALLOC] = "cluster_alloc",
+ [BLKDBG_CLUSTER_ALLOC_BYTES] = "cluster_alloc_bytes",
+ [BLKDBG_CLUSTER_FREE] = "cluster_free",
};
static int get_event_by_name(const char *name, BlkDebugEvent *event)
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index c7057b1..8cb4b38 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -54,12 +54,14 @@ int qcow2_grow_l1_table(BlockDriverState *bs, int min_size)
memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
/* write new table (align to cluster) */
+ BLKDBG_EVENT(s->hd, BLKDBG_L1_GROW_ALLOC_TABLE);
new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
if (new_l1_table_offset < 0) {
qemu_free(new_l1_table);
return new_l1_table_offset;
}
+ BLKDBG_EVENT(s->hd, BLKDBG_L1_GROW_WRITE_TABLE);
for(i = 0; i < s->l1_size; i++)
new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
@@ -69,6 +71,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, int min_size)
new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
/* set new table */
+ BLKDBG_EVENT(s->hd, BLKDBG_L1_GROW_ACTIVATE_TABLE);
cpu_to_be32w((uint32_t*)data, new_l1_size);
cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
ret = bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,sizeof(data));
@@ -170,6 +173,8 @@ static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
min_index = l2_cache_new_entry(bs);
l2_table = s->l2_cache + (min_index << s->l2_bits);
+
+ BLKDBG_EVENT(s->hd, BLKDBG_L2_LOAD);
if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
s->l2_size * sizeof(uint64_t))
return NULL;
@@ -195,6 +200,7 @@ static int write_l1_entry(BDRVQcowState *s, int l1_index)
buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
}
+ BLKDBG_EVENT(s->hd, BLKDBG_L1_UPDATE);
if (bdrv_pwrite(s->hd, s->l1_table_offset + 8 * l1_start_index,
buf, sizeof(buf)) != sizeof(buf))
{
@@ -248,12 +254,14 @@ static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
} else {
/* if there was an old l2 table, read it from the disk */
+ BLKDBG_EVENT(s->hd, BLKDBG_L2_ALLOC_COW_READ);
if (bdrv_pread(s->hd, old_l2_offset,
l2_table, s->l2_size * sizeof(uint64_t)) !=
s->l2_size * sizeof(uint64_t))
return NULL;
}
/* write the l2 table to the file */
+ BLKDBG_EVENT(s->hd, BLKDBG_L2_ALLOC_WRITE);
if (bdrv_pwrite(s->hd, l2_offset,
l2_table, s->l2_size * sizeof(uint64_t)) !=
s->l2_size * sizeof(uint64_t))
@@ -335,6 +343,7 @@ static int qcow_read(BlockDriverState *bs, int64_t sector_num,
/* read from the base image */
n1 = qcow2_backing_read1(bs->backing_hd, sector_num, buf, n);
if (n1 > 0) {
+ BLKDBG_EVENT(s->hd, BLKDBG_READ_BACKING);
ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
if (ret < 0)
return -1;
@@ -347,6 +356,7 @@ static int qcow_read(BlockDriverState *bs, int64_t sector_num,
return -1;
memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
} else {
+ BLKDBG_EVENT(s->hd, BLKDBG_READ);
ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
if (ret != n * 512)
return -1;
@@ -371,6 +381,7 @@ static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
n = n_end - n_start;
if (n <= 0)
return 0;
+ BLKDBG_EVENT(s->hd, BLKDBG_COW_READ);
ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
if (ret < 0)
return ret;
@@ -380,6 +391,7 @@ static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
s->cluster_data, n, 1,
&s->aes_encrypt_key);
}
+ BLKDBG_EVENT(s->hd, BLKDBG_COW_WRITE);
ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
s->cluster_data, n);
if (ret < 0)
@@ -592,6 +604,7 @@ uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
/* compressed clusters never have the copied flag */
+ BLKDBG_EVENT(s->hd, BLKDBG_L2_UPDATE_COMPRESSED);
l2_table[l2_index] = cpu_to_be64(cluster_offset);
if (bdrv_pwrite(s->hd,
l2_offset + l2_index * sizeof(uint64_t),
@@ -615,6 +628,7 @@ static int write_l2_entries(BDRVQcowState *s, uint64_t *l2_table,
int end_offset = (8 * (l2_index + num) + 511) & ~511;
size_t len = end_offset - start_offset;
+ BLKDBG_EVENT(s->hd, BLKDBG_L2_UPDATE);
if (bdrv_pwrite(s->hd, l2_offset + start_offset, &l2_table[l2_start_index],
len) != len)
{
@@ -866,6 +880,7 @@ int qcow2_decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
sector_offset = coffset & 511;
csize = nb_csectors * 512 - sector_offset;
+ BLKDBG_EVENT(s->hd, BLKDBG_READ_COMPRESSED);
ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
if (ret < 0) {
return -1;
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 917fc88..47c9978 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -42,6 +42,7 @@ static int write_refcount_block(BDRVQcowState *s)
return 0;
}
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_UPDATE);
if (bdrv_pwrite(s->hd, s->refcount_block_cache_offset,
s->refcount_block_cache, size) != size)
{
@@ -63,6 +64,7 @@ int qcow2_refcount_init(BlockDriverState *bs)
refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
s->refcount_table = qemu_malloc(refcount_table_size2);
if (s->refcount_table_size > 0) {
+ BLKDBG_EVENT(s->hd, BLKDBG_REFTABLE_LOAD);
ret = bdrv_pread(s->hd, s->refcount_table_offset,
s->refcount_table, refcount_table_size2);
if (ret != refcount_table_size2)
@@ -93,6 +95,7 @@ static int load_refcount_block(BlockDriverState *bs,
write_refcount_block(s);
}
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_LOAD);
ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
s->cluster_size);
if (ret != s->cluster_size)
@@ -164,6 +167,8 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
unsigned int refcount_table_index;
int ret;
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_ALLOC);
+
/* Find the refcount block for the given cluster */
refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
@@ -239,6 +244,7 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
}
/* Now the new refcount block needs to be written to disk */
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_ALLOC_WRITE);
ret = bdrv_pwrite(s->hd, new_block, s->refcount_block_cache,
s->cluster_size);
if (ret < 0) {
@@ -248,6 +254,7 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
/* If the refcount table is big enough, just hook the block up there */
if (refcount_table_index < s->refcount_table_size) {
uint64_t data64 = cpu_to_be64(new_block);
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
ret = bdrv_pwrite(s->hd,
s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
&data64, sizeof(data64));
@@ -270,6 +277,8 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
* refcount table at once without producing an inconsistent state in
* between.
*/
+ BLKDBG_EVENT(s->hd, BLKDBG_REFTABLE_GROW);
+
/* Calculate the number of refcount blocks needed so far */
uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
uint64_t blocks_used = (s->free_cluster_index +
@@ -325,6 +334,7 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
}
/* Write refcount blocks to disk */
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
ret = bdrv_pwrite(s->hd, meta_offset, new_blocks,
blocks_clusters * s->cluster_size);
qemu_free(new_blocks);
@@ -337,6 +347,7 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
cpu_to_be64s(&new_table[i]);
}
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
ret = bdrv_pwrite(s->hd, table_offset, new_table,
table_size * sizeof(uint64_t));
if (ret < 0) {
@@ -351,6 +362,7 @@ static int64_t alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index)
uint8_t data[12];
cpu_to_be64w((uint64_t*)data, table_offset);
cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
ret = bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
data, sizeof(data));
if (ret < 0) {
@@ -400,6 +412,7 @@ static int write_refcount_block_entries(BDRVQcowState *s,
& ~(REFCOUNTS_PER_SECTOR - 1);
size = (last_index - first_index) << REFCOUNT_SHIFT;
+ BLKDBG_EVENT(s->hd, BLKDBG_REFBLOCK_UPDATE_PART);
if (bdrv_pwrite(s->hd,
refcount_block_offset + (first_index << REFCOUNT_SHIFT),
&s->refcount_block_cache[first_index], size) != size)
@@ -555,9 +568,11 @@ retry:
int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
{
+ BDRVQcowState *s = bs->opaque;
int64_t offset;
int ret;
+ BLKDBG_EVENT(s->hd, BLKDBG_CLUSTER_ALLOC);
offset = alloc_clusters_noref(bs, size);
ret = update_refcount(bs, offset, size, 1);
if (ret < 0) {
@@ -574,6 +589,7 @@ int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
int64_t offset, cluster_offset;
int free_in_cluster;
+ BLKDBG_EVENT(s->hd, BLKDBG_CLUSTER_ALLOC_BYTES);
assert(size > 0 && size <= s->cluster_size);
if (s->free_byte_offset == 0) {
s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size);
@@ -615,8 +631,10 @@ int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
void qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size)
{
+ BDRVQcowState *s = bs->opaque;
int ret;
+ BLKDBG_EVENT(s->hd, BLKDBG_CLUSTER_FREE);
ret = update_refcount(bs, offset, size, -1);
if (ret < 0) {
fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
diff --git a/block/qcow2.c b/block/qcow2.c
index 4e97eb6..80c99af 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -429,6 +429,7 @@ static void qcow_aio_read_cb(void *opaque, int ret)
acb->hd_iov.iov_base = (void *)acb->buf;
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
+ BLKDBG_EVENT(s->hd, BLKDBG_READ_BACKING_AIO);
acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
&acb->hd_qiov, acb->cur_nr_sectors,
qcow_aio_read_cb, acb);
@@ -464,6 +465,7 @@ static void qcow_aio_read_cb(void *opaque, int ret)
acb->hd_iov.iov_base = (void *)acb->buf;
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
+ BLKDBG_EVENT(s->hd, BLKDBG_READ_AIO);
acb->hd_aiocb = bdrv_aio_readv(s->hd,
(acb->cluster_offset >> 9) + index_in_cluster,
&acb->hd_qiov, acb->cur_nr_sectors,
@@ -619,6 +621,7 @@ static void qcow_aio_write_cb(void *opaque, int ret)
acb->hd_iov.iov_base = (void *)src_buf;
acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
+ BLKDBG_EVENT(s->hd, BLKDBG_WRITE_AIO);
acb->hd_aiocb = bdrv_aio_writev(s->hd,
(acb->cluster_offset >> 9) + index_in_cluster,
&acb->hd_qiov, acb->cur_nr_sectors,
@@ -1141,6 +1144,7 @@ static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
if (!cluster_offset)
return -1;
cluster_offset &= s->cluster_offset_mask;
+ BLKDBG_EVENT(s->hd, BLKDBG_WRITE_COMPRESSED);
if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
qemu_free(out_buf);
return -1;
@@ -1211,6 +1215,7 @@ static int qcow_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
int growable = bs->growable;
int ret;
+ BLKDBG_EVENT(s->hd, BLKDBG_VMSTATE_SAVE);
bs->growable = 1;
ret = bdrv_pwrite(bs, qcow_vm_state_offset(s) + pos, buf, size);
bs->growable = growable;
@@ -1225,6 +1230,7 @@ static int qcow_load_vmstate(BlockDriverState *bs, uint8_t *buf,
int growable = bs->growable;
int ret;
+ BLKDBG_EVENT(s->hd, BLKDBG_VMSTATE_LOAD);
bs->growable = 1;
ret = bdrv_pread(bs, qcow_vm_state_offset(s) + pos, buf, size);
bs->growable = growable;