author    | Christoph Hellwig <hch@lst.de>         | 2010-01-21 16:12:09 +0100
committer | Anthony Liguori <aliguori@us.ibm.com>  | 2010-01-26 15:45:00 -0600
commit    | 7b88e48ba56bd849464e44717e81ff129695cc38 (patch)
tree      | d3a19aefc202c5431e5143853a26749a572168a4 /block
parent    | 9a2d77ad0dd6b3e93669543b846a75c02878eba7 (diff)
download  | qemu-7b88e48ba56bd849464e44717e81ff129695cc38.zip
          | qemu-7b88e48ba56bd849464e44717e81ff129695cc38.tar.gz
          | qemu-7b88e48ba56bd849464e44717e81ff129695cc38.tar.bz2
qcow2: rename two QCowAIOCB members
The n member is not very descriptive and very hard to grep; rename it to
cur_nr_sectors to better indicate what it is used for. Also rename
nb_sectors to remaining_sectors, as that is what it is used for.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
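For readers skimming the patch, the effect of the rename on the request loop can be sketched roughly as follows. This is an illustrative fragment only, not the actual QEMU code: the struct name, the helper function, and everything other than the renamed fields remaining_sectors and cur_nr_sectors are simplified stand-ins.

```c
#include <stdint.h>

/* Hypothetical, stripped-down stand-in for QCowAIOCB; only the two renamed
 * fields correspond to the real structure. */
struct example_aiocb {
    int64_t sector_num;        /* next sector to process */
    int remaining_sectors;     /* sectors still outstanding for the request */
    int cur_nr_sectors;        /* sectors handled in the current iteration */
};

/* Sketch of the bookkeeping each AIO callback performs with the new names. */
static int advance_request(struct example_aiocb *acb)
{
    /* Consume the sectors finished in the iteration that just completed. */
    acb->remaining_sectors -= acb->cur_nr_sectors;
    acb->sector_num        += acb->cur_nr_sectors;

    if (acb->remaining_sectors == 0) {
        return 0;              /* whole request is done */
    }

    /* The next iteration starts out wanting everything that is left; in the
     * real code the cluster lookup may then shrink cur_nr_sectors to what is
     * contiguous in the image. */
    acb->cur_nr_sectors = acb->remaining_sectors;
    return 1;                  /* more work to schedule */
}
```

With the old names the same loop read `nb_sectors -= n`, which gives no hint which counter is the running total and which is the per-iteration slice; that is the readability gain the commit message refers to.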
Diffstat (limited to 'block')
-rw-r--r-- | block/qcow2.c | 59
1 files changed, 30 insertions, 29 deletions
diff --git a/block/qcow2.c b/block/qcow2.c
index 8c220ec..7f3aef5 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -332,8 +332,8 @@ typedef struct QCowAIOCB {
     QEMUIOVector *qiov;
     uint8_t *buf;
     void *orig_buf;
-    int nb_sectors;
-    int n;
+    int remaining_sectors;
+    int cur_nr_sectors; /* number of sectors in current iteration */
     uint64_t cluster_offset;
     uint8_t *cluster_data;
     BlockDriverAIOCB *hd_aiocb;
@@ -399,38 +399,38 @@ static void qcow_aio_read_cb(void *opaque, int ret)
     } else {
         if (s->crypt_method) {
             qcow2_encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
-                            acb->n, 0,
+                            acb->cur_nr_sectors, 0,
                             &s->aes_decrypt_key);
         }
     }
 
-    acb->nb_sectors -= acb->n;
-    acb->sector_num += acb->n;
-    acb->buf += acb->n * 512;
+    acb->remaining_sectors -= acb->cur_nr_sectors;
+    acb->sector_num += acb->cur_nr_sectors;
+    acb->buf += acb->cur_nr_sectors * 512;
 
-    if (acb->nb_sectors == 0) {
+    if (acb->remaining_sectors == 0) {
         /* request completed */
         ret = 0;
         goto done;
     }
 
     /* prepare next AIO request */
-    acb->n = acb->nb_sectors;
-    acb->cluster_offset =
-        qcow2_get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
+    acb->cur_nr_sectors = acb->remaining_sectors;
+    acb->cluster_offset = qcow2_get_cluster_offset(bs, acb->sector_num << 9,
+                                                   &acb->cur_nr_sectors);
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
 
     if (!acb->cluster_offset) {
         if (bs->backing_hd) {
             /* read from the base image */
             n1 = qcow2_backing_read1(bs->backing_hd, acb->sector_num,
-                               acb->buf, acb->n);
+                               acb->buf, acb->cur_nr_sectors);
             if (n1 > 0) {
                 acb->hd_iov.iov_base = (void *)acb->buf;
-                acb->hd_iov.iov_len = acb->n * 512;
+                acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
                 qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
                 acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
-                                    &acb->hd_qiov, acb->n,
+                                    &acb->hd_qiov, acb->cur_nr_sectors,
                                     qcow_aio_read_cb, acb);
                 if (acb->hd_aiocb == NULL)
                     goto done;
@@ -441,7 +441,7 @@ static void qcow_aio_read_cb(void *opaque, int ret)
             }
         } else {
             /* Note: in this case, no need to wait */
-            memset(acb->buf, 0, 512 * acb->n);
+            memset(acb->buf, 0, 512 * acb->cur_nr_sectors);
             ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
             if (ret < 0)
                 goto done;
@@ -450,8 +450,8 @@ static void qcow_aio_read_cb(void *opaque, int ret)
         /* add AIO support for compressed blocks ? */
         if (qcow2_decompress_cluster(s, acb->cluster_offset) < 0)
             goto done;
-        memcpy(acb->buf,
-               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
+        memcpy(acb->buf, s->cluster_cache + index_in_cluster * 512,
+               512 * acb->cur_nr_sectors);
         ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
         if (ret < 0)
             goto done;
@@ -462,11 +462,12 @@ static void qcow_aio_read_cb(void *opaque, int ret)
         }
 
         acb->hd_iov.iov_base = (void *)acb->buf;
-        acb->hd_iov.iov_len = acb->n * 512;
+        acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
         qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
         acb->hd_aiocb = bdrv_aio_readv(s->hd,
                             (acb->cluster_offset >> 9) + index_in_cluster,
-                            &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb);
+                            &acb->hd_qiov, acb->cur_nr_sectors,
+                            qcow_aio_read_cb, acb);
         if (acb->hd_aiocb == NULL)
             goto done;
     }
@@ -500,8 +501,8 @@ static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
     } else {
         acb->buf = (uint8_t *)qiov->iov->iov_base;
     }
-    acb->nb_sectors = nb_sectors;
-    acb->n = 0;
+    acb->remaining_sectors = nb_sectors;
+    acb->cur_nr_sectors = 0;
     acb->cluster_offset = 0;
     acb->l2meta.nb_clusters = 0;
     QLIST_INIT(&acb->l2meta.dependent_requests);
@@ -569,24 +570,24 @@ static void qcow_aio_write_cb(void *opaque, int ret)
     if (ret < 0)
         goto done;
 
-    acb->nb_sectors -= acb->n;
-    acb->sector_num += acb->n;
-    acb->buf += acb->n * 512;
+    acb->remaining_sectors -= acb->cur_nr_sectors;
+    acb->sector_num += acb->cur_nr_sectors;
+    acb->buf += acb->cur_nr_sectors * 512;
 
-    if (acb->nb_sectors == 0) {
+    if (acb->remaining_sectors == 0) {
         /* request completed */
         ret = 0;
         goto done;
     }
 
     index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
-    n_end = index_in_cluster + acb->nb_sectors;
+    n_end = index_in_cluster + acb->remaining_sectors;
     if (s->crypt_method &&
         n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
         n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
 
     ret = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
-                                     index_in_cluster, n_end, &acb->n, &acb->l2meta);
+                                     index_in_cluster, n_end, &acb->cur_nr_sectors, &acb->l2meta);
     if (ret < 0) {
         goto done;
     }
@@ -608,17 +609,17 @@ static void qcow_aio_write_cb(void *opaque, int ret)
                                         s->cluster_size);
         }
         qcow2_encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
-                        acb->n, 1, &s->aes_encrypt_key);
+                        acb->cur_nr_sectors, 1, &s->aes_encrypt_key);
         src_buf = acb->cluster_data;
     } else {
         src_buf = acb->buf;
     }
     acb->hd_iov.iov_base = (void *)src_buf;
-    acb->hd_iov.iov_len = acb->n * 512;
+    acb->hd_iov.iov_len = acb->cur_nr_sectors * 512;
     qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
     acb->hd_aiocb = bdrv_aio_writev(s->hd,
                                     (acb->cluster_offset >> 9) + index_in_cluster,
-                                    &acb->hd_qiov, acb->n,
+                                    &acb->hd_qiov, acb->cur_nr_sectors,
                                     qcow_aio_write_cb, acb);
     if (acb->hd_aiocb == NULL)
         goto done;