author    Richard Henderson <richard.henderson@linaro.org>  2023-05-15 13:54:33 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2023-05-15 13:54:33 -0700
commit    ab4c44d657aeca7e1da6d6dcb1741c8e7d357b8b (patch)
tree      13b05307b2c4023bf21ef5acd38e325e7569e5ac /hw
parent    c095228e8a8cdf5c15bb8a47c4d069582ae017d1 (diff)
parent    01562fee5f3ad4506d57dbcf4b1903b565eceec7 (diff)
Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging
Pull request

This pull request contains Sam Li's zoned storage support in the QEMU block
layer and virtio-blk emulation.

v2:
- Sam fixed the CI failures. CI passes for me now. [Richard]

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCAAdFiEEhpWov9P5fNqsNXdanKSrs4Grc8gFAmRiWCgACgkQnKSrs4Gr
# c8h/7gf+MMm2cGEaf376t8HMwTc6wbXVfbmAlZrge2EXPZfFvEaxj7HClcEraOgV
# yJsGWeU6mOw4r68ICJ/4KhrY1cdv+VZym/LsMLMcFUTXFHnyX4pyU3am31FPOI4K
# +wrDYJOJhc4DkAESWGgEWiMKpuO/uUEgBmHdW+qPFCl77Yl/eP6H5uNP6nGFn55p
# QpS/l8iha7PDkc81EsrjA+e/YI0ubfNSP7+zZElhQ98354CQ0MCfmZ6h9bT+o2bu
# R7SBUj80e+2X0a1b9s/2Jz/x8l4TEsl8kr48/Q1usq3GVVkbjEgqsk6wTN13Q/4g
# CeIR7E61ZeYzmpb4tLFRIqK2Jw+NEQ==
# =Q8xW
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 15 May 2023 09:04:56 AM PDT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]

* tag 'block-pull-request' of https://gitlab.com/stefanha/qemu:
  docs/zoned-storage:add zoned emulation use case
  virtio-blk: add some trace events for zoned emulation
  block: add accounting for zone append operation
  virtio-blk: add zoned storage emulation for zoned devices
  block: add some trace events for zone append
  qemu-iotests: test zone append operation
  block: introduce zone append write for zoned devices
  file-posix: add tracking of the zone write pointers
  docs/zoned-storage: add zoned device documentation
  block: add some trace events for new block layer APIs
  iotests: test new zone operations
  block: add zoned BlockDriver check to block layer
  block/raw-format: add zone operations to pass through requests
  block/block-backend: add block layer APIs resembling Linux ZonedBlockDevice ioctls
  block/file-posix: introduce helper functions for sysfs attributes
  block/block-common: add zoned device structs

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
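The new block/block-backend zone APIs mentioned above are modelled on the Linux
ZonedBlockDevice ioctls. As a rough, standalone sketch of that kernel interface
(not part of this pull request; it assumes a kernel that ships
<linux/blkzoned.h> and a zoned test device such as a zoned null_blk instance at
/dev/nullb0), a report-zones call looks roughly like this:

/*
 * Minimal sketch only: query zone descriptors via BLKREPORTZONE.
 * /dev/nullb0 and the zone count are assumptions for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
    int fd = open("/dev/nullb0", O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Ask for up to 4 zone descriptors, starting at sector 0. */
    unsigned int nr = 4;
    struct blk_zone_report *rep =
        calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
    rep->sector = 0;
    rep->nr_zones = nr;

    if (ioctl(fd, BLKREPORTZONE, rep) == 0) {
        /* The kernel updates nr_zones to the number actually reported. */
        for (unsigned int i = 0; i < rep->nr_zones; i++) {
            printf("zone %u: start %llu wp %llu cond %u\n", i,
                   (unsigned long long)rep->zones[i].start,
                   (unsigned long long)rep->zones[i].wp,
                   (unsigned)rep->zones[i].cond);
        }
    } else {
        perror("BLKREPORTZONE");
    }

    free(rep);
    close(fd);
    return 0;
}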
Diffstat (limited to 'hw')
-rw-r--r--  hw/block/trace-events         |   7
-rw-r--r--  hw/block/virtio-blk-common.c  |   2
-rw-r--r--  hw/block/virtio-blk.c         | 405
-rw-r--r--  hw/virtio/virtio-qmp.c        |   2
4 files changed, 416 insertions, 0 deletions
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 2c45a62..34be8b9 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -44,9 +44,16 @@ pflash_write_unknown(const char *name, uint8_t cmd) "%s: unknown command 0x%02x"
# virtio-blk.c
virtio_blk_req_complete(void *vdev, void *req, int status) "vdev %p req %p status %d"
virtio_blk_rw_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d"
+virtio_blk_zone_report_complete(void *vdev, void *req, unsigned int nr_zones, int ret) "vdev %p req %p nr_zones %u ret %d"
+virtio_blk_zone_mgmt_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d"
+virtio_blk_zone_append_complete(void *vdev, void *req, int64_t sector, int ret) "vdev %p req %p, append sector 0x%" PRIx64 " ret %d"
virtio_blk_handle_write(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu"
virtio_blk_handle_read(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu"
virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "vdev %p mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d"
+virtio_blk_handle_zone_report(void *vdev, void *req, int64_t sector, unsigned int nr_zones) "vdev %p req %p sector 0x%" PRIx64 " nr_zones %u"
+virtio_blk_handle_zone_mgmt(void *vdev, void *req, uint8_t op, int64_t sector, int64_t len) "vdev %p req %p op 0x%x sector 0x%" PRIx64 " len 0x%" PRIx64 ""
+virtio_blk_handle_zone_reset_all(void *vdev, void *req, int64_t sector, int64_t len) "vdev %p req %p sector 0x%" PRIx64 " cap 0x%" PRIx64 ""
+virtio_blk_handle_zone_append(void *vdev, void *req, int64_t sector) "vdev %p req %p, append sector 0x%" PRIx64 ""
# hd-geometry.c
hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d"
diff --git a/hw/block/virtio-blk-common.c b/hw/block/virtio-blk-common.c
index ac52d7c..e2f8e2f 100644
--- a/hw/block/virtio-blk-common.c
+++ b/hw/block/virtio-blk-common.c
@@ -29,6 +29,8 @@ static const VirtIOFeature feature_sizes[] = {
.end = endof(struct virtio_blk_config, discard_sector_alignment)},
{.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES,
.end = endof(struct virtio_blk_config, write_zeroes_may_unmap)},
+ {.flags = 1ULL << VIRTIO_BLK_F_ZONED,
+ .end = endof(struct virtio_blk_config, zoned)},
{}
};
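The feature_sizes[] entry added above follows the usual virtio pattern: the
device's config space only needs to extend as far as the last field covered by
a negotiated feature, so VIRTIO_BLK_F_ZONED pulls in the new zoned block of
virtio_blk_config. A self-contained sketch of that lookup follows; the struct,
the endof() macro, and the feature-bit values are local stand-ins for
illustration, not QEMU's definitions.

/* Sketch of the feature_sizes lookup pattern, with stand-in definitions. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define endof(type, field) (offsetof(type, field) + sizeof(((type *)0)->field))

struct fake_blk_config {
    uint64_t capacity;
    uint32_t discard_sector_alignment;
    uint8_t  write_zeroes_may_unmap;
    uint8_t  zoned[8];                 /* stand-in for the zoned config block */
};

struct feature_size {
    uint64_t flags;                    /* feature bit mask */
    size_t   end;                      /* config bytes needed when the bit is set */
};

enum { F_DISCARD = 13, F_WRITE_ZEROES = 14, F_ZONED = 17 };  /* illustrative values */

static const struct feature_size feature_sizes[] = {
    { 1ULL << F_DISCARD,      endof(struct fake_blk_config, discard_sector_alignment) },
    { 1ULL << F_WRITE_ZEROES, endof(struct fake_blk_config, write_zeroes_may_unmap) },
    { 1ULL << F_ZONED,        endof(struct fake_blk_config, zoned) },
    { 0 }
};

/* Config space ends at the furthest field whose feature bit is set. */
static size_t config_size(uint64_t host_features, size_t base)
{
    size_t size = base;
    for (const struct feature_size *f = feature_sizes; f->flags; f++) {
        if ((host_features & f->flags) && f->end > size) {
            size = f->end;
        }
    }
    return size;
}

int main(void)
{
    uint64_t features = (1ULL << F_WRITE_ZEROES) | (1ULL << F_ZONED);
    printf("config size: %zu bytes\n",
           config_size(features, endof(struct fake_blk_config, capacity)));
    return 0;
}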
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index cefca93..8f65ea4 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -17,6 +17,7 @@
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
+#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
@@ -601,6 +602,351 @@ err:
return err_status;
}
+typedef struct ZoneCmdData {
+ VirtIOBlockReq *req;
+ struct iovec *in_iov;
+ unsigned in_num;
+ union {
+ struct {
+ unsigned int nr_zones;
+ BlockZoneDescriptor *zones;
+ } zone_report_data;
+ struct {
+ int64_t offset;
+ } zone_append_data;
+ };
+} ZoneCmdData;
+
+/*
+ * check zoned_request: error checking before issuing requests. If all checks
+ * passed, return true.
+ * append: true if only zone append requests issued.
+ */
+static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
+ bool append, uint8_t *status) {
+ BlockDriverState *bs = blk_bs(s->blk);
+ int index;
+
+ if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
+ *status = VIRTIO_BLK_S_UNSUPP;
+ return false;
+ }
+
+ if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
+ || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
+ *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ return false;
+ }
+
+ if (append) {
+ if (bs->bl.write_granularity) {
+ if ((offset % bs->bl.write_granularity) != 0) {
+ *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
+ return false;
+ }
+ }
+
+ index = offset / bs->bl.zone_size;
+ if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
+ *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ return false;
+ }
+
+ if (len / 512 > bs->bl.max_append_sectors) {
+ if (bs->bl.max_append_sectors == 0) {
+ *status = VIRTIO_BLK_S_UNSUPP;
+ } else {
+ *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+static void virtio_blk_zone_report_complete(void *opaque, int ret)
+{
+ ZoneCmdData *data = opaque;
+ VirtIOBlockReq *req = data->req;
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+ struct iovec *in_iov = data->in_iov;
+ unsigned in_num = data->in_num;
+ int64_t zrp_size, n, j = 0;
+ int64_t nz = data->zone_report_data.nr_zones;
+ int8_t err_status = VIRTIO_BLK_S_OK;
+
+ trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
+ if (ret) {
+ err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ goto out;
+ }
+
+ struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
+ .nr_zones = cpu_to_le64(nz),
+ };
+ zrp_size = sizeof(struct virtio_blk_zone_report)
+ + sizeof(struct virtio_blk_zone_descriptor) * nz;
+ n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
+ if (n != sizeof(zrp_hdr)) {
+ virtio_error(vdev, "Driver provided input buffer that is too small!");
+ err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ goto out;
+ }
+
+ for (size_t i = sizeof(zrp_hdr); i < zrp_size;
+ i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
+ struct virtio_blk_zone_descriptor desc =
+ (struct virtio_blk_zone_descriptor) {
+ .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
+ >> BDRV_SECTOR_BITS),
+ .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
+ >> BDRV_SECTOR_BITS),
+ .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
+ >> BDRV_SECTOR_BITS),
+ };
+
+ switch (data->zone_report_data.zones[j].type) {
+ case BLK_ZT_CONV:
+ desc.z_type = VIRTIO_BLK_ZT_CONV;
+ break;
+ case BLK_ZT_SWR:
+ desc.z_type = VIRTIO_BLK_ZT_SWR;
+ break;
+ case BLK_ZT_SWP:
+ desc.z_type = VIRTIO_BLK_ZT_SWP;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ switch (data->zone_report_data.zones[j].state) {
+ case BLK_ZS_RDONLY:
+ desc.z_state = VIRTIO_BLK_ZS_RDONLY;
+ break;
+ case BLK_ZS_OFFLINE:
+ desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
+ break;
+ case BLK_ZS_EMPTY:
+ desc.z_state = VIRTIO_BLK_ZS_EMPTY;
+ break;
+ case BLK_ZS_CLOSED:
+ desc.z_state = VIRTIO_BLK_ZS_CLOSED;
+ break;
+ case BLK_ZS_FULL:
+ desc.z_state = VIRTIO_BLK_ZS_FULL;
+ break;
+ case BLK_ZS_EOPEN:
+ desc.z_state = VIRTIO_BLK_ZS_EOPEN;
+ break;
+ case BLK_ZS_IOPEN:
+ desc.z_state = VIRTIO_BLK_ZS_IOPEN;
+ break;
+ case BLK_ZS_NOT_WP:
+ desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* TODO: it takes O(n^2) time complexity. Optimizations required. */
+ n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
+ if (n != sizeof(desc)) {
+ virtio_error(vdev, "Driver provided input buffer "
+ "for descriptors that is too small!");
+ err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ }
+ }
+
+out:
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
+ g_free(data->zone_report_data.zones);
+ g_free(data);
+}
+
+static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
+ struct iovec *in_iov,
+ unsigned in_num)
+{
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ unsigned int nr_zones;
+ ZoneCmdData *data;
+ int64_t zone_size, offset;
+ uint8_t err_status;
+
+ if (req->in_len < sizeof(struct virtio_blk_inhdr) +
+ sizeof(struct virtio_blk_zone_report) +
+ sizeof(struct virtio_blk_zone_descriptor)) {
+ virtio_error(vdev, "in buffer too small for zone report");
+ return;
+ }
+
+ /* start byte offset of the zone report */
+ offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
+ if (!check_zoned_request(s, offset, 0, false, &err_status)) {
+ goto out;
+ }
+ nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
+ sizeof(struct virtio_blk_zone_report)) /
+ sizeof(struct virtio_blk_zone_descriptor);
+ trace_virtio_blk_handle_zone_report(vdev, req,
+ offset >> BDRV_SECTOR_BITS, nr_zones);
+
+ zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
+ data = g_malloc(sizeof(ZoneCmdData));
+ data->req = req;
+ data->in_iov = in_iov;
+ data->in_num = in_num;
+ data->zone_report_data.nr_zones = nr_zones;
+ data->zone_report_data.zones = g_malloc(zone_size);
+
+ blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
+ data->zone_report_data.zones,
+ virtio_blk_zone_report_complete, data);
+ return;
+out:
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+}
+
+static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
+{
+ VirtIOBlockReq *req = opaque;
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ int8_t err_status = VIRTIO_BLK_S_OK;
+ trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);
+
+ if (ret) {
+ err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ }
+
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
+}
+
+static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
+{
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ BlockDriverState *bs = blk_bs(s->blk);
+ int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
+ uint64_t len;
+ uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
+ uint8_t err_status = VIRTIO_BLK_S_OK;
+
+ uint32_t type = virtio_ldl_p(vdev, &req->out.type);
+ if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
+ /* Entire drive capacity */
+ offset = 0;
+ len = capacity;
+ trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
+ bs->total_sectors);
+ } else {
+ if (bs->bl.zone_size > capacity - offset) {
+ /* The zoned device allows the last smaller zone. */
+ len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
+ } else {
+ len = bs->bl.zone_size;
+ }
+ trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
+ offset >> BDRV_SECTOR_BITS,
+ len >> BDRV_SECTOR_BITS);
+ }
+
+ if (!check_zoned_request(s, offset, len, false, &err_status)) {
+ goto out;
+ }
+
+ blk_aio_zone_mgmt(s->blk, op, offset, len,
+ virtio_blk_zone_mgmt_complete, req);
+
+ return 0;
+out:
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+ return err_status;
+}
+
+static void virtio_blk_zone_append_complete(void *opaque, int ret)
+{
+ ZoneCmdData *data = opaque;
+ VirtIOBlockReq *req = data->req;
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+ int64_t append_sector, n;
+ uint8_t err_status = VIRTIO_BLK_S_OK;
+
+ if (ret) {
+ err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ goto out;
+ }
+
+ virtio_stq_p(vdev, &append_sector,
+ data->zone_append_data.offset >> BDRV_SECTOR_BITS);
+ n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
+ sizeof(append_sector));
+ if (n != sizeof(append_sector)) {
+ virtio_error(vdev, "Driver provided input buffer less than size of "
+ "append_sector");
+ err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
+ goto out;
+ }
+ trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);
+
+out:
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
+ g_free(data);
+}
+
+static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
+ struct iovec *out_iov,
+ struct iovec *in_iov,
+ uint64_t out_num,
+ unsigned in_num) {
+ VirtIOBlock *s = req->dev;
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
+ uint8_t err_status = VIRTIO_BLK_S_OK;
+
+ int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
+ int64_t len = iov_size(out_iov, out_num);
+
+ trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
+ if (!check_zoned_request(s, offset, len, true, &err_status)) {
+ goto out;
+ }
+
+ ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData));
+ data->req = req;
+ data->in_iov = in_iov;
+ data->in_num = in_num;
+ data->zone_append_data.offset = offset;
+ qemu_iovec_init_external(&req->qiov, out_iov, out_num);
+
+ block_acct_start(blk_get_stats(s->blk), &req->acct, len,
+ BLOCK_ACCT_ZONE_APPEND);
+
+ blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
+ virtio_blk_zone_append_complete, data);
+ return 0;
+
+out:
+ aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
+ virtio_blk_req_complete(req, err_status);
+ virtio_blk_free_request(req);
+ aio_context_release(blk_get_aio_context(s->conf.conf.blk));
+ return err_status;
+}
+
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
uint32_t type;
@@ -687,6 +1033,24 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
case VIRTIO_BLK_T_FLUSH:
virtio_blk_handle_flush(req, mrb);
break;
+ case VIRTIO_BLK_T_ZONE_REPORT:
+ virtio_blk_handle_zone_report(req, in_iov, in_num);
+ break;
+ case VIRTIO_BLK_T_ZONE_OPEN:
+ virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
+ break;
+ case VIRTIO_BLK_T_ZONE_CLOSE:
+ virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
+ break;
+ case VIRTIO_BLK_T_ZONE_FINISH:
+ virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
+ break;
+ case VIRTIO_BLK_T_ZONE_RESET:
+ virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
+ break;
+ case VIRTIO_BLK_T_ZONE_RESET_ALL:
+ virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
+ break;
case VIRTIO_BLK_T_SCSI_CMD:
virtio_blk_handle_scsi(req);
break;
@@ -705,6 +1069,14 @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
virtio_blk_free_request(req);
break;
}
+ case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
+ /*
+ * Passing out_iov/out_num and in_iov/in_num is not safe
+ * to access req->elem.out_sg directly because it may be
+ * modified by virtio_blk_handle_request().
+ */
+ virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
+ break;
/*
* VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
* VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement,
@@ -890,6 +1262,7 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
VirtIOBlock *s = VIRTIO_BLK(vdev);
BlockConf *conf = &s->conf.conf;
+ BlockDriverState *bs = blk_bs(s->blk);
struct virtio_blk_config blkcfg;
uint64_t capacity;
int64_t length;
@@ -954,6 +1327,30 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
blkcfg.write_zeroes_may_unmap = 1;
virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
}
+ if (bs->bl.zoned != BLK_Z_NONE) {
+ switch (bs->bl.zoned) {
+ case BLK_Z_HM:
+ blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
+ break;
+ case BLK_Z_HA:
+ blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
+ bs->bl.zone_size / 512);
+ virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
+ bs->bl.max_active_zones);
+ virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
+ bs->bl.max_open_zones);
+ virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
+ virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
+ bs->bl.max_append_sectors);
+ } else {
+ blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
+ }
memcpy(config, &blkcfg, s->config_size);
}
@@ -1163,6 +1560,14 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
return;
}
+ BlockDriverState *bs = blk_bs(conf->conf.blk);
+ if (bs->bl.zoned != BLK_Z_NONE) {
+ virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
+ if (bs->bl.zoned == BLK_Z_HM) {
+ virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
+ }
+ }
+
if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
(!conf->max_discard_sectors ||
conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
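One detail worth noting in virtio_blk_handle_zone_report() above: the number of
zone descriptors returned is bounded by the in-buffer the driver supplied, after
subtracting the zone-report response header and the virtio_blk_inhdr status
area. A standalone sketch of that sizing arithmetic follows; the sizes are
stand-ins for illustration, not the real virtio struct layouts.

/* Sketch of the zone-report buffer sizing rule, with stand-in sizes. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    const size_t inhdr_size      = 1;   /* stand-in for sizeof(struct virtio_blk_inhdr) */
    const size_t report_hdr_size = 64;  /* stand-in for sizeof(struct virtio_blk_zone_report) */
    const size_t zone_desc_size  = 64;  /* stand-in for sizeof(struct virtio_blk_zone_descriptor) */

    size_t in_len = 4096;               /* total in-buffer length from the driver */

    if (in_len < inhdr_size + report_hdr_size + zone_desc_size) {
        fprintf(stderr, "in buffer too small for zone report\n");
        return 1;
    }

    size_t nr_zones = (in_len - inhdr_size - report_hdr_size) / zone_desc_size;
    printf("a %zu-byte in buffer has room for %zu zone descriptors\n",
           in_len, nr_zones);
    return 0;
}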
diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c
index b70148a..e84316d 100644
--- a/hw/virtio/virtio-qmp.c
+++ b/hw/virtio/virtio-qmp.c
@@ -176,6 +176,8 @@ static const qmp_virtio_feature_map_t virtio_blk_feature_map[] = {
"VIRTIO_BLK_F_DISCARD: Discard command supported"),
FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \
"VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_ZONED, \
+ "VIRTIO_BLK_F_ZONED: Zoned block devices"),
#ifndef VIRTIO_BLK_NO_LEGACY
FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \
"VIRTIO_BLK_F_BARRIER: Request barriers supported"),