path: root/hw/scsi
Diffstat (limited to 'hw/scsi')
-rw-r--r--  hw/scsi/esp-pci.c               |    8
-rw-r--r--  hw/scsi/esp.c                   |   53
-rw-r--r--  hw/scsi/lsi53c895a.c            |   16
-rw-r--r--  hw/scsi/megasas.c               |   55
-rw-r--r--  hw/scsi/mptendian.c             |    2
-rw-r--r--  hw/scsi/mptsas.c                |   13
-rw-r--r--  hw/scsi/scsi-bus.c              |  151
-rw-r--r--  hw/scsi/scsi-disk.c             |  245
-rw-r--r--  hw/scsi/scsi-generic.c          |    9
-rw-r--r--  hw/scsi/spapr_vscsi.c           |    5
-rw-r--r--  hw/scsi/vhost-scsi-common.c     |   13
-rw-r--r--  hw/scsi/vhost-scsi.c            |   22
-rw-r--r--  hw/scsi/vhost-user-scsi.c       |   35
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c |  105
-rw-r--r--  hw/scsi/virtio-scsi.c           |  519
-rw-r--r--  hw/scsi/vmw_pvscsi.c            |   76
-rw-r--r--  hw/scsi/vmw_pvscsi.h            |    4
17 files changed, 742 insertions(+), 589 deletions(-)
diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
index 42d9d2e..12c86eb 100644
--- a/hw/scsi/esp-pci.c
+++ b/hw/scsi/esp-pci.c
@@ -427,7 +427,7 @@ static void esp_pci_init(Object *obj)
object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}
-static void esp_pci_class_init(ObjectClass *klass, void *data)
+static void esp_pci_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -440,7 +440,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data)
k->class_id = PCI_CLASS_STORAGE_SCSI;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
- dc->reset = esp_pci_hard_reset;
+ device_class_set_legacy_reset(dc, esp_pci_hard_reset);
dc->vmsd = &vmstate_esp_pci_scsi;
}
@@ -450,7 +450,7 @@ static const TypeInfo esp_pci_info = {
.instance_init = esp_pci_init,
.instance_size = sizeof(PCIESPState),
.class_init = esp_pci_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -557,7 +557,7 @@ static void dc390_scsi_realize(PCIDevice *dev, Error **errp)
contents[EE_CHKSUM2] = chksum >> 8;
}
-static void dc390_class_init(ObjectClass *klass, void *data)
+static void dc390_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
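Sketch (not part of the patch): every dc->reset assignment in this series becomes a device_class_set_legacy_reset() call, which registers the old-style DeviceReset callback with the Resettable framework instead of writing the removed field directly. A minimal example of the new idiom, assuming QEMU's qdev core header; the device names are illustrative:

#include "qemu/osdep.h"
#include "hw/qdev-core.h"

/* Old-style reset hook, unchanged by the conversion */
static void demo_device_reset(DeviceState *dev)
{
    /* put the device back into its power-on state here */
}

static void demo_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* Was: dc->reset = demo_device_reset; */
    device_class_set_legacy_reset(dc, demo_device_reset);
}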
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 5d9b526..f24991f 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -197,39 +197,9 @@ static uint8_t esp_fifo_pop(ESPState *s)
return val;
}
-static uint32_t esp_fifo8_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
-{
- const uint8_t *buf;
- uint32_t n, n2;
- int len;
-
- if (maxlen == 0) {
- return 0;
- }
-
- len = maxlen;
- buf = fifo8_pop_buf(fifo, len, &n);
- if (dest) {
- memcpy(dest, buf, n);
- }
-
- /* Add FIFO wraparound if needed */
- len -= n;
- len = MIN(len, fifo8_num_used(fifo));
- if (len) {
- buf = fifo8_pop_buf(fifo, len, &n2);
- if (dest) {
- memcpy(&dest[n], buf, n2);
- }
- n += n2;
- }
-
- return n;
-}
-
static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
- uint32_t len = esp_fifo8_pop_buf(&s->fifo, dest, maxlen);
+ uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);
esp_update_drq(s);
return len;
@@ -272,10 +242,7 @@ static uint32_t esp_get_stc(ESPState *s)
static uint8_t esp_pdma_read(ESPState *s)
{
- uint8_t val;
-
- val = esp_fifo_pop(s);
- return val;
+ return esp_fifo_pop(s);
}
static void esp_pdma_write(ESPState *s, uint8_t val)
@@ -335,7 +302,7 @@ static void do_command_phase(ESPState *s)
if (!cmdlen || !s->current_dev) {
return;
}
- esp_fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);
+ fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);
current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
if (!current_lun) {
@@ -381,7 +348,7 @@ static void do_message_phase(ESPState *s)
/* Ignore extended messages for now */
if (s->cmdfifo_cdb_offset) {
int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
- esp_fifo8_pop_buf(&s->cmdfifo, NULL, len);
+ fifo8_drop(&s->cmdfifo, len);
s->cmdfifo_cdb_offset = 0;
}
}
@@ -486,7 +453,7 @@ static bool esp_cdb_ready(ESPState *s)
return false;
}
- pbuf = fifo8_peek_buf(&s->cmdfifo, len, &n);
+ pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
if (n < len) {
/*
* In normal use the cmdfifo should never wrap, but include this check
@@ -594,7 +561,7 @@ static void esp_do_dma(ESPState *s)
if (!s->current_req) {
return;
}
- if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
+ if (s->async_len == 0 && esp_get_tc(s)) {
/* Defer until data is available. */
return;
}
@@ -647,7 +614,7 @@ static void esp_do_dma(ESPState *s)
if (!s->current_req) {
return;
}
- if (s->async_len == 0 && esp_get_tc(s) && s->ti_size) {
+ if (s->async_len == 0 && esp_get_tc(s)) {
/* Defer until data is available. */
return;
}
@@ -1601,12 +1568,12 @@ static const VMStateDescription vmstate_sysbus_esp_scsi = {
}
};
-static void sysbus_esp_class_init(ObjectClass *klass, void *data)
+static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sysbus_esp_realize;
- dc->reset = sysbus_esp_hard_reset;
+ device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
dc->vmsd = &vmstate_sysbus_esp_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
@@ -1627,7 +1594,7 @@ static void esp_init(Object *obj)
fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
-static void esp_class_init(ObjectClass *klass, void *data)
+static void esp_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
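Sketch (not part of the patch): the esp.c hunks above drop the local esp_fifo8_pop_buf() wrapper because the generic helpers in qemu/fifo8.h now cover both cases it handled: fifo8_pop_buf() copies into a caller-supplied buffer and deals with ring wraparound internally, and fifo8_drop() discards bytes when no copy is wanted. A standalone illustration of those two helpers, with made-up data:

#include "qemu/osdep.h"
#include "qemu/fifo8.h"

static void demo_fifo8_usage(void)
{
    static const uint8_t cdb[6] = { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 };
    Fifo8 fifo;
    uint8_t buf[16];
    uint32_t n;

    fifo8_create(&fifo, 32);
    fifo8_push_all(&fifo, cdb, sizeof(cdb));

    /* Copy up to sizeof(buf) bytes out; wraparound is handled internally */
    n = fifo8_pop_buf(&fifo, buf, sizeof(buf));

    /* Discard whatever is left without copying it anywhere */
    fifo8_drop(&fifo, fifo8_num_used(&fifo));

    (void)n;
    fifo8_destroy(&fifo);
}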
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index eb9828d..9ea4aa0 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -19,7 +19,7 @@
#include "hw/pci/pci_device.h"
#include "hw/scsi/scsi.h"
#include "migration/vmstate.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "trace.h"
@@ -188,7 +188,7 @@ static const char *names[] = {
#define LSI_TAG_VALID (1 << 16)
/* Maximum instructions to process. */
-#define LSI_MAX_INSN 100
+#define LSI_MAX_INSN 500
typedef struct lsi_request {
SCSIRequest *req;
@@ -1112,7 +1112,7 @@ bad:
static void lsi_memcpy(LSIState *s, uint32_t dest, uint32_t src, int count)
{
int n;
- uint8_t buf[LSI_BUF_SIZE];
+ QEMU_UNINITIALIZED uint8_t buf[LSI_BUF_SIZE];
trace_lsi_memcpy(dest, src, count);
while (count) {
@@ -2372,10 +2372,10 @@ static void lsi_scsi_exit(PCIDevice *dev)
LSIState *s = LSI53C895A(dev);
address_space_destroy(&s->pci_io_as);
- timer_del(s->scripts_timer);
+ timer_free(s->scripts_timer);
}
-static void lsi_class_init(ObjectClass *klass, void *data)
+static void lsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
@@ -2386,7 +2386,7 @@ static void lsi_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_LSI_53C895A;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
- dc->reset = lsi_scsi_reset;
+ device_class_set_legacy_reset(dc, lsi_scsi_reset);
dc->vmsd = &vmstate_lsi_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
@@ -2396,13 +2396,13 @@ static const TypeInfo lsi_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(LSIState),
.class_init = lsi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
-static void lsi53c810_class_init(ObjectClass *klass, void *data)
+static void lsi53c810_class_init(ObjectClass *klass, const void *data)
{
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index 2d0c607..844643d 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -21,9 +21,9 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
-#include "sysemu/dma.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/rtc.h"
+#include "system/dma.h"
+#include "system/block-backend.h"
+#include "system/rtc.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "qemu/iov.h"
@@ -981,13 +981,11 @@ static int megasas_event_wait(MegasasState *s, MegasasCmd *cmd)
static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd)
{
- struct mfi_pd_list info;
- size_t dcmd_size = sizeof(info);
+ struct mfi_pd_list info = {};
BusChild *kid;
uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks;
dma_addr_t residual;
- memset(&info, 0, dcmd_size);
offset = 8;
dcmd_limit = offset + sizeof(struct mfi_pd_address);
if (cmd->iov_size < dcmd_limit) {
@@ -1429,11 +1427,10 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd)
static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd)
{
- struct mfi_ctrl_props info;
+ struct mfi_ctrl_props info = {};
size_t dcmd_size = sizeof(info);
dma_addr_t residual;
- memset(&info, 0x0, dcmd_size);
if (cmd->iov_size < dcmd_size) {
trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
dcmd_size);
@@ -1781,7 +1778,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
uint8_t cdb[16];
int len;
struct SCSIDevice *sdev = NULL;
- int target_id, lun_id, cdb_len;
+ int target_id, lun_id;
lba_count = le32_to_cpu(cmd->frame->io.header.data_len);
lba_start_lo = le32_to_cpu(cmd->frame->io.lba_lo);
@@ -1790,7 +1787,6 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
target_id = cmd->frame->header.target_id;
lun_id = cmd->frame->header.lun_id;
- cdb_len = cmd->frame->header.cdb_len;
if (target_id < MFI_MAX_LD && lun_id == 0) {
sdev = scsi_device_find(&s->bus, 0, target_id, lun_id);
@@ -1805,15 +1801,6 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
return MFI_STAT_DEVICE_NOT_FOUND;
}
- if (cdb_len > 16) {
- trace_megasas_scsi_invalid_cdb_len(
- mfi_frame_desc(frame_cmd), 1, target_id, lun_id, cdb_len);
- megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE));
- cmd->frame->header.scsi_status = CHECK_CONDITION;
- s->event_count++;
- return MFI_STAT_SCSI_DONE_WITH_ERROR;
- }
-
cmd->iov_size = lba_count * sdev->blocksize;
if (megasas_map_sgl(s, cmd, &cmd->frame->io.sgl)) {
megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE));
@@ -1824,7 +1811,7 @@ static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd, int frame_cmd)
megasas_encode_lba(cdb, lba_start, lba_count, is_write);
cmd->req = scsi_req_new(sdev, cmd->index,
- lun_id, cdb, cdb_len, cmd);
+ lun_id, cdb, sizeof(cdb), cmd);
if (!cmd->req) {
trace_megasas_scsi_req_alloc_failed(
mfi_frame_desc(frame_cmd), target_id, lun_id);
@@ -2236,7 +2223,6 @@ static uint64_t megasas_queue_read(void *opaque, hwaddr addr,
static void megasas_queue_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
- return;
}
static const MemoryRegionOps megasas_queue_ops = {
@@ -2458,7 +2444,7 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp)
scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info);
}
-static Property megasas_properties_gen1[] = {
+static const Property megasas_properties_gen1[] = {
DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge,
MEGASAS_DEFAULT_SGE),
DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds,
@@ -2469,10 +2455,9 @@ static Property megasas_properties_gen1[] = {
DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BIT("use_jbod", MegasasState, flags,
MEGASAS_FLAG_USE_JBOD, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static Property megasas_properties_gen2[] = {
+static const Property megasas_properties_gen2[] = {
DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge,
MEGASAS_DEFAULT_SGE),
DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds,
@@ -2483,7 +2468,6 @@ static Property megasas_properties_gen2[] = {
DEFINE_PROP_ON_OFF_AUTO("msix", MegasasState, msix, ON_OFF_AUTO_AUTO),
DEFINE_PROP_BIT("use_jbod", MegasasState, flags,
MEGASAS_FLAG_USE_JBOD, false),
- DEFINE_PROP_END_OF_LIST(),
};
typedef struct MegasasInfo {
@@ -2497,8 +2481,9 @@ typedef struct MegasasInfo {
int mmio_bar;
int osts;
const VMStateDescription *vmsd;
- Property *props;
- InterfaceInfo *interfaces;
+ const Property *props;
+ size_t props_count;
+ const InterfaceInfo *interfaces;
} MegasasInfo;
static struct MegasasInfo megasas_devices[] = {
@@ -2514,7 +2499,8 @@ static struct MegasasInfo megasas_devices[] = {
.osts = MFI_1078_RM | 1,
.vmsd = &vmstate_megasas_gen1,
.props = megasas_properties_gen1,
- .interfaces = (InterfaceInfo[]) {
+ .props_count = ARRAY_SIZE(megasas_properties_gen1),
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -2530,14 +2516,15 @@ static struct MegasasInfo megasas_devices[] = {
.osts = MFI_GEN2_RM,
.vmsd = &vmstate_megasas_gen2,
.props = megasas_properties_gen2,
- .interfaces = (InterfaceInfo[]) {
+ .props_count = ARRAY_SIZE(megasas_properties_gen2),
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_PCIE_DEVICE },
{ }
},
}
};
-static void megasas_class_init(ObjectClass *oc, void *data)
+static void megasas_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -2556,8 +2543,8 @@ static void megasas_class_init(ObjectClass *oc, void *data)
e->osts = info->osts;
e->product_name = info->product_name;
e->product_version = info->product_version;
- device_class_set_props(dc, info->props);
- dc->reset = megasas_scsi_reset;
+ device_class_set_props_n(dc, info->props, info->props_count);
+ device_class_set_legacy_reset(dc, megasas_scsi_reset);
dc->vmsd = info->vmsd;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->desc = info->desc;
@@ -2582,11 +2569,11 @@ static void megasas_register_types(void)
type_info.name = info->name;
type_info.parent = TYPE_MEGASAS_BASE;
- type_info.class_data = (void *)info;
+ type_info.class_data = info;
type_info.class_init = megasas_class_init;
type_info.interfaces = info->interfaces;
- type_register(&type_info);
+ type_register_static(&type_info);
}
}
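Sketch (not part of the patch): the property-table changes above follow the same shape across the series: the array becomes const and loses its DEFINE_PROP_END_OF_LIST() terminator, and the element count travels with it instead. When the array is visible at the call site, device_class_set_props() still works (as mptsas and the other files in this diff show); megasas needs the explicit device_class_set_props_n() plus a props_count field because its tables are reached through the MegasasInfo pointer. Names below are illustrative:

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"

typedef struct DemoState {
    DeviceState parent_obj;
    uint32_t max_sge;
} DemoState;

/* const and unterminated: no DEFINE_PROP_END_OF_LIST() sentinel */
static const Property demo_properties[] = {
    DEFINE_PROP_UINT32("max_sge", DemoState, max_sge, 128),
};

static void demo_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* The array is in scope here, so the count can be derived at the call
     * site; a table passed through a pointer needs the count spelled out,
     * as megasas does with device_class_set_props_n(). */
    device_class_set_props(dc, demo_properties);
}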
diff --git a/hw/scsi/mptendian.c b/hw/scsi/mptendian.c
index 0d5abb4..6cba92f 100644
--- a/hw/scsi/mptendian.c
+++ b/hw/scsi/mptendian.c
@@ -22,7 +22,7 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/pci/msi.h"
#include "qemu/iov.h"
#include "hw/scsi/scsi.h"
diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c
index c5d3138..1ebe0b8 100644
--- a/hw/scsi/mptsas.c
+++ b/hw/scsi/mptsas.c
@@ -25,7 +25,7 @@
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "hw/pci/msi.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
@@ -1410,14 +1410,13 @@ static const VMStateDescription vmstate_mptsas = {
}
};
-static Property mptsas_properties[] = {
+static const Property mptsas_properties[] = {
DEFINE_PROP_UINT64("sas_address", MPTSASState, sas_addr, 0),
/* TODO: test MSI support under Windows */
DEFINE_PROP_ON_OFF_AUTO("msi", MPTSASState, msi, ON_OFF_AUTO_AUTO),
- DEFINE_PROP_END_OF_LIST(),
};
-static void mptsas1068_class_init(ObjectClass *oc, void *data)
+static void mptsas1068_class_init(ObjectClass *oc, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
@@ -1431,7 +1430,7 @@ static void mptsas1068_class_init(ObjectClass *oc, void *data)
pc->subsystem_id = 0x8000;
pc->class_id = PCI_CLASS_STORAGE_SCSI;
device_class_set_props(dc, mptsas_properties);
- dc->reset = mptsas_reset;
+ device_class_set_legacy_reset(dc, mptsas_reset);
dc->vmsd = &vmstate_mptsas;
dc->desc = "LSI SAS 1068";
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
@@ -1442,7 +1441,7 @@ static const TypeInfo mptsas_info = {
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(MPTSASState),
.class_init = mptsas1068_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
@@ -1450,7 +1449,7 @@ static const TypeInfo mptsas_info = {
static void mptsas_register_types(void)
{
- type_register(&mptsas_info);
+ type_register_static(&mptsas_info);
}
type_init(mptsas_register_types)
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 9e40b0c..9b12ee7 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -9,12 +9,12 @@
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
+#include "system/system.h"
+#include "system/runstate.h"
#include "trace.h"
-#include "sysemu/dma.h"
+#include "system/dma.h"
#include "qemu/cutils.h"
static char *scsibus_get_dev_path(DeviceState *dev);
@@ -100,8 +100,15 @@ static void scsi_device_for_each_req_sync(SCSIDevice *s,
assert(!runstate_is_running());
assert(qemu_in_main_thread());
- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
- fn(req, opaque);
+ /*
+ * Locking is not necessary because the guest is stopped and no other
+ * threads can be accessing the requests list, but take the lock for
+ * consistency.
+ */
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
+ fn(req, opaque);
+ }
}
}
@@ -115,21 +122,29 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
{
g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
SCSIDevice *s = data->s;
- AioContext *ctx;
- SCSIRequest *req;
- SCSIRequest *next;
+ g_autoptr(GList) reqs = NULL;
/*
- * The BB cannot have changed contexts between this BH being scheduled and
- * now: BBs' AioContexts, when they have a node attached, can only be
- * changed via bdrv_try_change_aio_context(), in a drained section. While
- * we have the in-flight counter incremented, that drain must block.
+ * Build a list of requests in this AioContext so fn() can be invoked later
+ * outside requests_lock.
*/
- ctx = blk_get_aio_context(s->conf.blk);
- assert(ctx == qemu_get_current_aio_context());
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ AioContext *ctx = qemu_get_current_aio_context();
+ SCSIRequest *req;
+ SCSIRequest *next;
- QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
- data->fn(req, data->fn_opaque);
+ QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
+ if (req->ctx == ctx) {
+ scsi_req_ref(req); /* dropped after calling fn() */
+ reqs = g_list_prepend(reqs, req);
+ }
+ }
+ }
+
+ /* Call fn() on each request */
+ for (GList *elem = g_list_first(reqs); elem; elem = g_list_next(elem)) {
+ data->fn(elem->data, data->fn_opaque);
+ scsi_req_unref(elem->data);
}
/* Drop the reference taken by scsi_device_for_each_req_async() */
@@ -139,9 +154,35 @@ static void scsi_device_for_each_req_async_bh(void *opaque)
blk_dec_in_flight(s->conf.blk);
}
+static void scsi_device_for_each_req_async_do_ctx(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ AioContext *ctx = key;
+ SCSIDeviceForEachReqAsyncData *params = user_data;
+ SCSIDeviceForEachReqAsyncData *data;
+
+ data = g_new(SCSIDeviceForEachReqAsyncData, 1);
+ data->s = params->s;
+ data->fn = params->fn;
+ data->fn_opaque = params->fn_opaque;
+
+ /*
+ * Hold a reference to the SCSIDevice until
+ * scsi_device_for_each_req_async_bh() finishes.
+ */
+ object_ref(OBJECT(data->s));
+
+ /* Paired with scsi_device_for_each_req_async_bh() */
+ blk_inc_in_flight(data->s->conf.blk);
+
+ aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, data);
+}
+
/*
* Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
- * runs in the AioContext that is executing the request.
+ * must be thread-safe because it runs concurrently in each AioContext that is
+ * executing a request.
+ *
* Keeps the BlockBackend's in-flight counter incremented until everything is
* done, so draining it will settle all scheduled @fn() calls.
*/
@@ -151,24 +192,26 @@ static void scsi_device_for_each_req_async(SCSIDevice *s,
{
assert(qemu_in_main_thread());
- SCSIDeviceForEachReqAsyncData *data =
- g_new(SCSIDeviceForEachReqAsyncData, 1);
-
- data->s = s;
- data->fn = fn;
- data->fn_opaque = opaque;
-
- /*
- * Hold a reference to the SCSIDevice until
- * scsi_device_for_each_req_async_bh() finishes.
- */
- object_ref(OBJECT(s));
+ /* The set of AioContexts where the requests are being processed */
+ g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+ WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
+ SCSIRequest *req;
+ QTAILQ_FOREACH(req, &s->requests, next) {
+ g_hash_table_add(aio_contexts, req->ctx);
+ }
+ }
- /* Paired with blk_dec_in_flight() in scsi_device_for_each_req_async_bh() */
- blk_inc_in_flight(s->conf.blk);
- aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
- scsi_device_for_each_req_async_bh,
- data);
+ /* Schedule a BH for each AioContext */
+ SCSIDeviceForEachReqAsyncData params = {
+ .s = s,
+ .fn = fn,
+ .fn_opaque = opaque,
+ };
+ g_hash_table_foreach(
+ aio_contexts,
+ scsi_device_for_each_req_async_do_ctx,
+ &params
+ );
}
static void scsi_device_realize(SCSIDevice *s, Error **errp)
@@ -349,6 +392,7 @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
dev->lun = lun;
}
+ qemu_mutex_init(&dev->requests_lock);
QTAILQ_INIT(&dev->requests);
scsi_device_realize(dev, &local_err);
if (local_err) {
@@ -356,7 +400,7 @@ static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
return;
}
dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
- scsi_dma_restart_cb, dev);
+ scsi_dma_restart_cb, NULL, dev);
}
static void scsi_qdev_unrealize(DeviceState *qdev)
@@ -369,6 +413,8 @@ static void scsi_qdev_unrealize(DeviceState *qdev)
scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
+ qemu_mutex_destroy(&dev->requests_lock);
+
scsi_device_unrealize(dev);
blockdev_mark_auto_del(dev->conf.blk);
@@ -384,6 +430,7 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
DeviceState *dev;
SCSIDevice *s;
DriveInfo *dinfo;
+ Error *local_err = NULL;
if (blk_is_sg(blk)) {
driver = "scsi-generic";
@@ -403,6 +450,14 @@ SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
s = SCSI_DEVICE(dev);
s->conf = *conf;
+ check_boot_index(conf->bootindex, &local_err);
+ if (local_err) {
+ object_unparent(OBJECT(dev));
+ error_propagate(errp, local_err);
+ return NULL;
+ }
+ add_boot_device_path(conf->bootindex, dev, NULL);
+
qdev_prop_set_uint32(dev, "scsi-id", unit);
if (object_property_find(OBJECT(dev), "removable")) {
qdev_prop_set_bit(dev, "removable", removable);
@@ -859,6 +914,7 @@ invalid_opcode:
}
}
+ req->ctx = qemu_get_current_aio_context();
req->cmd = cmd;
req->residual = req->cmd.xfer;
@@ -955,7 +1011,10 @@ static void scsi_req_enqueue_internal(SCSIRequest *req)
req->sg = NULL;
}
req->enqueued = true;
- QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+
+ WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+ QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
+ }
}
int32_t scsi_req_enqueue(SCSIRequest *req)
@@ -975,7 +1034,9 @@ static void scsi_req_dequeue(SCSIRequest *req)
trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
req->retry = false;
if (req->enqueued) {
- QTAILQ_REMOVE(&req->dev->requests, req, next);
+ WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
+ QTAILQ_REMOVE(&req->dev->requests, req, next);
+ }
req->enqueued = false;
scsi_req_unref(req);
}
@@ -1934,14 +1995,13 @@ const VMStateDescription vmstate_scsi_device = {
}
};
-static Property scsi_props[] = {
+static const Property scsi_props[] = {
DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
- DEFINE_PROP_END_OF_LIST(),
};
-static void scsi_device_class_init(ObjectClass *klass, void *data)
+static void scsi_device_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
@@ -1953,8 +2013,7 @@ static void scsi_device_class_init(ObjectClass *klass, void *data)
static void scsi_dev_instance_init(Object *obj)
{
- DeviceState *dev = DEVICE(obj);
- SCSIDevice *s = SCSI_DEVICE(dev);
+ SCSIDevice *s = SCSI_DEVICE(obj);
device_add_bootindex_property(obj, &s->conf.bootindex,
"bootindex", NULL,
@@ -1971,7 +2030,7 @@ static const TypeInfo scsi_device_type_info = {
.instance_init = scsi_dev_instance_init,
};
-static void scsi_bus_class_init(ObjectClass *klass, void *data)
+static void scsi_bus_class_init(ObjectClass *klass, const void *data)
{
BusClass *k = BUS_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
@@ -1987,7 +2046,7 @@ static const TypeInfo scsi_bus_info = {
.parent = TYPE_BUS,
.instance_size = sizeof(SCSIBus),
.class_init = scsi_bus_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
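Sketch (not part of the patch): the scsi-bus.c hunks above consistently wrap the requests list in WITH_QEMU_LOCK_GUARD(), which holds s->requests_lock for the enclosed block and releases it on every exit path, and the BH builds a snapshot list under the lock so fn() can run after it is dropped. A generic, self-contained version of that collect-then-process pattern (the types here are illustrative; the real code additionally takes a reference on each request so the entries stay valid once the lock is released):

#include "qemu/osdep.h"
#include "qemu/lockable.h"
#include "qemu/queue.h"
#include "qemu/thread.h"

typedef struct DemoItem {
    QTAILQ_ENTRY(DemoItem) next;
} DemoItem;

typedef struct DemoList {
    QemuMutex lock;
    QTAILQ_HEAD(, DemoItem) items;
} DemoList;

static void demo_for_each(DemoList *l, void (*fn)(DemoItem *))
{
    g_autoptr(GList) snapshot = NULL;
    DemoItem *it;

    /* Take a snapshot while holding the lock... */
    WITH_QEMU_LOCK_GUARD(&l->lock) {
        QTAILQ_FOREACH(it, &l->items, next) {
            snapshot = g_list_prepend(snapshot, it);
        }
    }

    /* ...and invoke the callback outside it, as the BH above does */
    for (GList *elem = g_list_first(snapshot); elem; elem = g_list_next(elem)) {
        fn(elem->data);
    }
}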
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index a67092d..b4782c6 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -32,13 +32,14 @@
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/blockdev.h"
+#include "system/arch_init.h"
+#include "system/block-backend.h"
+#include "system/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
-#include "sysemu/dma.h"
-#include "sysemu/sysemu.h"
+#include "system/dma.h"
+#include "system/system.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "qom/object.h"
@@ -65,9 +66,15 @@ OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
struct SCSIDiskClass {
SCSIDeviceClass parent_class;
+ /*
+ * Callbacks receive ret == 0 for success. Errors are represented either as
+ * negative errno values, or as positive SAM status codes. For host_status
+ * errors, the function passes ret == -ENODEV and sets the host_status field
+ * of the SCSIRequest.
+ */
DMAIOFunc *dma_readv;
DMAIOFunc *dma_writev;
- bool (*need_fua_emulation)(SCSICommand *cmd);
+ bool (*need_fua)(SCSICommand *cmd);
void (*update_sense)(SCSIRequest *r);
};
@@ -78,7 +85,7 @@ typedef struct SCSIDiskReq {
uint32_t sector_count;
uint32_t buflen;
bool started;
- bool need_fua_emulation;
+ bool need_fua;
struct iovec iov;
QEMUIOVector qiov;
BlockAcctCookie acct;
@@ -98,12 +105,12 @@ struct SCSIDiskState {
uint64_t max_unmap_size;
uint64_t max_io_size;
uint32_t quirks;
- QEMUBH *bh;
char *version;
char *serial;
char *vendor;
char *product;
char *device_id;
+ char *loadparm; /* only for s390x */
bool tray_open;
bool tray_locked;
/*
@@ -217,22 +224,61 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
SCSISense sense = SENSE_CODE(NO_SENSE);
- int error = 0;
+ int16_t host_status;
+ int error;
bool req_has_sense = false;
BlockErrorAction action;
int status;
+ /*
+ * host_status should only be set for SG_IO requests that came back with a
+ * host_status error in scsi_block_sgio_complete(). This error path passes
+ * -ENODEV as the return value.
+ *
+ * Reset host_status in the request because we may still want to complete
+ * the request successfully with the 'stop' or 'ignore' error policy.
+ */
+ host_status = r->req.host_status;
+ if (host_status != -1) {
+ assert(ret == -ENODEV);
+ r->req.host_status = -1;
+ }
+
if (ret < 0) {
status = scsi_sense_from_errno(-ret, &sense);
error = -ret;
} else {
/* A passthrough command has completed with nonzero status. */
status = ret;
- if (status == CHECK_CONDITION) {
+ switch (status) {
+ case CHECK_CONDITION:
req_has_sense = true;
error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
- } else {
+ break;
+ case RESERVATION_CONFLICT:
+ /*
+ * Don't apply the error policy, always report to the guest.
+ *
+ * This is a passthrough code path, so it's not a backend error, but
+ * a response to an invalid guest request.
+ *
+ * Windows Failover Cluster validation intentionally sends invalid
+ * requests to verify that reservations work as intended. It is
+ * crucial that it sees the resulting errors.
+ *
+ * Treating a reservation conflict as a guest-side error is obvious
+ * when a pr-manager is in use. Without one, the situation is less
+ * clear, but there might be nothing that can be fixed on the host
+ * (like in the above example), and we don't want to be stuck in a
+ * loop where resuming the VM and retrying the request immediately
+ * stops it again. So always reporting is still the safer option in
+ * this case, too.
+ */
+ error = 0;
+ break;
+ default:
error = EINVAL;
+ break;
}
}
@@ -242,8 +288,9 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
* are usually retried immediately, so do not post them to QMP and
* do not account them as failed I/O.
*/
- if (req_has_sense &&
- scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
+ if (!error || (req_has_sense &&
+ scsi_sense_buf_is_guest_recoverable(r->req.sense,
+ sizeof(r->req.sense)))) {
action = BLOCK_ERROR_ACTION_REPORT;
acct_failed = false;
} else {
@@ -256,6 +303,10 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
if (acct_failed) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
+ if (host_status != -1) {
+ scsi_req_complete_failed(&r->req, host_status);
+ return true;
+ }
if (req_has_sense) {
sdc->update_sense(&r->req);
} else if (status == CHECK_CONDITION) {
@@ -283,7 +334,7 @@ static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
return true;
}
- if (ret < 0) {
+ if (ret != 0) {
return scsi_handle_rw_error(r, ret, acct_failed);
}
@@ -295,9 +346,8 @@ static void scsi_aio_complete(void *opaque, int ret)
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
@@ -339,39 +389,16 @@ static bool scsi_is_cmd_fua(SCSICommand *cmd)
}
}
-static void scsi_write_do_fua(SCSIDiskReq *r)
-{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
-
- assert(r->req.aiocb == NULL);
- assert(!r->req.io_canceled);
-
- if (r->need_fua_emulation) {
- block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
- BLOCK_ACCT_FLUSH);
- r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
- return;
- }
-
- scsi_req_complete(&r->req, GOOD);
- scsi_req_unref(&r->req);
-}
-
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
assert(r->req.aiocb == NULL);
- if (scsi_disk_req_check_error(r, ret, false)) {
+ if (scsi_disk_req_check_error(r, ret, ret > 0)) {
goto done;
}
r->sector += r->sector_count;
r->sector_count = 0;
- if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
- scsi_write_do_fua(r);
- return;
- } else {
- scsi_req_complete(&r->req, GOOD);
- }
+ scsi_req_complete(&r->req, GOOD);
done:
scsi_req_unref(&r->req);
@@ -385,9 +412,10 @@ static void scsi_dma_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ /* ret > 0 is accounted for in scsi_disk_req_check_error() */
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
- } else {
+ } else if (ret == 0) {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_dma_complete_noio(r, ret);
@@ -395,15 +423,13 @@ static void scsi_dma_complete(void *opaque, int ret)
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());
assert(r->req.aiocb == NULL);
- if (scsi_disk_req_check_error(r, ret, false)) {
+ if (scsi_disk_req_check_error(r, ret, ret > 0)) {
goto done;
}
@@ -424,9 +450,10 @@ static void scsi_read_complete(void *opaque, int ret)
assert(r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ /* ret > 0 is accounted for in scsi_disk_req_check_error() */
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
- } else {
+ } else if (ret == 0) {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
}
@@ -450,8 +477,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
r->req.residual -= r->req.sg->size;
- r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
- r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
sdc->dma_readv, r, scsi_dma_complete, r,
DMA_DIRECTION_FROM_DEVICE);
@@ -515,7 +541,7 @@ static void scsi_read_data(SCSIRequest *req)
first = !r->started;
r->started = true;
- if (first && r->need_fua_emulation) {
+ if (first && r->need_fua) {
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
BLOCK_ACCT_FLUSH);
r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
@@ -526,15 +552,13 @@ static void scsi_read_data(SCSIRequest *req)
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
- /* The request must only run in the BlockBackend's AioContext */
- assert(blk_get_aio_context(s->qdev.conf.blk) ==
- qemu_get_current_aio_context());
+ /* The request must run in its AioContext */
+ assert(r->req.ctx == qemu_get_current_aio_context());
assert (r->req.aiocb == NULL);
- if (scsi_disk_req_check_error(r, ret, false)) {
+ if (scsi_disk_req_check_error(r, ret, ret > 0)) {
goto done;
}
@@ -542,8 +566,7 @@ static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
r->sector += n;
r->sector_count -= n;
if (r->sector_count == 0) {
- scsi_write_do_fua(r);
- return;
+ scsi_req_complete(&r->req, GOOD);
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
@@ -562,9 +585,10 @@ static void scsi_write_complete(void * opaque, int ret)
assert (r->req.aiocb != NULL);
r->req.aiocb = NULL;
+ /* ret > 0 is accounted for in scsi_disk_req_check_error() */
if (ret < 0) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
- } else {
+ } else if (ret == 0) {
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
scsi_write_complete_noio(r, ret);
@@ -575,6 +599,7 @@ static void scsi_write_data(SCSIRequest *req)
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
+ BlockCompletionFunc *cb;
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
@@ -600,19 +625,17 @@ static void scsi_write_data(SCSIRequest *req)
if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
r->req.cmd.buf[0] == VERIFY_16) {
- if (r->req.sg) {
- scsi_dma_complete_noio(r, 0);
- } else {
- scsi_write_complete_noio(r, 0);
- }
+ block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ cb = r->req.sg ? scsi_dma_complete : scsi_write_complete;
+ r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, cb, r);
return;
}
if (r->req.sg) {
dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
r->req.residual -= r->req.sg->size;
- r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
- r->req.sg, r->sector << BDRV_SECTOR_BITS,
+ r->req.aiocb = dma_blk_io(r->req.sg, r->sector << BDRV_SECTOR_BITS,
BDRV_SECTOR_SIZE,
sdc->dma_writev, r, scsi_dma_complete, r,
DMA_DIRECTION_TO_DEVICE);
@@ -2344,7 +2367,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
}
- r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
+ r->need_fua = sdc->need_fua(&r->req.cmd);
if (r->sector_count == 0) {
scsi_req_complete(&r->req, GOOD);
}
@@ -2815,26 +2838,13 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
if (ret == 0) {
if (io_hdr->host_status != SCSI_HOST_OK) {
- scsi_req_complete_failed(&r->req, io_hdr->host_status);
- scsi_req_unref(&r->req);
- return;
- }
-
- if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
+ r->req.host_status = io_hdr->host_status;
+ ret = -ENODEV;
+ } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
ret = BUSY;
} else {
ret = io_hdr->status;
}
-
- if (ret > 0) {
- if (scsi_handle_rw_error(r, ret, true)) {
- scsi_req_unref(&r->req);
- return;
- }
-
- /* Ignore error. */
- ret = 0;
- }
}
req->cb(req->cb_opaque, ret);
@@ -3103,19 +3113,57 @@ BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
{
SCSIDiskReq *r = opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
+ int flags = r->need_fua ? BDRV_REQ_FUA : 0;
+ return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, flags, cb, cb_opaque);
}
-static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
+static char *scsi_property_get_loadparm(Object *obj, Error **errp)
+{
+ return g_strdup(SCSI_DISK_BASE(obj)->loadparm);
+}
+
+static void scsi_property_set_loadparm(Object *obj, const char *value,
+ Error **errp)
+{
+ void *lp_str;
+
+ if (object_property_get_int(obj, "bootindex", NULL) < 0) {
+ error_setg(errp, "'loadparm' is only valid for boot devices");
+ return;
+ }
+
+ lp_str = g_malloc0(strlen(value) + 1);
+ if (!qdev_prop_sanitize_s390x_loadparm(lp_str, value, errp)) {
+ g_free(lp_str);
+ return;
+ }
+ SCSI_DISK_BASE(obj)->loadparm = lp_str;
+}
+
+static void scsi_property_add_specifics(DeviceClass *dc)
+{
+ ObjectClass *oc = OBJECT_CLASS(dc);
+
+ /* The loadparm property is only supported on s390x */
+ if (qemu_arch_available(QEMU_ARCH_S390X)) {
+ object_class_property_add_str(oc, "loadparm",
+ scsi_property_get_loadparm,
+ scsi_property_set_loadparm);
+ object_class_property_set_description(oc, "loadparm",
+ "load parameter (s390x only)");
+ }
+}
+
+static void scsi_disk_base_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
dc->fw_name = "disk";
- dc->reset = scsi_disk_reset;
+ device_class_set_legacy_reset(dc, scsi_disk_reset);
sdc->dma_readv = scsi_dma_readv;
sdc->dma_writev = scsi_dma_writev;
- sdc->need_fua_emulation = scsi_is_cmd_fua;
+ sdc->need_fua = scsi_is_cmd_fua;
}
static const TypeInfo scsi_disk_base_info = {
@@ -3139,12 +3187,12 @@ static const TypeInfo scsi_disk_base_info = {
DEFINE_PROP_BOOL("migrate-emulated-scsi-request", SCSIDiskState, migrate_emulated_scsi_request, true)
-static Property scsi_hd_properties[] = {
+static const Property scsi_hd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
DEFINE_PROP_BIT("removable", SCSIDiskState, features,
SCSI_DISK_F_REMOVABLE, false),
DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
- SCSI_DISK_F_DPOFUA, false),
+ SCSI_DISK_F_DPOFUA, true),
DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
@@ -3159,7 +3207,6 @@ static Property scsi_hd_properties[] = {
quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
0),
DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_scsi_disk_state = {
@@ -3177,7 +3224,7 @@ static const VMStateDescription vmstate_scsi_disk_state = {
}
};
-static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
+static void scsi_hd_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -3189,6 +3236,8 @@ static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI disk";
device_class_set_props(dc, scsi_hd_properties);
dc->vmsd = &vmstate_scsi_disk_state;
+
+ scsi_property_add_specifics(dc);
}
static const TypeInfo scsi_hd_info = {
@@ -3197,7 +3246,7 @@ static const TypeInfo scsi_hd_info = {
.class_init = scsi_hd_class_initfn,
};
-static Property scsi_cd_properties[] = {
+static const Property scsi_cd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
@@ -3215,10 +3264,9 @@ static Property scsi_cd_properties[] = {
0),
DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
- DEFINE_PROP_END_OF_LIST(),
};
-static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
+static void scsi_cd_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -3229,6 +3277,8 @@ static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
dc->desc = "virtual SCSI CD-ROM";
device_class_set_props(dc, scsi_cd_properties);
dc->vmsd = &vmstate_scsi_disk_state;
+
+ scsi_property_add_specifics(dc);
}
static const TypeInfo scsi_cd_info = {
@@ -3238,7 +3288,7 @@ static const TypeInfo scsi_cd_info = {
};
#ifdef __linux__
-static Property scsi_block_properties[] = {
+static const Property scsi_block_properties[] = {
DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
@@ -3251,10 +3301,9 @@ static Property scsi_block_properties[] = {
-1),
DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
DEFAULT_IO_TIMEOUT),
- DEFINE_PROP_END_OF_LIST(),
};
-static void scsi_block_class_initfn(ObjectClass *klass, void *data)
+static void scsi_block_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -3266,7 +3315,7 @@ static void scsi_block_class_initfn(ObjectClass *klass, void *data)
sdc->dma_readv = scsi_block_dma_readv;
sdc->dma_writev = scsi_block_dma_writev;
sdc->update_sense = scsi_block_update_sense;
- sdc->need_fua_emulation = scsi_block_no_fua;
+ sdc->need_fua = scsi_block_no_fua;
dc->desc = "SCSI block device passthrough";
device_class_set_props(dc, scsi_block_properties);
dc->vmsd = &vmstate_scsi_disk_state;
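Sketch (not part of the patch): with the scsi-disk.c changes above, a write that asks for Force Unit Access no longer triggers a separate blk_aio_flush() from the removed scsi_write_do_fua(); the need_fua flag becomes BDRV_REQ_FUA on the write itself and the block layer provides the durability guarantee. The shape of the call, with illustrative parameter names:

#include "qemu/osdep.h"
#include "system/block-backend.h"

static BlockAIOCB *demo_scsi_write(BlockBackend *blk, int64_t offset,
                                   QEMUIOVector *qiov, bool need_fua,
                                   BlockCompletionFunc *cb, void *opaque)
{
    BdrvRequestFlags flags = need_fua ? BDRV_REQ_FUA : 0;

    /* Data and FUA semantics travel in one request; no follow-up flush */
    return blk_aio_pwritev(blk, offset, qiov, flags, cb, opaque);
}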
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index ee945f8..9e380a2 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -21,7 +21,7 @@
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "trace.h"
#ifdef __linux__
@@ -772,12 +772,11 @@ static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}
-static Property scsi_generic_properties[] = {
+static const Property scsi_generic_properties[] = {
DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
DEFAULT_IO_TIMEOUT),
- DEFINE_PROP_END_OF_LIST(),
};
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
@@ -787,7 +786,7 @@ static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private);
}
-static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
+static void scsi_generic_class_initfn(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
@@ -797,7 +796,7 @@ static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
sc->parse_cdb = scsi_generic_parse_cdb;
dc->fw_name = "disk";
dc->desc = "pass through generic scsi device (/dev/sg*)";
- dc->reset = scsi_generic_reset;
+ device_class_set_legacy_reset(dc, scsi_generic_reset);
device_class_set_props(dc, scsi_generic_properties);
dc->vmsd = &vmstate_scsi_device;
}
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index c75a6c8..20f70fb 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -1250,9 +1250,8 @@ static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
return 0;
}
-static Property spapr_vscsi_properties[] = {
+static const Property spapr_vscsi_properties[] = {
DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev),
- DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_spapr_vscsi = {
@@ -1268,7 +1267,7 @@ static const VMStateDescription vmstate_spapr_vscsi = {
},
};
-static void spapr_vscsi_class_init(ObjectClass *klass, void *data)
+static void spapr_vscsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c
index 4c86370..43525ba 100644
--- a/hw/scsi/vhost-scsi-common.c
+++ b/hw/scsi/vhost-scsi-common.c
@@ -101,24 +101,25 @@ err_host_notifiers:
return ret;
}
-void vhost_scsi_common_stop(VHostSCSICommon *vsc)
+int vhost_scsi_common_stop(VHostSCSICommon *vsc)
{
VirtIODevice *vdev = VIRTIO_DEVICE(vsc);
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret = 0;
- vhost_dev_stop(&vsc->dev, vdev, true);
+ ret = vhost_dev_stop(&vsc->dev, vdev, true);
if (k->set_guest_notifiers) {
- ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
- if (ret < 0) {
- error_report("vhost guest notifier cleanup failed: %d", ret);
+ int r = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
+ if (r < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ return r;
}
}
- assert(ret >= 0);
vhost_dev_disable_notifiers(&vsc->dev, vdev);
+ return ret;
}
uint64_t vhost_scsi_common_get_features(VirtIODevice *vdev, uint64_t features,
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index ae26bc1..cdf405b 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -29,7 +29,7 @@
#include "hw/fw-path-provider.h"
#include "hw/qdev-properties.h"
#include "qemu/cutils.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
@@ -38,6 +38,8 @@ static const int kernel_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
+ VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -112,7 +114,7 @@ static void vhost_scsi_stop(VHostSCSI *s)
vhost_scsi_common_stop(vsc);
}
-static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
+static int vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
{
VHostSCSI *s = VHOST_SCSI(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
@@ -123,7 +125,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
}
if (vhost_dev_is_started(&vsc->dev) == start) {
- return;
+ return 0;
}
if (start) {
@@ -137,6 +139,7 @@ static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val)
} else {
vhost_scsi_stop(s);
}
+ return 0;
}
static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -170,7 +173,7 @@ static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
struct vhost_dev *dev = &vsc->dev;
struct vhost_vring_worker vq_worker;
struct vhost_worker_state worker;
- int i, ret;
+ int i, ret = 0;
/* Use default worker */
if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
@@ -312,7 +315,6 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
if (vhostfd >= 0) {
close(vhostfd);
}
- return;
}
static void vhost_scsi_unrealize(DeviceState *dev)
@@ -341,7 +343,7 @@ static struct vhost_dev *vhost_scsi_get_vhost(VirtIODevice *vdev)
return &vsc->dev;
}
-static Property vhost_scsi_properties[] = {
+static const Property vhost_scsi_properties[] = {
DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd),
DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn),
DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
@@ -357,13 +359,15 @@ static Property vhost_scsi_properties[] = {
DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features,
VIRTIO_SCSI_F_T10_PI,
false),
+ DEFINE_PROP_BIT64("hotplug", VHostSCSICommon, host_features,
+ VIRTIO_SCSI_F_HOTPLUG,
+ false),
DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon,
conf.worker_per_virtqueue, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void vhost_scsi_class_init(ObjectClass *klass, void *data)
+static void vhost_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -397,7 +401,7 @@ static const TypeInfo vhost_scsi_info = {
.instance_size = sizeof(VHostSCSI),
.class_init = vhost_scsi_class_init,
.instance_init = vhost_scsi_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index a63b1f4..25f2d89 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -27,7 +27,7 @@
#include "hw/virtio/vhost-user-scsi.h"
#include "hw/virtio/virtio.h"
#include "chardev/char-fe.h"
-#include "sysemu/sysemu.h"
+#include "system/system.h"
/* Features supported by the host application */
static const int user_feature_bits[] = {
@@ -36,6 +36,8 @@ static const int user_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_SCSI_F_HOTPLUG,
VIRTIO_F_RING_RESET,
+ VIRTIO_F_IN_ORDER,
+ VIRTIO_F_NOTIFICATION_DATA,
VHOST_INVALID_FEATURE_BIT
};
@@ -50,19 +52,19 @@ static int vhost_user_scsi_start(VHostUserSCSI *s, Error **errp)
return ret;
}
-static void vhost_user_scsi_stop(VHostUserSCSI *s)
+static int vhost_user_scsi_stop(VHostUserSCSI *s)
{
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
if (!s->started_vu) {
- return;
+ return 0;
}
s->started_vu = false;
- vhost_scsi_common_stop(vsc);
+ return vhost_scsi_common_stop(vsc);
}
-static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
+static int vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserSCSI *s = (VHostUserSCSI *)vdev;
DeviceState *dev = DEVICE(vdev);
@@ -73,11 +75,11 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
int ret;
if (!s->connected) {
- return;
+ return -1;
}
if (vhost_dev_is_started(&vsc->dev) == should_start) {
- return;
+ return 0;
}
if (should_start) {
@@ -89,8 +91,12 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
qemu_chr_fe_disconnect(&vs->conf.chardev);
}
} else {
- vhost_user_scsi_stop(s);
+ ret = vhost_user_scsi_stop(s);
+ if (ret) {
+ return ret;
+ }
}
+ return 0;
}
static void vhost_user_scsi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -181,7 +187,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
if (!s->connected) {
- return;
+ goto done;
}
s->connected = false;
@@ -189,6 +195,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
vhost_dev_cleanup(&vsc->dev);
+done:
/* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
vhost_user_scsi_event, NULL, dev, NULL, true);
@@ -214,8 +221,7 @@ static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
case CHR_EVENT_CLOSED:
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
- vhost_user_scsi_disconnect,
- vhost_user_scsi_event);
+ vhost_user_scsi_disconnect);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@@ -339,7 +345,7 @@ static void vhost_user_scsi_unrealize(DeviceState *dev)
virtio_scsi_common_unrealize(dev);
}
-static Property vhost_user_scsi_properties[] = {
+static const Property vhost_user_scsi_properties[] = {
DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev),
DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues,
@@ -358,7 +364,6 @@ static Property vhost_user_scsi_properties[] = {
DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features,
VIRTIO_SCSI_F_T10_PI,
false),
- DEFINE_PROP_END_OF_LIST(),
};
static void vhost_user_scsi_reset(VirtIODevice *vdev)
@@ -385,7 +390,7 @@ static const VMStateDescription vmstate_vhost_scsi = {
},
};
-static void vhost_user_scsi_class_init(ObjectClass *klass, void *data)
+static void vhost_user_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -421,7 +426,7 @@ static const TypeInfo vhost_user_scsi_info = {
.instance_size = sizeof(VHostUserSCSI),
.class_init = vhost_user_scsi_class_init,
.instance_init = vhost_user_scsi_instance_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_FW_PATH_PROVIDER },
{ }
},
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 2806a12..95f13fb 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -15,9 +15,10 @@
#include "qapi/error.h"
#include "hw/virtio/virtio-scsi.h"
#include "qemu/error-report.h"
-#include "sysemu/block-backend.h"
+#include "system/block-backend.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
+#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-bus.h"
/* Context: BQL held */
@@ -28,7 +29,14 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
- if (vs->conf.iothread) {
+ if (vs->conf.iothread && vs->conf.iothread_vq_mapping_list) {
+ error_setg(errp,
+ "iothread and iothread-vq-mapping properties cannot be set "
+ "at the same time");
+ return;
+ }
+
+ if (vs->conf.iothread || vs->conf.iothread_vq_mapping_list) {
if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
error_setg(errp,
"device is incompatible with iothread "
@@ -39,15 +47,64 @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp)
error_setg(errp, "ioeventfd is required for iothread");
return;
}
- s->ctx = iothread_get_aio_context(vs->conf.iothread);
- } else {
- if (!virtio_device_ioeventfd_enabled(vdev)) {
+ }
+
+ s->vq_aio_context = g_new(AioContext *, vs->conf.num_queues +
+ VIRTIO_SCSI_VQ_NUM_FIXED);
+
+ /*
+ * Handle the ctrl virtqueue in the main loop thread where device resets
+ * can be performed.
+ */
+ s->vq_aio_context[0] = qemu_get_aio_context();
+
+ /*
+ * Handle the event virtqueue in the main loop thread where its no_poll
+ * behavior won't stop IOThread polling.
+ */
+ s->vq_aio_context[1] = qemu_get_aio_context();
+
+ if (vs->conf.iothread_vq_mapping_list) {
+ if (!iothread_vq_mapping_apply(vs->conf.iothread_vq_mapping_list,
+ &s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED],
+ vs->conf.num_queues, errp)) {
+ g_free(s->vq_aio_context);
+ s->vq_aio_context = NULL;
return;
}
- s->ctx = qemu_get_aio_context();
+ } else if (vs->conf.iothread) {
+ AioContext *ctx = iothread_get_aio_context(vs->conf.iothread);
+ for (uint16_t i = 0; i < vs->conf.num_queues; i++) {
+ s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx;
+ }
+
+ /* Released in virtio_scsi_dataplane_cleanup() */
+ object_ref(OBJECT(vs->conf.iothread));
+ } else {
+ AioContext *ctx = qemu_get_aio_context();
+ for (unsigned i = 0; i < vs->conf.num_queues; i++) {
+ s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i] = ctx;
+ }
}
}
+/* Context: BQL held */
+void virtio_scsi_dataplane_cleanup(VirtIOSCSI *s)
+{
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+
+ if (vs->conf.iothread_vq_mapping_list) {
+ iothread_vq_mapping_cleanup(vs->conf.iothread_vq_mapping_list);
+ }
+
+ if (vs->conf.iothread) {
+ object_unref(OBJECT(vs->conf.iothread));
+ }
+
+ g_free(s->vq_aio_context);
+ s->vq_aio_context = NULL;
+}
+
static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
@@ -66,31 +123,20 @@ static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n)
}
/* Context: BH in IOThread */
-static void virtio_scsi_dataplane_stop_bh(void *opaque)
+static void virtio_scsi_dataplane_stop_vq_bh(void *opaque)
{
- VirtIOSCSI *s = opaque;
- VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ AioContext *ctx = qemu_get_current_aio_context();
+ VirtQueue *vq = opaque;
EventNotifier *host_notifier;
- int i;
- virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx);
- host_notifier = virtio_queue_get_host_notifier(vs->ctrl_vq);
+ virtio_queue_aio_detach_host_notifier(vq, ctx);
+ host_notifier = virtio_queue_get_host_notifier(vq);
/*
* Test and clear notifier after disabling event, in case poll callback
* didn't have time to run.
*/
virtio_queue_host_notifier_read(host_notifier);
-
- virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx);
- host_notifier = virtio_queue_get_host_notifier(vs->event_vq);
- virtio_queue_host_notifier_read(host_notifier);
-
- for (i = 0; i < vs->conf.num_queues; i++) {
- virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx);
- host_notifier = virtio_queue_get_host_notifier(vs->cmd_vqs[i]);
- virtio_queue_host_notifier_read(host_notifier);
- }
}
/* Context: BQL held */
@@ -154,11 +200,14 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
smp_wmb(); /* paired with aio_notify_accept() */
if (s->bus.drain_count == 0) {
- virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
- virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
+ virtio_queue_aio_attach_host_notifier(vs->ctrl_vq,
+ s->vq_aio_context[0]);
+ virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq,
+ s->vq_aio_context[1]);
for (i = 0; i < vs->conf.num_queues; i++) {
- virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
+ virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], ctx);
}
}
return 0;
@@ -207,7 +256,11 @@ void virtio_scsi_dataplane_stop(VirtIODevice *vdev)
s->dataplane_stopping = true;
if (s->bus.drain_count == 0) {
- aio_wait_bh_oneshot(s->ctx, virtio_scsi_dataplane_stop_bh, s);
+ for (i = 0; i < vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED; i++) {
+ VirtQueue *vq = virtio_get_queue(&vs->parent_obj, i);
+ AioContext *ctx = s->vq_aio_context[i];
+ aio_wait_bh_oneshot(ctx, virtio_scsi_dataplane_stop_vq_bh, vq);
+ }
}
blk_drain_all(); /* ensure there are no in-flight requests */
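
In the stop path above, each virtqueue's host notifier has to be detached from the AioContext that was polling it, so the detach runs as a BH in that context and aio_wait_bh_oneshot() blocks the caller (BQL held) until the BH has finished. A sketch of the per-queue pattern, assuming a QEMUBHFunc that takes the VirtQueue as its opaque argument (hypothetical helper name):

static void virtio_scsi_run_in_each_vq_ctx(VirtIOSCSI *s, QEMUBHFunc *fn)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    uint32_t total = vs->conf.num_queues + VIRTIO_SCSI_VQ_NUM_FIXED;

    for (uint32_t i = 0; i < total; i++) {
        VirtQueue *vq = virtio_get_queue(VIRTIO_DEVICE(s), i);

        /* Blocks until fn(vq) has run in the queue's AioContext */
        aio_wait_bh_oneshot(s->vq_aio_context[i], fn, vq);
    }
}
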
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 9f02cee..34ae14f 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -22,11 +22,12 @@
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
-#include "sysemu/block-backend.h"
-#include "sysemu/dma.h"
+#include "system/block-backend.h"
+#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
+#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"
@@ -47,7 +48,7 @@ typedef struct VirtIOSCSIReq {
/* Used for two-stage request submission and TMFs deferred to BH */
QTAILQ_ENTRY(VirtIOSCSIReq) next;
- /* Used for cancellation of request during TMFs */
+ /* Used for cancellation of request during TMFs. Atomic. */
int remaining;
SCSIRequest *sreq;
@@ -102,13 +103,18 @@ static void virtio_scsi_free_req(VirtIOSCSIReq *req)
g_free(req);
}
-static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
+static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
VirtIOSCSI *s = req->dev;
VirtQueue *vq = req->vq;
VirtIODevice *vdev = VIRTIO_DEVICE(s);
qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
+
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
+
virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
if (s->dataplane_started && !s->dataplane_fenced) {
virtio_notify_irqfd(vdev, vq);
@@ -116,6 +122,10 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_notify(vdev, vq);
}
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
+ }
+
if (req->sreq) {
req->sreq->hba_private = NULL;
scsi_req_unref(req->sreq);
@@ -123,34 +133,20 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
virtio_scsi_free_req(req);
}
-static void virtio_scsi_complete_req_bh(void *opaque)
+static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
- VirtIOSCSIReq *req = opaque;
+ virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
- virtio_scsi_complete_req(req);
-}
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
-/*
- * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
- * thread cannot touch the virtqueue since that could race with an IOThread.
- */
-static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
-{
- VirtIOSCSI *s = req->dev;
+ virtqueue_detach_element(req->vq, &req->elem, 0);
- if (!s->ctx || s->ctx == qemu_get_aio_context()) {
- /* No need to schedule a BH when there is no IOThread */
- virtio_scsi_complete_req(req);
- } else {
- /* Run request completion in the IOThread */
- aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
}
-}
-static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
-{
- virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
- virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
}
@@ -235,12 +231,21 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
return 0;
}
-static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
+static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock)
{
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
VirtIOSCSIReq *req;
+ if (vq_lock) {
+ qemu_mutex_lock(vq_lock);
+ }
+
req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+
+ if (vq_lock) {
+ qemu_mutex_unlock(vq_lock);
+ }
+
if (!req) {
return NULL;
}
@@ -294,137 +299,157 @@ typedef struct {
VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;
+static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf)
+{
+ if (qatomic_fetch_dec(&tmf->remaining) == 1) {
+ trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun),
+ tmf->req.tmf.tag, tmf->resp.tmf.response);
+
+ virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock);
+ }
+}
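
The remaining counter replaces the old deferred-BH list: it is kept at least at 1 while a TMF is being dispatched, so cancel notifiers that fire early cannot complete the TMF prematurely; each deferred piece of work takes another reference, and whoever performs the final decrement completes the request (safe from any AioContext because completion takes ctrl_lock). A hedged sketch of the fan-out pattern, with a hypothetical function name:

static void tmf_fan_out_sketch(VirtIOSCSIReq *tmf, AioContext **ctxs, size_t n)
{
    qatomic_inc(&tmf->remaining);               /* dispatcher's reference */

    for (size_t i = 0; i < n; i++) {
        /* Each BH owns one reference, dropped by the BH's final decrement */
        qatomic_inc(&tmf->remaining);
        aio_bh_schedule_oneshot(ctxs[i], virtio_scsi_do_tmf_aio_context, tmf);
    }

    /* Drop the dispatcher's reference; the last decrement completes the TMF */
    virtio_scsi_tmf_dec_remaining(tmf);
}
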
+
static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
VirtIOSCSICancelNotifier *n = container_of(notifier,
VirtIOSCSICancelNotifier,
notifier);
- if (--n->tmf_req->remaining == 0) {
- VirtIOSCSIReq *req = n->tmf_req;
-
- trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
- req->req.tmf.tag, req->resp.tmf.response);
- virtio_scsi_complete_req(req);
- }
+ virtio_scsi_tmf_dec_remaining(n->tmf_req);
g_free(n);
}
-static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
+static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
{
- if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
- assert(blk_get_aio_context(d->conf.blk) == s->ctx);
- }
+ VirtIOSCSICancelNotifier *notifier;
+
+ assert(r->ctx == qemu_get_current_aio_context());
+
+ /* Decremented in virtio_scsi_cancel_notify() */
+ qatomic_inc(&tmf->remaining);
+
+ notifier = g_new(VirtIOSCSICancelNotifier, 1);
+ notifier->notifier.notify = virtio_scsi_cancel_notify;
+ notifier->tmf_req = tmf;
+ scsi_req_cancel_async(r, &notifier->notifier);
}
-static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
+/* Execute a TMF on the requests in the current AioContext */
+static void virtio_scsi_do_tmf_aio_context(void *opaque)
{
- VirtIOSCSI *s = req->dev;
- SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
- BusChild *kid;
- int target;
+ AioContext *ctx = qemu_get_current_aio_context();
+ VirtIOSCSIReq *tmf = opaque;
+ VirtIOSCSI *s = tmf->dev;
+ SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun);
+ SCSIRequest *r;
+ bool match_tag;
- switch (req->req.tmf.subtype) {
- case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
- if (!d) {
- req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
- goto out;
- }
- if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
- req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
- goto out;
- }
- qatomic_inc(&s->resetting);
- device_cold_reset(&d->qdev);
- qatomic_dec(&s->resetting);
+ if (!d) {
+ tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ virtio_scsi_tmf_dec_remaining(tmf);
+ return;
+ }
+
+ /*
+ * This function could handle other subtypes that need to be processed in
+ * the request's AioContext in the future, but for now only request
+ * cancellation subtypes are handled here.
+ */
+ switch (tmf->req.tmf.subtype) {
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+ match_tag = true;
+ break;
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+ case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+ match_tag = false;
break;
+ default:
+ g_assert_not_reached();
+ }
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- target = req->req.tmf.lun[1];
- qatomic_inc(&s->resetting);
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH(r, &d->requests, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+ assert(cmd_req); /* request has hba_private while enqueued */
- rcu_read_lock();
- QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
- SCSIDevice *d1 = SCSI_DEVICE(kid->child);
- if (d1->channel == 0 && d1->id == target) {
- device_cold_reset(&d1->qdev);
+ if (r->ctx != ctx) {
+ continue;
+ }
+ if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) {
+ continue;
}
+ virtio_scsi_tmf_cancel_req(tmf, r);
}
- rcu_read_unlock();
-
- qatomic_dec(&s->resetting);
- break;
-
- default:
- g_assert_not_reached();
- break;
}
-out:
- object_unref(OBJECT(d));
- virtio_scsi_complete_req_from_main_loop(req);
+ /* Incremented by virtio_scsi_do_tmf() */
+ virtio_scsi_tmf_dec_remaining(tmf);
+
+ object_unref(d);
}
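
Dispatching the TMF into each request's own AioContext works because the subtype only changes which requests are in scope: ABORT_TASK cancels the single request whose command tag matches, while ABORT_TASK_SET and CLEAR_TASK_SET cancel every enqueued request on the logical unit. A hypothetical predicate (not in the patch) restating that mapping:

static bool tmf_covers_request(const VirtIOSCSIReq *tmf,
                               const VirtIOSCSIReq *cmd_req)
{
    switch (tmf->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
        /* Only the command whose tag matches the TMF's tag */
        return cmd_req->req.cmd.tag == tmf->req.tmf.tag;
    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
        /* Every enqueued command on the logical unit */
        return true;
    default:
        return false;
    }
}
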
-/* Some TMFs must be processed from the main loop thread */
-static void virtio_scsi_do_tmf_bh(void *opaque)
+static void dummy_bh(void *opaque)
{
- VirtIOSCSI *s = opaque;
- QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
- VirtIOSCSIReq *req;
- VirtIOSCSIReq *tmp;
+ /* Do nothing */
+}
+/*
+ * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs.
+ */
+static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
+{
GLOBAL_STATE_CODE();
- WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
- QTAILQ_INSERT_TAIL(&reqs, req, next);
- }
+ assert(!s->dataplane_started);
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
- }
+ for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
- QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
- QTAILQ_REMOVE(&reqs, req, next);
- virtio_scsi_do_one_tmf_bh(req);
+ /* Our BH only runs after previously scheduled BHs */
+ aio_wait_bh_oneshot(ctx, dummy_bh, NULL);
}
}
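
The flush works because BHs scheduled in the same AioContext run in the order they were scheduled: once a freshly queued no-op BH has executed, every TMF BH scheduled before it must already have run. A minimal sketch of that barrier idiom (hypothetical helper):

/* Wait until all BHs previously scheduled in @ctx have run, by queueing a
 * no-op behind them and waiting for it. */
static void aio_context_bh_barrier(AioContext *ctx)
{
    aio_wait_bh_oneshot(ctx, dummy_bh, NULL);
}
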
-static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
+/*
+ * Run the TMF in a specific AioContext, handling only requests in that
+ * AioContext. This is necessary because requests can run in different
+ * AioContext and it is only possible to cancel them from the AioContext where
+ * they are running.
+ */
+static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf,
+ AioContext *ctx)
{
- VirtIOSCSIReq *req;
- VirtIOSCSIReq *tmp;
+ /* Decremented in virtio_scsi_do_tmf_aio_context() */
+ qatomic_inc(&tmf->remaining);
- GLOBAL_STATE_CODE();
-
- /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
- if (s->tmf_bh) {
- qemu_bh_delete(s->tmf_bh);
- s->tmf_bh = NULL;
- }
-
- QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
- QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
-
- /* SAM-6 6.3.2 Hard reset */
- req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
- virtio_scsi_complete_req(req);
- }
+ /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */
+ aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf);
}
-static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
+/*
+ * Returns the AioContext for a given TMF's tag field or NULL. Note that the
+ * request identified by the tag may have completed by the time a BH runs in
+ * that AioContext, so don't assume the request still exists in your BH.
+ */
+static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d,
+ VirtIOSCSIReq *tmf)
{
- VirtIOSCSI *s = req->dev;
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ SCSIRequest *r;
+ SCSIRequest *next;
+
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
- WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
- QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
+ /* hba_private is non-NULL while the request is enqueued */
+ assert(cmd_req);
- if (!s->tmf_bh) {
- s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
- qemu_bh_schedule(s->tmf_bh);
+ if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) {
+ return r->ctx;
+ }
}
}
+ return NULL;
}
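
Lookups like this one walk the device's request list under d->requests_lock using WITH_QEMU_LOCK_GUARD(), which releases the mutex on any exit from the guarded block, including the early return above. A small sketch of the same pattern with a hypothetical predicate:

static bool device_has_request_with_tag(SCSIDevice *d, uint64_t tag)
{
    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
        SCSIRequest *r;

        QTAILQ_FOREACH(r, &d->requests, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;

            if (cmd_req && cmd_req->req.cmd.tag == tag) {
                return true; /* the guard also unlocks requests_lock here */
            }
        }
    }
    return false;
}
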
/* Return 0 if the request is ready to be completed and return to guest;
@@ -434,9 +459,9 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
SCSIRequest *r, *next;
+ AioContext *ctx;
int ret = 0;
- virtio_scsi_ctx_check(s, d);
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
req->resp.tmf.response = VIRTIO_SCSI_S_OK;
@@ -451,7 +476,22 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
req->req.tmf.tag, req->req.tmf.subtype);
switch (req->req.tmf.subtype) {
- case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK: {
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+
+ ctx = find_aio_context_for_tmf_tag(d, req);
+ if (ctx) {
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+ ret = -EINPROGRESS;
+ }
+ break;
+ }
+
case VIRTIO_SCSI_T_TMF_QUERY_TASK:
if (!d) {
goto fail;
@@ -459,44 +499,82 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
goto incorrect_lun;
}
- QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
- VirtIOSCSIReq *cmd_req = r->hba_private;
- if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
- break;
+
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH(r, &d->requests, next) {
+ VirtIOSCSIReq *cmd_req = r->hba_private;
+ assert(cmd_req); /* request has hba_private while enqueued */
+
+ if (cmd_req->req.cmd.tag == req->req.tmf.tag) {
+ /*
+ * "If the specified command is present in the task set,
+ * then return a service response set to FUNCTION
+ * SUCCEEDED".
+ */
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ }
}
}
- if (r) {
- /*
- * Assert that the request has not been completed yet, we
- * check for it in the loop above.
- */
- assert(r->hba_private);
- if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
- /* "If the specified command is present in the task set, then
- * return a service response set to FUNCTION SUCCEEDED".
- */
- req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- } else {
- VirtIOSCSICancelNotifier *notifier;
-
- req->remaining = 1;
- notifier = g_new(VirtIOSCSICancelNotifier, 1);
- notifier->tmf_req = req;
- notifier->notifier.notify = virtio_scsi_cancel_notify;
- scsi_req_cancel_async(r, &notifier->notifier);
- ret = -EINPROGRESS;
+ break;
+
+ case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+ qatomic_inc(&s->resetting);
+ device_cold_reset(&d->qdev);
+ qatomic_dec(&s->resetting);
+ break;
+
+ case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: {
+ BusChild *kid;
+ int target = req->req.tmf.lun[1];
+ qatomic_inc(&s->resetting);
+
+ rcu_read_lock();
+ QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *d1 = SCSI_DEVICE(kid->child);
+ if (d1->channel == 0 && d1->id == target) {
+ device_cold_reset(&d1->qdev);
}
}
+ rcu_read_unlock();
+
+ qatomic_dec(&s->resetting);
break;
+ }
- case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
- case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
- virtio_scsi_defer_tmf_to_bh(req);
+ case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+ case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
+ g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+
+ if (!d) {
+ goto fail;
+ }
+ if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+ goto incorrect_lun;
+ }
+
+ qatomic_inc(&req->remaining);
+
+ for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
+ ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
+
+ if (!g_hash_table_add(aio_contexts, ctx)) {
+ continue; /* skip previously added AioContext */
+ }
+
+ virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+ }
+
+ virtio_scsi_tmf_dec_remaining(req);
ret = -EINPROGRESS;
break;
+ }
- case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
- case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
if (!d) {
goto fail;
@@ -505,34 +583,19 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
goto incorrect_lun;
}
- /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
- * This way, if the bus starts calling back to the notifiers
- * even before we finish the loop, virtio_scsi_cancel_notify
- * will not complete the TMF too early.
- */
- req->remaining = 1;
- QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
- if (r->hba_private) {
- if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
- /* "If there is any command present in the task set, then
- * return a service response set to FUNCTION SUCCEEDED".
- */
- req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
- break;
- } else {
- VirtIOSCSICancelNotifier *notifier;
-
- req->remaining++;
- notifier = g_new(VirtIOSCSICancelNotifier, 1);
- notifier->notifier.notify = virtio_scsi_cancel_notify;
- notifier->tmf_req = req;
- scsi_req_cancel_async(r, &notifier->notifier);
- }
+ WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+ QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+ /* Request has hba_private while enqueued */
+ assert(r->hba_private);
+
+ /*
+ * "If there is any command present in the task set, then
+ * return a service response set to FUNCTION SUCCEEDED".
+ */
+ req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+ break;
}
}
- if (--req->remaining > 0) {
- ret = -EINPROGRESS;
- }
break;
case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
@@ -563,7 +626,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
&type, sizeof(type)) < sizeof(type)) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
}
@@ -571,7 +634,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
if (type == VIRTIO_SCSI_T_TMF) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
} else {
r = virtio_scsi_do_tmf(s, req);
@@ -581,7 +644,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
sizeof(VirtIOSCSICtrlANResp)) < 0) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->ctrl_lock);
return;
} else {
req->req.an.event_requested =
@@ -601,7 +664,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
req->resp.an.response);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &s->ctrl_lock);
} else {
assert(r == -EINPROGRESS);
}
@@ -611,7 +674,7 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req;
- while ((req = virtio_scsi_pop_req(s, vq))) {
+ while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) {
virtio_scsi_handle_ctrl_req(s, req);
}
}
@@ -626,9 +689,12 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
*/
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
- if (!s->ctx || s->dataplane_started) {
+ if (s->dataplane_started) {
return false;
}
+ if (s->vq_aio_context[0] == qemu_get_aio_context()) {
+ return false; /* not using IOThreads */
+ }
virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
return !s->dataplane_fenced;
@@ -655,7 +721,7 @@ static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
* in virtio_scsi_command_complete.
*/
req->resp_size = sizeof(VirtIOSCSICmdResp);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, NULL);
}
static void virtio_scsi_command_failed(SCSIRequest *r)
@@ -789,7 +855,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
virtio_scsi_fail_cmd_req(req);
return -ENOTSUP;
} else {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, NULL);
return -EINVAL;
}
}
@@ -802,7 +868,6 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
virtio_scsi_complete_cmd_req(req);
return -ENOENT;
}
- virtio_scsi_ctx_check(s, d);
req->sreq = scsi_req_new(d, req->req.cmd.tag,
virtio_scsi_get_lun(req->req.cmd.lun),
req->req.cmd.cdb, vs->cdb_size, req);
@@ -844,7 +909,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
virtio_queue_set_notification(vq, 0);
}
- while ((req = virtio_scsi_pop_req(s, vq))) {
+ while ((req = virtio_scsi_pop_req(s, vq, NULL))) {
ret = virtio_scsi_handle_cmd_req_prepare(s, req);
if (!ret) {
QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -937,7 +1002,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
assert(!s->dataplane_started);
- virtio_scsi_reset_tmf_bh(s);
+ virtio_scsi_flush_defer_tmf_to_aio_context(s);
qatomic_inc(&s->resetting);
bus_cold_reset(BUS(&s->bus));
@@ -945,7 +1010,10 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
- s->events_dropped = false;
+
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ s->events_dropped = false;
+ }
}
typedef struct {
@@ -974,19 +1042,21 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
return;
}
- req = virtio_scsi_pop_req(s, vs->event_vq);
- if (!req) {
- s->events_dropped = true;
- return;
- }
+ req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ if (!req) {
+ s->events_dropped = true;
+ return;
+ }
- if (s->events_dropped) {
- event |= VIRTIO_SCSI_T_EVENTS_MISSED;
- s->events_dropped = false;
+ if (s->events_dropped) {
+ event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ s->events_dropped = false;
+ }
}
if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
- virtio_scsi_bad_req(req);
+ virtio_scsi_bad_req(req, &s->event_lock);
return;
}
@@ -1006,12 +1076,18 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
}
trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);
- virtio_scsi_complete_req(req);
+ virtio_scsi_complete_req(req, &s->event_lock);
}
static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
- if (s->events_dropped) {
+ bool events_dropped;
+
+ WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+ events_dropped = s->events_dropped;
+ }
+
+ if (events_dropped) {
VirtIOSCSIEventInfo info = {
.event = VIRTIO_SCSI_T_NO_EVENT,
};
@@ -1062,17 +1138,16 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
{
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED];
SCSIDevice *sd = SCSI_DEVICE(dev);
- int ret;
- if (s->ctx && !s->dataplane_fenced) {
- if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
- return;
- }
- ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
- if (ret < 0) {
- return;
- }
+ if (ctx != qemu_get_aio_context() && !s->dataplane_fenced) {
+ /*
+ * Try to make the BlockBackend's AioContext match ours. Ignore failure
+ * because I/O will still work although block jobs and other users
+ * might be slower when multiple AioContexts use a BlockBackend.
+ */
+ blk_set_aio_context(sd->conf.blk, ctx, NULL);
}
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
@@ -1107,7 +1182,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
- if (s->ctx) {
+ if (s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED] != qemu_get_aio_context()) {
/* If other users keep the BlockBackend in the iothread, that's ok */
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
}
@@ -1141,7 +1216,7 @@ static void virtio_scsi_drained_begin(SCSIBus *bus)
for (uint32_t i = 0; i < total_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
- virtio_queue_aio_detach_host_notifier(vq, s->ctx);
+ virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
}
}
@@ -1167,10 +1242,12 @@ static void virtio_scsi_drained_end(SCSIBus *bus)
for (uint32_t i = 0; i < total_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
+ AioContext *ctx = s->vq_aio_context[i];
+
if (vq == vs->event_vq) {
- virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
+ virtio_queue_aio_attach_host_notifier_no_poll(vq, ctx);
} else {
- virtio_queue_aio_attach_host_notifier(vq, s->ctx);
+ virtio_queue_aio_attach_host_notifier(vq, ctx);
}
}
}
@@ -1239,8 +1316,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
VirtIOSCSI *s = VIRTIO_SCSI(dev);
Error *err = NULL;
- QTAILQ_INIT(&s->tmf_bh_list);
- qemu_mutex_init(&s->tmf_bh_lock);
+ qemu_mutex_init(&s->ctrl_lock);
+ qemu_mutex_init(&s->event_lock);
virtio_scsi_common_realize(dev,
virtio_scsi_handle_ctrl,
@@ -1275,18 +1352,19 @@ void virtio_scsi_common_unrealize(DeviceState *dev)
virtio_cleanup(vdev);
}
+/* main loop */
static void virtio_scsi_device_unrealize(DeviceState *dev)
{
VirtIOSCSI *s = VIRTIO_SCSI(dev);
- virtio_scsi_reset_tmf_bh(s);
-
+ virtio_scsi_dataplane_cleanup(s);
qbus_set_hotplug_handler(BUS(&s->bus), NULL);
virtio_scsi_common_unrealize(dev);
- qemu_mutex_destroy(&s->tmf_bh_lock);
+ qemu_mutex_destroy(&s->event_lock);
+ qemu_mutex_destroy(&s->ctrl_lock);
}
-static Property virtio_scsi_properties[] = {
+static const Property virtio_scsi_properties[] = {
DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
VIRTIO_SCSI_AUTO_NUM_QUEUES),
DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
@@ -1303,7 +1381,8 @@ static Property virtio_scsi_properties[] = {
VIRTIO_SCSI_F_CHANGE, true),
DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
TYPE_IOTHREAD, IOThread *),
- DEFINE_PROP_END_OF_LIST(),
+ DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOSCSI,
+ parent_obj.conf.iothread_vq_mapping_list),
};
static const VMStateDescription vmstate_virtio_scsi = {
@@ -1316,7 +1395,7 @@ static const VMStateDescription vmstate_virtio_scsi = {
},
};
-static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
+static void virtio_scsi_common_class_init(ObjectClass *klass, const void *data)
{
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1325,7 +1404,7 @@ static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
-static void virtio_scsi_class_init(ObjectClass *klass, void *data)
+static void virtio_scsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
@@ -1359,7 +1438,7 @@ static const TypeInfo virtio_scsi_info = {
.parent = TYPE_VIRTIO_SCSI_COMMON,
.instance_size = sizeof(VirtIOSCSI),
.class_init = virtio_scsi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ }
}
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index cd7bf6a..7c98b1b 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -68,18 +68,7 @@ struct PVSCSIClass {
OBJECT_DECLARE_TYPE(PVSCSIState, PVSCSIClass, PVSCSI)
-/* Compatibility flags for migration */
-#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
-#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
- (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)
-#define PVSCSI_COMPAT_DISABLE_PCIE_BIT 1
-#define PVSCSI_COMPAT_DISABLE_PCIE \
- (1 << PVSCSI_COMPAT_DISABLE_PCIE_BIT)
-
-#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \
- ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION)
-#define PVSCSI_MSI_OFFSET(s) \
- (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c)
+#define PVSCSI_MSI_OFFSET (0x7c)
#define PVSCSI_EXP_EP_OFFSET (0x40)
typedef struct PVSCSIRingInfo {
@@ -129,8 +118,6 @@ struct PVSCSIState {
uint8_t msi_used; /* For migration compatibility */
PVSCSIRingInfo rings; /* Data transfer rings manager */
uint32_t resetting; /* Reset in progress */
-
- uint32_t compat_flags;
};
typedef struct PVSCSIRequest {
@@ -1110,7 +1097,7 @@ pvscsi_init_msi(PVSCSIState *s)
int res;
PCIDevice *d = PCI_DEVICE(s);
- res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS,
+ res = msi_init(d, PVSCSI_MSI_OFFSET, PVSCSI_MSIX_NUM_VECTORS,
PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL);
if (res < 0) {
trace_pvscsi_init_msi_fail(res);
@@ -1158,15 +1145,11 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
trace_pvscsi_state("init");
/* PCI subsystem ID, subsystem vendor ID, revision */
- if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) {
- pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000);
- } else {
- pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
- PCI_VENDOR_ID_VMWARE);
- pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
- PCI_DEVICE_ID_VMWARE_PVSCSI);
- pci_config_set_revision(pci_dev->config, 0x2);
- }
+ pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
+ PCI_VENDOR_ID_VMWARE);
+ pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
+ PCI_DEVICE_ID_VMWARE_PVSCSI);
+ pci_config_set_revision(pci_dev->config, 0x2);
/* PCI latency timer = 255 */
pci_dev->config[PCI_LATENCY_TIMER] = 0xff;
@@ -1234,21 +1217,8 @@ pvscsi_post_load(void *opaque, int version_id)
return 0;
}
-static bool pvscsi_vmstate_need_pcie_device(void *opaque)
-{
- PVSCSIState *s = PVSCSI(opaque);
-
- return !(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE);
-}
-
-static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id)
-{
- return !pvscsi_vmstate_need_pcie_device(opaque);
-}
-
static const VMStateDescription vmstate_pvscsi_pcie_device = {
.name = "pvscsi/pcie",
- .needed = pvscsi_vmstate_need_pcie_device,
.fields = (const VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
VMSTATE_END_OF_LIST()
@@ -1262,9 +1232,6 @@ static const VMStateDescription vmstate_pvscsi = {
.pre_save = pvscsi_pre_save,
.post_load = pvscsi_post_load,
.fields = (const VMStateField[]) {
- VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState,
- pvscsi_vmstate_test_pci_device, 0,
- vmstate_pci_device, PCIDevice),
VMSTATE_UINT8(msi_used, PVSCSIState),
VMSTATE_UINT32(resetting, PVSCSIState),
VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
@@ -1296,33 +1263,19 @@ static const VMStateDescription vmstate_pvscsi = {
}
};
-static Property pvscsi_properties[] = {
+static const Property pvscsi_properties[] = {
DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
- DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags,
- PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false),
- DEFINE_PROP_BIT("x-disable-pcie", PVSCSIState, compat_flags,
- PVSCSI_COMPAT_DISABLE_PCIE_BIT, false),
- DEFINE_PROP_END_OF_LIST(),
};
-static void pvscsi_realize(DeviceState *qdev, Error **errp)
+static void pvscsi_instance_init(Object *obj)
{
- PVSCSIClass *pvs_c = PVSCSI_GET_CLASS(qdev);
- PCIDevice *pci_dev = PCI_DEVICE(qdev);
- PVSCSIState *s = PVSCSI(qdev);
-
- if (!(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE)) {
- pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
- }
-
- pvs_c->parent_dc_realize(qdev, errp);
+ PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
-static void pvscsi_class_init(ObjectClass *klass, void *data)
+static void pvscsi_class_init(ObjectClass *klass, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
- PVSCSIClass *pvs_k = PVSCSI_CLASS(klass);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
k->realize = pvscsi_realizefn;
@@ -1331,9 +1284,7 @@ static void pvscsi_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
- device_class_set_parent_realize(dc, pvscsi_realize,
- &pvs_k->parent_dc_realize);
- dc->reset = pvscsi_reset;
+ device_class_set_legacy_reset(dc, pvscsi_reset);
dc->vmsd = &vmstate_pvscsi;
device_class_set_props(dc, pvscsi_properties);
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
@@ -1347,7 +1298,8 @@ static const TypeInfo pvscsi_info = {
.class_size = sizeof(PVSCSIClass),
.instance_size = sizeof(PVSCSIState),
.class_init = pvscsi_class_init,
- .interfaces = (InterfaceInfo[]) {
+ .instance_init = pvscsi_instance_init,
+ .interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ INTERFACE_PCIE_DEVICE },
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
diff --git a/hw/scsi/vmw_pvscsi.h b/hw/scsi/vmw_pvscsi.h
index 17fcf66..a3ae517 100644
--- a/hw/scsi/vmw_pvscsi.h
+++ b/hw/scsi/vmw_pvscsi.h
@@ -14,8 +14,8 @@
* details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * along with this program; if not, see
+ * <https://www.gnu.org/licenses/>.
*
* Maintained by: Arvind Kumar <arvindkumar@vmware.com>
*