Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/imx8mp-evk.c   29
-rw-r--r--  hw/core/machine.c     12
-rw-r--r--  hw/i386/amd_iommu.c    1
-rw-r--r--  hw/loongarch/virt.c   43
-rw-r--r--  hw/net/virtio-net.c   87
-rw-r--r--  hw/nvme/ctrl.c         7
-rw-r--r--  hw/nvme/ns.c           4
-rw-r--r--  hw/nvme/nvme.h         3
-rw-r--r--  hw/nvme/subsys.c       9
-rw-r--r--  hw/scsi/scsi-disk.c   39
-rw-r--r--  hw/smbios/smbios.c     3
-rw-r--r--  hw/ufs/ufs.c           4
12 files changed, 130 insertions, 111 deletions
diff --git a/hw/arm/imx8mp-evk.c b/hw/arm/imx8mp-evk.c
index f17d5db..b5aec06e 100644
--- a/hw/arm/imx8mp-evk.c
+++ b/hw/arm/imx8mp-evk.c
@@ -15,6 +15,34 @@
#include "system/qtest.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
+#include <libfdt.h>
+
+static void imx8mp_evk_modify_dtb(const struct arm_boot_info *info, void *fdt)
+{
+ int i, offset;
+
+ /* Temporarily disable the following nodes until they are implemented */
+ const char *nodes_to_remove[] = {
+ "nxp,imx8mp-fspi",
+ };
+
+ for (i = 0; i < ARRAY_SIZE(nodes_to_remove); i++) {
+ const char *dev_str = nodes_to_remove[i];
+
+ offset = fdt_node_offset_by_compatible(fdt, -1, dev_str);
+ while (offset >= 0) {
+ fdt_nop_node(fdt, offset);
+ offset = fdt_node_offset_by_compatible(fdt, offset, dev_str);
+ }
+ }
+
+ /* Remove cpu-idle-states property from CPU nodes */
+ offset = fdt_node_offset_by_compatible(fdt, -1, "arm,cortex-a53");
+ while (offset >= 0) {
+ fdt_nop_property(fdt, offset, "cpu-idle-states");
+ offset = fdt_node_offset_by_compatible(fdt, offset, "arm,cortex-a53");
+ }
+}
static void imx8mp_evk_init(MachineState *machine)
{
@@ -32,6 +60,7 @@ static void imx8mp_evk_init(MachineState *machine)
.board_id = -1,
.ram_size = machine->ram_size,
.psci_conduit = QEMU_PSCI_CONDUIT_SMC,
+ .modify_dtb = imx8mp_evk_modify_dtb,
};
s = FSL_IMX8MP(object_new(TYPE_FSL_IMX8MP));
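(For context: the modify_dtb hook wired up above is consumed by QEMU's generic ARM boot code. A minimal sketch of the call site, paraphrased from hw/arm/boot.c rather than taken from this patch, so treat the exact shape as an assumption:)

    /* Inside arm_load_dtb(), once the DTB has been loaded or generated: */
    if (binfo->modify_dtb) {
        /* Board-specific fixups, e.g. imx8mp_evk_modify_dtb() above,
         * run before the blob is committed to guest RAM. */
        binfo->modify_dtb(binfo, fdt);
    }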
diff --git a/hw/core/machine.c b/hw/core/machine.c
index f52a4f2..63c6ef9 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -1731,12 +1731,6 @@ void qdev_machine_creation_done(void)
phase_advance(PHASE_MACHINE_READY);
qdev_assert_realized_properly();
- /*
- * If the user used -machine dumpdtb=file.dtb to request that we
- * dump the DTB to a file, do it now, and exit.
- */
- handle_machine_dumpdtb(current_machine);
-
/* TODO: once all bus devices are qdevified, this should be done
* when bus is created by qdev.c */
/*
@@ -1750,6 +1744,12 @@ void qdev_machine_creation_done(void)
notifier_list_notify(&machine_init_done_notifiers, NULL);
+ /*
+ * If the user used -machine dumpdtb=file.dtb to request that we
+ * dump the DTB to a file, do it now, and exit.
+ */
+ handle_machine_dumpdtb(current_machine);
+
if (rom_check_and_register_reset() != 0) {
exit(1);
}
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 5b21cf1..5f9b952 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -1706,6 +1706,7 @@ static void amdvi_pci_class_init(ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->vendor_id = PCI_VENDOR_ID_AMD;
+ k->device_id = 0x1419;
k->class_id = 0x0806;
k->realize = amdvi_pci_realize;
diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c
index e258642..65c9027 100644
--- a/hw/loongarch/virt.c
+++ b/hw/loongarch/virt.c
@@ -936,29 +936,15 @@ static void virt_cpu_unplug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
CPUArchId *cpu_slot;
- Error *err = NULL;
LoongArchCPU *cpu = LOONGARCH_CPU(dev);
LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
/* Notify ipi and extioi irqchip to remove interrupt routing to CPU */
- hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->ipi), dev, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
-
- hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->extioi), dev, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
+ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort);
+ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort);
/* Notify acpi ged CPU removed */
- hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
+ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &error_abort);
cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
cpu_slot->cpu = NULL;
@@ -971,33 +957,22 @@ static void virt_cpu_plug(HotplugHandler *hotplug_dev,
CPUArchId *cpu_slot;
LoongArchCPU *cpu = LOONGARCH_CPU(dev);
LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(hotplug_dev);
- Error *err = NULL;
- cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
- cpu_slot->cpu = CPU(dev);
if (lvms->ipi) {
- hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort);
}
if (lvms->extioi) {
- hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), dev, &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort);
}
if (lvms->acpi_ged) {
- hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &err);
- if (err) {
- error_propagate(errp, err);
- }
+ hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), dev,
+ &error_abort);
}
+ cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id);
+ cpu_slot->cpu = CPU(dev);
return;
}
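(A note on the pattern change above: the propagate-and-return error handling is replaced with &error_abort, presumably because these plug/unplug calls cannot fail once the irqchips exist. A minimal sketch of the two styles, using one of the calls above as the example:)

    /* Old style (removed): hand a recoverable error back to the caller. */
    Error *err = NULL;
    hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* New style (added): any failure here is a programming error, abort. */
    hotplug_handler_plug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort);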
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index de87cfa..bd37651 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1702,44 +1702,41 @@ static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
* cache.
*/
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
- size_t *hdr_len, const uint8_t *buf,
- size_t buf_size, size_t *buf_offset)
+ uint8_t *buf, size_t size)
{
size_t csum_size = ETH_HLEN + sizeof(struct ip_header) +
sizeof(struct udp_header);
- buf += *buf_offset;
- buf_size -= *buf_offset;
-
if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
- (buf_size >= csum_size && buf_size < 1500) && /* normal sized MTU */
+ (size >= csum_size && size < 1500) && /* normal sized MTU */
(buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
(buf[23] == 17) && /* ip.protocol == UDP */
(buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
- memcpy((uint8_t *)hdr + *hdr_len, buf, csum_size);
- net_checksum_calculate((uint8_t *)hdr + *hdr_len, csum_size, CSUM_UDP);
+ net_checksum_calculate(buf, size, CSUM_UDP);
hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
- *hdr_len += csum_size;
- *buf_offset += csum_size;
}
}
-static size_t receive_header(VirtIONet *n, struct virtio_net_hdr *hdr,
- const void *buf, size_t buf_size,
- size_t *buf_offset)
+static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
+ const void *buf, size_t size)
{
- size_t hdr_len = n->guest_hdr_len;
-
- memcpy(hdr, buf, sizeof(struct virtio_net_hdr));
-
- *buf_offset = n->host_hdr_len;
- work_around_broken_dhclient(hdr, &hdr_len, buf, buf_size, buf_offset);
+ if (n->has_vnet_hdr) {
+ /* FIXME this cast is evil */
+ void *wbuf = (void *)buf;
+ work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
+ size - n->host_hdr_len);
- if (n->needs_vnet_hdr_swap) {
- virtio_net_hdr_swap(VIRTIO_DEVICE(n), hdr);
+ if (n->needs_vnet_hdr_swap) {
+ virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
+ }
+ iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
+ } else {
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+ iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
}
-
- return hdr_len;
}
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
@@ -1907,13 +1904,6 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
return (index == new_index) ? -1 : new_index;
}
-typedef struct Header {
- struct virtio_net_hdr_v1_hash virtio_net;
- struct eth_header eth;
- struct ip_header ip;
- struct udp_header udp;
-} Header;
-
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
size_t size)
{
@@ -1923,15 +1913,15 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
size_t lens[VIRTQUEUE_MAX_SIZE];
struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
- Header hdr;
+ struct virtio_net_hdr_v1_hash extra_hdr;
unsigned mhdr_cnt = 0;
size_t offset, i, guest_offset, j;
ssize_t err;
- memset(&hdr.virtio_net, 0, sizeof(hdr.virtio_net));
+ memset(&extra_hdr, 0, sizeof(extra_hdr));
if (n->rss_data.enabled && n->rss_data.enabled_software_rss) {
- int index = virtio_net_process_rss(nc, buf, size, &hdr.virtio_net);
+ int index = virtio_net_process_rss(nc, buf, size, &extra_hdr);
if (index >= 0) {
nc = qemu_get_subqueue(n->nic, index % n->curr_queue_pairs);
}
@@ -1996,18 +1986,23 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
if (n->mergeable_rx_bufs) {
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
sg, elem->in_num,
- offsetof(typeof(hdr),
- virtio_net.hdr.num_buffers),
- sizeof(hdr.virtio_net.hdr.num_buffers));
+ offsetof(typeof(extra_hdr), hdr.num_buffers),
+ sizeof(extra_hdr.hdr.num_buffers));
+ } else {
+ extra_hdr.hdr.num_buffers = cpu_to_le16(1);
}
- guest_offset = n->has_vnet_hdr ?
- receive_header(n, (struct virtio_net_hdr *)&hdr,
- buf, size, &offset) :
- n->guest_hdr_len;
-
- iov_from_buf(sg, elem->in_num, 0, &hdr, guest_offset);
- total += guest_offset;
+ receive_header(n, sg, elem->in_num, buf, size);
+ if (n->rss_data.populate_hash) {
+ offset = offsetof(typeof(extra_hdr), hash_value);
+ iov_from_buf(sg, elem->in_num, offset,
+ (char *)&extra_hdr + offset,
+ sizeof(extra_hdr.hash_value) +
+ sizeof(extra_hdr.hash_report));
+ }
+ offset = n->host_hdr_len;
+ total += n->guest_hdr_len;
+ guest_offset = n->guest_hdr_len;
} else {
guest_offset = 0;
}
@@ -2033,11 +2028,11 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
}
if (mhdr_cnt) {
- virtio_stw_p(vdev, &hdr.virtio_net.hdr.num_buffers, i);
+ virtio_stw_p(vdev, &extra_hdr.hdr.num_buffers, i);
iov_from_buf(mhdr_sg, mhdr_cnt,
0,
- &hdr.virtio_net.hdr.num_buffers,
- sizeof hdr.virtio_net.hdr.num_buffers);
+ &extra_hdr.hdr.num_buffers,
+ sizeof extra_hdr.hdr.num_buffers);
}
for (j = 0; j < i; j++) {
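(For reference, the offsetof()/iov_from_buf() arithmetic above relies on the standard layout of the hash header; a sketch of that layout, per include/standard-headers/linux/virtio_net.h, with field names as used in this patch and exact types assumed from the virtio spec:)

    struct virtio_net_hdr_v1_hash {
        struct virtio_net_hdr_v1 hdr;   /* ends with le16 num_buffers        */
        __le32 hash_value;              /* copied when populate_hash is set  */
        __le16 hash_report;             /* copied together with hash_value   */
        __le16 padding;
    };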
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 518d02d..d6b77d4 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -7755,7 +7755,11 @@ static int nvme_start_ctrl(NvmeCtrl *n)
for (int i = 1; i <= NVME_MAX_NAMESPACES; i++) {
NvmeNamespace *ns = nvme_subsys_ns(n->subsys, i);
- if (ns && nvme_csi_supported(n, ns->csi) && !ns->params.detached) {
+ if (!ns || (!ns->params.shared && ns->ctrl != n)) {
+ continue;
+ }
+
+ if (nvme_csi_supported(n, ns->csi) && !ns->params.detached) {
if (!ns->attached || ns->params.shared) {
nvme_attach_ns(n, ns);
}
@@ -8988,6 +8992,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
if (n->namespace.blkconf.blk) {
ns = &n->namespace;
ns->params.nsid = 1;
+ ns->ctrl = n;
if (nvme_ns_setup(ns, errp)) {
return;
diff --git a/hw/nvme/ns.c b/hw/nvme/ns.c
index 98c1e75..4ab8ba7 100644
--- a/hw/nvme/ns.c
+++ b/hw/nvme/ns.c
@@ -763,6 +763,10 @@ static void nvme_ns_realize(DeviceState *dev, Error **errp)
ns->id_ns.endgid = cpu_to_le16(0x1);
ns->id_ns_ind.endgrpid = cpu_to_le16(0x1);
+
+ if (!ns->params.shared) {
+ ns->ctrl = n;
+ }
}
static const Property nvme_ns_props[] = {
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 6f782ba..b5c9378 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -268,6 +268,9 @@ typedef struct NvmeNamespace {
NvmeSubsystem *subsys;
NvmeEnduranceGroup *endgrp;
+ /* NULL for shared namespaces; set to specific controller if private */
+ NvmeCtrl *ctrl;
+
struct {
uint32_t err_rec;
} features;
diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c
index 2ae56f1..b617ac3 100644
--- a/hw/nvme/subsys.c
+++ b/hw/nvme/subsys.c
@@ -56,7 +56,7 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
{
NvmeSubsystem *subsys = n->subsys;
NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
- int cntlid, nsid, num_rsvd, num_vfs = n->params.sriov_max_vfs;
+ int cntlid, num_rsvd, num_vfs = n->params.sriov_max_vfs;
if (pci_is_vf(&n->parent_obj)) {
cntlid = le16_to_cpu(sctrl->scid);
@@ -92,13 +92,6 @@ int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp)
subsys->ctrls[cntlid] = n;
- for (nsid = 1; nsid < ARRAY_SIZE(subsys->namespaces); nsid++) {
- NvmeNamespace *ns = subsys->namespaces[nsid];
- if (ns && ns->params.shared && !ns->params.detached) {
- nvme_attach_ns(n, ns);
- }
- }
-
return cntlid;
}
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 8da1d5a..e59632e 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -68,10 +68,9 @@ struct SCSIDiskClass {
SCSIDeviceClass parent_class;
/*
* Callbacks receive ret == 0 for success. Errors are represented either as
- * negative errno values, or as positive SAM status codes.
- *
- * Beware: For errors returned in host_status, the function may directly
- * complete the request and never call the callback.
+ * negative errno values, or as positive SAM status codes. For host_status
+ * errors, the function passes ret == -ENODEV and sets the host_status field
+ * of the SCSIRequest.
*/
DMAIOFunc *dma_readv;
DMAIOFunc *dma_writev;
@@ -225,11 +224,26 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
SCSISense sense = SENSE_CODE(NO_SENSE);
+ int16_t host_status;
int error;
bool req_has_sense = false;
BlockErrorAction action;
int status;
+ /*
+ * host_status should only be set for SG_IO requests that came back with a
+ * host_status error in scsi_block_sgio_complete(). This error path passes
+ * -ENODEV as the return value.
+ *
+ * Reset host_status in the request because we may still want to complete
+ * the request successfully with the 'stop' or 'ignore' error policy.
+ */
+ host_status = r->req.host_status;
+ if (host_status != -1) {
+ assert(ret == -ENODEV);
+ r->req.host_status = -1;
+ }
+
if (ret < 0) {
status = scsi_sense_from_errno(-ret, &sense);
error = -ret;
@@ -289,6 +303,10 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
if (acct_failed) {
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
}
+ if (host_status != -1) {
+ scsi_req_complete_failed(&r->req, host_status);
+ return true;
+ }
if (req_has_sense) {
sdc->update_sense(&r->req);
} else if (status == CHECK_CONDITION) {
@@ -409,7 +427,6 @@ done:
scsi_req_unref(&r->req);
}
-/* May not be called in all error cases, don't rely on cleanup here */
static void scsi_dma_complete(void *opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
@@ -448,7 +465,6 @@ done:
scsi_req_unref(&r->req);
}
-/* May not be called in all error cases, don't rely on cleanup here */
static void scsi_read_complete(void *opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
@@ -585,7 +601,6 @@ done:
scsi_req_unref(&r->req);
}
-/* May not be called in all error cases, don't rely on cleanup here */
static void scsi_write_complete(void * opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
@@ -2846,14 +2861,10 @@ static void scsi_block_sgio_complete(void *opaque, int ret)
sg_io_hdr_t *io_hdr = &req->io_header;
if (ret == 0) {
- /* FIXME This skips calling req->cb() and any cleanup in it */
if (io_hdr->host_status != SCSI_HOST_OK) {
- scsi_req_complete_failed(&r->req, io_hdr->host_status);
- scsi_req_unref(&r->req);
- return;
- }
-
- if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
+ r->req.host_status = io_hdr->host_status;
+ ret = -ENODEV;
+ } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
ret = BUSY;
} else {
ret = io_hdr->status;
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index 02a09eb..ad4cd67 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -1285,6 +1285,9 @@ static int save_opt_one(void *opaque,
g_byte_array_append(data, (guint8 *)buf, ret);
}
+ buf[0] = '\0';
+ g_byte_array_append(data, (guint8 *)buf, 1);
+
qemu_close(fd);
*opt->dest = g_renew(char *, *opt->dest, (*opt->ndest) + 1);
diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c
index ee13eda..542f13b 100644
--- a/hw/ufs/ufs.c
+++ b/hw/ufs/ufs.c
@@ -1753,8 +1753,8 @@ static void ufs_init_hc(UfsHc *u)
u->geometry_desc.length = sizeof(GeometryDescriptor);
u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY;
u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0;
- u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */
- u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */
+ u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4MB: 8192 * 512B */
+ u->geometry_desc.allocation_unit_size = 0x1; /* 4MB: 1 segment */
u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */
u->geometry_desc.max_in_buffer_size = 0x8;
u->geometry_desc.max_out_buffer_size = 0x8;
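(A quick sanity check of the corrected comments, assuming the UFS geometry descriptor's usual units, i.e. the segment size counted in 512-byte blocks and the allocation unit size counted in segments:)

    /* segment_size         = 0x2000 = 8192 blocks -> 8192 * 512 B = 4 MiB */
    /* allocation_unit_size = 0x1 segment           -> 1 * 4 MiB   = 4 MiB */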