author     Richard Henderson <richard.henderson@linaro.org>  2023-08-03 18:48:20 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2023-08-03 18:48:20 -0700
commit     a089a7338332578ac2a0d792f8b2e09baed8b817 (patch)
tree       225c0f17e946df718f823f4a9f4c1f526dc7ca70 /hw
parent     2e6a56f6fb9f6b3c9e9eeffe7fb306584e605103 (diff)
parent     15b11a1da6a4b7c6b8bb37883f52b544dee2b8fd (diff)
Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging
pc,pci,virtio,crypto: bugfixes
fixes all over the place.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmTMJ90PHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRprTAH/1YxxP9Dhn71BjkwGQ18SmpNp0wlmP9GRJEy
# 7aQNO7ativ8njAX1fLEo0ZRJ5qX1MCw+/ZuEvIUZD+0biwimsVCPjWVLs3Q8geUs
# LzQWuvUoRGp136BtaZUrlS/cWr8TQY+4/lyK/xOBUOiI+5AP1Yi7eL9162RDQR3D
# cV/0eH8QNY+93n+VnyFY6Y55YnHyH9EBkxdtnVkt7NOCms4qMRf9IBiWOMaktp4w
# iTfvOfKbTCKhWDsNWIJEJUtWItRFp6OIRdO3KoMXBHuE8S/0C19fc2eBfbeN/bUK
# I5b4xO181ibzoPGWkDfLYi1wFfvGDDxFe119EzvDKU8dDtNFBoY=
# =FRdM
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 03 Aug 2023 03:19:09 PM PDT
# gpg: using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg: issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [undefined]
# gpg: aka "Michael S. Tsirkin <mst@redhat.com>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
# Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469
* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (22 commits)
cryptodev: Handle unexpected request to avoid crash
virtio-crypto: verify src&dst buffer length for sym request
include/hw/i386/x86-iommu: Fix struct X86IOMMU_MSIMessage for big endian hosts
hw/i386/x86-iommu: Fix endianness issue in x86_iommu_irq_to_msi_message()
hw/i386/intel_iommu: Fix index calculation in vtd_interrupt_remap_msi()
hw/i386/intel_iommu: Fix struct VTDInvDescIEC on big endian hosts
hw/i386/intel_iommu: Fix endianness problems related to VTD_IR_TableEntry
hw/i386/intel_iommu: Fix trivial endianness problems
vhost: fix the fd leak
pci: do not respond config requests after PCI device eject
virtio: Fix packed virtqueue used_idx mask
hw/virtio: qmp: add RING_RESET to 'info virtio-status'
tests: acpi: update expected blobs
acpi: x86: remove _ADR on host bridges
tests: acpi: whitelist expected blobs
tests: acpi: x86: update expected blobs
x86: acpi: workaround Windows not handling name references in Package properly
tests: acpi: x86: whitelist expected blobs
hw/virtio: Add a protection against duplicate vu_scmi_stop calls
virtio-iommu: Standardize granule extraction and formatting
...
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/i386/acpi-build.c            | 11
-rw-r--r--  hw/i386/intel_iommu.c           | 23
-rw-r--r--  hw/i386/intel_iommu_internal.h  |  9
-rw-r--r--  hw/i386/x86-iommu.c             |  2
-rw-r--r--  hw/pci-bridge/cxl_upstream.c    |  5
-rw-r--r--  hw/pci/pci_host.c               | 15
-rw-r--r--  hw/virtio/vhost-user-scmi.c     |  7
-rw-r--r--  hw/virtio/vhost.c               |  2
-rw-r--r--  hw/virtio/virtio-crypto.c       |  5
-rw-r--r--  hw/virtio/virtio-iommu.c        | 12
-rw-r--r--  hw/virtio/virtio-qmp.c          |  2
-rw-r--r--  hw/virtio/virtio.c              |  2
12 files changed, 69 insertions(+), 26 deletions(-)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 9c74fa1..bb12b0a 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -362,10 +362,14 @@ Aml *aml_pci_device_dsm(void)
     {
         Aml *params = aml_local(0);
         Aml *pkg = aml_package(2);
-        aml_append(pkg, aml_name("BSEL"));
-        aml_append(pkg, aml_name("ASUN"));
+        aml_append(pkg, aml_int(0));
+        aml_append(pkg, aml_int(0));
         aml_append(method, aml_store(pkg, params));
         aml_append(method,
+            aml_store(aml_name("BSEL"), aml_index(params, aml_int(0))));
+        aml_append(method,
+            aml_store(aml_name("ASUN"), aml_index(params, aml_int(1))));
+        aml_append(method,
             aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1),
                                  aml_arg(2), aml_arg(3), params))
         );
@@ -1460,7 +1464,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
         sb_scope = aml_scope("_SB");
         dev = aml_device("PCI0");
         aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
-        aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
         aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
         aml_append(dev, aml_pci_edsm());
         aml_append(sb_scope, dev);
@@ -1475,7 +1478,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
         dev = aml_device("PCI0");
         aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
         aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
-        aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
         aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
         aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en));
         aml_append(dev, aml_pci_edsm());
@@ -1589,7 +1591,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
             aml_append(pkg, aml_eisaid("PNP0A08"));
             aml_append(pkg, aml_eisaid("PNP0A03"));
             aml_append(dev, aml_name_decl("_CID", pkg));
-            aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
             build_cxl_osc_method(dev);
         } else if (pci_bus_is_express(bus)) {
             aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index dcc3340..3ca71df 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -756,6 +756,8 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base,
         return -VTD_FR_PASID_TABLE_INV;
     }
 
+    pdire->val = le64_to_cpu(pdire->val);
+
     return 0;
 }
@@ -780,6 +782,9 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s,
                         pe, entry_size, MEMTXATTRS_UNSPECIFIED)) {
         return -VTD_FR_PASID_TABLE_INV;
     }
+    for (size_t i = 0; i < ARRAY_SIZE(pe->val); i++) {
+        pe->val[i] = le64_to_cpu(pe->val[i]);
+    }
 
     /* Do translation type check */
     if (!vtd_pe_type_check(x86_iommu, pe)) {
@@ -3323,14 +3328,15 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
         return -VTD_FR_IR_ROOT_INVAL;
     }
 
-    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
-                          le64_to_cpu(entry->data[0]));
+    entry->data[0] = le64_to_cpu(entry->data[0]);
+    entry->data[1] = le64_to_cpu(entry->data[1]);
+
+    trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]);
 
     if (!entry->irte.present) {
         error_report_once("%s: detected non-present IRTE "
                           "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
-                          __func__, index, le64_to_cpu(entry->data[1]),
-                          le64_to_cpu(entry->data[0]));
+                          __func__, index, entry->data[1], entry->data[0]);
         return -VTD_FR_IR_ENTRY_P;
     }
@@ -3338,14 +3344,13 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
         entry->irte.__reserved_2) {
         error_report_once("%s: detected non-zero reserved IRTE "
                           "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
-                          __func__, index, le64_to_cpu(entry->data[1]),
-                          le64_to_cpu(entry->data[0]));
+                          __func__, index, entry->data[1], entry->data[0]);
         return -VTD_FR_IR_IRTE_RSVD;
     }
 
     if (sid != X86_IOMMU_SID_INVALID) {
         /* Validate IRTE SID */
-        source_id = le32_to_cpu(entry->irte.source_id);
+        source_id = entry->irte.source_id;
         switch (entry->irte.sid_vtype) {
         case VTD_SVT_NONE:
             break;
@@ -3399,7 +3404,7 @@ static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
     irq->trigger_mode = irte.irte.trigger_mode;
     irq->vector = irte.irte.vector;
     irq->delivery_mode = irte.irte.delivery_mode;
-    irq->dest = le32_to_cpu(irte.irte.dest_id);
+    irq->dest = irte.irte.dest_id;
     if (!iommu->intr_eime) {
 #define VTD_IR_APIC_DEST_MASK (0xff00ULL)
 #define VTD_IR_APIC_DEST_SHIFT (8)
@@ -3454,7 +3459,7 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
         goto out;
     }
 
-    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
+    index = addr.addr.index_h << 15 | addr.addr.index_l;
 
 #define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff)
 #define VTD_IR_MSI_DATA_RESERVED (0xffff0000)
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 2e61eec..e1450c5 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -321,12 +321,21 @@ typedef enum VTDFaultReason {
 
 /* Interrupt Entry Cache Invalidation Descriptor: VT-d 6.5.2.7. */
 struct VTDInvDescIEC {
+#if HOST_BIG_ENDIAN
+    uint64_t reserved_2:16;
+    uint64_t index:16;          /* Start index to invalidate */
+    uint64_t index_mask:5;      /* 2^N for continuous int invalidation */
+    uint64_t resved_1:22;
+    uint64_t granularity:1;     /* If set, it's global IR invalidation */
+    uint64_t type:4;            /* Should always be 0x4 */
+#else
     uint32_t type:4;            /* Should always be 0x4 */
     uint32_t granularity:1;     /* If set, it's global IR invalidation */
     uint32_t resved_1:22;
     uint32_t index_mask:5;      /* 2^N for continuous int invalidation */
     uint32_t index:16;          /* Start index to invalidate */
     uint32_t reserved_2:16;
+#endif
 };
 typedef struct VTDInvDescIEC VTDInvDescIEC;
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index 01d1132..726e9e1 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -63,7 +63,7 @@ void x86_iommu_irq_to_msi_message(X86IOMMUIrq *irq, MSIMessage *msg_out)
     msg.redir_hint = irq->redir_hint;
     msg.dest = irq->dest;
     msg.__addr_hi = irq->dest & 0xffffff00;
-    msg.__addr_head = cpu_to_le32(0xfee);
+    msg.__addr_head = 0xfee;
     /* Keep this from original MSI address bits */
     msg.__not_used = irq->msi_addr_last_bits;
diff --git a/hw/pci-bridge/cxl_upstream.c b/hw/pci-bridge/cxl_upstream.c
index ef47e5d..9159f48 100644
--- a/hw/pci-bridge/cxl_upstream.c
+++ b/hw/pci-bridge/cxl_upstream.c
@@ -274,10 +274,7 @@ static int build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
         };
     }
 
-    *cdat_table = g_malloc0(sizeof(*cdat_table) * CXL_USP_CDAT_NUM_ENTRIES);
-    if (!*cdat_table) {
-        return -ENOMEM;
-    }
+    *cdat_table = g_new0(CDATSubHeader *, CXL_USP_CDAT_NUM_ENTRIES);
 
     /* Header always at start of structure */
     (*cdat_table)[CXL_USP_CDAT_SSLBIS_LAT] = g_steal_pointer(&sslbis_latency);
diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c
index 7af8afd..a18aa0a 100644
--- a/hw/pci/pci_host.c
+++ b/hw/pci/pci_host.c
@@ -62,6 +62,17 @@ static void pci_adjust_config_limit(PCIBus *bus, uint32_t *limit)
     }
 }
 
+static bool is_pci_dev_ejected(PCIDevice *pci_dev)
+{
+    /*
+     * device unplug was requested and the guest acked it,
+     * so we stop responding config accesses even if the
+     * device is not deleted (failover flow)
+     */
+    return pci_dev && pci_dev->partially_hotplugged &&
+           !pci_dev->qdev.pending_deleted_event;
+}
+
 void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
                                   uint32_t limit, uint32_t val, uint32_t len)
 {
@@ -75,7 +86,7 @@ void pci_host_config_write_common(PCIDevice *pci_dev, uint32_t addr,
      * allowing direct removal of unexposed functions.
      */
     if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) ||
-        !pci_dev->has_power) {
+        !pci_dev->has_power || is_pci_dev_ejected(pci_dev)) {
         return;
     }
@@ -100,7 +111,7 @@ uint32_t pci_host_config_read_common(PCIDevice *pci_dev, uint32_t addr,
      * allowing direct removal of unexposed functions.
      */
     if ((pci_dev->qdev.hotplugged && !pci_get_function_0(pci_dev)) ||
-        !pci_dev->has_power) {
+        !pci_dev->has_power || is_pci_dev_ejected(pci_dev)) {
         return ~0x0;
     }
diff --git a/hw/virtio/vhost-user-scmi.c b/hw/virtio/vhost-user-scmi.c
index d386fb2..918bb7d 100644
--- a/hw/virtio/vhost-user-scmi.c
+++ b/hw/virtio/vhost-user-scmi.c
@@ -63,6 +63,7 @@ static int vu_scmi_start(VirtIODevice *vdev)
         error_report("Error starting vhost-user-scmi: %d", ret);
         goto err_guest_notifiers;
     }
+    scmi->started_vu = true;
 
     /*
      * guest_notifier_mask/pending not used yet, so just unmask
@@ -90,6 +91,12 @@ static void vu_scmi_stop(VirtIODevice *vdev)
     struct vhost_dev *vhost_dev = &scmi->vhost_dev;
     int ret;
 
+    /* vhost_dev_is_started() check in the callers is not fully reliable. */
+    if (!scmi->started_vu) {
+        return;
+    }
+    scmi->started_vu = false;
+
     if (!k->set_guest_notifiers) {
         return;
     }
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index abf0d03..e2f6ffb 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -2044,6 +2044,8 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
     event_notifier_test_and_clear(
         &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
     event_notifier_test_and_clear(&vdev->config_notifier);
+    event_notifier_cleanup(
+        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
 
     trace_vhost_dev_stop(hdev, vdev->name, vrings);
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 44faf5a..13aec77 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -634,6 +634,11 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
         return NULL;
     }
 
+    if (unlikely(src_len != dst_len)) {
+        virtio_error(vdev, "sym request src len is different from dst len");
+        return NULL;
+    }
+
     max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
     if (unlikely(max_len > vcrypto->conf.max_size)) {
         virtio_error(vdev, "virtio-crypto too big length");
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 201127c..be51635 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -728,13 +728,15 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
     VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
     struct virtio_iommu_req_head head;
     struct virtio_iommu_req_tail tail = {};
-    size_t output_size = sizeof(tail), sz;
     VirtQueueElement *elem;
     unsigned int iov_cnt;
     struct iovec *iov;
     void *buf = NULL;
+    size_t sz;
 
     for (;;) {
+        size_t output_size = sizeof(tail);
+
         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
         if (!elem) {
             return;
@@ -852,17 +854,19 @@ static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
     VirtIOIOMMUEndpoint *ep;
     uint32_t sid, flags;
     bool bypass_allowed;
+    int granule;
     bool found;
     int i;
 
     interval.low = addr;
     interval.high = addr + 1;
+    granule = ctz64(s->config.page_size_mask);
 
     IOMMUTLBEntry entry = {
         .target_as = &address_space_memory,
         .iova = addr,
         .translated_addr = addr,
-        .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
+        .addr_mask = BIT_ULL(granule) - 1,
         .perm = IOMMU_NONE,
     };
@@ -1115,7 +1119,7 @@ static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
     if (s->granule_frozen) {
         int cur_granule = ctz64(cur_mask);
 
-        if (!(BIT(cur_granule) & new_mask)) {
+        if (!(BIT_ULL(cur_granule) & new_mask)) {
             error_setg(errp, "virtio-iommu %s does not support frozen granule 0x%llx",
                        mr->parent_obj.name, BIT_ULL(cur_granule));
             return -1;
@@ -1161,7 +1165,7 @@ static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
     }
     s->granule_frozen = true;
     granule = ctz64(s->config.page_size_mask);
-    trace_virtio_iommu_freeze_granule(BIT(granule));
+    trace_virtio_iommu_freeze_granule(BIT_ULL(granule));
 }
diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c
index 3d32dbe..7515b09 100644
--- a/hw/virtio/virtio-qmp.c
+++ b/hw/virtio/virtio-qmp.c
@@ -79,6 +79,8 @@ static const qmp_virtio_feature_map_t virtio_transport_map[] = {
             "VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"),
     FEATURE_ENTRY(VIRTIO_F_SR_IOV, \
             "VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"),
+    FEATURE_ENTRY(VIRTIO_F_RING_RESET, \
+            "VIRTIO_F_RING_RESET: Driver can reset a queue individually"),
     /* Virtio ring transport features */
     FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \
             "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"),
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 295a603..309038f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3321,7 +3321,7 @@ static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
     vq->last_avail_wrap_counter =
        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
     idx >>= 16;
-    vq->used_idx = idx & 0x7ffff;
+    vq->used_idx = idx & 0x7fff;
     vq->used_wrap_counter = !!(idx & 0x8000);
 }