Diffstat (limited to 'hw')
 hw/char/sifive_uart.c      |  36
 hw/char/trace-events       |  14
 hw/core/cpu-common.c       |   1
 hw/cxl/cxl-host.c          |   7
 hw/display/virtio-gpu.c    |  13
 hw/intc/apic_common.c      |   1
 hw/intc/riscv_aclint.c     |   7
 hw/misc/ivshmem-flat.c     |   8
 hw/net/virtio-net.c        |   8
 hw/pci/pci.c               |   5
 hw/remote/vfio-user-obj.c  |  13
 hw/riscv/riscv-iommu.c     | 154
 hw/s390x/virtio-ccw.c      |   4
 hw/scsi/spapr_vscsi.c      |   6
 hw/vfio/container-legacy.c |   3
 hw/vfio/cpr-iommufd.c      |   3
 hw/vfio/cpr-legacy.c       |   9
 hw/vfio/cpr.c              |  13
 hw/vfio/pci.c              |   9
 hw/virtio/virtio-mmio.c    |   5
 hw/virtio/virtio-pci.c     |   4
 hw/virtio/virtio.c         |  13
22 files changed, 258 insertions, 78 deletions
diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c
index 9bc697a..e7357d5 100644
--- a/hw/char/sifive_uart.c
+++ b/hw/char/sifive_uart.c
@@ -28,23 +28,18 @@
 #define TX_INTERRUPT_TRIGGER_DELAY_NS 100
 
-/*
- * Not yet implemented:
- *
- * Transmit FIFO using "qemu/fifo8.h"
- */
-
 /* Returns the state of the IP (interrupt pending) register */
-static uint64_t sifive_uart_ip(SiFiveUARTState *s)
+static uint32_t sifive_uart_ip(SiFiveUARTState *s)
 {
-    uint64_t ret = 0;
+    uint32_t ret = 0;
 
-    uint64_t txcnt = SIFIVE_UART_GET_TXCNT(s->txctrl);
-    uint64_t rxcnt = SIFIVE_UART_GET_RXCNT(s->rxctrl);
+    uint32_t txcnt = SIFIVE_UART_GET_TXCNT(s->txctrl);
+    uint32_t rxcnt = SIFIVE_UART_GET_RXCNT(s->rxctrl);
 
-    if (txcnt != 0) {
+    if (fifo8_num_used(&s->tx_fifo) < txcnt) {
         ret |= SIFIVE_UART_IP_TXWM;
     }
+
     if (s->rx_fifo_len > rxcnt) {
         ret |= SIFIVE_UART_IP_RXWM;
     }
@@ -55,15 +50,14 @@ static uint64_t sifive_uart_ip(SiFiveUARTState *s)
 static void sifive_uart_update_irq(SiFiveUARTState *s)
 {
     int cond = 0;
-    if ((s->ie & SIFIVE_UART_IE_TXWM) ||
-        ((s->ie & SIFIVE_UART_IE_RXWM) && s->rx_fifo_len)) {
+    uint32_t ip = sifive_uart_ip(s);
+
+    if (((ip & SIFIVE_UART_IP_TXWM) && (s->ie & SIFIVE_UART_IE_TXWM)) ||
+        ((ip & SIFIVE_UART_IP_RXWM) && (s->ie & SIFIVE_UART_IE_RXWM))) {
         cond = 1;
     }
-    if (cond) {
-        qemu_irq_raise(s->irq);
-    } else {
-        qemu_irq_lower(s->irq);
-    }
+
+    qemu_set_irq(s->irq, cond);
 }
 
 static gboolean sifive_uart_xmit(void *do_not_use, GIOCondition cond,
@@ -119,10 +113,12 @@ static void sifive_uart_write_tx_fifo(SiFiveUARTState *s, const uint8_t *buf,
 
     if (size > fifo8_num_free(&s->tx_fifo)) {
         size = fifo8_num_free(&s->tx_fifo);
-        qemu_log_mask(LOG_GUEST_ERROR, "sifive_uart: TX FIFO overflow");
+        qemu_log_mask(LOG_GUEST_ERROR, "sifive_uart: TX FIFO overflow.\n");
     }
 
-    fifo8_push_all(&s->tx_fifo, buf, size);
+    if (size > 0) {
+        fifo8_push_all(&s->tx_fifo, buf, size);
+    }
 
     if (fifo8_is_full(&s->tx_fifo)) {
         s->txfifo |= SIFIVE_UART_TXFIFO_FULL;
diff --git a/hw/char/trace-events b/hw/char/trace-events
index 05a3303..9e74be2 100644
--- a/hw/char/trace-events
+++ b/hw/char/trace-events
@@ -58,15 +58,15 @@ imx_serial_write(const char *chrname, uint64_t addr, uint64_t value) "%s:[0x%03"
 imx_serial_put_data(const char *chrname, uint32_t value) "%s: 0x%" PRIx32
 
 # pl011.c
-pl011_irq_state(int level) "irq state %d"
-pl011_read(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s"
-pl011_read_fifo(unsigned rx_fifo_used, size_t rx_fifo_depth) "RX FIFO read, used %u/%zu"
-pl011_write(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s"
-pl011_can_receive(uint32_t lcr, unsigned rx_fifo_used, size_t rx_fifo_depth, unsigned rx_fifo_available) "LCR 0x%02x, RX FIFO used %u/%zu, can_receive %u chars"
-pl011_fifo_rx_put(uint32_t c, unsigned read_count, size_t rx_fifo_depth) "RX FIFO push char [0x%02x] %d/%zu depth used"
+pl011_irq_state(bool level) "irq state %d"
+pl011_read(uint64_t addr, uint32_t value, const char *regname) "addr 0x%03" PRIx64 " value 0x%08x reg %s"
+pl011_read_fifo(unsigned rx_fifo_used, unsigned rx_fifo_depth) "RX FIFO read, used %u/%u"
+pl011_write(uint64_t addr, uint32_t value, const char *regname) "addr 0x%03" PRIx64 " value 0x%08x reg %s"
+pl011_can_receive(uint32_t lcr, unsigned rx_fifo_used, unsigned rx_fifo_depth, unsigned rx_fifo_available) "LCR 0x%02x, RX FIFO used %u/%u, can_receive %u chars"
+pl011_fifo_rx_put(uint32_t c, unsigned read_count, unsigned rx_fifo_depth) "RX FIFO push char [0x%02x] %d/%u depth used"
 pl011_fifo_rx_full(void) "RX FIFO now full, RXFF set"
 pl011_baudrate_change(unsigned int baudrate, uint64_t clock, uint32_t ibrd, uint32_t fbrd) "new baudrate %u (clk: %" PRIu64 "hz, ibrd: %" PRIu32 ", fbrd: %" PRIu32 ")"
-pl011_receive(int size) "recv %d chars"
+pl011_receive(size_t size) "recv %zd chars"
 
 # cmsdk-apb-uart.c
 cmsdk_apb_uart_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u"
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index 41a3399..8c306c8 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -294,6 +294,7 @@ void cpu_exec_unrealizefn(CPUState *cpu)
      * accel_cpu_common_unrealize, which may free fields using call_rcu.
      */
     accel_cpu_common_unrealize(cpu);
+    cpu_destroy_address_spaces(cpu);
 }
 
 static void cpu_common_initfn(Object *obj)
diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c
index 5c2ce25..0d891c6 100644
--- a/hw/cxl/cxl-host.c
+++ b/hw/cxl/cxl-host.c
@@ -72,6 +72,7 @@ static void cxl_fixed_memory_window_config(CXLFixedMemoryWindowOptions *object,
 
 static int cxl_fmws_link(Object *obj, void *opaque)
 {
+    Error **errp = opaque;
     struct CXLFixedWindow *fw;
     int i;
 
@@ -87,9 +88,9 @@ static int cxl_fmws_link(Object *obj, void *opaque)
         o = object_resolve_path_type(fw->targets[i], TYPE_PXB_CXL_DEV,
                                      &ambig);
         if (!o) {
-            error_setg(&error_fatal, "Could not resolve CXLFM target %s",
+            error_setg(errp, "Could not resolve CXLFM target %s",
                        fw->targets[i]);
-            return 1;
+            return -1;
         }
         fw->target_hbs[i] = PXB_CXL_DEV(o);
     }
@@ -99,7 +100,7 @@ static int cxl_fmws_link(Object *obj, void *opaque)
 void cxl_fmws_link_targets(Error **errp)
 {
     /* Order doesn't matter for this, so no need to build list */
-    object_child_foreach_recursive(object_get_root(), cxl_fmws_link, NULL);
+    object_child_foreach_recursive(object_get_root(), cxl_fmws_link, errp);
 }
 
 static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr,
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 0a1a625..3a55512 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -242,6 +242,7 @@ static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
 static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
 {
+    Error *err = NULL;
     pixman_format_code_t pformat;
     struct virtio_gpu_simple_resource *res;
     struct virtio_gpu_resource_create_2d c2d;
@@ -293,7 +294,8 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                            c2d.width,
                                            c2d.height,
                                            c2d.height ? res->hostmem / c2d.height : 0,
-                                           &error_warn)) {
+                                           &err)) {
+            warn_report_err(err);
             goto end;
         }
     }
@@ -1246,7 +1248,8 @@ static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
     }
     qemu_put_be32(f, 0); /* end of list */
 
-    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
+    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL,
+                              &error_fatal);
 }
 
 static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
@@ -1282,6 +1285,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                            const VMStateField *field)
 {
     VirtIOGPU *g = opaque;
+    Error *err = NULL;
     struct virtio_gpu_simple_resource *res;
     uint32_t resource_id, pformat;
     int i;
@@ -1317,7 +1321,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                                        res->width, res->height,
                                        res->height ?
                                        res->hostmem / res->height : 0,
-                                       &error_warn)) {
+                                       &err)) {
+            warn_report_err(err);
             g_free(res);
             return -EINVAL;
         }
@@ -1343,7 +1348,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
     }
 
     /* load & apply scanout state */
-    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
+    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1, &error_fatal);
 
     return 0;
 }
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index 37a7a70..394fe02 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -379,6 +379,7 @@ static const VMStateDescription vmstate_apic_common = {
     .pre_load = apic_pre_load,
     .pre_save = apic_dispatch_pre_save,
     .post_load = apic_dispatch_post_load,
+    .priority = MIG_PRI_APIC,
     .fields = (const VMStateField[]) {
         VMSTATE_UINT32(apicbase, APICCommonState),
         VMSTATE_UINT8(id, APICCommonState),
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
index 4623cfa0..9f4c36e 100644
--- a/hw/intc/riscv_aclint.c
+++ b/hw/intc/riscv_aclint.c
@@ -323,12 +323,15 @@ static void riscv_aclint_mtimer_reset_enter(Object *obj, ResetType type)
 
 static const VMStateDescription vmstate_riscv_mtimer = {
     .name = "riscv_mtimer",
-    .version_id = 1,
-    .minimum_version_id = 1,
+    .version_id = 3,
+    .minimum_version_id = 3,
     .fields = (const VMStateField[]) {
+        VMSTATE_UINT64(time_delta, RISCVAclintMTimerState),
         VMSTATE_VARRAY_UINT32(timecmp, RISCVAclintMTimerState, num_harts, 0,
                               vmstate_info_uint64, uint64_t),
+        VMSTATE_TIMER_PTR_VARRAY(timers, RISCVAclintMTimerState,
+                                 num_harts),
         VMSTATE_END_OF_LIST()
     }
 };
diff --git a/hw/misc/ivshmem-flat.c b/hw/misc/ivshmem-flat.c
index e83e6c6..27ee8c9 100644
--- a/hw/misc/ivshmem-flat.c
+++ b/hw/misc/ivshmem-flat.c
@@ -138,6 +138,8 @@ static void ivshmem_flat_remove_peer(IvshmemFTState *s, uint16_t peer_id)
 static void ivshmem_flat_add_vector(IvshmemFTState *s, IvshmemPeer *peer,
                                     int vector_fd)
 {
+    Error *err = NULL;
+
     if (peer->vector_counter >= IVSHMEM_MAX_VECTOR_NUM) {
         trace_ivshmem_flat_add_vector_failure(peer->vector_counter,
                                               vector_fd, peer->id);
@@ -154,8 +156,10 @@ static void ivshmem_flat_add_vector(IvshmemFTState *s, IvshmemPeer *peer,
      * peer.
      */
     peer->vector[peer->vector_counter].id = peer->vector_counter;
-    /* WARNING: qemu_socket_set_nonblock() return code ignored */
-    qemu_set_blocking(vector_fd, false, &error_warn);
+    if (!qemu_set_blocking(vector_fd, false, &err)) {
+        /* FIXME handle the error */
+        warn_report_err(err);
+    }
 
     event_notifier_init_fd(&peer->vector[peer->vector_counter].event_notifier,
                            vector_fd);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 6b5b5da..7848e26 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1289,6 +1289,8 @@ exit:
 
 static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp)
 {
+    Error *err = NULL;
+
     if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
         return true;
     }
@@ -1306,7 +1308,11 @@ static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp)
         return virtio_net_load_ebpf_fds(n, errp);
     }
 
-    ebpf_rss_load(&n->ebpf_rss, &error_warn);
+    ebpf_rss_load(&n->ebpf_rss, &err);
+    /* Beware, ebpf_rss_load() can return false with @err unset */
+    if (err) {
+        warn_report_err(err);
+    }
     return true;
 }
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index c3df9d6..5e2c3c6 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -926,7 +926,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f)
      * This makes us compatible with old devices
      * which never set or clear this bit.
      */
     s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
-    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
+    vmstate_save_state(f, &vmstate_pci_device, s, NULL, &error_fatal);
     /* Restore the interrupt status bit. */
     pci_update_irq_status(s);
 }
@@ -934,7 +934,8 @@ void pci_device_save(PCIDevice *s, QEMUFile *f)
 int pci_device_load(PCIDevice *s, QEMUFile *f)
 {
     int ret;
-    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
+    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id,
+                             &error_fatal);
     /* Restore the interrupt status bit. */
     pci_update_irq_status(s);
     return ret;
diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
index ea6165e..216b487 100644
--- a/hw/remote/vfio-user-obj.c
+++ b/hw/remote/vfio-user-obj.c
@@ -75,12 +75,17 @@ OBJECT_DECLARE_TYPE(VfuObject, VfuObjectClass, VFU_OBJECT)
  */
 #define VFU_OBJECT_ERROR(o, fmt, ...)                                    \
     {                                                                    \
+        error_report((fmt), ## __VA_ARGS__);                             \
         if (vfu_object_auto_shutdown()) {                                \
-            error_setg(&error_abort, (fmt), ## __VA_ARGS__);             \
-        } else {                                                         \
-            error_report((fmt), ## __VA_ARGS__);                         \
+            /*                                                           \
+             * FIXME This looks inappropriate. The error is serious      \
+             * enough programming error to warrant aborting the process  \
+             * when auto-shutdown is enabled, yet harmless enough to     \
+             * permit carrying on when it's disabled. Makes no sense.    \
+             */                                                          \
+            abort();                                                     \
         }                                                                \
-    }                                                                    \
+    }
 
 struct VfuObjectClass {
     ObjectClass parent_class;
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
index 96a7fbd..b33c7fe 100644
--- a/hw/riscv/riscv-iommu.c
+++ b/hw/riscv/riscv-iommu.c
@@ -558,6 +558,7 @@ static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s,
     MemTxResult res;
     dma_addr_t addr;
     uint64_t intn;
+    size_t offset;
     uint32_t n190;
     uint64_t pte[2];
     int fault_type = RISCV_IOMMU_FQ_TTYPE_UADDR_WR;
@@ -565,16 +566,18 @@ static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s,
     /* Interrupt File Number */
     intn = riscv_iommu_pext_u64(PPN_DOWN(gpa), ctx->msi_addr_mask);
 
-    if (intn >= 256) {
+    offset = intn * sizeof(pte);
+
+    /* fetch MSI PTE */
+    addr = PPN_PHYS(get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_PPN));
+    if (addr & offset) {
         /* Interrupt file number out of range */
         res = MEMTX_ACCESS_ERROR;
         cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT;
         goto err;
     }
 
-    /* fetch MSI PTE */
-    addr = PPN_PHYS(get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_PPN));
-    addr = addr | (intn * sizeof(pte));
+    addr |= offset;
     res = dma_memory_read(s->target_as, addr, &pte, sizeof(pte),
                           MEMTXATTRS_UNSPECIFIED);
     if (res != MEMTX_OK) {
@@ -866,6 +869,145 @@ static bool riscv_iommu_validate_process_ctx(RISCVIOMMUState *s,
     return true;
 }
 
+/**
+ * pdt_memory_read: PDT wrapper of dma_memory_read.
+ *
+ * @s: IOMMU Device State
+ * @ctx: Device Translation Context with devid and pasid set
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ * @attrs: memory transaction attributes
+ */
+static MemTxResult pdt_memory_read(RISCVIOMMUState *s,
+                                   RISCVIOMMUContext *ctx,
+                                   dma_addr_t addr,
+                                   void *buf, dma_addr_t len,
+                                   MemTxAttrs attrs)
+{
+    uint64_t gatp_mode, pte;
+    struct {
+        unsigned char step;
+        unsigned char levels;
+        unsigned char ptidxbits;
+        unsigned char ptesize;
+    } sc;
+    MemTxResult ret;
+    dma_addr_t base = addr;
+
+    /* G stages translation mode */
+    gatp_mode = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD);
+    if (gatp_mode == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) {
+        goto out;
+    }
+
+    /* G stages translation tables root pointer */
+    base = PPN_PHYS(get_field(ctx->gatp, RISCV_IOMMU_ATP_PPN_FIELD));
+
+    /* Start at step 0 */
+    sc.step = 0;
+
+    if (s->fctl & RISCV_IOMMU_FCTL_GXL) {
+        /* 32bit mode for GXL == 1 */
+        switch (gatp_mode) {
+        case RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4:
+            if (!(s->cap & RISCV_IOMMU_CAP_SV32X4)) {
+                return MEMTX_ACCESS_ERROR;
+            }
+            sc.levels = 2;
+            sc.ptidxbits = 10;
+            sc.ptesize = 4;
+            break;
+        default:
+            return MEMTX_ACCESS_ERROR;
+        }
+    } else {
+        /* 64bit mode for GXL == 0 */
+        switch (gatp_mode) {
+        case RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4:
+            if (!(s->cap & RISCV_IOMMU_CAP_SV39X4)) {
+                return MEMTX_ACCESS_ERROR;
+            }
+            sc.levels = 3;
+            sc.ptidxbits = 9;
+            sc.ptesize = 8;
+            break;
+        case RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4:
+            if (!(s->cap & RISCV_IOMMU_CAP_SV48X4)) {
+                return MEMTX_ACCESS_ERROR;
+            }
+            sc.levels = 4;
+            sc.ptidxbits = 9;
+            sc.ptesize = 8;
+            break;
+        case RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4:
+            if (!(s->cap & RISCV_IOMMU_CAP_SV57X4)) {
+                return MEMTX_ACCESS_ERROR;
+            }
+            sc.levels = 5;
+            sc.ptidxbits = 9;
+            sc.ptesize = 8;
+            break;
+        default:
+            return MEMTX_ACCESS_ERROR;
+        }
+    }
+
+    do {
+        const unsigned va_bits = (sc.step ? 0 : 2) + sc.ptidxbits;
+        const unsigned va_skip = TARGET_PAGE_BITS + sc.ptidxbits *
+                                 (sc.levels - 1 - sc.step);
+        const unsigned idx = (addr >> va_skip) & ((1 << va_bits) - 1);
+        const dma_addr_t pte_addr = base + idx * sc.ptesize;
+
+        /* Address range check before first level lookup */
+        if (!sc.step) {
+            const uint64_t va_mask = (1ULL << (va_skip + va_bits)) - 1;
+            if ((addr & va_mask) != addr) {
+                return MEMTX_ACCESS_ERROR;
+            }
+        }
+
+        /* Read page table entry */
+        if (sc.ptesize == 4) {
+            uint32_t pte32 = 0;
+            ret = ldl_le_dma(s->target_as, pte_addr, &pte32, attrs);
+            pte = pte32;
+        } else {
+            ret = ldq_le_dma(s->target_as, pte_addr, &pte, attrs);
+        }
+        if (ret != MEMTX_OK) {
+            return ret;
+        }
+
+        sc.step++;
+        hwaddr ppn = pte >> PTE_PPN_SHIFT;
+
+        if (!(pte & PTE_V)) {
+            return MEMTX_ACCESS_ERROR; /* Invalid PTE */
+        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
+            base = PPN_PHYS(ppn); /* Inner PTE, continue walking */
+        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
+            return MEMTX_ACCESS_ERROR; /* Reserved leaf PTE flags: PTE_W */
+        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
+            return MEMTX_ACCESS_ERROR; /* Reserved leaf PTE flags: PTE_W + PTE_X */
+        } else if (ppn & ((1ULL << (va_skip - TARGET_PAGE_BITS)) - 1)) {
+            return MEMTX_ACCESS_ERROR; /* Misaligned PPN */
+        } else {
+            /* Leaf PTE, translation completed. */
+            base = PPN_PHYS(ppn) | (addr & ((1ULL << va_skip) - 1));
+            break;
+        }
+
+        if (sc.step == sc.levels) {
+            return MEMTX_ACCESS_ERROR; /* Can't find leaf PTE */
+        }
+    } while (1);
+
+out:
+    return dma_memory_read(s->target_as, base, buf, len, attrs);
+}
+
 /*
  * RISC-V IOMMU Device Context Loopkup - Device Directory Tree Walk
  *
@@ -1038,7 +1180,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
          */
         const int split = depth * 9 + 8;
         addr |= ((ctx->process_id >> split) << 3) & ~TARGET_PAGE_MASK;
-        if (dma_memory_read(s->target_as, addr, &de, sizeof(de),
+        if (pdt_memory_read(s, ctx, addr, &de, sizeof(de),
                             MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
             return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
         }
@@ -1053,7 +1195,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
 
     /* Leaf entry in PDT */
     addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
-    if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
+    if (pdt_memory_read(s, ctx, addr, &dc.ta, sizeof(uint64_t) * 2,
                         MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
         return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
     }
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index d2f85b3..4cb1ced 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1130,13 +1130,13 @@ static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
 static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
 {
     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL);
+    vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL, &error_fatal);
 }
 
 static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
 {
     VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1);
+    return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1, &error_fatal);
 }
 
 static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp)
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index 20f70fb..f0a7dd2 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -630,7 +630,7 @@ static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
     vscsi_req *req = sreq->hba_private;
     assert(req->active);
 
-    vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);
+    vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL, &error_fatal);
 
     trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num,
                                    req->cur_desc_offset);
@@ -642,15 +642,17 @@ static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
     VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent);
     vscsi_req *req;
     int rc;
+    Error *local_err = NULL;
 
     assert(sreq->tag < VSCSI_REQ_LIMIT);
     req = &s->reqs[sreq->tag];
     assert(!req->active);
 
     memset(req, 0, sizeof(*req));
-    rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1);
+    rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1, &local_err);
     if (rc) {
         fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag);
+        error_report_err(local_err);
         return NULL;
     }
     assert(req->active);
diff --git a/hw/vfio/container-legacy.c b/hw/vfio/container-legacy.c
index 34352dd..629ff23 100644
--- a/hw/vfio/container-legacy.c
+++ b/hw/vfio/container-legacy.c
@@ -972,7 +972,8 @@ static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
         error_setg(&vbasedev->cpr.mdev_blocker,
                    "CPR does not support vfio mdev %s", vbasedev->name);
         if (migrate_add_blocker_modes(&vbasedev->cpr.mdev_blocker, errp,
-                                      MIG_MODE_CPR_TRANSFER, -1) < 0) {
+                                      MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC,
+                                      -1) < 0) {
            goto hiod_unref_exit;
         }
     }
 }
diff --git a/hw/vfio/cpr-iommufd.c b/hw/vfio/cpr-iommufd.c
index 1d70c87..8a4d65d 100644
--- a/hw/vfio/cpr-iommufd.c
+++ b/hw/vfio/cpr-iommufd.c
@@ -159,7 +159,8 @@ bool vfio_iommufd_cpr_register_iommufd(IOMMUFDBackend *be, Error **errp)
 
     if (!vfio_cpr_supported(be, cpr_blocker)) {
         return migrate_add_blocker_modes(cpr_blocker, errp,
-                                         MIG_MODE_CPR_TRANSFER, -1) == 0;
+                                         MIG_MODE_CPR_TRANSFER,
+                                         MIG_MODE_CPR_EXEC, -1) == 0;
     }
 
     vmstate_register(NULL, -1, &iommufd_cpr_vmstate, be);
diff --git a/hw/vfio/cpr-legacy.c b/hw/vfio/cpr-legacy.c
index 3a1d126..80af746 100644
--- a/hw/vfio/cpr-legacy.c
+++ b/hw/vfio/cpr-legacy.c
@@ -179,16 +179,17 @@ bool vfio_legacy_cpr_register_container(VFIOLegacyContainer *container,
 
     if (!vfio_cpr_supported(container, cpr_blocker)) {
         return migrate_add_blocker_modes(cpr_blocker, errp,
-                                         MIG_MODE_CPR_TRANSFER, -1) == 0;
+                                         MIG_MODE_CPR_TRANSFER,
+                                         MIG_MODE_CPR_EXEC, -1) == 0;
     }
 
     vfio_cpr_add_kvm_notifier();
 
     vmstate_register(NULL, -1, &vfio_container_vmstate, container);
 
-    migration_add_notifier_mode(&container->cpr.transfer_notifier,
-                                vfio_cpr_fail_notifier,
-                                MIG_MODE_CPR_TRANSFER);
+    migration_add_notifier_modes(&container->cpr.transfer_notifier,
+                                 vfio_cpr_fail_notifier,
+                                 MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC, -1);
     return true;
 }
diff --git a/hw/vfio/cpr.c b/hw/vfio/cpr.c
index 2c71fc1..db462aa 100644
--- a/hw/vfio/cpr.c
+++ b/hw/vfio/cpr.c
@@ -195,9 +195,10 @@ static int vfio_cpr_kvm_close_notifier(NotifierWithReturn *notifier,
 
 void vfio_cpr_add_kvm_notifier(void)
 {
     if (!kvm_close_notifier.notify) {
-        migration_add_notifier_mode(&kvm_close_notifier,
-                                    vfio_cpr_kvm_close_notifier,
-                                    MIG_MODE_CPR_TRANSFER);
+        migration_add_notifier_modes(&kvm_close_notifier,
+                                     vfio_cpr_kvm_close_notifier,
+                                     MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC,
+                                     -1);
     }
 }
 
@@ -282,9 +283,9 @@ static int vfio_cpr_pci_notifier(NotifierWithReturn *notifier,
 
 void vfio_cpr_pci_register_device(VFIOPCIDevice *vdev)
 {
-    migration_add_notifier_mode(&vdev->cpr.transfer_notifier,
-                                vfio_cpr_pci_notifier,
-                                MIG_MODE_CPR_TRANSFER);
+    migration_add_notifier_modes(&vdev->cpr.transfer_notifier,
+                                 vfio_cpr_pci_notifier,
+                                 MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC, -1);
 }
 
 void vfio_cpr_pci_unregister_device(VFIOPCIDevice *vdev)
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 5b022da..06b06af 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2821,8 +2821,8 @@ static int vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f, Error **errp)
 {
     VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
 
-    return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL,
-                                       errp);
+    return vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL,
+                              errp);
 }
 
 static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
@@ -2831,13 +2831,16 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
     PCIDevice *pdev = PCI_DEVICE(vdev);
     pcibus_t old_addr[PCI_NUM_REGIONS - 1];
     int bar, ret;
+    Error *local_err = NULL;
 
     for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
         old_addr[bar] = pdev->io_regions[bar].addr;
     }
 
-    ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1);
+    ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1,
+                             &local_err);
     if (ret) {
+        error_report_err(local_err);
         return ret;
     }
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 532c671..fb58c36 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -34,6 +34,7 @@
 #include "qemu/error-report.h"
 #include "qemu/log.h"
 #include "trace.h"
+#include "qapi/error.h"
"qapi/error.h" static bool virtio_mmio_ioeventfd_enabled(DeviceState *d) { @@ -612,14 +613,14 @@ static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f) { VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); - vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL); + vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL, &error_fatal); } static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f) { VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); - return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1); + return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1, &error_fatal); } static bool virtio_mmio_has_extra_state(DeviceState *opaque) diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 767216d..d2595fb 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -154,14 +154,14 @@ static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); + vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL, &error_fatal); } static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); + return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1, &error_fatal); } static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 9a81ad9..0a68f1b 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -2992,6 +2992,7 @@ int virtio_save(VirtIODevice *vdev, QEMUFile *f) VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff); int i; + Error *local_err = NULL; if (k->save_config) { k->save_config(qbus->parent, f); @@ -3035,14 +3036,15 @@ int virtio_save(VirtIODevice *vdev, QEMUFile *f) } if (vdc->vmsd) { - int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL); + int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL, &local_err); if (ret) { + error_report_err(local_err); return ret; } } /* Subsections */ - return vmstate_save_state(f, &vmstate_virtio, vdev, NULL); + return vmstate_save_state(f, &vmstate_virtio, vdev, NULL, &error_fatal); } /* A wrapper for use as a VMState .put function */ @@ -3235,6 +3237,7 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + Error *local_err = NULL; /* * We poison the endianness to ensure it does not get used before @@ -3327,15 +3330,17 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) } if (vdc->vmsd) { - ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id); + ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id, &local_err); if (ret) { + error_report_err(local_err); return ret; } } /* Subsections */ - ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); + ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1, &local_err); if (ret) { + error_report_err(local_err); return ret; } |