-rw-r--r--  MAINTAINERS                              |   1
-rw-r--r--  hw/i386/amd_iommu.c                      | 102
-rw-r--r--  hw/i386/amd_iommu.h                      |   2
-rw-r--r--  hw/i386/intel_iommu.c                    |  15
-rw-r--r--  hw/intc/loongarch_ipi_kvm.c              |  27
-rw-r--r--  hw/intc/riscv_aplic.c                    |   6
-rw-r--r--  hw/net/virtio-net.c                      |   7
-rw-r--r--  hw/pci/pcie_sriov.c                      |  42
-rw-r--r--  hw/riscv/virt-acpi-build.c               |  25
-rw-r--r--  hw/virtio/vhost.c                        |   6
-rw-r--r--  hw/virtio/virtio.c                       |  22
-rw-r--r--  linux-user/strace.list                   |   3
-rw-r--r--  net/vhost-vdpa.c                         |   5
-rw-r--r--  target/loongarch/cpu_helper.c            |   4
-rw-r--r--  target/riscv/csr.c                       |  15
-rw-r--r--  target/riscv/insn_trans/trans_rvv.c.inc  | 323
-rw-r--r--  target/riscv/op_helper.c                 |  15
-rw-r--r--  target/riscv/pmp.c                       |   7
-rw-r--r--  tests/data/acpi/aarch64/virt/HEST        | bin 0 -> 132 bytes
-rw-r--r--  tests/data/acpi/riscv64/virt/APIC        | bin 116 -> 116 bytes
-rw-r--r--  tests/data/acpi/riscv64/virt/FACP        | bin 276 -> 276 bytes
-rw-r--r--  tests/qtest/bios-tables-test.c           |   2
22 files changed, 248 insertions(+), 381 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 069d77f..28cea34 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2322,6 +2322,7 @@ F: include/*/vhost*
F: subprojects/libvhost-user/
F: block/export/vhost-user*
F: util/vhost-user-server.c
+F: net/vhost*
vhost-shadow-virtqueue
R: Eugenio Pérez <eperezma@redhat.com>
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 5a24c17..26be69b 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -123,8 +123,13 @@ static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
uint16_t romask = lduw_le_p(&s->romask[addr]);
uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
uint16_t oldval = lduw_le_p(&s->mmior[addr]);
+
+ uint16_t oldval_preserved = oldval & (romask | w1cmask);
+ uint16_t newval_write = val & ~romask;
+ uint16_t newval_w1c_set = val & w1cmask;
+
stw_le_p(&s->mmior[addr],
- ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+ (oldval_preserved | newval_write) & ~newval_w1c_set);
}
static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
@@ -132,8 +137,13 @@ static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
uint32_t romask = ldl_le_p(&s->romask[addr]);
uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
uint32_t oldval = ldl_le_p(&s->mmior[addr]);
+
+ uint32_t oldval_preserved = oldval & (romask | w1cmask);
+ uint32_t newval_write = val & ~romask;
+ uint32_t newval_w1c_set = val & w1cmask;
+
stl_le_p(&s->mmior[addr],
- ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+ (oldval_preserved | newval_write) & ~newval_w1c_set);
}
static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
@@ -141,14 +151,19 @@ static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
uint64_t romask = ldq_le_p(&s->romask[addr]);
uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
uint64_t oldval = ldq_le_p(&s->mmior[addr]);
+
+ uint64_t oldval_preserved = oldval & (romask | w1cmask);
+ uint64_t newval_write = val & ~romask;
+ uint64_t newval_w1c_set = val & w1cmask;
+
stq_le_p(&s->mmior[addr],
- ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
+ (oldval_preserved | newval_write) & ~newval_w1c_set);
}
-/* OR a 64-bit register with a 64-bit value */
+/* AND a 64-bit register with a 64-bit value */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
- return amdvi_readq(s, addr) | val;
+ return amdvi_readq(s, addr) & val;
}
/* OR a 64-bit register with a 64-bit value storing result in the register */
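
The three write helpers above now share one update rule for registers with read-only (RO) and write-1-to-clear (W1C) bits. A minimal standalone sketch of that rule (reg_update is an illustrative name, not part of the patch):

    #include <stdint.h>

    /*
     * Apply a guest write to a register that has read-only (RO) and
     * write-1-to-clear (W1C) bit masks: RO and W1C bits keep their old
     * value on a plain write, and writing 1 to a W1C bit clears it.
     */
    static uint64_t reg_update(uint64_t oldval, uint64_t val,
                               uint64_t romask, uint64_t w1cmask)
    {
        uint64_t preserved = oldval & (romask | w1cmask);
        uint64_t written   = val & ~romask;  /* writable bits take the new value */
        uint64_t w1c_clear = val & w1cmask;  /* 1s clear the matching W1C bits */

        return (preserved | written) & ~w1c_clear;
    }
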
@@ -177,19 +192,31 @@ static void amdvi_generate_msi_interrupt(AMDVIState *s)
}
}
+static uint32_t get_next_eventlog_entry(AMDVIState *s)
+{
+ uint32_t evtlog_size = s->evtlog_len * AMDVI_EVENT_LEN;
+ return (s->evtlog_tail + AMDVI_EVENT_LEN) % evtlog_size;
+}
+
static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
+ uint32_t evtlog_tail_next;
+
/* event logging not enabled */
if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
AMDVI_MMIO_STATUS_EVT_OVF)) {
return;
}
+ evtlog_tail_next = get_next_eventlog_entry(s);
+
/* event log buffer full */
- if (s->evtlog_tail >= s->evtlog_len) {
- amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
- /* generate interrupt */
- amdvi_generate_msi_interrupt(s);
+ if (evtlog_tail_next == s->evtlog_head) {
+ /* generate overflow interrupt */
+ if (s->evtlog_intr) {
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
+ amdvi_generate_msi_interrupt(s);
+ }
return;
}
@@ -198,9 +225,13 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
}
- s->evtlog_tail += AMDVI_EVENT_LEN;
- amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
- amdvi_generate_msi_interrupt(s);
+ s->evtlog_tail = evtlog_tail_next;
+ amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail);
+
+ if (s->evtlog_intr) {
+ amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVENT_INT);
+ amdvi_generate_msi_interrupt(s);
+ }
}
static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
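
get_next_eventlog_entry() turns the event log into a circular buffer of fixed-size entries, declared full when advancing the tail would collide with the head. A standalone sketch of that invariant, assuming the AMD-Vi event log's 16-byte entries:

    #include <stdbool.h>
    #include <stdint.h>

    #define ENTRY_LEN 16u  /* AMD-Vi event log entries are 16 bytes */

    /* byte offset of the entry following 'tail' in a ring of num_entries */
    static uint32_t ring_next(uint32_t tail, uint32_t num_entries)
    {
        return (tail + ENTRY_LEN) % (num_entries * ENTRY_LEN);
    }

    /*
     * The ring is treated as full when the next tail would reach the head:
     * one slot stays empty so "full" and "empty" are distinguishable.
     */
    static bool ring_full(uint32_t head, uint32_t tail, uint32_t num_entries)
    {
        return ring_next(tail, num_entries) == head;
    }
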
@@ -592,18 +623,31 @@ static void amdvi_cmdbuf_run(AMDVIState *s)
}
}
-static void amdvi_mmio_trace(hwaddr addr, unsigned size)
+static inline uint8_t amdvi_mmio_get_index(hwaddr addr)
{
uint8_t index = (addr & ~0x2000) / 8;
if ((addr & 0x2000)) {
/* high table */
index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
- trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
} else {
index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
- trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
}
+
+ return index;
+}
+
+static void amdvi_mmio_trace_read(hwaddr addr, unsigned size)
+{
+ uint8_t index = amdvi_mmio_get_index(addr);
+ trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
+}
+
+static void amdvi_mmio_trace_write(hwaddr addr, unsigned size, uint64_t val)
+{
+ uint8_t index = amdvi_mmio_get_index(addr);
+ trace_amdvi_mmio_write(amdvi_mmio_low[index], addr, size, val,
+ addr & ~0x07);
}
static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
@@ -623,7 +667,7 @@ static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
} else if (size == 8) {
val = amdvi_readq(s, addr);
}
- amdvi_mmio_trace(addr, size);
+ amdvi_mmio_trace_read(addr, size);
return val;
}
@@ -633,7 +677,6 @@ static void amdvi_handle_control_write(AMDVIState *s)
unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);
- s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
s->evtlog_enabled = s->enabled && !!(control &
AMDVI_MMIO_CONTROL_EVENTLOGEN);
@@ -704,9 +747,19 @@ static inline void amdvi_handle_excllim_write(AMDVIState *s)
static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
+
+ if (amdvi_readq(s, AMDVI_MMIO_STATUS) & AMDVI_MMIO_STATUS_EVENT_INT)
+ /* Do not reset if the event log interrupt bit is set */
+ return;
+
s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
& AMDVI_MMIO_EVTLOG_SIZE_MASK);
+
+ /* clear tail and head pointers to 0 when event base is updated */
+ s->evtlog_tail = s->evtlog_head = 0;
+ amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_HEAD, s->evtlog_head);
+ amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail);
}
static inline void amdvi_handle_evttail_write(AMDVIState *s)
@@ -770,7 +823,7 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
return;
}
- amdvi_mmio_trace(addr, size);
+ amdvi_mmio_trace_write(addr, size, val);
switch (addr & ~0x07) {
case AMDVI_MMIO_CONTROL:
amdvi_mmio_reg_write(s, size, val, addr);
@@ -835,6 +888,9 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
amdvi_mmio_reg_write(s, size, val, addr);
amdvi_handle_pprtail_write(s);
break;
+ case AMDVI_MMIO_STATUS:
+ amdvi_mmio_reg_write(s, size, val, addr);
+ break;
}
}
@@ -1542,7 +1598,6 @@ static void amdvi_init(AMDVIState *s)
s->excl_allow = false;
s->mmio_enabled = false;
s->enabled = false;
- s->ats_enabled = false;
s->cmdbuf_enabled = false;
/* reset MMIO */
@@ -1613,7 +1668,8 @@ static const VMStateDescription vmstate_amdvi_sysbus_migratable = {
/* Updated in amdvi_handle_control_write() */
VMSTATE_BOOL(enabled, AMDVIState),
VMSTATE_BOOL(ga_enabled, AMDVIState),
- VMSTATE_BOOL(ats_enabled, AMDVIState),
+ /* bool ats_enabled is obsolete */
+ VMSTATE_UNUSED(1), /* was ats_enabled */
VMSTATE_BOOL(cmdbuf_enabled, AMDVIState),
VMSTATE_BOOL(completion_wait_intr, AMDVIState),
VMSTATE_BOOL(evtlog_enabled, AMDVIState),
@@ -1686,9 +1742,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
amdvi_uint64_equal, g_free, g_free);
- /* Pseudo address space under root PCI bus. */
- x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);
-
/* set up MMIO */
memory_region_init_io(&s->mr_mmio, OBJECT(s), &mmio_mem_ops, s,
"amdvi-mmio", AMDVI_MMIO_SIZE);
@@ -1711,6 +1764,9 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion_overlap(&s->mr_sys, AMDVI_INT_ADDR_FIRST,
&s->mr_ir, 1);
+ /* Pseudo address space under root PCI bus. */
+ x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);
+
if (kvm_enabled() && x86ms->apic_id_limit > 255 && !s->xtsup) {
error_report("AMD IOMMU with x2APIC configuration requires xtsup=on");
exit(EXIT_FAILURE);
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
index 8b42913..2476296 100644
--- a/hw/i386/amd_iommu.h
+++ b/hw/i386/amd_iommu.h
@@ -111,6 +111,7 @@
#define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4)
#define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3)
#define AMDVI_MMIO_STATUS_COMP_INT (1 << 2)
+#define AMDVI_MMIO_STATUS_EVENT_INT (1 << 1)
#define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0)
#define AMDVI_CMDBUF_ID_BYTE 0x07
@@ -322,7 +323,6 @@ struct AMDVIState {
uint64_t mmio_addr;
bool enabled; /* IOMMU enabled */
- bool ats_enabled; /* address translation enabled */
bool cmdbuf_enabled; /* command buffer enabled */
bool evtlog_enabled; /* event log enabled */
bool excl_enabled;
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index fe9a5f2..83c5e44 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -2828,6 +2828,7 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI,
VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE};
+ bool ret = true;
if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false,
__func__, "wait")) {
@@ -2839,8 +2840,6 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
uint32_t status_data = (uint32_t)(inv_desc->lo >>
VTD_INV_DESC_WAIT_DATA_SHIFT);
- assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
-
/* FIXME: need to be masked with HAW? */
dma_addr_t status_addr = inv_desc->hi;
trace_vtd_inv_desc_wait_sw(status_addr, status_data);
@@ -2849,18 +2848,22 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
&status_data, sizeof(status_data),
MEMTXATTRS_UNSPECIFIED)) {
trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
- return false;
+ ret = false;
}
- } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
+ }
+
+ if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
/* Interrupt flag */
vtd_generate_completion_event(s);
- } else {
+ }
+
+ if (!(inv_desc->lo & (VTD_INV_DESC_WAIT_IF | VTD_INV_DESC_WAIT_SW))) {
error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
" (unknown type)", __func__, inv_desc->hi,
inv_desc->lo);
return false;
}
- return true;
+ return ret;
}
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
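
The rework lets a single wait descriptor request both a status write (SW) and an interrupt (IF), which the old if/else-if chain could not honor together. A condensed sketch of the new control flow, with illustrative flag values and stubbed operations:

    #include <stdbool.h>
    #include <stdint.h>

    #define WAIT_SW (1u << 0)  /* illustrative bit positions */
    #define WAIT_IF (1u << 1)

    static bool write_status(void) { return true; } /* stub */
    static void raise_event(void) { }               /* stub */

    static bool process_wait(uint32_t flags)
    {
        bool ret = true;

        if (flags & WAIT_SW) {
            ret = write_status();  /* a failed status write is remembered... */
        }
        if (flags & WAIT_IF) {
            raise_event();         /* ...but no longer suppresses the event */
        }
        if (!(flags & (WAIT_SW | WAIT_IF))) {
            return false;          /* neither flag set: malformed descriptor */
        }
        return ret;
    }
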
diff --git a/hw/intc/loongarch_ipi_kvm.c b/hw/intc/loongarch_ipi_kvm.c
index 4cb3acc..dd4c367 100644
--- a/hw/intc/loongarch_ipi_kvm.c
+++ b/hw/intc/loongarch_ipi_kvm.c
@@ -23,36 +23,41 @@ static void kvm_ipi_access_regs(void *opaque, bool write)
LoongarchIPIState *lis = LOONGARCH_IPI(opaque);
IPICore *core;
uint64_t attr;
- int cpu, fd = lis->dev_fd;
+ int i, cpu_index, fd = lis->dev_fd;
if (fd == 0) {
return;
}
- for (cpu = 0; cpu < ipi->num_cpu; cpu++) {
- core = &ipi->cpu[cpu];
- attr = (cpu << 16) | CORE_STATUS_OFF;
+ for (i = 0; i < ipi->num_cpu; i++) {
+ core = &ipi->cpu[i];
+ if (core->cpu == NULL) {
+ continue;
+ }
+ cpu_index = i;
+
+ attr = (cpu_index << 16) | CORE_STATUS_OFF;
kvm_ipi_access_reg(fd, attr, &core->status, write);
- attr = (cpu << 16) | CORE_EN_OFF;
+ attr = (cpu_index << 16) | CORE_EN_OFF;
kvm_ipi_access_reg(fd, attr, &core->en, write);
- attr = (cpu << 16) | CORE_SET_OFF;
+ attr = (cpu_index << 16) | CORE_SET_OFF;
kvm_ipi_access_reg(fd, attr, &core->set, write);
- attr = (cpu << 16) | CORE_CLEAR_OFF;
+ attr = (cpu_index << 16) | CORE_CLEAR_OFF;
kvm_ipi_access_reg(fd, attr, &core->clear, write);
- attr = (cpu << 16) | CORE_BUF_20;
+ attr = (cpu_index << 16) | CORE_BUF_20;
kvm_ipi_access_reg(fd, attr, &core->buf[0], write);
- attr = (cpu << 16) | CORE_BUF_28;
+ attr = (cpu_index << 16) | CORE_BUF_28;
kvm_ipi_access_reg(fd, attr, &core->buf[2], write);
- attr = (cpu << 16) | CORE_BUF_30;
+ attr = (cpu_index << 16) | CORE_BUF_30;
kvm_ipi_access_reg(fd, attr, &core->buf[4], write);
- attr = (cpu << 16) | CORE_BUF_38;
+ attr = (cpu_index << 16) | CORE_BUF_38;
kvm_ipi_access_reg(fd, attr, &core->buf[6], write);
}
}
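
Since the IPICore array may now contain holes (entries whose cpu pointer is NULL), the loop skips them while still encoding the KVM device attribute as (cpu_index << 16) | register_offset. A sketch of the access pattern, with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    #define CORE_STATUS_OFF 0x0u  /* illustrative register offset */

    struct core { void *cpu; uint32_t status; };

    static void access_reg(int fd, uint64_t attr, uint32_t *val)
    {
        (void)fd; (void)attr; (void)val;  /* stub for the KVM ioctl */
    }

    static void sync_cores(int fd, struct core *cores, int num_cpu)
    {
        for (int i = 0; i < num_cpu; i++) {
            if (cores[i].cpu == NULL) {
                continue;  /* hole left by an absent/unplugged vCPU */
            }
            /* high half selects the core, low half the register */
            uint64_t attr = ((uint64_t)i << 16) | CORE_STATUS_OFF;
            access_reg(fd, attr, &cores[i].status);
        }
    }
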
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
index 4fa5f75..a1d9fa5 100644
--- a/hw/intc/riscv_aplic.c
+++ b/hw/intc/riscv_aplic.c
@@ -628,7 +628,7 @@ static void riscv_aplic_request(void *opaque, int irq, int level)
static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size)
{
- uint32_t irq, word, idc;
+ uint32_t irq, word, idc, sm;
RISCVAPLICState *aplic = opaque;
/* Reads must be 4 byte words */
@@ -696,6 +696,10 @@ static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size)
} else if ((APLIC_TARGET_BASE <= addr) &&
(addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) {
irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1;
+ sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK;
+ if (sm == APLIC_SOURCECFG_SM_INACTIVE) {
+ return 0;
+ }
return aplic->target[irq];
} else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) &&
(addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) {
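
The new sourcecfg check makes the target register of an INACTIVE interrupt source read as zero rather than exposing stale routing. In isolation, with illustrative constants:

    #include <stdint.h>

    #define SM_MASK     0x7u  /* illustrative source-mode field */
    #define SM_INACTIVE 0x0u

    static uint32_t read_target(const uint32_t *sourcecfg,
                                const uint32_t *target, uint32_t irq)
    {
        /* an inactive source has no valid routing: read back 0 */
        if ((sourcecfg[irq] & SM_MASK) == SM_INACTIVE) {
            return 0;
        }
        return target[irq];
    }
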
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index c4c49b0..6b5b5da 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -929,8 +929,9 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
vhost_net_save_acked_features(nc->peer);
}
- if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
- memset(n->vlans, 0xff, MAX_VLAN >> 3);
+ if (virtio_has_feature(vdev->guest_features ^ features, VIRTIO_NET_F_CTRL_VLAN)) {
+ bool vlan = virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN);
+ memset(n->vlans, vlan ? 0 : 0xff, MAX_VLAN >> 3);
}
if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
@@ -3942,6 +3943,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
n->vlans = g_malloc0(MAX_VLAN >> 3);
+ memset(n->vlans, 0xff, MAX_VLAN >> 3);
nc = qemu_get_queue(n->nic);
nc->rxfilter_notify_enabled = 1;
@@ -4041,7 +4043,6 @@ static void virtio_net_reset(VirtIODevice *vdev)
memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
- memset(n->vlans, 0, MAX_VLAN >> 3);
/* Flush any async TX */
for (i = 0; i < n->max_queue_pairs; i++) {
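
XOR-ing the previously acked guest_features with the incoming set isolates the bits that actually toggled, so the VLAN table is rewritten only on a real CTRL_VLAN transition instead of on every feature write. The idiom in standalone form, with an illustrative bit number:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define F_CTRL_VLAN 19  /* illustrative feature bit number */

    static bool has_bit(uint64_t features, unsigned bit)
    {
        return features & (1ULL << bit);
    }

    static void apply_features(uint64_t old_feats, uint64_t new_feats,
                               uint8_t *vlans, size_t vlans_len)
    {
        /* act only on a 0->1 or 1->0 transition of the feature bit */
        if (has_bit(old_feats ^ new_feats, F_CTRL_VLAN)) {
            bool filtering = has_bit(new_feats, F_CTRL_VLAN);
            /* filtering on: block all VLANs until the guest adds some;
             * filtering off: accept everything */
            memset(vlans, filtering ? 0 : 0xff, vlans_len);
        }
    }
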
diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c
index 3ad1874..8a4bf0d 100644
--- a/hw/pci/pcie_sriov.c
+++ b/hw/pci/pcie_sriov.c
@@ -64,6 +64,27 @@ static void unregister_vfs(PCIDevice *dev)
pci_set_word(dev->wmask + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0xffff);
}
+static void consume_config(PCIDevice *dev)
+{
+ uint8_t *cfg = dev->config + dev->exp.sriov_cap;
+
+ if (pci_get_word(cfg + PCI_SRIOV_CTRL) & PCI_SRIOV_CTRL_VFE) {
+ register_vfs(dev);
+ } else {
+ uint8_t *wmask = dev->wmask + dev->exp.sriov_cap;
+ uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF);
+ uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI;
+
+ unregister_vfs(dev);
+
+ if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) {
+ wmask_val |= PCI_SRIOV_CTRL_VFE;
+ }
+
+ pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val);
+ }
+}
+
static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset,
uint16_t vf_dev_id, uint16_t init_vfs,
uint16_t total_vfs, uint16_t vf_offset,
@@ -416,30 +437,13 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address,
trace_sriov_config_write(dev->name, PCI_SLOT(dev->devfn),
PCI_FUNC(dev->devfn), off, val, len);
- if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) {
- if (val & PCI_SRIOV_CTRL_VFE) {
- register_vfs(dev);
- } else {
- unregister_vfs(dev);
- }
- } else if (range_covers_byte(off, len, PCI_SRIOV_NUM_VF)) {
- uint8_t *cfg = dev->config + sriov_cap;
- uint8_t *wmask = dev->wmask + sriov_cap;
- uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF);
- uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI;
-
- if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) {
- wmask_val |= PCI_SRIOV_CTRL_VFE;
- }
-
- pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val);
- }
+ consume_config(dev);
}
void pcie_sriov_pf_post_load(PCIDevice *dev)
{
if (dev->exp.sriov_cap) {
- register_vfs(dev);
+ consume_config(dev);
}
}
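
Funneling both guest config writes and migration post-load through consume_config() means VF state is always re-derived from config space by the same code, so a migrated device cannot diverge from what a guest write would have produced. The shape of the pattern, heavily reduced:

    #include <stdbool.h>
    #include <stdint.h>

    #define CTRL_VFE 0x1u  /* illustrative VF Enable bit */

    struct dev { uint16_t sriov_ctrl; bool vfs_enabled; };

    /* single source of truth: derive runtime state from config space */
    static void consume_config(struct dev *d)
    {
        d->vfs_enabled = d->sriov_ctrl & CTRL_VFE;
    }
    /* called from the config-write path AND from post-load, so the two
     * paths can never disagree about the derived state */
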
diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c
index ee1416d..f1406cb 100644
--- a/hw/riscv/virt-acpi-build.c
+++ b/hw/riscv/virt-acpi-build.c
@@ -270,11 +270,8 @@ spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s)
#define RHCT_NODE_ARRAY_OFFSET 56
/*
- * ACPI spec, Revision 6.5+
- * 5.2.36 RISC-V Hart Capabilities Table (RHCT)
- * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16
- * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view
- * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view
+ * ACPI spec, Revision 6.6
+ * 5.2.37 RISC-V Hart Capabilities Table (RHCT)
*/
static void build_rhct(GArray *table_data,
BIOSLinker *linker,
@@ -421,7 +418,10 @@ static void build_rhct(GArray *table_data,
acpi_table_end(linker, &table);
}
-/* FADT */
+/*
+ * ACPI spec, Revision 6.6
+ * 5.2.9 Fixed ACPI Description Table (FADT)
+ */
static void build_fadt_rev6(GArray *table_data,
BIOSLinker *linker,
RISCVVirtState *s,
@@ -429,7 +429,7 @@ static void build_fadt_rev6(GArray *table_data,
{
AcpiFadtData fadt = {
.rev = 6,
- .minor_ver = 5,
+ .minor_ver = 6,
.flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI,
.xdsdt_tbl_offset = &dsdt_tbl_offset,
};
@@ -508,11 +508,8 @@ static void build_dsdt(GArray *table_data,
}
/*
- * ACPI spec, Revision 6.5+
+ * ACPI spec, Revision 6.6
* 5.2.12 Multiple APIC Description Table (MADT)
- * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15
- * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view
- * https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view
*/
static void build_madt(GArray *table_data,
BIOSLinker *linker,
@@ -537,7 +534,7 @@ static void build_madt(GArray *table_data,
hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket);
- AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id,
+ AcpiTable table = { .sig = "APIC", .rev = 7, .oem_id = s->oem_id,
.oem_table_id = s->oem_table_id };
acpi_table_begin(&table, table_data);
@@ -812,10 +809,8 @@ static void build_rimt(GArray *table_data, BIOSLinker *linker,
}
/*
- * ACPI spec, Revision 6.5+
+ * ACPI spec, Revision 6.6
* 5.2.16 System Resource Affinity Table (SRAT)
- * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/25
- * https://drive.google.com/file/d/1YTdDx2IPm5IeZjAW932EYU-tUtgS08tX/view
*/
static void
build_srat(GArray *table_data, BIOSLinker *linker, RISCVVirtState *vms)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index c30ea11..6557c58 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1110,7 +1110,8 @@ static bool vhost_log_global_start(MemoryListener *listener, Error **errp)
r = vhost_migration_log(listener, true);
if (r < 0) {
- abort();
+ error_setg_errno(errp, -r, "vhost: Failed to start logging");
+ return false;
}
return true;
}
@@ -1121,7 +1122,8 @@ static void vhost_log_global_stop(MemoryListener *listener)
r = vhost_migration_log(listener, false);
if (r < 0) {
- abort();
+ /* Not fatal, so report it, but take no further action */
+ warn_report("vhost: Failed to stop logging");
}
}
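
The start path now reports failure through the listener's Error **errp instead of abort()ing, while the stop path only warns because its callers cannot act on a failure anyway. A minimal sketch of the QEMU errp idiom, with a hypothetical fallible operation:

    #include <errno.h>
    #include "qapi/error.h"  /* QEMU's Error API */

    static int do_start(void) { return -ENOMEM; } /* hypothetical op */

    static bool log_start(Error **errp)
    {
        int r = do_start();
        if (r < 0) {
            /* hand the caller a described error instead of abort()ing */
            error_setg_errno(errp, -r, "failed to start logging");
            return false;
        }
        return true;
    }
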
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 2ab1d20..9a81ad9 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -938,18 +938,18 @@ static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
- unsigned int i, steps, max_steps;
+ unsigned int i, steps, max_steps, ndescs;
i = vq->used_idx % vq->vring.num;
steps = 0;
/*
- * We shouldn't need to increase 'i' by more than the distance
- * between used_idx and last_avail_idx.
+ * We shouldn't need to increase 'i' by an amount greater than or equal
+ * to the distance between used_idx and last_avail_idx (max_steps).
*/
max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num;
/* Search for element in vq->used_elems */
- while (steps <= max_steps) {
+ while (steps < max_steps) {
/* Found element, set length and mark as filled */
if (vq->used_elems[i].index == elem->index) {
vq->used_elems[i].len = len;
@@ -957,8 +957,18 @@ static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem,
break;
}
- i += vq->used_elems[i].ndescs;
- steps += vq->used_elems[i].ndescs;
+ ndescs = vq->used_elems[i].ndescs;
+
+ /* Defensive sanity check */
+ if (unlikely(ndescs == 0 || ndescs > vq->vring.num)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: %s invalid ndescs %u at position %u\n",
+ __func__, vq->vdev->name, ndescs, i);
+ return;
+ }
+
+ i += ndescs;
+ steps += ndescs;
if (i >= vq->vring.num) {
i -= vq->vring.num;
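
Advancing by ndescs and wrapping manually is cheap, but a corrupt descriptor count of 0 would spin the loop forever and one larger than the ring would overrun used_elems; the new check rejects both before stepping. The stepping logic in isolation:

    #include <stdbool.h>

    /* advance position *i by ndescs slots in a ring of num entries */
    static bool ring_step(unsigned *i, unsigned ndescs, unsigned num)
    {
        if (ndescs == 0 || ndescs > num) {
            return false;  /* 0 would loop forever, > num would overrun */
        }
        *i += ndescs;
        if (*i >= num) {
            *i -= num;     /* cheap wrap: *i < 2 * num holds here */
        }
        return true;
    }
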
diff --git a/linux-user/strace.list b/linux-user/strace.list
index fdf94ef..ab81835 100644
--- a/linux-user/strace.list
+++ b/linux-user/strace.list
@@ -1716,3 +1716,6 @@
{ TARGET_NR_clock_gettime64, "clock_gettime64" , NULL, print_clock_gettime64,
print_syscall_ret_clock_gettime64 },
#endif
+#ifdef TARGET_NR_riscv_hwprobe
+{ TARGET_NR_riscv_hwprobe, "riscv_hwprobe" , "%s(%p,%d,%d,%d,%d,%d)", NULL, NULL },
+#endif
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 6a30a44..74d26a9 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -1840,9 +1840,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
&has_cvq, errp);
- if (queue_pairs < 0) {
- qemu_close(vdpa_device_fd);
- return queue_pairs;
+ if (queue_pairs <= 0) {
+ goto err;
}
r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index e172b11..b5f732f 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -196,8 +196,8 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical,
}
/* Check valid extension */
- addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
- if (!(addr_high == 0 || addr_high == -1)) {
+ addr_high = (int64_t)address >> (TARGET_VIRT_ADDR_SPACE_BITS - 1);
+ if (!(addr_high == 0 || addr_high == -1ULL)) {
return TLBRET_BADADDR;
}
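
The replacement is the standard canonical-address test: all bits above the virtual address space must be copies of the top in-range bit, which an arithmetic right shift reduces to a two-value comparison. Worked standalone:

    #include <stdbool.h>
    #include <stdint.h>

    /* bits [63 : va_bits-1] must all equal bit (va_bits-1) */
    static bool is_canonical(uint64_t address, unsigned va_bits)
    {
        int64_t high = (int64_t)address >> (va_bits - 1);
        return high == 0 || high == -1;
    }
    /*
     * e.g. va_bits = 48:
     *   0x00007fffffffffff -> high ==  0  (valid)
     *   0xffff800000000000 -> high == -1  (valid)
     *   0x0000800000000000 -> high ==  1  (invalid)
     */
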
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 8631be9..8842e07 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -374,8 +374,11 @@ static RISCVException aia_smode(CPURISCVState *env, int csrno)
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
{
int ret;
+ int csr_priv = get_field(csrno, 0x300);
- if (!riscv_cpu_cfg(env)->ext_ssaia) {
+ if (csr_priv == PRV_M && !riscv_cpu_cfg(env)->ext_smaia) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ } else if (!riscv_cpu_cfg(env)->ext_ssaia) {
return RISCV_EXCP_ILLEGAL_INST;
}
@@ -5577,7 +5580,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
csr_priv = get_field(csrno, 0x300);
if (!env->debugger && (effective_priv < csr_priv)) {
- if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
+ if (csr_priv <= (PRV_S + 1) && env->virt_enabled) {
return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
}
return RISCV_EXCP_ILLEGAL_INST;
@@ -5862,8 +5865,8 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
NULL, read_mstatus_i128 },
[CSR_MISA] = { "misa", any, read_misa, write_misa,
NULL, read_misa_i128 },
- [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
- [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
+ [CSR_MIDELEG] = { "mideleg", smode, NULL, NULL, rmw_mideleg },
+ [CSR_MEDELEG] = { "medeleg", smode, read_medeleg, write_medeleg },
[CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
[CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
[CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
@@ -5871,7 +5874,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
write_mstatush },
- [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore,
+ [CSR_MEDELEGH] = { "medelegh", smode32, read_zero, write_ignore,
.min_priv_ver = PRIV_VERSION_1_13_0 },
[CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh,
.min_priv_ver = PRIV_VERSION_1_13_0 },
@@ -5911,7 +5914,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
/* Machine-Level High-Half CSRs (AIA) */
- [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
+ [CSR_MIDELEGH] = { "midelegh", aia_smode32, NULL, NULL, rmw_midelegh },
[CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
[CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
[CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
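
These predicate changes lean on the RISC-V convention that CSR address bits [9:8] encode the lowest privilege level allowed to access the register; get_field(csrno, 0x300) extracts exactly that field. In sketch form:

    /* bits [9:8] of a CSR number: 0 = U, 1 = S, 2 = H/reserved, 3 = M */
    static int csr_min_priv(int csrno)
    {
        return (csrno >> 8) & 0x3;  /* equivalent to get_field(csrno, 0x300) */
    }
    /* e.g. 0x344 (mip) -> 3 (M-mode), 0x144 (sip) -> 1 (S-mode) */
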
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 610bf9f..71f98fb 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -864,286 +864,32 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)
/*
- * MAXSZ returns the maximum vector size can be operated in bytes,
- * which is used in GVEC IR when vl_eq_vlmax flag is set to true
- * to accelerate vector operation.
- */
-static inline uint32_t MAXSZ(DisasContext *s)
-{
- int max_sz = s->cfg_ptr->vlenb << 3;
- return max_sz >> (3 - s->lmul);
-}
-
-static inline uint32_t get_log2(uint32_t a)
-{
- uint32_t i = 0;
- for (; a > 0;) {
- a >>= 1;
- i++;
- }
- return i;
-}
-
-typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long);
-
-/*
- * Simulate the strided load/store main loop:
- *
- * for (i = env->vstart; i < env->vl; env->vstart = ++i) {
- * k = 0;
- * while (k < nf) {
- * if (!vm && !vext_elem_mask(v0, i)) {
- * vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
- * (i + k * max_elems + 1) * esz);
- * k++;
- * continue;
- * }
- * target_ulong addr = base + stride * i + (k << log2_esz);
- * ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra);
- * k++;
- * }
- * }
- */
-static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1,
- uint32_t rs2, uint32_t vm, uint32_t nf,
- gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn,
- bool is_load)
-{
- TCGv addr = tcg_temp_new();
- TCGv base = get_gpr(s, rs1, EXT_NONE);
- TCGv stride = get_gpr(s, rs2, EXT_NONE);
-
- TCGv i = tcg_temp_new();
- TCGv i_esz = tcg_temp_new();
- TCGv k = tcg_temp_new();
- TCGv k_esz = tcg_temp_new();
- TCGv k_max = tcg_temp_new();
- TCGv mask = tcg_temp_new();
- TCGv mask_offs = tcg_temp_new();
- TCGv mask_offs_64 = tcg_temp_new();
- TCGv mask_elem = tcg_temp_new();
- TCGv mask_offs_rem = tcg_temp_new();
- TCGv vreg = tcg_temp_new();
- TCGv dest_offs = tcg_temp_new();
- TCGv stride_offs = tcg_temp_new();
-
- uint32_t max_elems = MAXSZ(s) >> s->sew;
-
- TCGLabel *start = gen_new_label();
- TCGLabel *end = gen_new_label();
- TCGLabel *start_k = gen_new_label();
- TCGLabel *inc_k = gen_new_label();
- TCGLabel *end_k = gen_new_label();
-
- MemOp atomicity = MO_ATOM_NONE;
- if (s->sew == 0) {
- atomicity = MO_ATOM_NONE;
- } else {
- atomicity = MO_ATOM_IFALIGN_PAIR;
- }
-
- mark_vs_dirty(s);
-
- tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0));
-
- /* Start of outer loop. */
- tcg_gen_mov_tl(i, cpu_vstart);
- gen_set_label(start);
- tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end);
- tcg_gen_shli_tl(i_esz, i, s->sew);
- /* Start of inner loop. */
- tcg_gen_movi_tl(k, 0);
- gen_set_label(start_k);
- tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k);
- /*
- * If we are in mask agnostic regime and the operation is not unmasked we
- * set the inactive elements to 1.
- */
- if (!vm && s->vma) {
- TCGLabel *active_element = gen_new_label();
- /* (i + k * max_elems) * esz */
- tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew));
- tcg_gen_add_tl(mask_offs, mask_offs, i_esz);
-
- /*
- * Check whether the i bit of the mask is 0 or 1.
- *
- * static inline int vext_elem_mask(void *v0, int index)
- * {
- * int idx = index / 64;
- * int pos = index % 64;
- * return (((uint64_t *)v0)[idx] >> pos) & 1;
- * }
- */
- tcg_gen_shri_tl(mask_offs_64, mask_offs, 3);
- tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask);
- tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0);
- tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8));
- tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem);
- tcg_gen_andi_tl(mask_elem, mask_elem, 1);
- tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0),
- active_element);
- /*
- * Set masked-off elements in the destination vector register to 1s.
- * Store instructions simply skip this bit as memory ops access memory
- * only for active elements.
- */
- if (is_load) {
- tcg_gen_shli_tl(mask_offs, mask_offs, s->sew);
- tcg_gen_add_tl(mask_offs, mask_offs, dest);
- st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0);
- }
- tcg_gen_br(inc_k);
- gen_set_label(active_element);
- }
- /*
- * The element is active, calculate the address with stride:
- * target_ulong addr = base + stride * i + (k << log2_esz);
- */
- tcg_gen_mul_tl(stride_offs, stride, i);
- tcg_gen_shli_tl(k_esz, k, s->sew);
- tcg_gen_add_tl(stride_offs, stride_offs, k_esz);
- tcg_gen_add_tl(addr, base, stride_offs);
- /* Calculate the offset in the dst/src vector register. */
- tcg_gen_shli_tl(k_max, k, get_log2(max_elems));
- tcg_gen_add_tl(dest_offs, i, k_max);
- tcg_gen_shli_tl(dest_offs, dest_offs, s->sew);
- tcg_gen_add_tl(dest_offs, dest_offs, dest);
- if (is_load) {
- tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
- st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
- } else {
- ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0);
- tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity);
- }
- /*
- * We don't execute the load/store above if the element was inactive.
- * We jump instead directly to incrementing k and continuing the loop.
- */
- if (!vm && s->vma) {
- gen_set_label(inc_k);
- }
- tcg_gen_addi_tl(k, k, 1);
- tcg_gen_br(start_k);
- /* End of the inner loop. */
- gen_set_label(end_k);
-
- tcg_gen_addi_tl(i, i, 1);
- tcg_gen_mov_tl(cpu_vstart, i);
- tcg_gen_br(start);
-
- /* End of the outer loop. */
- gen_set_label(end);
-
- return;
-}
-
-
-/*
- * Set the tail bytes of the strided loads/stores to 1:
- *
- * for (k = 0; k < nf; ++k) {
- * cnt = (k * max_elems + vl) * esz;
- * tot = (k * max_elems + max_elems) * esz;
- * for (i = cnt; i < tot; i += esz) {
- * store_1s(-1, vd[vl+i]);
- * }
- * }
+ *** stride load and store
*/
-static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf,
- gen_tl_ldst *st_fn)
-{
- TCGv i = tcg_temp_new();
- TCGv k = tcg_temp_new();
- TCGv tail_cnt = tcg_temp_new();
- TCGv tail_tot = tcg_temp_new();
- TCGv tail_addr = tcg_temp_new();
-
- TCGLabel *start = gen_new_label();
- TCGLabel *end = gen_new_label();
- TCGLabel *start_i = gen_new_label();
- TCGLabel *end_i = gen_new_label();
-
- uint32_t max_elems_b = MAXSZ(s);
- uint32_t esz = 1 << s->sew;
-
- /* Start of the outer loop. */
- tcg_gen_movi_tl(k, 0);
- tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew);
- tcg_gen_movi_tl(tail_tot, max_elems_b);
- tcg_gen_add_tl(tail_addr, dest, tail_cnt);
- gen_set_label(start);
- tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end);
- /* Start of the inner loop. */
- tcg_gen_mov_tl(i, tail_cnt);
- gen_set_label(start_i);
- tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i);
- /* store_1s(-1, vd[vl+i]); */
- st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0);
- tcg_gen_addi_tl(tail_addr, tail_addr, esz);
- tcg_gen_addi_tl(i, i, esz);
- tcg_gen_br(start_i);
- /* End of the inner loop. */
- gen_set_label(end_i);
- /* Update the counts */
- tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b);
- tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b);
- tcg_gen_addi_tl(k, k, 1);
- tcg_gen_br(start);
- /* End of the outer loop. */
- gen_set_label(end);
-
- return;
-}
+typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
+ TCGv, TCGv_env, TCGv_i32);
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
- uint32_t data, DisasContext *s, bool is_load)
+ uint32_t data, gen_helper_ldst_stride *fn,
+ DisasContext *s)
{
- if (!s->vstart_eq_zero) {
- return false;
- }
-
- TCGv dest = tcg_temp_new();
-
- uint32_t nf = FIELD_EX32(data, VDATA, NF);
- uint32_t vm = FIELD_EX32(data, VDATA, VM);
-
- /* Destination register and mask register */
- tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd));
-
- /*
- * Select the appropriate load/tore to retrieve data from the vector
- * register given a specific sew.
- */
- static gen_tl_ldst * const ld_fns[4] = {
- tcg_gen_ld8u_tl, tcg_gen_ld16u_tl,
- tcg_gen_ld32u_tl, tcg_gen_ld_tl
- };
-
- static gen_tl_ldst * const st_fns[4] = {
- tcg_gen_st8_tl, tcg_gen_st16_tl,
- tcg_gen_st32_tl, tcg_gen_st_tl
- };
+ TCGv_ptr dest, mask;
+ TCGv base, stride;
+ TCGv_i32 desc;
- gen_tl_ldst *ld_fn = ld_fns[s->sew];
- gen_tl_ldst *st_fn = st_fns[s->sew];
+ dest = tcg_temp_new_ptr();
+ mask = tcg_temp_new_ptr();
+ base = get_gpr(s, rs1, EXT_NONE);
+ stride = get_gpr(s, rs2, EXT_NONE);
+ desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
+ s->cfg_ptr->vlenb, data));
- if (ld_fn == NULL || st_fn == NULL) {
- return false;
- }
+ tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
+ tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
mark_vs_dirty(s);
- gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load);
-
- tcg_gen_movi_tl(cpu_vstart, 0);
-
- /*
- * Set the tail bytes to 1 if tail agnostic:
- */
- if (s->vta != 0 && is_load) {
- gen_ldst_stride_tail_loop(s, dest, nf, st_fn);
- }
+ fn(dest, mask, base, stride, tcg_env, desc);
finalize_rvv_inst(s);
return true;
@@ -1152,6 +898,16 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
+ gen_helper_ldst_stride *fn;
+ static gen_helper_ldst_stride * const fns[4] = {
+ gen_helper_vlse8_v, gen_helper_vlse16_v,
+ gen_helper_vlse32_v, gen_helper_vlse64_v
+ };
+
+ fn = fns[eew];
+ if (fn == NULL) {
+ return false;
+ }
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
@@ -1159,7 +915,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
data = FIELD_DP32(data, VDATA, NF, a->nf);
data = FIELD_DP32(data, VDATA, VTA, s->vta);
data = FIELD_DP32(data, VDATA, VMA, s->vma);
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -1177,13 +933,23 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
uint32_t data = 0;
+ gen_helper_ldst_stride *fn;
+ static gen_helper_ldst_stride * const fns[4] = {
+ /* masked stride store */
+ gen_helper_vsse8_v, gen_helper_vsse16_v,
+ gen_helper_vsse32_v, gen_helper_vsse64_v
+ };
uint8_t emul = vext_get_emul(s, eew);
data = FIELD_DP32(data, VDATA, VM, a->vm);
data = FIELD_DP32(data, VDATA, LMUL, emul);
data = FIELD_DP32(data, VDATA, NF, a->nf);
+ fn = fns[eew];
+ if (fn == NULL) {
+ return false;
+ }
- return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false);
+ return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
@@ -1534,6 +1300,17 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false)
*** Vector Integer Arithmetic Instructions
*/
+/*
+ * MAXSZ returns the maximum vector size that can be operated on, in
+ * bytes, which is used in GVEC IR when the vl_eq_vlmax flag is set to
+ * true to accelerate vector operations.
+ */
+static inline uint32_t MAXSZ(DisasContext *s)
+{
+ int max_sz = s->cfg_ptr->vlenb * 8;
+ return max_sz >> (3 - s->lmul);
+}
+
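
MAXSZ() gives the byte footprint a GVEC-accelerated op may touch: one register of vlenb bytes scaled by LMUL, where s->lmul holds log2(LMUL) and may be negative for fractional LMUL. A worked sketch:

    #include <stdint.h>

    /* vlenb = VLEN / 8; lmul = log2(LMUL), in [-3, 3] */
    static uint32_t maxsz(uint32_t vlenb, int lmul)
    {
        return (vlenb * 8) >> (3 - lmul);
    }
    /*
     * vlenb = 16 (VLEN = 128):
     *   lmul =  0 (LMUL = 1)   -> 16 bytes
     *   lmul =  1 (LMUL = 2)   -> 32 bytes
     *   lmul = -1 (LMUL = 1/2) ->  8 bytes
     */
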
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 15460bf..110292e 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -355,21 +355,22 @@ target_ulong helper_sret(CPURISCVState *env)
}
static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
- target_ulong prev_priv)
+ target_ulong prev_priv,
+ uintptr_t ra)
{
if (!(env->priv >= PRV_M)) {
- riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
}
if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
env->priv_ver,
env->misa_ext) && (retpc & 0x3)) {
- riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
+ riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, ra);
}
if (riscv_cpu_cfg(env)->pmp &&
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
- riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
+ riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, ra);
}
}
static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
@@ -394,8 +395,9 @@ target_ulong helper_mret(CPURISCVState *env)
target_ulong retpc = env->mepc & get_xepc_mask(env);
uint64_t mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+ uintptr_t ra = GETPC();
- check_ret_from_m_mode(env, retpc, prev_priv);
+ check_ret_from_m_mode(env, retpc, prev_priv, ra);
target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
(prev_priv != PRV_M);
@@ -443,8 +445,9 @@ target_ulong helper_mnret(CPURISCVState *env)
target_ulong retpc = env->mnepc;
target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
target_ulong prev_virt;
+ uintptr_t ra = GETPC();
- check_ret_from_m_mode(env, retpc, prev_priv);
+ check_ret_from_m_mode(env, retpc, prev_priv, ra);
prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
(prev_priv != PRV_M);
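
GETPC() is only meaningful in the helper invoked directly from TCG-generated code, so the refactor captures it once at the top of helper_mret()/helper_mnret() and threads it into the shared check; raising an exception with a return address taken in a deeper frame would unwind to the wrong guest instruction. A sketch of the rule in QEMU idiom (helper_example is hypothetical):

    /* capture the host return address in the outermost helper frame
     * and pass it down, so exception unwinding finds the right guest PC */
    static void check_common(CPURISCVState *env, uintptr_t ra)
    {
        if (env->priv < PRV_M) {
            riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra);
        }
    }

    target_ulong helper_example(CPURISCVState *env)
    {
        uintptr_t ra = GETPC();  /* valid only at this level */
        check_common(env, ra);
        return 0;
    }
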
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 3540327..72f1372 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -211,11 +211,12 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
break;
case PMP_AMATCH_TOR:
- sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
- ea = (this_addr << 2) - 1u;
- if (sa > ea) {
+ if (prev_addr >= this_addr) {
sa = ea = 0u;
+ break;
}
+ sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
+ ea = (this_addr << 2) - 1u;
break;
case PMP_AMATCH_NA4:
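
Comparing the raw pmpaddr values before shifting keeps the empty-range test independent of the shift; in particular, this_addr == 0 would otherwise make (this_addr << 2) - 1 wrap around to all-ones, turning an empty TOR region into one that matches everything. Standalone:

    #include <stdint.h>

    /*
     * TOR region is [prev_addr << 2, (this_addr << 2) - 1]. Detect an
     * empty or inverted range on the UNshifted values: this_addr == 0
     * would otherwise wrap (this_addr << 2) - 1 to UINT64_MAX.
     */
    static void tor_range(uint64_t prev_addr, uint64_t this_addr,
                          uint64_t *sa, uint64_t *ea)
    {
        if (prev_addr >= this_addr) {
            *sa = *ea = 0;  /* empty region: matches nothing */
            return;
        }
        *sa = prev_addr << 2;
        *ea = (this_addr << 2) - 1;
    }
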
diff --git a/tests/data/acpi/aarch64/virt/HEST b/tests/data/acpi/aarch64/virt/HEST
new file mode 100644
index 0000000..4c5d8c5
--- /dev/null
+++ b/tests/data/acpi/aarch64/virt/HEST
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/APIC b/tests/data/acpi/riscv64/virt/APIC
index 66a25df..3fb5b75 100644
--- a/tests/data/acpi/riscv64/virt/APIC
+++ b/tests/data/acpi/riscv64/virt/APIC
Binary files differ
diff --git a/tests/data/acpi/riscv64/virt/FACP b/tests/data/acpi/riscv64/virt/FACP
index a5276b6..78e1b14 100644
--- a/tests/data/acpi/riscv64/virt/FACP
+++ b/tests/data/acpi/riscv64/virt/FACP
Binary files differ
diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c
index 6aec68d..e7e6926 100644
--- a/tests/qtest/bios-tables-test.c
+++ b/tests/qtest/bios-tables-test.c
@@ -2208,7 +2208,7 @@ static void test_acpi_aarch64_virt_tcg(void)
data.smbios_cpu_max_speed = 2900;
data.smbios_cpu_curr_speed = 2700;
- test_acpi_one("-cpu cortex-a57 "
+ test_acpi_one("-cpu cortex-a57 -machine ras=on "
"-smbios type=4,max-speed=2900,current-speed=2700", &data);
free_test_data(&data);
}