Diffstat (limited to 'hw')
-rw-r--r--  hw/acpi/aml-build.c         |  68
-rw-r--r--  hw/arm/Kconfig              |   4
-rw-r--r--  hw/arm/aspeed_ast2600.c     |  16
-rw-r--r--  hw/arm/musicpal.c           | 381
-rw-r--r--  hw/arm/npcm7xx_boards.c     |  10
-rw-r--r--  hw/arm/virt-acpi-build.c    |  10
-rw-r--r--  hw/arm/virt.c               | 184
-rw-r--r--  hw/audio/Kconfig            |   3
-rw-r--r--  hw/intc/arm_gic.c           |  11
-rw-r--r--  hw/intc/arm_gicv3_its.c     | 452
-rw-r--r--  hw/intc/arm_gicv3_redist.c  |   4
-rw-r--r--  hw/misc/aspeed_i3c.c        | 384
-rw-r--r--  hw/misc/meson.build         |   1
-rw-r--r--  hw/misc/trace-events        |   6
-rw-r--r--  hw/net/meson.build          |   1
-rw-r--r--  hw/net/mv88w8618_eth.c      | 403
-rw-r--r--  hw/virtio/virtio-mem.c      |  36
17 files changed, 1282 insertions, 692 deletions
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index b3b3310..bb2cad6 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2001,7 +2001,11 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
const char *oem_id, const char *oem_table_id)
{
- int pptt_start = table_data->len;
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ GQueue *list = g_queue_new();
+ guint pptt_start = table_data->len;
+ guint parent_offset;
+ guint length, i;
int uid = 0;
int socket;
AcpiTable table = { .sig = "PPTT", .rev = 2,
@@ -2010,9 +2014,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
acpi_table_begin(&table, table_data);
for (socket = 0; socket < ms->smp.sockets; socket++) {
- uint32_t socket_offset = table_data->len - pptt_start;
- int core;
-
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
build_processor_hierarchy_node(
table_data,
/*
@@ -2021,35 +2024,64 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
*/
(1 << 0),
0, socket, NULL, 0);
+ }
- for (core = 0; core < ms->smp.cores; core++) {
- uint32_t core_offset = table_data->len - pptt_start;
- int thread;
+ if (mc->smp_props.clusters_supported) {
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int cluster;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
+ build_processor_hierarchy_node(
+ table_data,
+ (0 << 0), /* not a physical package */
+ parent_offset, cluster, NULL, 0);
+ }
+ }
+ }
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int core;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (core = 0; core < ms->smp.cores; core++) {
if (ms->smp.threads > 1) {
+ g_queue_push_tail(list,
+ GUINT_TO_POINTER(table_data->len - pptt_start));
build_processor_hierarchy_node(
table_data,
(0 << 0), /* not a physical package */
- socket_offset, core, NULL, 0);
-
- for (thread = 0; thread < ms->smp.threads; thread++) {
- build_processor_hierarchy_node(
- table_data,
- (1 << 1) | /* ACPI Processor ID valid */
- (1 << 2) | /* Processor is a Thread */
- (1 << 3), /* Node is a Leaf */
- core_offset, uid++, NULL, 0);
- }
+ parent_offset, core, NULL, 0);
} else {
build_processor_hierarchy_node(
table_data,
(1 << 1) | /* ACPI Processor ID valid */
(1 << 3), /* Node is a Leaf */
- socket_offset, uid++, NULL, 0);
+ parent_offset, uid++, NULL, 0);
}
}
}
+ length = g_queue_get_length(list);
+ for (i = 0; i < length; i++) {
+ int thread;
+
+ parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+ for (thread = 0; thread < ms->smp.threads; thread++) {
+ build_processor_hierarchy_node(
+ table_data,
+ (1 << 1) | /* ACPI Processor ID valid */
+ (1 << 2) | /* Processor is a Thread */
+ (1 << 3), /* Node is a Leaf */
+ parent_offset, uid++, NULL, 0);
+ }
+ }
+
+ g_queue_free(list);
acpi_table_end(linker, &table);
}
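
The rewritten build_pptt() above builds the topology breadth-first: each pass drains the GQueue of node offsets produced by the previous level and queues the offsets of the nodes it emits, so clusters are parented to sockets, cores to clusters (or directly to sockets), and threads to cores. Below is a standalone sketch of that offset-queue pattern, with an assumed 20-byte node size and a hypothetical emit_node() standing in for build_processor_hierarchy_node(); it is an illustration, not code from the patch.

    #include <glib.h>
    #include <stdio.h>

    static guint table_len;                    /* stands in for table_data->len */

    /* Hypothetical stand-in for build_processor_hierarchy_node(). */
    static guint emit_node(guint parent_offset, int id)
    {
        guint offset = table_len;
        table_len += 20;                       /* assume a 20-byte node, no private resources */
        printf("node id=%d parent=0x%x at offset 0x%x\n", id, parent_offset, offset);
        return offset;
    }

    /* One breadth-first pass: create 'fanout' children under every queued parent. */
    static void build_level(GQueue *parents, int fanout)
    {
        guint n = g_queue_get_length(parents);

        for (guint i = 0; i < n; i++) {
            guint parent = GPOINTER_TO_UINT(g_queue_pop_head(parents));

            for (int j = 0; j < fanout; j++) {
                g_queue_push_tail(parents, GUINT_TO_POINTER(emit_node(parent, j)));
            }
        }
    }

    int main(void)
    {
        GQueue *q = g_queue_new();

        g_queue_push_tail(q, GUINT_TO_POINTER(emit_node(0, 0)));  /* one socket */
        build_level(q, 2);                     /* clusters per socket */
        build_level(q, 2);                     /* cores per cluster   */
        build_level(q, 2);                     /* threads per core    */
        g_queue_free(q);
        return 0;
    }

Each build_level() call corresponds to one of the cluster/core/thread loops in the patch; the socket loop is the initial push.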
diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig
index e652590..2e00491 100644
--- a/hw/arm/Kconfig
+++ b/hw/arm/Kconfig
@@ -28,6 +28,7 @@ config ARM_VIRT
select ACPI_HW_REDUCED
select ACPI_APEI
select ACPI_VIOT
+ select VIRTIO_MEM_SUPPORTED
config CHEETAH
bool
@@ -94,6 +95,9 @@ config MUSCA
select SPLIT_IRQ
select UNIMP
+config MARVELL_88W8618
+ bool
+
config MUSICPAL
bool
select OR_IRQ
diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c
index e33483f..8f37bdb 100644
--- a/hw/arm/aspeed_ast2600.c
+++ b/hw/arm/aspeed_ast2600.c
@@ -61,6 +61,7 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = {
[ASPEED_DEV_UART1] = 0x1E783000,
[ASPEED_DEV_UART5] = 0x1E784000,
[ASPEED_DEV_VUART] = 0x1E787000,
+ [ASPEED_DEV_I3C] = 0x1E7A0000,
[ASPEED_DEV_SDRAM] = 0x80000000,
};
@@ -108,6 +109,7 @@ static const int aspeed_soc_ast2600_irqmap[] = {
[ASPEED_DEV_ETH4] = 33,
[ASPEED_DEV_KCS] = 138, /* 138 -> 142 */
[ASPEED_DEV_DP] = 62,
+ [ASPEED_DEV_I3C] = 102, /* 102 -> 107 */
};
static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl)
@@ -223,6 +225,8 @@ static void aspeed_soc_ast2600_init(Object *obj)
snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname);
object_initialize_child(obj, "hace", &s->hace, typename);
+
+ object_initialize_child(obj, "i3c", &s->i3c, TYPE_ASPEED_I3C);
}
/*
@@ -523,6 +527,18 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0,
aspeed_soc_get_irq(s, ASPEED_DEV_HACE));
+
+ /* I3C */
+ if (!sysbus_realize(SYS_BUS_DEVICE(&s->i3c), errp)) {
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]);
+ for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) {
+ qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore),
+ sc->irqmap[ASPEED_DEV_I3C] + i);
+ /* The AST2600 I3C controller has one IRQ per bus. */
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i3c.devices[i]), 0, irq);
+ }
}
static void aspeed_soc_ast2600_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index 2680ec5..7c840fb 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -34,12 +34,12 @@
#include "ui/pixel_ops.h"
#include "qemu/cutils.h"
#include "qom/object.h"
+#include "hw/net/mv88w8618_eth.h"
#define MP_MISC_BASE 0x80002000
#define MP_MISC_SIZE 0x00001000
#define MP_ETH_BASE 0x80008000
-#define MP_ETH_SIZE 0x00001000
#define MP_WLAN_BASE 0x8000C000
#define MP_WLAN_SIZE 0x00000800
@@ -84,384 +84,6 @@
/* Wolfson 8750 I2C address */
#define MP_WM_ADDR 0x1A
-/* Ethernet register offsets */
-#define MP_ETH_SMIR 0x010
-#define MP_ETH_PCXR 0x408
-#define MP_ETH_SDCMR 0x448
-#define MP_ETH_ICR 0x450
-#define MP_ETH_IMR 0x458
-#define MP_ETH_FRDP0 0x480
-#define MP_ETH_FRDP1 0x484
-#define MP_ETH_FRDP2 0x488
-#define MP_ETH_FRDP3 0x48C
-#define MP_ETH_CRDP0 0x4A0
-#define MP_ETH_CRDP1 0x4A4
-#define MP_ETH_CRDP2 0x4A8
-#define MP_ETH_CRDP3 0x4AC
-#define MP_ETH_CTDP0 0x4E0
-#define MP_ETH_CTDP1 0x4E4
-
-/* MII PHY access */
-#define MP_ETH_SMIR_DATA 0x0000FFFF
-#define MP_ETH_SMIR_ADDR 0x03FF0000
-#define MP_ETH_SMIR_OPCODE (1 << 26) /* Read value */
-#define MP_ETH_SMIR_RDVALID (1 << 27)
-
-/* PHY registers */
-#define MP_ETH_PHY1_BMSR 0x00210000
-#define MP_ETH_PHY1_PHYSID1 0x00410000
-#define MP_ETH_PHY1_PHYSID2 0x00610000
-
-#define MP_PHY_BMSR_LINK 0x0004
-#define MP_PHY_BMSR_AUTONEG 0x0008
-
-#define MP_PHY_88E3015 0x01410E20
-
-/* TX descriptor status */
-#define MP_ETH_TX_OWN (1U << 31)
-
-/* RX descriptor status */
-#define MP_ETH_RX_OWN (1U << 31)
-
-/* Interrupt cause/mask bits */
-#define MP_ETH_IRQ_RX_BIT 0
-#define MP_ETH_IRQ_RX (1 << MP_ETH_IRQ_RX_BIT)
-#define MP_ETH_IRQ_TXHI_BIT 2
-#define MP_ETH_IRQ_TXLO_BIT 3
-
-/* Port config bits */
-#define MP_ETH_PCXR_2BSM_BIT 28 /* 2-byte incoming suffix */
-
-/* SDMA command bits */
-#define MP_ETH_CMD_TXHI (1 << 23)
-#define MP_ETH_CMD_TXLO (1 << 22)
-
-typedef struct mv88w8618_tx_desc {
- uint32_t cmdstat;
- uint16_t res;
- uint16_t bytes;
- uint32_t buffer;
- uint32_t next;
-} mv88w8618_tx_desc;
-
-typedef struct mv88w8618_rx_desc {
- uint32_t cmdstat;
- uint16_t bytes;
- uint16_t buffer_size;
- uint32_t buffer;
- uint32_t next;
-} mv88w8618_rx_desc;
-
-#define TYPE_MV88W8618_ETH "mv88w8618_eth"
-OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_eth_state, MV88W8618_ETH)
-
-struct mv88w8618_eth_state {
- /*< private >*/
- SysBusDevice parent_obj;
- /*< public >*/
-
- MemoryRegion iomem;
- qemu_irq irq;
- MemoryRegion *dma_mr;
- AddressSpace dma_as;
- uint32_t smir;
- uint32_t icr;
- uint32_t imr;
- int mmio_index;
- uint32_t vlan_header;
- uint32_t tx_queue[2];
- uint32_t rx_queue[4];
- uint32_t frx_queue[4];
- uint32_t cur_rx[4];
- NICState *nic;
- NICConf conf;
-};
-
-static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr,
- mv88w8618_rx_desc *desc)
-{
- cpu_to_le32s(&desc->cmdstat);
- cpu_to_le16s(&desc->bytes);
- cpu_to_le16s(&desc->buffer_size);
- cpu_to_le32s(&desc->buffer);
- cpu_to_le32s(&desc->next);
- dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
-}
-
-static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr,
- mv88w8618_rx_desc *desc)
-{
- dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
- le32_to_cpus(&desc->cmdstat);
- le16_to_cpus(&desc->bytes);
- le16_to_cpus(&desc->buffer_size);
- le32_to_cpus(&desc->buffer);
- le32_to_cpus(&desc->next);
-}
-
-static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
-{
- mv88w8618_eth_state *s = qemu_get_nic_opaque(nc);
- uint32_t desc_addr;
- mv88w8618_rx_desc desc;
- int i;
-
- for (i = 0; i < 4; i++) {
- desc_addr = s->cur_rx[i];
- if (!desc_addr) {
- continue;
- }
- do {
- eth_rx_desc_get(&s->dma_as, desc_addr, &desc);
- if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) {
- dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header,
- buf, size, MEMTXATTRS_UNSPECIFIED);
- desc.bytes = size + s->vlan_header;
- desc.cmdstat &= ~MP_ETH_RX_OWN;
- s->cur_rx[i] = desc.next;
-
- s->icr |= MP_ETH_IRQ_RX;
- if (s->icr & s->imr) {
- qemu_irq_raise(s->irq);
- }
- eth_rx_desc_put(&s->dma_as, desc_addr, &desc);
- return size;
- }
- desc_addr = desc.next;
- } while (desc_addr != s->rx_queue[i]);
- }
- return size;
-}
-
-static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr,
- mv88w8618_tx_desc *desc)
-{
- cpu_to_le32s(&desc->cmdstat);
- cpu_to_le16s(&desc->res);
- cpu_to_le16s(&desc->bytes);
- cpu_to_le32s(&desc->buffer);
- cpu_to_le32s(&desc->next);
- dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
-}
-
-static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr,
- mv88w8618_tx_desc *desc)
-{
- dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
- le32_to_cpus(&desc->cmdstat);
- le16_to_cpus(&desc->res);
- le16_to_cpus(&desc->bytes);
- le32_to_cpus(&desc->buffer);
- le32_to_cpus(&desc->next);
-}
-
-static void eth_send(mv88w8618_eth_state *s, int queue_index)
-{
- uint32_t desc_addr = s->tx_queue[queue_index];
- mv88w8618_tx_desc desc;
- uint32_t next_desc;
- uint8_t buf[2048];
- int len;
-
- do {
- eth_tx_desc_get(&s->dma_as, desc_addr, &desc);
- next_desc = desc.next;
- if (desc.cmdstat & MP_ETH_TX_OWN) {
- len = desc.bytes;
- if (len < 2048) {
- dma_memory_read(&s->dma_as, desc.buffer, buf, len,
- MEMTXATTRS_UNSPECIFIED);
- qemu_send_packet(qemu_get_queue(s->nic), buf, len);
- }
- desc.cmdstat &= ~MP_ETH_TX_OWN;
- s->icr |= 1 << (MP_ETH_IRQ_TXLO_BIT - queue_index);
- eth_tx_desc_put(&s->dma_as, desc_addr, &desc);
- }
- desc_addr = next_desc;
- } while (desc_addr != s->tx_queue[queue_index]);
-}
-
-static uint64_t mv88w8618_eth_read(void *opaque, hwaddr offset,
- unsigned size)
-{
- mv88w8618_eth_state *s = opaque;
-
- switch (offset) {
- case MP_ETH_SMIR:
- if (s->smir & MP_ETH_SMIR_OPCODE) {
- switch (s->smir & MP_ETH_SMIR_ADDR) {
- case MP_ETH_PHY1_BMSR:
- return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG |
- MP_ETH_SMIR_RDVALID;
- case MP_ETH_PHY1_PHYSID1:
- return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID;
- case MP_ETH_PHY1_PHYSID2:
- return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID;
- default:
- return MP_ETH_SMIR_RDVALID;
- }
- }
- return 0;
-
- case MP_ETH_ICR:
- return s->icr;
-
- case MP_ETH_IMR:
- return s->imr;
-
- case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
- return s->frx_queue[(offset - MP_ETH_FRDP0)/4];
-
- case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
- return s->rx_queue[(offset - MP_ETH_CRDP0)/4];
-
- case MP_ETH_CTDP0 ... MP_ETH_CTDP1:
- return s->tx_queue[(offset - MP_ETH_CTDP0)/4];
-
- default:
- return 0;
- }
-}
-
-static void mv88w8618_eth_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
-{
- mv88w8618_eth_state *s = opaque;
-
- switch (offset) {
- case MP_ETH_SMIR:
- s->smir = value;
- break;
-
- case MP_ETH_PCXR:
- s->vlan_header = ((value >> MP_ETH_PCXR_2BSM_BIT) & 1) * 2;
- break;
-
- case MP_ETH_SDCMR:
- if (value & MP_ETH_CMD_TXHI) {
- eth_send(s, 1);
- }
- if (value & MP_ETH_CMD_TXLO) {
- eth_send(s, 0);
- }
- if (value & (MP_ETH_CMD_TXHI | MP_ETH_CMD_TXLO) && s->icr & s->imr) {
- qemu_irq_raise(s->irq);
- }
- break;
-
- case MP_ETH_ICR:
- s->icr &= value;
- break;
-
- case MP_ETH_IMR:
- s->imr = value;
- if (s->icr & s->imr) {
- qemu_irq_raise(s->irq);
- }
- break;
-
- case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
- s->frx_queue[(offset - MP_ETH_FRDP0)/4] = value;
- break;
-
- case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
- s->rx_queue[(offset - MP_ETH_CRDP0)/4] =
- s->cur_rx[(offset - MP_ETH_CRDP0)/4] = value;
- break;
-
- case MP_ETH_CTDP0 ... MP_ETH_CTDP1:
- s->tx_queue[(offset - MP_ETH_CTDP0)/4] = value;
- break;
- }
-}
-
-static const MemoryRegionOps mv88w8618_eth_ops = {
- .read = mv88w8618_eth_read,
- .write = mv88w8618_eth_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
-static void eth_cleanup(NetClientState *nc)
-{
- mv88w8618_eth_state *s = qemu_get_nic_opaque(nc);
-
- s->nic = NULL;
-}
-
-static NetClientInfo net_mv88w8618_info = {
- .type = NET_CLIENT_DRIVER_NIC,
- .size = sizeof(NICState),
- .receive = eth_receive,
- .cleanup = eth_cleanup,
-};
-
-static void mv88w8618_eth_init(Object *obj)
-{
- SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- DeviceState *dev = DEVICE(sbd);
- mv88w8618_eth_state *s = MV88W8618_ETH(dev);
-
- sysbus_init_irq(sbd, &s->irq);
- memory_region_init_io(&s->iomem, obj, &mv88w8618_eth_ops, s,
- "mv88w8618-eth", MP_ETH_SIZE);
- sysbus_init_mmio(sbd, &s->iomem);
-}
-
-static void mv88w8618_eth_realize(DeviceState *dev, Error **errp)
-{
- mv88w8618_eth_state *s = MV88W8618_ETH(dev);
-
- if (!s->dma_mr) {
- error_setg(errp, TYPE_MV88W8618_ETH " 'dma-memory' link not set");
- return;
- }
-
- address_space_init(&s->dma_as, s->dma_mr, "emac-dma");
- s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf,
- object_get_typename(OBJECT(dev)), dev->id, s);
-}
-
-static const VMStateDescription mv88w8618_eth_vmsd = {
- .name = "mv88w8618_eth",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (VMStateField[]) {
- VMSTATE_UINT32(smir, mv88w8618_eth_state),
- VMSTATE_UINT32(icr, mv88w8618_eth_state),
- VMSTATE_UINT32(imr, mv88w8618_eth_state),
- VMSTATE_UINT32(vlan_header, mv88w8618_eth_state),
- VMSTATE_UINT32_ARRAY(tx_queue, mv88w8618_eth_state, 2),
- VMSTATE_UINT32_ARRAY(rx_queue, mv88w8618_eth_state, 4),
- VMSTATE_UINT32_ARRAY(frx_queue, mv88w8618_eth_state, 4),
- VMSTATE_UINT32_ARRAY(cur_rx, mv88w8618_eth_state, 4),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static Property mv88w8618_eth_properties[] = {
- DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf),
- DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, dma_mr,
- TYPE_MEMORY_REGION, MemoryRegion *),
- DEFINE_PROP_END_OF_LIST(),
-};
-
-static void mv88w8618_eth_class_init(ObjectClass *klass, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(klass);
-
- dc->vmsd = &mv88w8618_eth_vmsd;
- device_class_set_props(dc, mv88w8618_eth_properties);
- dc->realize = mv88w8618_eth_realize;
-}
-
-static const TypeInfo mv88w8618_eth_info = {
- .name = TYPE_MV88W8618_ETH,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(mv88w8618_eth_state),
- .instance_init = mv88w8618_eth_init,
- .class_init = mv88w8618_eth_class_init,
-};
-
/* LCD register offsets */
#define MP_LCD_IRQCTRL 0x180
#define MP_LCD_IRQSTAT 0x184
@@ -1746,7 +1368,6 @@ static void musicpal_register_types(void)
type_register_static(&mv88w8618_pic_info);
type_register_static(&mv88w8618_pit_info);
type_register_static(&mv88w8618_flashcfg_info);
- type_register_static(&mv88w8618_eth_info);
type_register_static(&mv88w8618_wlan_info);
type_register_static(&musicpal_lcd_info);
type_register_static(&musicpal_gpio_info);
diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c
index 7d0f314..d701e5c 100644
--- a/hw/arm/npcm7xx_boards.c
+++ b/hw/arm/npcm7xx_boards.c
@@ -332,7 +332,15 @@ static void kudo_bmc_i2c_init(NPCM7xxState *soc)
{
I2CSlave *i2c_mux;
- i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x75);
+ i2c_mux = i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1),
+ TYPE_PCA9548, 0x75);
+
+ /* tmp105 is compatible with the lm75 */
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 4), "tmp105", 0x5c);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 5), "tmp105", 0x5c);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 6), "tmp105", 0x5c);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 7), "tmp105", 0x5c);
+
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x77);
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 4), TYPE_PCA9548, 0x77);
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index f2514ce..449fab0 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -158,10 +158,9 @@ static void acpi_dsdt_add_virtio(Aml *scope,
}
static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
- uint32_t irq, bool use_highmem, bool highmem_ecam,
- VirtMachineState *vms)
+ uint32_t irq, VirtMachineState *vms)
{
- int ecam_id = VIRT_ECAM_ID(highmem_ecam);
+ int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
struct GPEXConfig cfg = {
.mmio32 = memmap[VIRT_PCIE_MMIO],
.pio = memmap[VIRT_PCIE_PIO],
@@ -170,7 +169,7 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
.bus = vms->bus,
};
- if (use_highmem) {
+ if (vms->highmem_mmio) {
cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO];
}
@@ -869,8 +868,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]);
acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
(irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
- acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
- vms->highmem, vms->highmem_ecam, vms);
+ acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms);
if (vms->acpi_dev) {
build_ged_aml(scope, "\\_SB."GED_DEVICE,
HOTPLUG_HANDLER(vms->acpi_dev),
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 84c2444..141350b 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -71,9 +71,11 @@
#include "hw/arm/smmuv3.h"
#include "hw/acpi/acpi.h"
#include "target/arm/internals.h"
+#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/acpi/generic_event_device.h"
+#include "hw/virtio/virtio-mem-pci.h"
#include "hw/virtio/virtio-iommu.h"
#include "hw/char/pl011.h"
#include "qemu/guest-random.h"
@@ -434,9 +436,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
* can contain several layers of clustering within a single physical
* package and cluster nodes can be contained in parent cluster nodes.
*
- * Given that cluster is not yet supported in the vCPU topology,
- * we currently generate one cluster node within each socket node
- * by default.
+ * Note: currently we only support one layer of clustering within
+ * each physical package.
*/
qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
@@ -446,14 +447,16 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
if (ms->smp.threads > 1) {
map_path = g_strdup_printf(
- "/cpus/cpu-map/socket%d/cluster0/core%d/thread%d",
- cpu / (ms->smp.cores * ms->smp.threads),
+ "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
+ cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
+ (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters,
(cpu / ms->smp.threads) % ms->smp.cores,
cpu % ms->smp.threads);
} else {
map_path = g_strdup_printf(
- "/cpus/cpu-map/socket%d/cluster0/core%d",
- cpu / ms->smp.cores,
+ "/cpus/cpu-map/socket%d/cluster%d/core%d",
+ cpu / (ms->smp.clusters * ms->smp.cores),
+ (cpu / ms->smp.cores) % ms->smp.clusters,
cpu % ms->smp.cores);
}
qemu_fdt_add_path(ms->fdt, map_path);
@@ -1416,7 +1419,7 @@ static void create_pcie(VirtMachineState *vms)
mmio_reg, base_mmio, size_mmio);
memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
- if (vms->highmem) {
+ if (vms->highmem_mmio) {
/* Map high MMIO space */
MemoryRegion *high_mmio_alias = g_new0(MemoryRegion, 1);
@@ -1470,7 +1473,7 @@ static void create_pcie(VirtMachineState *vms)
qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
2, base_ecam, 2, size_ecam);
- if (vms->highmem) {
+ if (vms->highmem_mmio) {
qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges",
1, FDT_PCI_RANGE_IOPORT, 2, 0,
2, base_pio, 2, size_pio,
@@ -1664,10 +1667,10 @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx)
return arm_cpu_mp_affinity(idx, clustersz);
}
-static void virt_set_memmap(VirtMachineState *vms)
+static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
{
MachineState *ms = MACHINE(vms);
- hwaddr base, device_memory_base, device_memory_size;
+ hwaddr base, device_memory_base, device_memory_size, memtop;
int i;
vms->memmap = extended_memmap;
@@ -1683,6 +1686,14 @@ static void virt_set_memmap(VirtMachineState *vms)
}
/*
+ * !highmem is exactly the same as limiting the PA space to 32bit,
+ * irrespective of the underlying capabilities of the HW.
+ */
+ if (!vms->highmem) {
+ pa_bits = 32;
+ }
+
+ /*
* We compute the base of the high IO region depending on the
* amount of initial and device memory. The device memory start/size
* is aligned on 1GiB. We never put the high IO region below 256GiB
@@ -1694,7 +1705,12 @@ static void virt_set_memmap(VirtMachineState *vms)
device_memory_size = ms->maxram_size - ms->ram_size + ms->ram_slots * GiB;
/* Base address of the high IO region */
- base = device_memory_base + ROUND_UP(device_memory_size, GiB);
+ memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB);
+ if (memtop > BIT_ULL(pa_bits)) {
+ error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n",
+ pa_bits, memtop - BIT_ULL(pa_bits));
+ exit(EXIT_FAILURE);
+ }
if (base < device_memory_base) {
error_report("maxmem/slots too huge");
exit(EXIT_FAILURE);
@@ -1703,15 +1719,43 @@ static void virt_set_memmap(VirtMachineState *vms)
base = vms->memmap[VIRT_MEM].base + LEGACY_RAMLIMIT_BYTES;
}
+ /* We know for sure that at least the memory fits in the PA space */
+ vms->highest_gpa = memtop - 1;
+
for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) {
hwaddr size = extended_memmap[i].size;
+ bool fits;
base = ROUND_UP(base, size);
vms->memmap[i].base = base;
vms->memmap[i].size = size;
+
+ /*
+ * Check each device to see if they fit in the PA space,
+ * moving highest_gpa as we go.
+ *
+ * For each device that doesn't fit, disable it.
+ */
+ fits = (base + size) <= BIT_ULL(pa_bits);
+ if (fits) {
+ vms->highest_gpa = base + size - 1;
+ }
+
+ switch (i) {
+ case VIRT_HIGH_GIC_REDIST2:
+ vms->highmem_redists &= fits;
+ break;
+ case VIRT_HIGH_PCIE_ECAM:
+ vms->highmem_ecam &= fits;
+ break;
+ case VIRT_HIGH_PCIE_MMIO:
+ vms->highmem_mmio &= fits;
+ break;
+ }
+
base += size;
}
- vms->highest_gpa = base - 1;
+
if (device_memory_size > 0) {
ms->device_memory = g_malloc0(sizeof(*ms->device_memory));
ms->device_memory->base = device_memory_base;
@@ -1902,12 +1946,43 @@ static void machvirt_init(MachineState *machine)
unsigned int smp_cpus = machine->smp.cpus;
unsigned int max_cpus = machine->smp.max_cpus;
+ if (!cpu_type_valid(machine->cpu_type)) {
+ error_report("mach-virt: CPU type %s not supported", machine->cpu_type);
+ exit(1);
+ }
+
+ possible_cpus = mc->possible_cpu_arch_ids(machine);
+
/*
* In accelerated mode, the memory map is computed earlier in kvm_type()
* to create a VM with the right number of IPA bits.
*/
if (!vms->memmap) {
- virt_set_memmap(vms);
+ Object *cpuobj;
+ ARMCPU *armcpu;
+ int pa_bits;
+
+ /*
+     * Instantiate a temporary CPU object to find out about what
+ * we are about to deal with. Once this is done, get rid of
+ * the object.
+ */
+ cpuobj = object_new(possible_cpus->cpus[0].type);
+ armcpu = ARM_CPU(cpuobj);
+
+ if (object_property_get_bool(cpuobj, "aarch64", NULL)) {
+ pa_bits = arm_pamax(armcpu);
+ } else if (arm_feature(&armcpu->env, ARM_FEATURE_LPAE)) {
+ /* v7 with LPAE */
+ pa_bits = 40;
+ } else {
+ /* Anything else */
+ pa_bits = 32;
+ }
+
+ object_unref(cpuobj);
+
+ virt_set_memmap(vms, pa_bits);
}
/* We can probe only here because during property set
@@ -1915,11 +1990,6 @@ static void machvirt_init(MachineState *machine)
*/
finalize_gic_version(vms);
- if (!cpu_type_valid(machine->cpu_type)) {
- error_report("mach-virt: CPU type %s not supported", machine->cpu_type);
- exit(1);
- }
-
if (vms->secure) {
/*
* The Secure view of the world is the same as the NonSecure,
@@ -1989,7 +2059,6 @@ static void machvirt_init(MachineState *machine)
create_fdt(vms);
- possible_cpus = mc->possible_cpu_arch_ids(machine);
assert(possible_cpus->len == max_cpus);
for (n = 0; n < possible_cpus->len; n++) {
Object *cpuobj;
@@ -2127,7 +2196,7 @@ static void machvirt_init(MachineState *machine)
machine->ram_size, "mach-virt.tag");
}
- vms->highmem_ecam &= vms->highmem && (!firmware_loaded || aarch64);
+ vms->highmem_ecam &= (!firmware_loaded || aarch64);
create_rtc(vms);
@@ -2500,6 +2569,64 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev,
dev, &error_abort);
}
+static void virt_virtio_md_pci_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
+ Error *local_err = NULL;
+
+ if (!hotplug_dev2 && dev->hotplugged) {
+ /*
+ * Without a bus hotplug handler, we cannot control the plug/unplug
+ * order. We should never reach this point when hotplugging on ARM.
+ * However, it's nice to add a safety net, similar to what we have
+ * on x86.
+ */
+ error_setg(errp, "hotplug of virtio based memory devices not supported"
+ " on this bus.");
+ return;
+ }
+ /*
+ * First, see if we can plug this memory device at all. If that
+ * succeeds, branch of to the actual hotplug handler.
+ */
+ memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev), NULL,
+ &local_err);
+ if (!local_err && hotplug_dev2) {
+ hotplug_handler_pre_plug(hotplug_dev2, dev, &local_err);
+ }
+ error_propagate(errp, local_err);
+}
+
+static void virt_virtio_md_pci_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ HotplugHandler *hotplug_dev2 = qdev_get_bus_hotplug_handler(dev);
+ Error *local_err = NULL;
+
+ /*
+ * Plug the memory device first and then branch off to the actual
+ * hotplug handler. If that one fails, we can easily undo the memory
+ * device bits.
+ */
+ memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
+ if (hotplug_dev2) {
+ hotplug_handler_plug(hotplug_dev2, dev, &local_err);
+ if (local_err) {
+ memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
+ }
+ }
+ error_propagate(errp, local_err);
+}
+
+static void virt_virtio_md_pci_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ /* We don't support hot unplug of virtio based memory devices */
+ error_setg(errp, "virtio based memory devices cannot be unplugged.");
+}
+
+
static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -2507,6 +2634,8 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev,
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_memory_pre_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+ virt_virtio_md_pci_pre_plug(hotplug_dev, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
hwaddr db_start = 0, db_end = 0;
char *resv_prop_str;
@@ -2558,6 +2687,11 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev,
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_memory_plug(hotplug_dev, dev, errp);
}
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+ virt_virtio_md_pci_plug(hotplug_dev, dev, errp);
+ }
+
if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
PCIDevice *pdev = PCI_DEVICE(dev);
@@ -2614,6 +2748,8 @@ static void virt_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
virt_dimm_unplug_request(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) {
+ virt_virtio_md_pci_unplug_request(hotplug_dev, dev, errp);
} else {
error_setg(errp, "device unplug request for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -2638,6 +2774,7 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine,
if (device_is_dynamic_sysbus(mc, dev) ||
object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) ||
object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) {
return HOTPLUG_HANDLER(machine);
}
@@ -2657,7 +2794,7 @@ static int virt_kvm_type(MachineState *ms, const char *type_str)
max_vm_pa_size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
/* we freeze the memory map to compute the highest gpa */
- virt_set_memmap(vms);
+ virt_set_memmap(vms, max_vm_pa_size);
requested_pa_size = 64 - clz64(vms->highest_gpa);
@@ -2718,6 +2855,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
hc->unplug_request = virt_machine_device_unplug_request_cb;
hc->unplug = virt_machine_device_unplug_cb;
mc->nvdimm_supported = true;
+ mc->smp_props.clusters_supported = true;
mc->auto_enable_numa_with_memhp = true;
mc->auto_enable_numa_with_memdev = true;
mc->default_ram_id = "mach-virt.ram";
@@ -2827,6 +2965,8 @@ static void virt_instance_init(Object *obj)
vms->gic_version = VIRT_GIC_VERSION_NOSEL;
vms->highmem_ecam = !vmc->no_highmem_ecam;
+ vms->highmem_mmio = true;
+ vms->highmem_redists = true;
if (vmc->no_its) {
vms->its = false;
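
The cpu-map change in fdt_add_cpu_nodes() above decomposes a vCPU index with integer division and modulo: socket = cpu / (clusters * cores * threads), cluster = (cpu / (cores * threads)) % clusters, core = (cpu / threads) % cores, thread = cpu % threads. A standalone sketch (not from the patch) that prints the resulting device-tree paths for an assumed -smp sockets=2,clusters=2,cores=2,threads=2 layout:

    #include <stdio.h>

    int main(void)
    {
        const int sockets = 2, clusters = 2, cores = 2, threads = 2;

        for (int cpu = 0; cpu < sockets * clusters * cores * threads; cpu++) {
            int socket  = cpu / (clusters * cores * threads);
            int cluster = (cpu / (cores * threads)) % clusters;
            int core    = (cpu / threads) % cores;
            int thread  = cpu % threads;

            printf("cpu%-2d -> /cpus/cpu-map/socket%d/cluster%d/core%d/thread%d\n",
                   cpu, socket, cluster, core, thread);
        }
        return 0;
    }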
diff --git a/hw/audio/Kconfig b/hw/audio/Kconfig
index e9c6fed..e76c69c 100644
--- a/hw/audio/Kconfig
+++ b/hw/audio/Kconfig
@@ -47,6 +47,3 @@ config PL041
config CS4231
bool
-
-config MARVELL_88W8618
- bool
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index a994b1f..492b242 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -1662,6 +1662,15 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
}
break;
}
+ case 0xfc:
+ if (s->revision == REV_11MPCORE) {
+ /* Reserved on 11MPCore */
+ *data = 0;
+ } else {
+ /* GICv1 or v2; Arm implementation */
+ *data = (s->revision << 16) | 0x43b;
+ }
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR,
"gic_cpu_read: Bad offset %x\n", (int)offset);
@@ -1727,6 +1736,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
} else {
s->apr[regno][cpu] = value;
}
+ s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
break;
}
case 0xe0: case 0xe4: case 0xe8: case 0xec:
@@ -1743,6 +1753,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
return MEMTX_OK;
}
s->nsapr[regno][cpu] = value;
+ s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
break;
}
case 0x1000:
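
The new 0xfc case in gic_cpu_read() above returns a GICC_IIDR value of (revision << 16) | 0x43b, i.e. 0x0002043B for a GICv2 with Arm (JEP106 code 0x43B) as the implementer. The decode below follows the GICC_IIDR field layout (Implementer [11:0], Revision [15:12], Architecture version [19:16], ProductID [31:20]); it is an illustration only, not code from the patch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t iidr = (2u << 16) | 0x43b;   /* what a GICv2 guest reads at 0xfc */

        printf("GICC_IIDR       = 0x%08x\n", iidr);
        printf("  Implementer   = 0x%03x (0x43b = Arm)\n", iidr & 0xfff);
        printf("  Revision      = %u\n", (iidr >> 12) & 0xf);
        printf("  ArchVersion   = %u\n", (iidr >> 16) & 0xf);
        printf("  ProductID     = 0x%x\n", (iidr >> 20) & 0xfff);
        return 0;
    }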
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index fa3cdb5..b2f6a8c 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -45,6 +45,23 @@ typedef struct {
uint64_t itel;
} IteEntry;
+/*
+ * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
+ * if a command parameter is not correct. These include both "stall
+ * processing of the command queue" and "ignore this command, and
+ * keep processing the queue". In our implementation we choose that
+ * memory transaction errors reading the command packet provoke a
+ * stall, but errors in parameters cause us to ignore the command
+ * and continue processing.
+ * The process_* functions which handle individual ITS commands all
+ * return an ItsCmdResult which tells process_cmdq() whether it should
+ * stall or keep going.
+ */
+typedef enum ItsCmdResult {
+ CMD_STALL = 0,
+ CMD_CONTINUE = 1,
+} ItsCmdResult;
+
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
uint64_t result = 0;
@@ -66,44 +83,62 @@ static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
return result;
}
-static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
- MemTxResult *res)
+static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
+ uint32_t idx, MemTxResult *res)
{
+ /*
+ * Given a TableDesc describing one of the ITS in-guest-memory
+ * tables and an index into it, return the guest address
+ * corresponding to that table entry.
+ * If there was a memory error reading the L1 table of an
+ * indirect table, *res is set accordingly, and we return -1.
+ * If the L1 table entry is marked not valid, we return -1 with
+ * *res set to MEMTX_OK.
+ *
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t l2t_addr;
- uint64_t value;
- bool valid_l2t;
- uint32_t l2t_id;
+ uint32_t l2idx;
+ uint64_t l2;
uint32_t num_l2_entries;
- if (s->ct.indirect) {
- l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+ *res = MEMTX_OK;
- value = address_space_ldq_le(as,
- s->ct.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
+ if (!td->indirect) {
+ /* Single level table */
+ return td->base_addr + idx * td->entry_sz;
+ }
- if (*res == MEMTX_OK) {
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+ /* Two level table */
+ l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
- if (valid_l2t) {
- num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ l2 = address_space_ldq_le(as,
+ td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ if (*res != MEMTX_OK) {
+ return -1;
+ }
+ if (!(l2 & L2_TABLE_VALID_MASK)) {
+ return -1;
+ }
- l2t_addr = value & ((1ULL << 51) - 1);
+ num_l2_entries = td->page_sz / td->entry_sz;
+ return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
+}
- *cte = address_space_ldq_le(as, l2t_addr +
- ((icid % num_l2_entries) * GITS_CTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
- }
- }
- } else {
- /* Flat level table */
- *cte = address_space_ldq_le(as, s->ct.base_addr +
- (icid * GITS_CTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
+static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
+ MemTxResult *res)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);
+
+ if (entry_addr == -1) {
+ return false; /* not valid */
}
+ *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
return FIELD_EX64(*cte, CTE, VALID);
}
@@ -172,41 +207,12 @@ static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t l2t_addr;
- uint64_t value;
- bool valid_l2t;
- uint32_t l2t_id;
- uint32_t num_l2_entries;
-
- if (s->dt.indirect) {
- l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
-
- value = address_space_ldq_le(as,
- s->dt.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
-
- if (*res == MEMTX_OK) {
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+ uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, res);
- if (valid_l2t) {
- num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
-
- l2t_addr = value & ((1ULL << 51) - 1);
-
- value = address_space_ldq_le(as, l2t_addr +
- ((devid % num_l2_entries) * GITS_DTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
- }
- }
- } else {
- /* Flat level table */
- value = address_space_ldq_le(as, s->dt.base_addr +
- (devid * GITS_DTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
+ if (entry_addr == -1) {
+ return 0; /* a DTE entry with the Valid bit clear */
}
-
- return value;
+ return address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
}
/*
@@ -217,21 +223,20 @@ static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
* 3. handling of ITS CLEAR command
* 4. handling of ITS DISCARD command
*/
-static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
- ItsCmdType cmd)
+static ItsCmdResult process_its_cmd(GICv3ITSState *s, uint64_t value,
+ uint32_t offset, ItsCmdType cmd)
{
AddressSpace *as = &s->gicv3->dma_as;
uint32_t devid, eventid;
MemTxResult res = MEMTX_OK;
bool dte_valid;
uint64_t dte = 0;
- uint32_t max_eventid;
+ uint64_t num_eventids;
uint16_t icid = 0;
uint32_t pIntid = 0;
bool ite_valid = false;
uint64_t cte = 0;
bool cte_valid = false;
- bool result = false;
uint64_t rdbase;
if (cmd == NONE) {
@@ -245,103 +250,111 @@ static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
}
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
eventid = (value & EVENTID_MASK);
+ if (devid >= s->dt.num_ids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_ids);
+ return CMD_CONTINUE;
+ }
+
dte = get_dte(s, devid, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
dte_valid = FIELD_EX64(dte, DTE, VALID);
- if (dte_valid) {
- max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
+ if (!dte_valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid dte: %"PRIx64" for %d\n",
+ __func__, dte, devid);
+ return CMD_CONTINUE;
+ }
- ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
+ num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
- if (res != MEMTX_OK) {
- return result;
- }
+ if (eventid >= num_eventids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: eventid %d >= %"
+ PRId64 "\n",
+ __func__, eventid, num_eventids);
+ return CMD_CONTINUE;
+ }
- if (ite_valid) {
- cte_valid = get_cte(s, icid, &cte, &res);
- }
+ ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
- if (res != MEMTX_OK) {
- return result;
- }
- } else {
+ if (!ite_valid) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: "
- "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
- __func__, dte, devid, res);
- return result;
+ "%s: invalid command attributes: invalid ITE\n",
+ __func__);
+ return CMD_CONTINUE;
}
-
- /*
- * In this implementation, in case of guest errors we ignore the
- * command and move onto the next command in the queue.
- */
- if (devid >= s->dt.num_ids) {
+ if (icid >= s->ct.num_ids) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: devid %d>=%d",
- __func__, devid, s->dt.num_ids);
+ "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
+ __func__, icid);
+ return CMD_CONTINUE;
+ }
- } else if (!dte_valid || !ite_valid || !cte_valid) {
+ cte_valid = get_cte(s, icid, &cte, &res);
+ if (res != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ if (!cte_valid) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid command attributes: "
- "dte: %s, ite: %s, cte: %s\n",
- __func__,
- dte_valid ? "valid" : "invalid",
- ite_valid ? "valid" : "invalid",
- cte_valid ? "valid" : "invalid");
- } else if (eventid > max_eventid) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: eventid %d > %d\n",
- __func__, eventid, max_eventid);
- } else {
- /*
- * Current implementation only supports rdbase == procnum
- * Hence rdbase physical address is ignored
- */
- rdbase = FIELD_EX64(cte, CTE, RDBASE);
+ "invalid cte: %"PRIx64"\n",
+ __func__, cte);
+ return CMD_CONTINUE;
+ }
- if (rdbase >= s->gicv3->num_cpu) {
- return result;
- }
+ /*
+ * Current implementation only supports rdbase == procnum
+ * Hence rdbase physical address is ignored
+ */
+ rdbase = FIELD_EX64(cte, CTE, RDBASE);
- if ((cmd == CLEAR) || (cmd == DISCARD)) {
- gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
- } else {
- gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
- }
+ if (rdbase >= s->gicv3->num_cpu) {
+ return CMD_CONTINUE;
+ }
- if (cmd == DISCARD) {
- IteEntry ite = {};
- /* remove mapping from interrupt translation table */
- result = update_ite(s, eventid, dte, ite);
- }
+ if ((cmd == CLEAR) || (cmd == DISCARD)) {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
+ } else {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
}
- return result;
+ if (cmd == DISCARD) {
+ IteEntry ite = {};
+ /* remove mapping from interrupt translation table */
+ return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
+ }
+ return CMD_CONTINUE;
}
-static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
- bool ignore_pInt)
+static ItsCmdResult process_mapti(GICv3ITSState *s, uint64_t value,
+ uint32_t offset, bool ignore_pInt)
{
AddressSpace *as = &s->gicv3->dma_as;
uint32_t devid, eventid;
uint32_t pIntid = 0;
- uint32_t max_eventid, max_Intid;
+ uint64_t num_eventids;
+ uint32_t num_intids;
bool dte_valid;
MemTxResult res = MEMTX_OK;
uint16_t icid = 0;
uint64_t dte = 0;
- bool result = false;
+ IteEntry ite = {};
devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
offset += NUM_BYTES_IN_DW;
@@ -349,7 +362,7 @@ static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
eventid = (value & EVENTID_MASK);
@@ -365,58 +378,59 @@ static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
icid = value & ICID_MASK;
+ if (devid >= s->dt.num_ids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_ids);
+ return CMD_CONTINUE;
+ }
+
dte = get_dte(s, devid, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
dte_valid = FIELD_EX64(dte, DTE, VALID);
- max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
- max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
+ num_eventids = 1ULL << (FIELD_EX64(dte, DTE, SIZE) + 1);
+ num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
- if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
- || !dte_valid || (eventid > max_eventid) ||
- (((pIntid < GICV3_LPI_INTID_START) || (pIntid > max_Intid)) &&
+ if ((icid >= s->ct.num_ids)
+ || !dte_valid || (eventid >= num_eventids) ||
+ (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
(pIntid != INTID_SPURIOUS))) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid command attributes "
- "devid %d or icid %d or eventid %d or pIntid %d or"
- "unmapped dte %d\n", __func__, devid, icid, eventid,
+ "icid %d or eventid %d or pIntid %d or"
+ "unmapped dte %d\n", __func__, icid, eventid,
pIntid, dte_valid);
/*
* in this implementation, in case of error
* we ignore this command and move onto the next
* command in the queue
*/
- } else {
- /* add ite entry to interrupt translation table */
- IteEntry ite = {};
- ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
- ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
- ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
-
- result = update_ite(s, eventid, dte, ite);
+ return CMD_CONTINUE;
}
- return result;
+ /* add ite entry to interrupt translation table */
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
+ ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
+
+ return update_ite(s, eventid, dte, ite) ? CMD_CONTINUE : CMD_STALL;
}
static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
uint64_t rdbase)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t value;
- uint64_t l2t_addr;
- bool valid_l2t;
- uint32_t l2t_id;
- uint32_t num_l2_entries;
+ uint64_t entry_addr;
uint64_t cte = 0;
MemTxResult res = MEMTX_OK;
@@ -430,54 +444,27 @@ static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
}
- /*
- * The specification defines the format of level 1 entries of a
- * 2-level table, but the format of level 2 entries and the format
- * of flat-mapped tables is IMPDEF.
- */
- if (s->ct.indirect) {
- l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
-
- value = address_space_ldq_le(as,
- s->ct.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, &res);
-
- if (res != MEMTX_OK) {
- return false;
- }
-
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
-
- if (valid_l2t) {
- num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
-
- l2t_addr = value & ((1ULL << 51) - 1);
-
- address_space_stq_le(as, l2t_addr +
- ((icid % num_l2_entries) * GITS_CTE_SIZE),
- cte, MEMTXATTRS_UNSPECIFIED, &res);
- }
- } else {
- /* Flat level table */
- address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
- cte, MEMTXATTRS_UNSPECIFIED, &res);
- }
+ entry_addr = table_entry_addr(s, &s->ct, icid, &res);
if (res != MEMTX_OK) {
+ /* memory access error: stall */
return false;
- } else {
+ }
+ if (entry_addr == -1) {
+ /* No L2 table for this index: discard write and continue */
return true;
}
+
+ address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
+ return res == MEMTX_OK;
}
-static bool process_mapc(GICv3ITSState *s, uint32_t offset)
+static ItsCmdResult process_mapc(GICv3ITSState *s, uint32_t offset)
{
AddressSpace *as = &s->gicv3->dma_as;
uint16_t icid;
uint64_t rdbase;
bool valid;
MemTxResult res = MEMTX_OK;
- bool result = false;
uint64_t value;
offset += NUM_BYTES_IN_DW;
@@ -487,7 +474,7 @@ static bool process_mapc(GICv3ITSState *s, uint32_t offset)
MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
icid = value & ICID_MASK;
@@ -506,22 +493,17 @@ static bool process_mapc(GICv3ITSState *s, uint32_t offset)
* we ignore this command and move onto the next
* command in the queue
*/
- } else {
- result = update_cte(s, icid, valid, rdbase);
+ return CMD_CONTINUE;
}
- return result;
+ return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
}
static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
uint8_t size, uint64_t itt_addr)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t value;
- uint64_t l2t_addr;
- bool valid_l2t;
- uint32_t l2t_id;
- uint32_t num_l2_entries;
+ uint64_t entry_addr;
uint64_t dte = 0;
MemTxResult res = MEMTX_OK;
@@ -536,47 +518,21 @@ static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
return true;
}
- /*
- * The specification defines the format of level 1 entries of a
- * 2-level table, but the format of level 2 entries and the format
- * of flat-mapped tables is IMPDEF.
- */
- if (s->dt.indirect) {
- l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
-
- value = address_space_ldq_le(as,
- s->dt.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, &res);
-
- if (res != MEMTX_OK) {
- return false;
- }
-
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
-
- if (valid_l2t) {
- num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
-
- l2t_addr = value & ((1ULL << 51) - 1);
-
- address_space_stq_le(as, l2t_addr +
- ((devid % num_l2_entries) * GITS_DTE_SIZE),
- dte, MEMTXATTRS_UNSPECIFIED, &res);
- }
- } else {
- /* Flat level table */
- address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
- dte, MEMTXATTRS_UNSPECIFIED, &res);
- }
+ entry_addr = table_entry_addr(s, &s->dt, devid, &res);
if (res != MEMTX_OK) {
+ /* memory access error: stall */
return false;
- } else {
+ }
+ if (entry_addr == -1) {
+ /* No L2 table for this index: discard write and continue */
return true;
}
+ address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
+ return res == MEMTX_OK;
}
-static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
+static ItsCmdResult process_mapd(GICv3ITSState *s, uint64_t value,
+ uint32_t offset)
{
AddressSpace *as = &s->gicv3->dma_as;
uint32_t devid;
@@ -584,7 +540,6 @@ static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
uint64_t itt_addr;
bool valid;
MemTxResult res = MEMTX_OK;
- bool result = false;
devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
@@ -593,7 +548,7 @@ static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
size = (value & SIZE_MASK);
@@ -603,7 +558,7 @@ static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
- return result;
+ return CMD_STALL;
}
itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
@@ -620,11 +575,10 @@ static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
* we ignore this command and move onto the next
* command in the queue
*/
- } else {
- result = update_dte(s, devid, valid, size, itt_addr);
+ return CMD_CONTINUE;
}
- return result;
+ return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
}
/*
@@ -639,7 +593,6 @@ static void process_cmdq(GICv3ITSState *s)
uint64_t data;
AddressSpace *as = &s->gicv3->dma_as;
MemTxResult res = MEMTX_OK;
- bool result = true;
uint8_t cmd;
int i;
@@ -666,20 +619,27 @@ static void process_cmdq(GICv3ITSState *s)
}
while (wr_offset != rd_offset) {
+ ItsCmdResult result = CMD_CONTINUE;
+
cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
MEMTXATTRS_UNSPECIFIED, &res);
if (res != MEMTX_OK) {
- result = false;
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: could not read command at 0x%" PRIx64 "\n",
+ __func__, s->cq.base_addr + cq_offset);
+ break;
}
+
cmd = (data & CMD_MASK);
switch (cmd) {
case GITS_CMD_INT:
- res = process_its_cmd(s, data, cq_offset, INTERRUPT);
+ result = process_its_cmd(s, data, cq_offset, INTERRUPT);
break;
case GITS_CMD_CLEAR:
- res = process_its_cmd(s, data, cq_offset, CLEAR);
+ result = process_its_cmd(s, data, cq_offset, CLEAR);
break;
case GITS_CMD_SYNC:
/*
@@ -719,18 +679,16 @@ static void process_cmdq(GICv3ITSState *s)
default:
break;
}
- if (result) {
+ if (result == CMD_CONTINUE) {
rd_offset++;
rd_offset %= s->cq.num_entries;
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
} else {
- /*
- * in this implementation, in case of dma read/write error
- * we stall the command processing
- */
+ /* CMD_STALL */
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: %x cmd processing failed\n", __func__, cmd);
+ "%s: 0x%x cmd processing failed, stalling\n",
+ __func__, cmd);
break;
}
}
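
The new table_entry_addr() above replaces the four open-coded two-level walks in get_cte(), get_dte(), update_cte() and update_dte(). As a worked example with assumed numbers (4 KiB pages, 8-byte entries, an L1 table at 0x40000000 whose relevant entry points at an L2 page at 0x41000000): index 1000 selects L1 slot 1000 / (4096 / 8) = 1, and the entry then lives (1000 % 512) * 8 = 3904 bytes into that L2 page. The same arithmetic as a standalone sketch, not code from the patch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define L1TABLE_ENTRY_SIZE 8

    int main(void)
    {
        const uint64_t base_addr = 0x40000000;  /* assumed table base (GITS_BASER) */
        const uint64_t l2_page   = 0x41000000;  /* assumed L2 page read from the L1 entry */
        const uint32_t page_sz = 4096, entry_sz = 8;
        const uint32_t idx = 1000;              /* e.g. an ICID */

        /* Which 8-byte L1 entry to read, and where the final entry then lives. */
        uint32_t l2idx = idx / (page_sz / L1TABLE_ENTRY_SIZE);
        uint64_t l1_entry_addr = base_addr + l2idx * L1TABLE_ENTRY_SIZE;
        uint32_t num_l2_entries = page_sz / entry_sz;
        uint64_t entry_addr = l2_page + (idx % num_l2_entries) * entry_sz;

        printf("L1 entry read from 0x%" PRIx64 "\n", l1_entry_addr);
        printf("entry address     0x%" PRIx64 " (offset %u in the L2 page)\n",
               entry_addr, (idx % num_l2_entries) * entry_sz);
        return 0;
    }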
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
index c8ff3ec..99b11ca 100644
--- a/hw/intc/arm_gicv3_redist.c
+++ b/hw/intc/arm_gicv3_redist.c
@@ -462,7 +462,7 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
break;
}
- if (r == MEMTX_ERROR) {
+ if (r != MEMTX_OK) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest read at offset " TARGET_FMT_plx
" size %u\n", __func__, offset, size);
@@ -521,7 +521,7 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
break;
}
- if (r == MEMTX_ERROR) {
+ if (r != MEMTX_OK) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write at offset " TARGET_FMT_plx
" size %u\n", __func__, offset, size);
diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c
new file mode 100644
index 0000000..f54f5da
--- /dev/null
+++ b/hw/misc/aspeed_i3c.c
@@ -0,0 +1,384 @@
+/*
+ * ASPEED I3C Controller
+ *
+ * Copyright (C) 2021 ASPEED Technology Inc.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/misc/aspeed_i3c.h"
+#include "hw/registerfields.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+/* I3C Controller Registers */
+REG32(I3C1_REG0, 0x10)
+REG32(I3C1_REG1, 0x14)
+ FIELD(I3C1_REG1, I2C_MODE, 0, 1)
+ FIELD(I3C1_REG1, SA_EN, 15, 1)
+REG32(I3C2_REG0, 0x20)
+REG32(I3C2_REG1, 0x24)
+ FIELD(I3C2_REG1, I2C_MODE, 0, 1)
+ FIELD(I3C2_REG1, SA_EN, 15, 1)
+REG32(I3C3_REG0, 0x30)
+REG32(I3C3_REG1, 0x34)
+ FIELD(I3C3_REG1, I2C_MODE, 0, 1)
+ FIELD(I3C3_REG1, SA_EN, 15, 1)
+REG32(I3C4_REG0, 0x40)
+REG32(I3C4_REG1, 0x44)
+ FIELD(I3C4_REG1, I2C_MODE, 0, 1)
+ FIELD(I3C4_REG1, SA_EN, 15, 1)
+REG32(I3C5_REG0, 0x50)
+REG32(I3C5_REG1, 0x54)
+ FIELD(I3C5_REG1, I2C_MODE, 0, 1)
+ FIELD(I3C5_REG1, SA_EN, 15, 1)
+REG32(I3C6_REG0, 0x60)
+REG32(I3C6_REG1, 0x64)
+ FIELD(I3C6_REG1, I2C_MODE, 0, 1)
+ FIELD(I3C6_REG1, SA_EN, 15, 1)
+
+/* I3C Device Registers */
+REG32(DEVICE_CTRL, 0x00)
+REG32(DEVICE_ADDR, 0x04)
+REG32(HW_CAPABILITY, 0x08)
+REG32(COMMAND_QUEUE_PORT, 0x0c)
+REG32(RESPONSE_QUEUE_PORT, 0x10)
+REG32(RX_TX_DATA_PORT, 0x14)
+REG32(IBI_QUEUE_STATUS, 0x18)
+REG32(IBI_QUEUE_DATA, 0x18)
+REG32(QUEUE_THLD_CTRL, 0x1c)
+REG32(DATA_BUFFER_THLD_CTRL, 0x20)
+REG32(IBI_QUEUE_CTRL, 0x24)
+REG32(IBI_MR_REQ_REJECT, 0x2c)
+REG32(IBI_SIR_REQ_REJECT, 0x30)
+REG32(RESET_CTRL, 0x34)
+REG32(SLV_EVENT_CTRL, 0x38)
+REG32(INTR_STATUS, 0x3c)
+REG32(INTR_STATUS_EN, 0x40)
+REG32(INTR_SIGNAL_EN, 0x44)
+REG32(INTR_FORCE, 0x48)
+REG32(QUEUE_STATUS_LEVEL, 0x4c)
+REG32(DATA_BUFFER_STATUS_LEVEL, 0x50)
+REG32(PRESENT_STATE, 0x54)
+REG32(CCC_DEVICE_STATUS, 0x58)
+REG32(DEVICE_ADDR_TABLE_POINTER, 0x5c)
+ FIELD(DEVICE_ADDR_TABLE_POINTER, DEPTH, 16, 16)
+ FIELD(DEVICE_ADDR_TABLE_POINTER, ADDR, 0, 16)
+REG32(DEV_CHAR_TABLE_POINTER, 0x60)
+REG32(VENDOR_SPECIFIC_REG_POINTER, 0x6c)
+REG32(SLV_MIPI_PID_VALUE, 0x70)
+REG32(SLV_PID_VALUE, 0x74)
+REG32(SLV_CHAR_CTRL, 0x78)
+REG32(SLV_MAX_LEN, 0x7c)
+REG32(MAX_READ_TURNAROUND, 0x80)
+REG32(MAX_DATA_SPEED, 0x84)
+REG32(SLV_DEBUG_STATUS, 0x88)
+REG32(SLV_INTR_REQ, 0x8c)
+REG32(DEVICE_CTRL_EXTENDED, 0xb0)
+REG32(SCL_I3C_OD_TIMING, 0xb4)
+REG32(SCL_I3C_PP_TIMING, 0xb8)
+REG32(SCL_I2C_FM_TIMING, 0xbc)
+REG32(SCL_I2C_FMP_TIMING, 0xc0)
+REG32(SCL_EXT_LCNT_TIMING, 0xc8)
+REG32(SCL_EXT_TERMN_LCNT_TIMING, 0xcc)
+REG32(BUS_FREE_TIMING, 0xd4)
+REG32(BUS_IDLE_TIMING, 0xd8)
+REG32(I3C_VER_ID, 0xe0)
+REG32(I3C_VER_TYPE, 0xe4)
+REG32(EXTENDED_CAPABILITY, 0xe8)
+REG32(SLAVE_CONFIG, 0xec)
+
+static const uint32_t ast2600_i3c_device_resets[ASPEED_I3C_DEVICE_NR_REGS] = {
+ [R_HW_CAPABILITY] = 0x000e00bf,
+ [R_QUEUE_THLD_CTRL] = 0x01000101,
+ [R_I3C_VER_ID] = 0x3130302a,
+ [R_I3C_VER_TYPE] = 0x6c633033,
+ [R_DEVICE_ADDR_TABLE_POINTER] = 0x00080280,
+ [R_DEV_CHAR_TABLE_POINTER] = 0x00020200,
+ [A_VENDOR_SPECIFIC_REG_POINTER] = 0x000000b0,
+ [R_SLV_MAX_LEN] = 0x00ff00ff,
+};
+
+static uint64_t aspeed_i3c_device_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ AspeedI3CDevice *s = ASPEED_I3C_DEVICE(opaque);
+ uint32_t addr = offset >> 2;
+ uint64_t value;
+
+ switch (addr) {
+ case R_COMMAND_QUEUE_PORT:
+ value = 0;
+ break;
+ default:
+ value = s->regs[addr];
+ break;
+ }
+
+ trace_aspeed_i3c_device_read(s->id, offset, value);
+
+ return value;
+}
+
+static void aspeed_i3c_device_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ AspeedI3CDevice *s = ASPEED_I3C_DEVICE(opaque);
+ uint32_t addr = offset >> 2;
+
+ trace_aspeed_i3c_device_write(s->id, offset, value);
+
+ switch (addr) {
+ case R_HW_CAPABILITY:
+ case R_RESPONSE_QUEUE_PORT:
+ case R_IBI_QUEUE_DATA:
+ case R_QUEUE_STATUS_LEVEL:
+ case R_PRESENT_STATE:
+ case R_CCC_DEVICE_STATUS:
+ case R_DEVICE_ADDR_TABLE_POINTER:
+ case R_VENDOR_SPECIFIC_REG_POINTER:
+ case R_SLV_CHAR_CTRL:
+ case R_SLV_MAX_LEN:
+ case R_MAX_READ_TURNAROUND:
+ case R_I3C_VER_ID:
+ case R_I3C_VER_TYPE:
+ case R_EXTENDED_CAPABILITY:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: write to readonly register[0x%02" HWADDR_PRIx
+ "] = 0x%08" PRIx64 "\n",
+ __func__, offset, value);
+ break;
+ case R_RX_TX_DATA_PORT:
+ break;
+ case R_RESET_CTRL:
+ break;
+ default:
+ s->regs[addr] = value;
+ break;
+ }
+}
+
+static const VMStateDescription aspeed_i3c_device_vmstate = {
+ .name = TYPE_ASPEED_I3C,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT32_ARRAY(regs, AspeedI3CDevice, ASPEED_I3C_DEVICE_NR_REGS),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static const MemoryRegionOps aspeed_i3c_device_ops = {
+ .read = aspeed_i3c_device_read,
+ .write = aspeed_i3c_device_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void aspeed_i3c_device_reset(DeviceState *dev)
+{
+ AspeedI3CDevice *s = ASPEED_I3C_DEVICE(dev);
+
+ memcpy(s->regs, ast2600_i3c_device_resets, sizeof(s->regs));
+}
+
+static void aspeed_i3c_device_realize(DeviceState *dev, Error **errp)
+{
+ AspeedI3CDevice *s = ASPEED_I3C_DEVICE(dev);
+ g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I3C_DEVICE ".%d",
+ s->id);
+
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
+
+ memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i3c_device_ops,
+ s, name, ASPEED_I3C_DEVICE_NR_REGS << 2);
+}
+
+static uint64_t aspeed_i3c_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ AspeedI3CState *s = ASPEED_I3C(opaque);
+ uint64_t val = 0;
+
+ val = s->regs[addr >> 2];
+
+ trace_aspeed_i3c_read(addr, val);
+
+ return val;
+}
+
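+/*
+ * Global controller register writes: I2C and slave modes are not
+ * modelled, so setting either bit in a controller's REG1 is logged as
+ * unimplemented and the write is dropped; other writes are latched.
+ */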
+static void aspeed_i3c_write(void *opaque,
+ hwaddr addr,
+ uint64_t data,
+ unsigned int size)
+{
+ AspeedI3CState *s = ASPEED_I3C(opaque);
+
+ trace_aspeed_i3c_write(addr, data);
+
+ addr >>= 2;
+
+ /* I3C controller register */
+ switch (addr) {
+ case R_I3C1_REG1:
+ case R_I3C2_REG1:
+ case R_I3C3_REG1:
+ case R_I3C4_REG1:
+ case R_I3C5_REG1:
+ case R_I3C6_REG1:
+ if (data & R_I3C1_REG1_I2C_MODE_MASK) {
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Unsupported I2C mode [0x%08" HWADDR_PRIx
+ "]=%08" PRIx64 "\n",
+ __func__, addr << 2, data);
+ break;
+ }
+ if (data & R_I3C1_REG1_SA_EN_MASK) {
+ qemu_log_mask(LOG_UNIMP,
+ "%s: Unsupported slave mode [%08" HWADDR_PRIx
+ "]=0x%08" PRIx64 "\n",
+ __func__, addr << 2, data);
+ break;
+ }
+ s->regs[addr] = data;
+ break;
+ default:
+ s->regs[addr] = data;
+ break;
+ }
+}
+
+static const MemoryRegionOps aspeed_i3c_ops = {
+ .read = aspeed_i3c_read,
+ .write = aspeed_i3c_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ }
+};
+
+static void aspeed_i3c_reset(DeviceState *dev)
+{
+ AspeedI3CState *s = ASPEED_I3C(dev);
+ memset(s->regs, 0, sizeof(s->regs));
+}
+
+static void aspeed_i3c_instance_init(Object *obj)
+{
+ AspeedI3CState *s = ASPEED_I3C(obj);
+ int i;
+
+ for (i = 0; i < ASPEED_I3C_NR_DEVICES; ++i) {
+ object_initialize_child(obj, "device[*]", &s->devices[i],
+ TYPE_ASPEED_I3C_DEVICE);
+ }
+}
+
+static void aspeed_i3c_realize(DeviceState *dev, Error **errp)
+{
+ int i;
+ AspeedI3CState *s = ASPEED_I3C(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ memory_region_init(&s->iomem_container, OBJECT(s),
+ TYPE_ASPEED_I3C ".container", 0x8000);
+
+ sysbus_init_mmio(sbd, &s->iomem_container);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_i3c_ops, s,
+ TYPE_ASPEED_I3C ".regs", ASPEED_I3C_NR_REGS << 2);
+
+ memory_region_add_subregion(&s->iomem_container, 0x0, &s->iomem);
+
+ for (i = 0; i < ASPEED_I3C_NR_DEVICES; ++i) {
+        Object *i3c_dev = OBJECT(&s->devices[i]);
+
+        if (!object_property_set_uint(i3c_dev, "device-id", i, errp)) {
+            return;
+        }
+
+        if (!sysbus_realize(SYS_BUS_DEVICE(i3c_dev), errp)) {
+ return;
+ }
+
+ /*
+ * Register Address of I3CX Device =
+ * (Base Address of Global Register) + (Offset of I3CX) + Offset
+ * X = 0, 1, 2, 3, 4, 5
+ * Offset of I3C0 = 0x2000
+ * Offset of I3C1 = 0x3000
+ * Offset of I3C2 = 0x4000
+ * Offset of I3C3 = 0x5000
+ * Offset of I3C4 = 0x6000
+ * Offset of I3C5 = 0x7000
+ */
+ memory_region_add_subregion(&s->iomem_container,
+ 0x2000 + i * 0x1000, &s->devices[i].mr);
+ }
+}
+
+static Property aspeed_i3c_device_properties[] = {
+ DEFINE_PROP_UINT8("device-id", AspeedI3CDevice, id, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_i3c_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "Aspeed I3C Device";
+ dc->realize = aspeed_i3c_device_realize;
+ dc->reset = aspeed_i3c_device_reset;
+ device_class_set_props(dc, aspeed_i3c_device_properties);
+}
+
+static const TypeInfo aspeed_i3c_device_info = {
+ .name = TYPE_ASPEED_I3C_DEVICE,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AspeedI3CDevice),
+ .class_init = aspeed_i3c_device_class_init,
+};
+
+static const VMStateDescription vmstate_aspeed_i3c = {
+ .name = TYPE_ASPEED_I3C,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AspeedI3CState, ASPEED_I3C_NR_REGS),
+ VMSTATE_STRUCT_ARRAY(devices, AspeedI3CState, ASPEED_I3C_NR_DEVICES, 1,
+ aspeed_i3c_device_vmstate, AspeedI3CDevice),
+ VMSTATE_END_OF_LIST(),
+ }
+};
+
+static void aspeed_i3c_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = aspeed_i3c_realize;
+ dc->reset = aspeed_i3c_reset;
+ dc->desc = "Aspeed I3C Controller";
+ dc->vmsd = &vmstate_aspeed_i3c;
+}
+
+static const TypeInfo aspeed_i3c_info = {
+ .name = TYPE_ASPEED_I3C,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = aspeed_i3c_instance_init,
+ .instance_size = sizeof(AspeedI3CState),
+ .class_init = aspeed_i3c_class_init,
+};
+
+static void aspeed_i3c_register_types(void)
+{
+ type_register_static(&aspeed_i3c_device_info);
+ type_register_static(&aspeed_i3c_info);
+}
+
+type_init(aspeed_i3c_register_types);
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 3f41a3a5..d1a1169 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -105,6 +105,7 @@ softmmu_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c'))
softmmu_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c'))
softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files(
'aspeed_hace.c',
+ 'aspeed_i3c.c',
'aspeed_lpc.c',
'aspeed_scu.c',
'aspeed_sdmc.c',
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index 2da96d1..1c373dd 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -199,6 +199,12 @@ armsse_mhu_write(uint64_t offset, uint64_t data, unsigned size) "SSE-200 MHU wri
# aspeed_xdma.c
aspeed_xdma_write(uint64_t offset, uint64_t data) "XDMA write: offset 0x%" PRIx64 " data 0x%" PRIx64
+# aspeed_i3c.c
+aspeed_i3c_read(uint64_t offset, uint64_t data) "I3C read: offset 0x%" PRIx64 " data 0x%" PRIx64
+aspeed_i3c_write(uint64_t offset, uint64_t data) "I3C write: offset 0x%" PRIx64 " data 0x%" PRIx64
+aspeed_i3c_device_read(uint32_t deviceid, uint64_t offset, uint64_t data) "I3C Dev[%u] read: offset 0x%" PRIx64 " data 0x%" PRIx64
+aspeed_i3c_device_write(uint32_t deviceid, uint64_t offset, uint64_t data) "I3C Dev[%u] write: offset 0x%" PRIx64 " data 0x%" PRIx64
+
# bcm2835_property.c
bcm2835_mbox_property(uint32_t tag, uint32_t bufsize, size_t resplen) "mbox property tag:0x%08x in_sz:%u out_sz:%zu"
diff --git a/hw/net/meson.build b/hw/net/meson.build
index bdf71f1..685b75b 100644
--- a/hw/net/meson.build
+++ b/hw/net/meson.build
@@ -26,6 +26,7 @@ softmmu_ss.add(when: 'CONFIG_ALLWINNER_EMAC', if_true: files('allwinner_emac.c')
softmmu_ss.add(when: 'CONFIG_ALLWINNER_SUN8I_EMAC', if_true: files('allwinner-sun8i-emac.c'))
softmmu_ss.add(when: 'CONFIG_IMX_FEC', if_true: files('imx_fec.c'))
softmmu_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-emac.c'))
+softmmu_ss.add(when: 'CONFIG_MARVELL_88W8618', if_true: files('mv88w8618_eth.c'))
softmmu_ss.add(when: 'CONFIG_CADENCE', if_true: files('cadence_gem.c'))
softmmu_ss.add(when: 'CONFIG_STELLARIS_ENET', if_true: files('stellaris_enet.c'))
diff --git a/hw/net/mv88w8618_eth.c b/hw/net/mv88w8618_eth.c
new file mode 100644
index 0000000..ef30b0d
--- /dev/null
+++ b/hw/net/mv88w8618_eth.c
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Marvell MV88W8618 / Freecom MusicPal emulation.
+ *
+ * Copyright (c) 2008 Jan Kiszka
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "hw/net/mv88w8618_eth.h"
+#include "migration/vmstate.h"
+#include "sysemu/dma.h"
+#include "net/net.h"
+
+#define MP_ETH_SIZE 0x00001000
+
+/* Ethernet register offsets */
+#define MP_ETH_SMIR 0x010
+#define MP_ETH_PCXR 0x408
+#define MP_ETH_SDCMR 0x448
+#define MP_ETH_ICR 0x450
+#define MP_ETH_IMR 0x458
+#define MP_ETH_FRDP0 0x480
+#define MP_ETH_FRDP1 0x484
+#define MP_ETH_FRDP2 0x488
+#define MP_ETH_FRDP3 0x48C
+#define MP_ETH_CRDP0 0x4A0
+#define MP_ETH_CRDP1 0x4A4
+#define MP_ETH_CRDP2 0x4A8
+#define MP_ETH_CRDP3 0x4AC
+#define MP_ETH_CTDP0 0x4E0
+#define MP_ETH_CTDP1 0x4E4
+
+/* MII PHY access */
+#define MP_ETH_SMIR_DATA 0x0000FFFF
+#define MP_ETH_SMIR_ADDR 0x03FF0000
+#define MP_ETH_SMIR_OPCODE (1 << 26) /* Read value */
+#define MP_ETH_SMIR_RDVALID (1 << 27)
+
+/* PHY registers */
+#define MP_ETH_PHY1_BMSR 0x00210000
+#define MP_ETH_PHY1_PHYSID1 0x00410000
+#define MP_ETH_PHY1_PHYSID2 0x00610000
+
+#define MP_PHY_BMSR_LINK 0x0004
+#define MP_PHY_BMSR_AUTONEG 0x0008
+
+#define MP_PHY_88E3015 0x01410E20
+
+/* TX descriptor status */
+#define MP_ETH_TX_OWN (1U << 31)
+
+/* RX descriptor status */
+#define MP_ETH_RX_OWN (1U << 31)
+
+/* Interrupt cause/mask bits */
+#define MP_ETH_IRQ_RX_BIT 0
+#define MP_ETH_IRQ_RX (1 << MP_ETH_IRQ_RX_BIT)
+#define MP_ETH_IRQ_TXHI_BIT 2
+#define MP_ETH_IRQ_TXLO_BIT 3
+
+/* Port config bits */
+#define MP_ETH_PCXR_2BSM_BIT 28 /* 2-byte incoming suffix */
+
+/* SDMA command bits */
+#define MP_ETH_CMD_TXHI (1 << 23)
+#define MP_ETH_CMD_TXLO (1 << 22)
+
+typedef struct mv88w8618_tx_desc {
+ uint32_t cmdstat;
+ uint16_t res;
+ uint16_t bytes;
+ uint32_t buffer;
+ uint32_t next;
+} mv88w8618_tx_desc;
+
+typedef struct mv88w8618_rx_desc {
+ uint32_t cmdstat;
+ uint16_t bytes;
+ uint16_t buffer_size;
+ uint32_t buffer;
+ uint32_t next;
+} mv88w8618_rx_desc;
+
+OBJECT_DECLARE_SIMPLE_TYPE(mv88w8618_eth_state, MV88W8618_ETH)
+
+struct mv88w8618_eth_state {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+ MemoryRegion *dma_mr;
+ AddressSpace dma_as;
+ uint32_t smir;
+ uint32_t icr;
+ uint32_t imr;
+ int mmio_index;
+ uint32_t vlan_header;
+ uint32_t tx_queue[2];
+ uint32_t rx_queue[4];
+ uint32_t frx_queue[4];
+ uint32_t cur_rx[4];
+ NICState *nic;
+ NICConf conf;
+};
+
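+/*
+ * RX/TX descriptors live in guest memory in little-endian format; the
+ * helpers below byte-swap them as they are copied to and from the DMA
+ * address space.
+ */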
+static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr,
+ mv88w8618_rx_desc *desc)
+{
+ cpu_to_le32s(&desc->cmdstat);
+ cpu_to_le16s(&desc->bytes);
+ cpu_to_le16s(&desc->buffer_size);
+ cpu_to_le32s(&desc->buffer);
+ cpu_to_le32s(&desc->next);
+ dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+}
+
+static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr,
+ mv88w8618_rx_desc *desc)
+{
+ dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+ le32_to_cpus(&desc->cmdstat);
+ le16_to_cpus(&desc->bytes);
+ le16_to_cpus(&desc->buffer_size);
+ le32_to_cpus(&desc->buffer);
+ le32_to_cpus(&desc->next);
+}
+
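+/*
+ * Receive path: walk the four priority RX rings looking for a descriptor
+ * with the OWN bit set and a large enough buffer, DMA the frame into it,
+ * clear OWN to hand it back and raise the RX interrupt if unmasked.
+ */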
+static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
+{
+ mv88w8618_eth_state *s = qemu_get_nic_opaque(nc);
+ uint32_t desc_addr;
+ mv88w8618_rx_desc desc;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ desc_addr = s->cur_rx[i];
+ if (!desc_addr) {
+ continue;
+ }
+ do {
+ eth_rx_desc_get(&s->dma_as, desc_addr, &desc);
+ if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) {
+ dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header,
+ buf, size, MEMTXATTRS_UNSPECIFIED);
+ desc.bytes = size + s->vlan_header;
+ desc.cmdstat &= ~MP_ETH_RX_OWN;
+ s->cur_rx[i] = desc.next;
+
+ s->icr |= MP_ETH_IRQ_RX;
+ if (s->icr & s->imr) {
+ qemu_irq_raise(s->irq);
+ }
+ eth_rx_desc_put(&s->dma_as, desc_addr, &desc);
+ return size;
+ }
+ desc_addr = desc.next;
+ } while (desc_addr != s->rx_queue[i]);
+ }
+ return size;
+}
+
+static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr,
+ mv88w8618_tx_desc *desc)
+{
+ cpu_to_le32s(&desc->cmdstat);
+ cpu_to_le16s(&desc->res);
+ cpu_to_le16s(&desc->bytes);
+ cpu_to_le32s(&desc->buffer);
+ cpu_to_le32s(&desc->next);
+ dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+}
+
+static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr,
+ mv88w8618_tx_desc *desc)
+{
+ dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED);
+ le32_to_cpus(&desc->cmdstat);
+ le16_to_cpus(&desc->res);
+ le16_to_cpus(&desc->bytes);
+ le32_to_cpus(&desc->buffer);
+ le32_to_cpus(&desc->next);
+}
+
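+/*
+ * Transmit path: walk the selected TX ring once; frames under 2 KiB whose
+ * descriptor has the OWN bit set are read via DMA and sent, then OWN is
+ * cleared and the per-queue TX interrupt cause is latched.
+ */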
+static void eth_send(mv88w8618_eth_state *s, int queue_index)
+{
+ uint32_t desc_addr = s->tx_queue[queue_index];
+ mv88w8618_tx_desc desc;
+ uint32_t next_desc;
+ uint8_t buf[2048];
+ int len;
+
+ do {
+ eth_tx_desc_get(&s->dma_as, desc_addr, &desc);
+ next_desc = desc.next;
+ if (desc.cmdstat & MP_ETH_TX_OWN) {
+ len = desc.bytes;
+ if (len < 2048) {
+ dma_memory_read(&s->dma_as, desc.buffer, buf, len,
+ MEMTXATTRS_UNSPECIFIED);
+ qemu_send_packet(qemu_get_queue(s->nic), buf, len);
+ }
+ desc.cmdstat &= ~MP_ETH_TX_OWN;
+ s->icr |= 1 << (MP_ETH_IRQ_TXLO_BIT - queue_index);
+ eth_tx_desc_put(&s->dma_as, desc_addr, &desc);
+ }
+ desc_addr = next_desc;
+ } while (desc_addr != s->tx_queue[queue_index]);
+}
+
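+/*
+ * MMIO reads: SMI accesses to PHY 1 return canned values (link up,
+ * autoneg complete, Marvell 88E3015 ID); interrupt and descriptor
+ * pointer registers read back their current state.
+ */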
+static uint64_t mv88w8618_eth_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ mv88w8618_eth_state *s = opaque;
+
+ switch (offset) {
+ case MP_ETH_SMIR:
+ if (s->smir & MP_ETH_SMIR_OPCODE) {
+ switch (s->smir & MP_ETH_SMIR_ADDR) {
+ case MP_ETH_PHY1_BMSR:
+ return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG |
+ MP_ETH_SMIR_RDVALID;
+ case MP_ETH_PHY1_PHYSID1:
+ return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID;
+ case MP_ETH_PHY1_PHYSID2:
+ return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID;
+ default:
+ return MP_ETH_SMIR_RDVALID;
+ }
+ }
+ return 0;
+
+ case MP_ETH_ICR:
+ return s->icr;
+
+ case MP_ETH_IMR:
+ return s->imr;
+
+ case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
+ return s->frx_queue[(offset - MP_ETH_FRDP0) / 4];
+
+ case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
+ return s->rx_queue[(offset - MP_ETH_CRDP0) / 4];
+
+ case MP_ETH_CTDP0 ... MP_ETH_CTDP1:
+ return s->tx_queue[(offset - MP_ETH_CTDP0) / 4];
+
+ default:
+ return 0;
+ }
+}
+
+static void mv88w8618_eth_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ mv88w8618_eth_state *s = opaque;
+
+ switch (offset) {
+ case MP_ETH_SMIR:
+ s->smir = value;
+ break;
+
+ case MP_ETH_PCXR:
+ s->vlan_header = ((value >> MP_ETH_PCXR_2BSM_BIT) & 1) * 2;
+ break;
+
+ case MP_ETH_SDCMR:
+ if (value & MP_ETH_CMD_TXHI) {
+ eth_send(s, 1);
+ }
+ if (value & MP_ETH_CMD_TXLO) {
+ eth_send(s, 0);
+ }
+ if (value & (MP_ETH_CMD_TXHI | MP_ETH_CMD_TXLO) && s->icr & s->imr) {
+ qemu_irq_raise(s->irq);
+ }
+ break;
+
+ case MP_ETH_ICR:
+ s->icr &= value;
+ break;
+
+ case MP_ETH_IMR:
+ s->imr = value;
+ if (s->icr & s->imr) {
+ qemu_irq_raise(s->irq);
+ }
+ break;
+
+ case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
+ s->frx_queue[(offset - MP_ETH_FRDP0) / 4] = value;
+ break;
+
+ case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
+ s->rx_queue[(offset - MP_ETH_CRDP0) / 4] =
+ s->cur_rx[(offset - MP_ETH_CRDP0) / 4] = value;
+ break;
+
+ case MP_ETH_CTDP0 ... MP_ETH_CTDP1:
+ s->tx_queue[(offset - MP_ETH_CTDP0) / 4] = value;
+ break;
+ }
+}
+
+static const MemoryRegionOps mv88w8618_eth_ops = {
+ .read = mv88w8618_eth_read,
+ .write = mv88w8618_eth_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void eth_cleanup(NetClientState *nc)
+{
+ mv88w8618_eth_state *s = qemu_get_nic_opaque(nc);
+
+ s->nic = NULL;
+}
+
+static NetClientInfo net_mv88w8618_info = {
+ .type = NET_CLIENT_DRIVER_NIC,
+ .size = sizeof(NICState),
+ .receive = eth_receive,
+ .cleanup = eth_cleanup,
+};
+
+static void mv88w8618_eth_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ DeviceState *dev = DEVICE(sbd);
+ mv88w8618_eth_state *s = MV88W8618_ETH(dev);
+
+ sysbus_init_irq(sbd, &s->irq);
+ memory_region_init_io(&s->iomem, obj, &mv88w8618_eth_ops, s,
+ "mv88w8618-eth", MP_ETH_SIZE);
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static void mv88w8618_eth_realize(DeviceState *dev, Error **errp)
+{
+ mv88w8618_eth_state *s = MV88W8618_ETH(dev);
+
+ if (!s->dma_mr) {
+ error_setg(errp, TYPE_MV88W8618_ETH " 'dma-memory' link not set");
+ return;
+ }
+
+ address_space_init(&s->dma_as, s->dma_mr, "emac-dma");
+ s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
+}
+
+static const VMStateDescription mv88w8618_eth_vmsd = {
+ .name = "mv88w8618_eth",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(smir, mv88w8618_eth_state),
+ VMSTATE_UINT32(icr, mv88w8618_eth_state),
+ VMSTATE_UINT32(imr, mv88w8618_eth_state),
+ VMSTATE_UINT32(vlan_header, mv88w8618_eth_state),
+ VMSTATE_UINT32_ARRAY(tx_queue, mv88w8618_eth_state, 2),
+ VMSTATE_UINT32_ARRAY(rx_queue, mv88w8618_eth_state, 4),
+ VMSTATE_UINT32_ARRAY(frx_queue, mv88w8618_eth_state, 4),
+ VMSTATE_UINT32_ARRAY(cur_rx, mv88w8618_eth_state, 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property mv88w8618_eth_properties[] = {
+ DEFINE_NIC_PROPERTIES(mv88w8618_eth_state, conf),
+ DEFINE_PROP_LINK("dma-memory", mv88w8618_eth_state, dma_mr,
+ TYPE_MEMORY_REGION, MemoryRegion *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void mv88w8618_eth_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->vmsd = &mv88w8618_eth_vmsd;
+ device_class_set_props(dc, mv88w8618_eth_properties);
+ dc->realize = mv88w8618_eth_realize;
+}
+
+static const TypeInfo mv88w8618_eth_info = {
+ .name = TYPE_MV88W8618_ETH,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(mv88w8618_eth_state),
+ .instance_init = mv88w8618_eth_init,
+ .class_init = mv88w8618_eth_class_init,
+};
+
+static void musicpal_register_types(void)
+{
+ type_register_static(&mv88w8618_eth_info);
+}
+
+type_init(musicpal_register_types)
+
diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
index 04c223b..f55dcf6 100644
--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c
@@ -46,14 +46,25 @@
*/
#define VIRTIO_MEM_MIN_BLOCK_SIZE ((uint32_t)(1 * MiB))
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc64__)
-#define VIRTIO_MEM_DEFAULT_THP_SIZE ((uint32_t)(2 * MiB))
-#else
- /* fallback to 1 MiB (e.g., the THP size on s390x) */
-#define VIRTIO_MEM_DEFAULT_THP_SIZE VIRTIO_MEM_MIN_BLOCK_SIZE
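+/*
+ * Host-architecture default THP size, used when it cannot be read from
+ * sysfs: 2 MiB on x86-64, arm and ppc64, page-size dependent on aarch64,
+ * and the 1 MiB minimum block size elsewhere.
+ */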
+static uint32_t virtio_mem_default_thp_size(void)
+{
+ uint32_t default_thp_size = VIRTIO_MEM_MIN_BLOCK_SIZE;
+
+#if defined(__x86_64__) || defined(__arm__) || defined(__powerpc64__)
+ default_thp_size = 2 * MiB;
+#elif defined(__aarch64__)
+ if (qemu_real_host_page_size == 4 * KiB) {
+ default_thp_size = 2 * MiB;
+ } else if (qemu_real_host_page_size == 16 * KiB) {
+ default_thp_size = 32 * MiB;
+ } else if (qemu_real_host_page_size == 64 * KiB) {
+ default_thp_size = 512 * MiB;
+ }
#endif
+ return default_thp_size;
+}
+
/*
* We want to have a reasonable default block size such that
* 1. We avoid splitting THPs when unplugging memory, which degrades
@@ -86,11 +97,8 @@ static uint32_t virtio_mem_thp_size(void)
if (g_file_get_contents(HPAGE_PMD_SIZE_PATH, &content, NULL, NULL) &&
!qemu_strtou64(content, &endptr, 0, &tmp) &&
(!endptr || *endptr == '\n')) {
- /*
- * Sanity-check the value, if it's too big (e.g., aarch64 with 64k base
- * pages) or weird, fallback to something smaller.
- */
- if (!tmp || !is_power_of_2(tmp) || tmp > 16 * MiB) {
+        /* Sanity-check the value and fall back to something reasonable. */
+ if (!tmp || !is_power_of_2(tmp)) {
warn_report("Read unsupported THP size: %" PRIx64, tmp);
} else {
thp_size = tmp;
@@ -98,7 +106,7 @@ static uint32_t virtio_mem_thp_size(void)
}
if (!thp_size) {
- thp_size = VIRTIO_MEM_DEFAULT_THP_SIZE;
+ thp_size = virtio_mem_default_thp_size();
warn_report("Could not detect THP size, falling back to %" PRIx64
" MiB.", thp_size / MiB);
}
@@ -138,7 +146,7 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb)
* The memory block size corresponds mostly to the section size.
*
* This allows e.g., to add 20MB with a section size of 128MB on x86_64, and
- * a section size of 1GB on arm64 (as long as the start address is properly
+ * a section size of 512MB on arm64 (as long as the start address is properly
* aligned, similar to ordinary DIMMs).
*
* We can change this at any time and maybe even make it configurable if
@@ -147,6 +155,8 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb)
*/
#if defined(TARGET_X86_64) || defined(TARGET_I386)
#define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB))
+#elif defined(TARGET_ARM)
+#define VIRTIO_MEM_USABLE_EXTENT (2 * (512 * MiB))
#else
#error VIRTIO_MEM_USABLE_EXTENT not defined
#endif