author     Peter Maydell <peter.maydell@linaro.org>   2021-05-04 17:05:53 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2021-05-04 17:05:53 +0100
commit     87c6cef605a077b20a245d54167127ccf7996e23 (patch)
tree       62553013584befc8e93edc79bdd9752450f50c04 /hw/misc
parent     3e13d8e34b53d8f9a3421a816ccfbdc5fa874e98 (diff)
parent     9cccb912cfa865f0bae6b156fa94e2f216dd8835 (diff)
Merge remote-tracking branch 'remotes/legoater/tags/pull-aspeed-20210503' into staging
Aspeed patches:

* Fixes for the DMA space
* New model for ASPEED's Hash and Crypto Engine (Joel and Klaus)
* Acceptance tests (Joel)
* A fix for the XDMA model
* Some extra features for the SMC controller
* Two new boards: rainier-bmc and quanta-q7l1-bmc (Patrick)
# gpg: Signature made Mon 03 May 2021 06:23:36 BST
# gpg: using RSA key A0F66548F04895EBFE6B0B6051A343C7CFFBECA1
# gpg: Good signature from "Cédric Le Goater <clg@kaod.org>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: A0F6 6548 F048 95EB FE6B 0B60 51A3 43C7 CFFB ECA1
* remotes/legoater/tags/pull-aspeed-20210503:
aspeed: Add support for the quanta-q7l1-bmc board
hw/block: m25p80: Add support for mt25ql02g and mt25qu02g
aspeed: Add support for the rainier-bmc board
aspeed: Deprecate the swift-bmc machine
tests/qtest: Rename m25p80 test in aspeed_smc test
aspeed/smc: Add extra controls to request DMA
aspeed/smc: Add a 'features' attribute to the object class
hw/misc/aspeed_xdma: Add AST2600 support
tests/acceptance: Test ast2600 machine
tests/acceptance: Test ast2400 and ast2500 machines
tests/qtest: Add test for Aspeed HACE
aspeed: Integrate HACE
hw: Model ASPEED's Hash and Crypto Engine
hw/arm/aspeed: Do not sysbus-map mmio flash region directly, use alias
aspeed/i2c: Rename DMA address space
aspeed/i2c: Fix DMA address mask
aspeed/smc: Remove unused "sdram-base" property
aspeed/smc: Use the RAM memory region for DMAs
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
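The HACE model merged here is programmed through a handful of registers: status at 0x1c, hash source at 0x20, digest destination at 0x24, source length at 0x2c and command at 0x30 (see the defines in hw/misc/aspeed_hace.c in the diff below). As a rough illustration of that flow, here is a minimal, hypothetical bare-metal sketch of a single SHA-256 request in direct-access mode. It is not code from this series; the 0x1e6d0000 base address and the polling loop are assumptions about the AST2600 rather than anything stated in the diff.

/*
 * Hypothetical bare-metal sketch (not from this merge): drive the HACE
 * hash engine in direct-access mode to compute one SHA-256 digest.
 * HACE_BASE is an assumed AST2600 mapping; the offsets and command bits
 * mirror the defines in hw/misc/aspeed_hace.c below.
 */
#include <stdint.h>

#define HACE_BASE        0x1e6d0000UL   /* assumed AST2600 mapping */
#define HACE_STS         0x1c           /* R_STATUS */
#define HACE_HASH_SRC    0x20           /* R_HASH_SRC */
#define HACE_HASH_DIGEST 0x24           /* R_HASH_DEST */
#define HACE_HASH_LEN    0x2c           /* R_HASH_SRC_LEN */
#define HACE_HASH_CMD    0x30           /* R_HASH_CMD */

#define HACE_STS_HASH_INT (1u << 9)                /* HASH_IRQ */
#define HACE_ALGO_SHA256  ((1u << 4) | (1u << 6))  /* HASH_ALGO_SHA256 */

static inline void mmio_write(uintptr_t addr, uint32_t val)
{
    *(volatile uint32_t *)addr = val;
}

static inline uint32_t mmio_read(uintptr_t addr)
{
    return *(volatile uint32_t *)addr;
}

/* Hash 'len' bytes at DRAM address 'src' and write the digest to 'dst'. */
static void hace_sha256(uint32_t src, uint32_t dst, uint32_t len)
{
    mmio_write(HACE_BASE + HACE_HASH_SRC, src);
    mmio_write(HACE_BASE + HACE_HASH_DIGEST, dst);
    mmio_write(HACE_BASE + HACE_HASH_LEN, len);

    /* Writing the command register starts the operation. */
    mmio_write(HACE_BASE + HACE_HASH_CMD, HACE_ALGO_SHA256);

    /* Poll for completion, then acknowledge by writing the bit back. */
    while (!(mmio_read(HACE_BASE + HACE_STS) & HACE_STS_HASH_INT)) {
        ;
    }
    mmio_write(HACE_BASE + HACE_STS, HACE_STS_HASH_INT);
}

In the QEMU model the digest is produced synchronously on the command write, so the poll returns immediately; scatter-gather mode (HASH_SG_EN) follows the same pattern with a list of address/length pairs placed in DRAM.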
Diffstat (limited to 'hw/misc')
-rw-r--r--   hw/misc/aspeed_hace.c   389
-rw-r--r--   hw/misc/aspeed_xdma.c   124
-rw-r--r--   hw/misc/meson.build       1

3 files changed, 491 insertions, 23 deletions
diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
new file mode 100644
index 0000000..10f00e6
--- /dev/null
+++ b/hw/misc/aspeed_hace.c
@@ -0,0 +1,389 @@
+/*
+ * ASPEED Hash and Crypto Engine
+ *
+ * Copyright (C) 2021 IBM Corp.
+ *
+ * Joel Stanley <joel@jms.id.au>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/misc/aspeed_hace.h"
+#include "qapi/error.h"
+#include "migration/vmstate.h"
+#include "crypto/hash.h"
+#include "hw/qdev-properties.h"
+#include "hw/irq.h"
+
+#define R_CRYPT_CMD (0x10 / 4)
+
+#define R_STATUS (0x1c / 4)
+#define HASH_IRQ BIT(9)
+#define CRYPT_IRQ BIT(12)
+#define TAG_IRQ BIT(15)
+
+#define R_HASH_SRC (0x20 / 4)
+#define R_HASH_DEST (0x24 / 4)
+#define R_HASH_SRC_LEN (0x2c / 4)
+
+#define R_HASH_CMD (0x30 / 4)
+/* Hash algorithm selection */
+#define HASH_ALGO_MASK (BIT(4) | BIT(5) | BIT(6))
+#define HASH_ALGO_MD5 0
+#define HASH_ALGO_SHA1 BIT(5)
+#define HASH_ALGO_SHA224 BIT(6)
+#define HASH_ALGO_SHA256 (BIT(4) | BIT(6))
+#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
+/* SHA512 algorithm selection */
+#define SHA512_HASH_ALGO_MASK (BIT(10) | BIT(11) | BIT(12))
+#define HASH_ALGO_SHA512_SHA512 0
+#define HASH_ALGO_SHA512_SHA384 BIT(10)
+#define HASH_ALGO_SHA512_SHA256 BIT(11)
+#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
+/* HMAC modes */
+#define HASH_HMAC_MASK (BIT(7) | BIT(8))
+#define HASH_DIGEST 0
+#define HASH_DIGEST_HMAC BIT(7)
+#define HASH_DIGEST_ACCUM BIT(8)
+#define HASH_HMAC_KEY (BIT(7) | BIT(8))
+/* Cascaded operation modes */
+#define HASH_ONLY 0
+#define HASH_ONLY2 BIT(0)
+#define HASH_CRYPT_THEN_HASH BIT(1)
+#define HASH_HASH_THEN_CRYPT (BIT(0) | BIT(1))
+/* Other cmd bits */
+#define HASH_IRQ_EN BIT(9)
+#define HASH_SG_EN BIT(18)
+/* Scatter-gather data list */
+#define SG_LIST_LEN_SIZE 4
+#define SG_LIST_LEN_MASK 0x0FFFFFFF
+#define SG_LIST_LEN_LAST BIT(31)
+#define SG_LIST_ADDR_SIZE 4
+#define SG_LIST_ADDR_MASK 0x7FFFFFFF
+#define SG_LIST_ENTRY_SIZE (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
+#define ASPEED_HACE_MAX_SG 256 /* max number of entries */
+
+static const struct {
+    uint32_t mask;
+    QCryptoHashAlgorithm algo;
+} hash_algo_map[] = {
+    { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 },
+    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 },
+    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 },
+    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 },
+    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512, QCRYPTO_HASH_ALG_SHA512 },
+    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384, QCRYPTO_HASH_ALG_SHA384 },
+    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256, QCRYPTO_HASH_ALG_SHA256 },
+};
+
+static int hash_algo_lookup(uint32_t reg)
+{
+    int i;
+
+    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;
+
+    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
+        if (reg == hash_algo_map[i].mask) {
+            return hash_algo_map[i].algo;
+        }
+    }
+
+    return -1;
+}
+
+static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
+{
+    struct iovec iov[ASPEED_HACE_MAX_SG];
+    g_autofree uint8_t *digest_buf;
+    size_t digest_len = 0;
+    int i;
+
+    if (sg_mode) {
+        uint32_t len = 0;
+
+        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
+            uint32_t addr, src;
+            hwaddr plen;
+
+            if (i == ASPEED_HACE_MAX_SG) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                        "aspeed_hace: guest failed to set end of sg list marker\n");
+                break;
+            }
+
+            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);
+
+            len = address_space_ldl_le(&s->dram_as, src,
+                                       MEMTXATTRS_UNSPECIFIED, NULL);
+
+            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
+                                        MEMTXATTRS_UNSPECIFIED, NULL);
+            addr &= SG_LIST_ADDR_MASK;
+
+            iov[i].iov_len = len & SG_LIST_LEN_MASK;
+            plen = iov[i].iov_len;
+            iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
+                                                MEMTXATTRS_UNSPECIFIED);
+        }
+    } else {
+        hwaddr len = s->regs[R_HASH_SRC_LEN];
+
+        iov[0].iov_len = len;
+        iov[0].iov_base = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
+                                            &len, false,
+                                            MEMTXATTRS_UNSPECIFIED);
+        i = 1;
+    }
+
+    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
+        return;
+    }
+
+    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
+                            MEMTXATTRS_UNSPECIFIED,
+                            digest_buf, digest_len)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "aspeed_hace: address space write failed\n");
+    }
+
+    for (; i > 0; i--) {
+        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
+                            iov[i - 1].iov_len, false,
+                            iov[i - 1].iov_len);
+    }
+
+    /*
+     * Set status bits to indicate completion. Testing shows hardware sets
+     * these irrespective of HASH_IRQ_EN.
+     */
+    s->regs[R_STATUS] |= HASH_IRQ;
+}
+
+static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
+{
+    AspeedHACEState *s = ASPEED_HACE(opaque);
+
+    addr >>= 2;
+
+    if (addr >= ASPEED_HACE_NR_REGS) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
+                      __func__, addr << 2);
+        return 0;
+    }
+
+    return s->regs[addr];
+}
+
+static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
+                              unsigned int size)
+{
+    AspeedHACEState *s = ASPEED_HACE(opaque);
+    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);
+
+    addr >>= 2;
+
+    if (addr >= ASPEED_HACE_NR_REGS) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+                      __func__, addr << 2);
+        return;
+    }
+
+    switch (addr) {
+    case R_STATUS:
+        if (data & HASH_IRQ) {
+            data &= ~HASH_IRQ;
+
+            if (s->regs[addr] & HASH_IRQ) {
+                qemu_irq_lower(s->irq);
+            }
+        }
+        break;
+    case R_HASH_SRC:
+        data &= ahc->src_mask;
+        break;
+    case R_HASH_DEST:
+        data &= ahc->dest_mask;
+        break;
+    case R_HASH_SRC_LEN:
+        data &= 0x0FFFFFFF;
+        break;
+    case R_HASH_CMD: {
+        int algo;
+        data &= ahc->hash_mask;
+
+        if ((data & HASH_HMAC_MASK)) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: HMAC engine command mode %"PRIx64" not implemented",
+                          __func__, (data & HASH_HMAC_MASK) >> 8);
+        }
+        if (data & BIT(1)) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: Cascaded mode not implemented",
+                          __func__);
+        }
+        algo = hash_algo_lookup(data);
+        if (algo < 0) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
+                          __func__, data & ahc->hash_mask);
+            break;
+        }
+        do_hash_operation(s, algo, data & HASH_SG_EN);
+
+        if (data & HASH_IRQ_EN) {
+            qemu_irq_raise(s->irq);
+        }
+        break;
+    }
+    case R_CRYPT_CMD:
+        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
+                      __func__);
+        break;
+    default:
+        break;
+    }
+
+    s->regs[addr] = data;
+}
+
+static const MemoryRegionOps aspeed_hace_ops = {
+    .read = aspeed_hace_read,
+    .write = aspeed_hace_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+    },
+};
+
+static void aspeed_hace_reset(DeviceState *dev)
+{
+    struct AspeedHACEState *s = ASPEED_HACE(dev);
+
+    memset(s->regs, 0, sizeof(s->regs));
+}
+
+static void aspeed_hace_realize(DeviceState *dev, Error **errp)
+{
+    AspeedHACEState *s = ASPEED_HACE(dev);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+    sysbus_init_irq(sbd, &s->irq);
+
+    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
+                          TYPE_ASPEED_HACE, 0x1000);
+
+    if (!s->dram_mr) {
+        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
+        return;
+    }
+
+    address_space_init(&s->dram_as, s->dram_mr, "dram");
+
+    sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static Property aspeed_hace_properties[] = {
+    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
+                     TYPE_MEMORY_REGION, MemoryRegion *),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+
+static const VMStateDescription vmstate_aspeed_hace = {
+    .name = TYPE_ASPEED_HACE,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
+        VMSTATE_END_OF_LIST(),
+    }
+};
+
+static void aspeed_hace_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = aspeed_hace_realize;
+    dc->reset = aspeed_hace_reset;
+    device_class_set_props(dc, aspeed_hace_properties);
+    dc->vmsd = &vmstate_aspeed_hace;
+}
+
+static const TypeInfo aspeed_hace_info = {
+    .name = TYPE_ASPEED_HACE,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(AspeedHACEState),
+    .class_init = aspeed_hace_class_init,
+    .class_size = sizeof(AspeedHACEClass)
+};
+
+static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
+
+    dc->desc = "AST2400 Hash and Crypto Engine";
+
+    ahc->src_mask = 0x0FFFFFFF;
+    ahc->dest_mask = 0x0FFFFFF8;
+    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
+}
+
+static const TypeInfo aspeed_ast2400_hace_info = {
+    .name = TYPE_ASPEED_AST2400_HACE,
+    .parent = TYPE_ASPEED_HACE,
+    .class_init = aspeed_ast2400_hace_class_init,
+};
+
+static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
+
+    dc->desc = "AST2500 Hash and Crypto Engine";
+
+    ahc->src_mask = 0x3fffffff;
+    ahc->dest_mask = 0x3ffffff8;
+    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
+}
+
+static const TypeInfo aspeed_ast2500_hace_info = {
+    .name = TYPE_ASPEED_AST2500_HACE,
+    .parent = TYPE_ASPEED_HACE,
+    .class_init = aspeed_ast2500_hace_class_init,
+};
+
+static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);
+
+    dc->desc = "AST2600 Hash and Crypto Engine";
+
+    ahc->src_mask = 0x7FFFFFFF;
+    ahc->dest_mask = 0x7FFFFFF8;
+    ahc->hash_mask = 0x00147FFF;
+}
+
+static const TypeInfo aspeed_ast2600_hace_info = {
+    .name = TYPE_ASPEED_AST2600_HACE,
+    .parent = TYPE_ASPEED_HACE,
+    .class_init = aspeed_ast2600_hace_class_init,
+};
+
+static void aspeed_hace_register_types(void)
+{
+    type_register_static(&aspeed_ast2400_hace_info);
+    type_register_static(&aspeed_ast2500_hace_info);
+    type_register_static(&aspeed_ast2600_hace_info);
+    type_register_static(&aspeed_hace_info);
+}
+
+type_init(aspeed_hace_register_types);
diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c
index 533d237..1c21577 100644
--- a/hw/misc/aspeed_xdma.c
+++ b/hw/misc/aspeed_xdma.c
@@ -30,6 +30,19 @@
 #define XDMA_IRQ_ENG_STAT_US_COMP BIT(4)
 #define XDMA_IRQ_ENG_STAT_DS_COMP BIT(5)
 #define XDMA_IRQ_ENG_STAT_RESET 0xF8000000
+
+#define XDMA_AST2600_BMC_CMDQ_ADDR 0x14
+#define XDMA_AST2600_BMC_CMDQ_ENDP 0x18
+#define XDMA_AST2600_BMC_CMDQ_WRP 0x1c
+#define XDMA_AST2600_BMC_CMDQ_RDP 0x20
+#define XDMA_AST2600_IRQ_CTRL 0x38
+#define XDMA_AST2600_IRQ_CTRL_US_COMP BIT(16)
+#define XDMA_AST2600_IRQ_CTRL_DS_COMP BIT(17)
+#define XDMA_AST2600_IRQ_CTRL_W_MASK 0x017003FF
+#define XDMA_AST2600_IRQ_STATUS 0x3c
+#define XDMA_AST2600_IRQ_STATUS_US_COMP BIT(16)
+#define XDMA_AST2600_IRQ_STATUS_DS_COMP BIT(17)
+
 #define XDMA_MEM_SIZE 0x1000
 
 #define TO_REG(addr) ((addr) / sizeof(uint32_t))
@@ -52,56 +65,48 @@ static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val,
     unsigned int idx;
     uint32_t val32 = (uint32_t)val;
     AspeedXDMAState *xdma = opaque;
+    AspeedXDMAClass *axc = ASPEED_XDMA_GET_CLASS(xdma);
 
     if (addr >= ASPEED_XDMA_REG_SIZE) {
         return;
     }
 
-    switch (addr) {
-    case XDMA_BMC_CMDQ_ENDP:
+    if (addr == axc->cmdq_endp) {
         xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK;
-        break;
-    case XDMA_BMC_CMDQ_WRP:
+    } else if (addr == axc->cmdq_wrp) {
         idx = TO_REG(addr);
         xdma->regs[idx] = val32 & XDMA_BMC_CMDQ_W_MASK;
-        xdma->regs[TO_REG(XDMA_BMC_CMDQ_RDP)] = xdma->regs[idx];
+        xdma->regs[TO_REG(axc->cmdq_rdp)] = xdma->regs[idx];
 
         trace_aspeed_xdma_write(addr, val);
 
         if (xdma->bmc_cmdq_readp_set) {
             xdma->bmc_cmdq_readp_set = 0;
         } else {
-            xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] |=
-                XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;
+            xdma->regs[TO_REG(axc->intr_status)] |= axc->intr_complete;
 
-            if (xdma->regs[TO_REG(XDMA_IRQ_ENG_CTRL)] &
-                (XDMA_IRQ_ENG_CTRL_US_COMP | XDMA_IRQ_ENG_CTRL_DS_COMP))
+            if (xdma->regs[TO_REG(axc->intr_ctrl)] & axc->intr_complete) {
                 qemu_irq_raise(xdma->irq);
+            }
         }
-        break;
-    case XDMA_BMC_CMDQ_RDP:
+    } else if (addr == axc->cmdq_rdp) {
         trace_aspeed_xdma_write(addr, val);
 
         if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) {
            xdma->bmc_cmdq_readp_set = 1;
        }
-        break;
-    case XDMA_IRQ_ENG_CTRL:
-        xdma->regs[TO_REG(addr)] = val32 & XDMA_IRQ_ENG_CTRL_W_MASK;
-        break;
-    case XDMA_IRQ_ENG_STAT:
+    } else if (addr == axc->intr_ctrl) {
+        xdma->regs[TO_REG(addr)] = val32 & axc->intr_ctrl_mask;
+    } else if (addr == axc->intr_status) {
        trace_aspeed_xdma_write(addr, val);
 
        idx = TO_REG(addr);
-        if (val32 & (XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP)) {
-            xdma->regs[idx] &=
-                ~(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP);
+        if (val32 & axc->intr_complete) {
+            xdma->regs[idx] &= ~axc->intr_complete;
             qemu_irq_lower(xdma->irq);
         }
-        break;
-    default:
+    } else {
         xdma->regs[TO_REG(addr)] = val32;
-        break;
     }
 }
 
@@ -127,10 +132,11 @@ static void aspeed_xdma_realize(DeviceState *dev, Error **errp)
 static void aspeed_xdma_reset(DeviceState *dev)
 {
     AspeedXDMAState *xdma = ASPEED_XDMA(dev);
+    AspeedXDMAClass *axc = ASPEED_XDMA_GET_CLASS(xdma);
 
     xdma->bmc_cmdq_readp_set = 0;
     memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE);
-    xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] = XDMA_IRQ_ENG_STAT_RESET;
+    xdma->regs[TO_REG(axc->intr_status)] = XDMA_IRQ_ENG_STAT_RESET;
 
     qemu_irq_lower(xdma->irq);
 }
@@ -144,6 +150,73 @@ static const VMStateDescription aspeed_xdma_vmstate = {
     },
 };
 
+static void aspeed_2600_xdma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
+
+    dc->desc = "ASPEED 2600 XDMA Controller";
+
+    axc->cmdq_endp = XDMA_AST2600_BMC_CMDQ_ENDP;
+    axc->cmdq_wrp = XDMA_AST2600_BMC_CMDQ_WRP;
+    axc->cmdq_rdp = XDMA_AST2600_BMC_CMDQ_RDP;
+    axc->intr_ctrl = XDMA_AST2600_IRQ_CTRL;
+    axc->intr_ctrl_mask = XDMA_AST2600_IRQ_CTRL_W_MASK;
+    axc->intr_status = XDMA_AST2600_IRQ_STATUS;
+    axc->intr_complete = XDMA_AST2600_IRQ_STATUS_US_COMP |
+        XDMA_AST2600_IRQ_STATUS_DS_COMP;
+}
+
+static const TypeInfo aspeed_2600_xdma_info = {
+    .name = TYPE_ASPEED_2600_XDMA,
+    .parent = TYPE_ASPEED_XDMA,
+    .class_init = aspeed_2600_xdma_class_init,
+};
+
+static void aspeed_2500_xdma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
+
+    dc->desc = "ASPEED 2500 XDMA Controller";
+
+    axc->cmdq_endp = XDMA_BMC_CMDQ_ENDP;
+    axc->cmdq_wrp = XDMA_BMC_CMDQ_WRP;
+    axc->cmdq_rdp = XDMA_BMC_CMDQ_RDP;
+    axc->intr_ctrl = XDMA_IRQ_ENG_CTRL;
+    axc->intr_ctrl_mask = XDMA_IRQ_ENG_CTRL_W_MASK;
+    axc->intr_status = XDMA_IRQ_ENG_STAT;
+    axc->intr_complete = XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;
+};
+
+static const TypeInfo aspeed_2500_xdma_info = {
+    .name = TYPE_ASPEED_2500_XDMA,
+    .parent = TYPE_ASPEED_XDMA,
+    .class_init = aspeed_2500_xdma_class_init,
+};
+
+static void aspeed_2400_xdma_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    AspeedXDMAClass *axc = ASPEED_XDMA_CLASS(klass);
+
+    dc->desc = "ASPEED 2400 XDMA Controller";
+
+    axc->cmdq_endp = XDMA_BMC_CMDQ_ENDP;
+    axc->cmdq_wrp = XDMA_BMC_CMDQ_WRP;
+    axc->cmdq_rdp = XDMA_BMC_CMDQ_RDP;
+    axc->intr_ctrl = XDMA_IRQ_ENG_CTRL;
+    axc->intr_ctrl_mask = XDMA_IRQ_ENG_CTRL_W_MASK;
+    axc->intr_status = XDMA_IRQ_ENG_STAT;
+    axc->intr_complete = XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;
+};
+
+static const TypeInfo aspeed_2400_xdma_info = {
+    .name = TYPE_ASPEED_2400_XDMA,
+    .parent = TYPE_ASPEED_XDMA,
+    .class_init = aspeed_2400_xdma_class_init,
+};
+
 static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(classp);
@@ -158,10 +231,15 @@ static const TypeInfo aspeed_xdma_info = {
     .parent = TYPE_SYS_BUS_DEVICE,
     .instance_size = sizeof(AspeedXDMAState),
     .class_init = aspeed_xdma_class_init,
+    .class_size = sizeof(AspeedXDMAClass),
+    .abstract = true,
 };
 
 static void aspeed_xdma_register_type(void)
 {
     type_register_static(&aspeed_xdma_info);
+    type_register_static(&aspeed_2400_xdma_info);
+    type_register_static(&aspeed_2500_xdma_info);
+    type_register_static(&aspeed_2600_xdma_info);
 }
 type_init(aspeed_xdma_register_type);
diff --git a/hw/misc/meson.build b/hw/misc/meson.build
index 21034dc..1e7b8b0 100644
--- a/hw/misc/meson.build
+++ b/hw/misc/meson.build
@@ -109,6 +109,7 @@ softmmu_ss.add(when: 'CONFIG_PVPANIC_ISA', if_true: files('pvpanic-isa.c'))
 softmmu_ss.add(when: 'CONFIG_PVPANIC_PCI', if_true: files('pvpanic-pci.c'))
 softmmu_ss.add(when: 'CONFIG_AUX', if_true: files('auxbus.c'))
 softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files(
+  'aspeed_hace.c',
   'aspeed_lpc.c',
   'aspeed_scu.c',
   'aspeed_sdmc.c',
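The XDMA change above keeps the guest-visible behaviour and only moves the registers to per-SoC offsets: on the AST2600 the BMC command-queue write pointer sits at 0x1c and the interrupt status register at 0x3c, and a write-pointer update latches the upstream/downstream completion bits. A hypothetical qtest-style sketch of that check is below; it is not part of this series, and the 0x1e6e7000 XDMA base address for the ast2600-evb machine is an assumption.

/*
 * Hypothetical qtest sketch (not part of this series): a write to the
 * AST2600 XDMA BMC command-queue write pointer should latch the
 * upstream/downstream completion bits in the 0x3c status register,
 * as modelled in hw/misc/aspeed_xdma.c above. The 0x1e6e7000 base
 * address is an assumption about the AST2600 memory map.
 */
#include "qemu/osdep.h"
#include "libqtest.h"

#define AST2600_XDMA_BASE            0x1e6e7000 /* assumed mapping */
#define XDMA_AST2600_BMC_CMDQ_WRP    0x1c
#define XDMA_AST2600_IRQ_STATUS      0x3c
#define XDMA_AST2600_IRQ_STATUS_COMP 0x00030000 /* BIT(16) | BIT(17) */

static void test_ast2600_xdma_cmdq_wrp(void)
{
    QTestState *qts = qtest_init("-machine ast2600-evb");
    uint32_t status;

    /* Updating the write pointer reports US/DS completion in the model. */
    qtest_writel(qts, AST2600_XDMA_BASE + XDMA_AST2600_BMC_CMDQ_WRP, 0x10);

    status = qtest_readl(qts, AST2600_XDMA_BASE + XDMA_AST2600_IRQ_STATUS);
    g_assert_cmphex(status & XDMA_AST2600_IRQ_STATUS_COMP, ==,
                    XDMA_AST2600_IRQ_STATUS_COMP);

    qtest_quit(qts);
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    qtest_add_func("/ast2600/xdma/cmdq-wrp", test_ast2600_xdma_cmdq_wrp);

    return g_test_run();
}

Built as a regular qtest, this only exercises the register model; real command descriptors and PCIe transfers are outside what the device emulates.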