Diffstat (limited to 'hw')
-rw-r--r--  hw/arm/aspeed_ast27x0-fc.c |  10
-rw-r--r--  hw/arm/aspeed_ast27x0.c    |  14
-rw-r--r--  hw/arm/fby35.c             |   1
-rw-r--r--  hw/audio/asc.c             |   9
-rw-r--r--  hw/core/machine.c          |   4
-rw-r--r--  hw/i386/kvm/xen-stubs.c    |  13
-rw-r--r--  hw/i386/kvm/xen_evtchn.c   |   2
-rw-r--r--  hw/i386/monitor.c          |   2
-rw-r--r--  hw/i386/sgx-stub.c         |   2
-rw-r--r--  hw/i386/sgx.c              |   2
-rw-r--r--  hw/intc/aspeed_intc.c      |  12
-rw-r--r--  hw/misc/aspeed_hace.c      | 479
-rw-r--r--  hw/misc/trace-events       |   8
-rw-r--r--  hw/s390x/cpu-topology.c    |   4
-rw-r--r--  hw/s390x/s390-skeys.c      |   1
-rw-r--r--  hw/scsi/scsi-disk.c        |  55
16 files changed, 397 insertions, 221 deletions
diff --git a/hw/arm/aspeed_ast27x0-fc.c b/hw/arm/aspeed_ast27x0-fc.c index 125a3ad..7087be4 100644 --- a/hw/arm/aspeed_ast27x0-fc.c +++ b/hw/arm/aspeed_ast27x0-fc.c @@ -48,7 +48,7 @@ struct Ast2700FCState { bool mmio_exec; }; -#define AST2700FC_BMC_RAM_SIZE (2 * GiB) +#define AST2700FC_BMC_RAM_SIZE (1 * GiB) #define AST2700FC_CM4_DRAM_SIZE (32 * MiB) #define AST2700FC_HW_STRAP1 0x000000C0 @@ -68,6 +68,7 @@ static void ast2700fc_ca35_init(MachineState *machine) memory_region_init(&s->ca35_memory, OBJECT(&s->ca35), "ca35-memory", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), 0, &s->ca35_memory); if (!memory_region_init_ram(&s->ca35_dram, OBJECT(&s->ca35), "ca35-dram", AST2700FC_BMC_RAM_SIZE, &error_abort)) { @@ -86,6 +87,13 @@ static void ast2700fc_ca35_init(MachineState *machine) AST2700FC_BMC_RAM_SIZE, &error_abort)) { return; } + + for (int i = 0; i < sc->macs_num; i++) { + if (!qemu_configure_nic_device(DEVICE(&soc->ftgmac100[i]), + true, NULL)) { + break; + } + } if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap1", AST2700FC_HW_STRAP1, &error_abort)) { return; diff --git a/hw/arm/aspeed_ast27x0.c b/hw/arm/aspeed_ast27x0.c index 1974a25..6aa3841 100644 --- a/hw/arm/aspeed_ast27x0.c +++ b/hw/arm/aspeed_ast27x0.c @@ -23,14 +23,14 @@ #include "qobject/qlist.h" #include "qemu/log.h" -#define AST2700_SOC_IO_SIZE 0x01000000 +#define AST2700_SOC_IO_SIZE 0x00FE0000 #define AST2700_SOC_IOMEM_SIZE 0x01000000 #define AST2700_SOC_DPMCU_SIZE 0x00040000 #define AST2700_SOC_LTPI_SIZE 0x01000000 static const hwaddr aspeed_soc_ast2700_memmap[] = { - [ASPEED_DEV_IOMEM] = 0x00000000, [ASPEED_DEV_VBOOTROM] = 0x00000000, + [ASPEED_DEV_IOMEM] = 0x00020000, [ASPEED_DEV_SRAM] = 0x10000000, [ASPEED_DEV_DPMCU] = 0x11000000, [ASPEED_DEV_IOMEM0] = 0x12000000, @@ -346,8 +346,9 @@ static void aspeed_ram_capacity_write(void *opaque, hwaddr addr, uint64_t data, * If writes the data to the address which is beyond the ram size, * it would write the data to the "address % ram_size". 
*/ - result = address_space_write(&s->dram_as, addr % ram_size, - MEMTXATTRS_UNSPECIFIED, &data, 4); + address_space_stl_le(&s->dram_as, addr % ram_size, data, + MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed, addr:0x%" HWADDR_PRIx @@ -360,9 +361,10 @@ static const MemoryRegionOps aspeed_ram_capacity_ops = { .read = aspeed_ram_capacity_read, .write = aspeed_ram_capacity_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { - .min_access_size = 1, - .max_access_size = 8, + .min_access_size = 4, + .max_access_size = 4, }, }; diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index e123fa6..c14fc2e 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -77,6 +77,7 @@ static void fby35_bmc_init(Fby35State *s) memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), 0, &s->bmc_memory); memory_region_init_ram(&s->bmc_dram, OBJECT(&s->bmc), "bmc-dram", FBY35_BMC_RAM_SIZE, &error_abort); diff --git a/hw/audio/asc.c b/hw/audio/asc.c index 18382cc..edd42d6 100644 --- a/hw/audio/asc.c +++ b/hw/audio/asc.c @@ -12,6 +12,7 @@ #include "qemu/osdep.h" #include "qemu/timer.h" +#include "qapi/error.h" #include "hw/sysbus.h" #include "hw/irq.h" #include "audio/audio.h" @@ -653,11 +654,17 @@ static void asc_realize(DeviceState *dev, Error **errp) s->voice = AUD_open_out(&s->card, s->voice, "asc.out", s, asc_out_cb, &as); + if (!s->voice) { + AUD_remove_card(&s->card); + error_setg(errp, "Initializing audio stream failed"); + return; + } + s->shift = 1; s->samples = AUD_get_buffer_size_out(s->voice) >> s->shift; s->mixbuf = g_malloc0(s->samples << s->shift); - s->silentbuf = g_malloc0(s->samples << s->shift); + s->silentbuf = g_malloc(s->samples << s->shift); memset(s->silentbuf, 0x80, s->samples << s->shift); /* Add easc registers if required */ diff --git a/hw/core/machine.c b/hw/core/machine.c index b8ae155..c3f3a50 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -37,7 +37,9 @@ #include "hw/virtio/virtio-iommu.h" #include "audio/audio.h" -GlobalProperty hw_compat_10_0[] = {}; +GlobalProperty hw_compat_10_0[] = { + { "scsi-hd", "dpofua", "off" }, +}; const size_t hw_compat_10_0_len = G_N_ELEMENTS(hw_compat_10_0); GlobalProperty hw_compat_9_2[] = { diff --git a/hw/i386/kvm/xen-stubs.c b/hw/i386/kvm/xen-stubs.c index d03131e..ce73119 100644 --- a/hw/i386/kvm/xen-stubs.c +++ b/hw/i386/kvm/xen-stubs.c @@ -12,7 +12,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" #include "xen_evtchn.h" #include "xen_primary_console.h" @@ -38,15 +37,3 @@ void xen_primary_console_create(void) void xen_primary_console_set_be_port(uint16_t port) { } -#ifdef TARGET_I386 -EvtchnInfoList *qmp_xen_event_list(Error **errp) -{ - error_setg(errp, "Xen event channel emulation not enabled"); - return NULL; -} - -void qmp_xen_event_inject(uint32_t port, Error **errp) -{ - error_setg(errp, "Xen event channel emulation not enabled"); -} -#endif diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c index b519054..dd566c4 100644 --- a/hw/i386/kvm/xen_evtchn.c +++ b/hw/i386/kvm/xen_evtchn.c @@ -19,7 +19,7 @@ #include "monitor/monitor.h" #include "monitor/hmp.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qapi-commands-misc-i386.h" #include "qobject/qdict.h" #include "qom/object.h" #include "exec/target_page.h" diff --git a/hw/i386/monitor.c b/hw/i386/monitor.c index 
1921e4d..79df965 100644 --- a/hw/i386/monitor.c +++ b/hw/i386/monitor.c @@ -26,7 +26,7 @@ #include "monitor/monitor.h" #include "qobject/qdict.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qapi-commands-misc-i386.h" #include "hw/i386/x86.h" #include "hw/rtc/mc146818rtc.h" diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c index ccb21a9..d295e54 100644 --- a/hw/i386/sgx-stub.c +++ b/hw/i386/sgx-stub.c @@ -3,8 +3,8 @@ #include "monitor/hmp-target.h" #include "hw/i386/pc.h" #include "hw/i386/sgx-epc.h" +#include "qapi/qapi-commands-misc-i386.h" #include "qapi/error.h" -#include "qapi/qapi-commands-misc-target.h" void sgx_epc_build_srat(GArray *table_data) { diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c index c80203b..e280154 100644 --- a/hw/i386/sgx.c +++ b/hw/i386/sgx.c @@ -19,7 +19,7 @@ #include "monitor/hmp-target.h" #include "qapi/error.h" #include "qemu/error-report.h" -#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qapi-commands-misc-i386.h" #include "system/address-spaces.h" #include "system/hw_accel.h" #include "system/reset.h" diff --git a/hw/intc/aspeed_intc.c b/hw/intc/aspeed_intc.c index 33fcbe7..5cd786d 100644 --- a/hw/intc/aspeed_intc.c +++ b/hw/intc/aspeed_intc.c @@ -737,6 +737,7 @@ static const MemoryRegionOps aspeed_intc_ops = { .read = aspeed_intc_read, .write = aspeed_intc_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -747,6 +748,7 @@ static const MemoryRegionOps aspeed_intcio_ops = { .read = aspeed_intcio_read, .write = aspeed_intcio_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -757,6 +759,7 @@ static const MemoryRegionOps aspeed_ssp_intc_ops = { .read = aspeed_intc_read, .write = aspeed_ssp_intc_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -767,6 +770,7 @@ static const MemoryRegionOps aspeed_ssp_intcio_ops = { .read = aspeed_intcio_read, .write = aspeed_ssp_intcio_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -777,6 +781,7 @@ static const MemoryRegionOps aspeed_tsp_intc_ops = { .read = aspeed_intc_read, .write = aspeed_tsp_intc_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -787,6 +792,7 @@ static const MemoryRegionOps aspeed_tsp_intcio_ops = { .read = aspeed_intcio_read, .write = aspeed_tsp_intcio_write, .endianness = DEVICE_LITTLE_ENDIAN, + .impl.min_access_size = 4, .valid = { .min_access_size = 4, .max_access_size = 4, @@ -995,7 +1001,8 @@ static AspeedINTCIRQ aspeed_2700ssp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { {5, 5, 1, R_SSPINT165_EN, R_SSPINT165_STATUS}, }; -static void aspeed_2700ssp_intcio_class_init(ObjectClass *klass, const void *data) +static void aspeed_2700ssp_intcio_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedINTCClass *aic = ASPEED_INTC_CLASS(klass); @@ -1063,7 +1070,8 @@ static AspeedINTCIRQ aspeed_2700tsp_intcio_irqs[ASPEED_INTC_MAX_INPINS] = { {5, 5, 1, R_TSPINT165_EN, R_TSPINT165_STATUS}, }; -static void aspeed_2700tsp_intcio_class_init(ObjectClass *klass, const void *data) +static void aspeed_2700tsp_intcio_class_init(ObjectClass *klass, + const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); AspeedINTCClass *aic = 
ASPEED_INTC_CLASS(klass); diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index f4bff32..8924a30 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -10,14 +10,17 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qemu/log.h" #include "qemu/error-report.h" +#include "qemu/iov.h" #include "hw/misc/aspeed_hace.h" #include "qapi/error.h" #include "migration/vmstate.h" #include "crypto/hash.h" #include "hw/qdev-properties.h" #include "hw/irq.h" +#include "trace.h" #define R_CRYPT_CMD (0x10 / 4) @@ -27,9 +30,12 @@ #define TAG_IRQ BIT(15) #define R_HASH_SRC (0x20 / 4) -#define R_HASH_DEST (0x24 / 4) +#define R_HASH_DIGEST (0x24 / 4) #define R_HASH_KEY_BUFF (0x28 / 4) #define R_HASH_SRC_LEN (0x2c / 4) +#define R_HASH_SRC_HI (0x90 / 4) +#define R_HASH_DIGEST_HI (0x94 / 4) +#define R_HASH_KEY_BUFF_HI (0x98 / 4) #define R_HASH_CMD (0x30 / 4) /* Hash algorithm selection */ @@ -84,6 +90,42 @@ static const struct { QCRYPTO_HASH_ALGO_SHA256 }, }; +static void hace_hexdump(const char *desc, const char *buf, size_t size) +{ + g_autoptr(GString) str = g_string_sized_new(64); + size_t len; + size_t i; + + for (i = 0; i < size; i += len) { + len = MIN(16, size - i); + g_string_truncate(str, 0); + qemu_hexdump_line(str, buf + i, len, 1, 4); + trace_aspeed_hace_hexdump(desc, i, str->str); + } +} + +static void hace_iov_hexdump(const char *desc, const struct iovec *iov, + const unsigned int iov_cnt) +{ + size_t size = 0; + char *buf; + int i; + + for (i = 0; i < iov_cnt; i++) { + size += iov[i].iov_len; + } + + buf = g_malloc(size); + + if (!buf) { + return; + } + + iov_to_buf(iov, iov_cnt, 0, buf, size); + hace_hexdump(desc, buf, size); + g_free(buf); +} + static int hash_algo_lookup(uint32_t reg) { int i; @@ -142,171 +184,269 @@ static bool has_padding(AspeedHACEState *s, struct iovec *iov, return false; } -static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id, - uint32_t *pad_offset) +static uint64_t hash_get_source_addr(AspeedHACEState *s) { - int i, iov_count; - if (*pad_offset != 0) { - s->iov_cache[s->iov_count].iov_base = iov[id].iov_base; - s->iov_cache[s->iov_count].iov_len = *pad_offset; - ++s->iov_count; - } - for (i = 0; i < s->iov_count; i++) { - iov[i].iov_base = s->iov_cache[i].iov_base; - iov[i].iov_len = s->iov_cache[i].iov_len; + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); + uint64_t src_addr = 0; + + src_addr = deposit64(src_addr, 0, 32, s->regs[R_HASH_SRC]); + if (ahc->has_dma64) { + src_addr = deposit64(src_addr, 32, 32, s->regs[R_HASH_SRC_HI]); } - iov_count = s->iov_count; - s->iov_count = 0; - s->total_req_len = 0; - return iov_count; + + return src_addr; } -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, - bool acc_mode) +static int hash_prepare_direct_iov(AspeedHACEState *s, struct iovec *iov, + bool acc_mode, bool *acc_final_request) { - struct iovec iov[ASPEED_HACE_MAX_SG]; uint32_t total_msg_len; uint32_t pad_offset; - g_autofree uint8_t *digest_buf = NULL; - size_t digest_len = 0; - bool sg_acc_mode_final_request = false; - int i; + uint64_t src; void *haddr; - Error *local_err = NULL; + hwaddr plen; + int iov_idx; + + plen = s->regs[R_HASH_SRC_LEN]; + src = hash_get_source_addr(s); + trace_aspeed_hace_hash_addr("src", src); + haddr = address_space_map(&s->dram_as, src, &plen, false, + MEMTXATTRS_UNSPECIFIED); + if (haddr == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unable to map address, addr=0x%" HWADDR_PRIx + " ,plen=0x%" HWADDR_PRIx "\n", + __func__, src, plen); + return -1; + } - 
if (acc_mode && s->hash_ctx == NULL) { - s->hash_ctx = qcrypto_hash_new(algo, &local_err); - if (s->hash_ctx == NULL) { - qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash failed : %s", - error_get_pretty(local_err)); - error_free(local_err); - return; + iov[0].iov_base = haddr; + iov_idx = 1; + + if (acc_mode) { + s->total_req_len += plen; + + if (has_padding(s, &iov[0], plen, &total_msg_len, + &pad_offset)) { + /* Padding being present indicates the final request */ + *acc_final_request = true; + iov[0].iov_len = pad_offset; + } else { + iov[0].iov_len = plen; } + } else { + iov[0].iov_len = plen; } - if (sg_mode) { - uint32_t len = 0; - - for (i = 0; !(len & SG_LIST_LEN_LAST); i++) { - uint32_t addr, src; - hwaddr plen; + return iov_idx; +} - if (i == ASPEED_HACE_MAX_SG) { - qemu_log_mask(LOG_GUEST_ERROR, - "aspeed_hace: guest failed to set end of sg list marker\n"); - break; - } +static int hash_prepare_sg_iov(AspeedHACEState *s, struct iovec *iov, + bool acc_mode, bool *acc_final_request) +{ + uint32_t total_msg_len; + uint32_t pad_offset; + uint32_t len = 0; + uint32_t sg_addr; + uint64_t src; + int iov_idx; + hwaddr plen; + void *haddr; - src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE); + src = hash_get_source_addr(s); + for (iov_idx = 0; !(len & SG_LIST_LEN_LAST); iov_idx++) { + if (iov_idx == ASPEED_HACE_MAX_SG) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to set end of sg list marker\n", + __func__); + return -1; + } - len = address_space_ldl_le(&s->dram_as, src, + len = address_space_ldl_le(&s->dram_as, src, + MEMTXATTRS_UNSPECIFIED, NULL); + sg_addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE, MEMTXATTRS_UNSPECIFIED, NULL); + sg_addr &= SG_LIST_ADDR_MASK; + trace_aspeed_hace_hash_sg(iov_idx, src, sg_addr, len); + /* + * To maintain compatibility with older SoCs such as the AST2600, + * the AST2700 HW automatically set bit 34 of the 64-bit sg_addr. + * As a result, the firmware only needs to provide a 32-bit sg_addr + * containing bits [31:0]. This is sufficient for the AST2700, as + * it uses a DRAM offset rather than a DRAM address. 
+ */ + plen = len & SG_LIST_LEN_MASK; + haddr = address_space_map(&s->dram_as, sg_addr, &plen, false, + MEMTXATTRS_UNSPECIFIED); - addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE, - MEMTXATTRS_UNSPECIFIED, NULL); - addr &= SG_LIST_ADDR_MASK; + if (haddr == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unable to map address, sg_addr=0x%x, " + "plen=0x%" HWADDR_PRIx "\n", + __func__, sg_addr, plen); + return -1; + } - plen = len & SG_LIST_LEN_MASK; - haddr = address_space_map(&s->dram_as, addr, &plen, false, - MEMTXATTRS_UNSPECIFIED); - if (haddr == NULL) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: qcrypto failed\n", __func__); - return; - } - iov[i].iov_base = haddr; - if (acc_mode) { - s->total_req_len += plen; - - if (has_padding(s, &iov[i], plen, &total_msg_len, - &pad_offset)) { - /* Padding being present indicates the final request */ - sg_acc_mode_final_request = true; - iov[i].iov_len = pad_offset; - } else { - iov[i].iov_len = plen; - } + src += SG_LIST_ENTRY_SIZE; + + iov[iov_idx].iov_base = haddr; + if (acc_mode) { + s->total_req_len += plen; + + if (has_padding(s, &iov[iov_idx], plen, &total_msg_len, + &pad_offset)) { + /* Padding being present indicates the final request */ + *acc_final_request = true; + iov[iov_idx].iov_len = pad_offset; } else { - iov[i].iov_len = plen; + iov[iov_idx].iov_len = plen; } + } else { + iov[iov_idx].iov_len = plen; } - } else { - hwaddr len = s->regs[R_HASH_SRC_LEN]; + } - haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC], - &len, false, MEMTXATTRS_UNSPECIFIED); - if (haddr == NULL) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__); + return iov_idx; +} + +static uint64_t hash_get_digest_addr(AspeedHACEState *s) +{ + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); + uint64_t digest_addr = 0; + + digest_addr = deposit64(digest_addr, 0, 32, s->regs[R_HASH_DIGEST]); + if (ahc->has_dma64) { + digest_addr = deposit64(digest_addr, 32, 32, s->regs[R_HASH_DIGEST_HI]); + } + + return digest_addr; +} + +static void hash_write_digest_and_unmap_iov(AspeedHACEState *s, + struct iovec *iov, + int iov_idx, + uint8_t *digest_buf, + size_t digest_len) +{ + uint64_t digest_addr = 0; + + digest_addr = hash_get_digest_addr(s); + trace_aspeed_hace_hash_addr("digest", digest_addr); + if (address_space_write(&s->dram_as, digest_addr, + MEMTXATTRS_UNSPECIFIED, + digest_buf, digest_len)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to write digest to 0x%" HWADDR_PRIx "\n", + __func__, digest_addr); + } + + if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) { + hace_hexdump("digest", (char *)digest_buf, digest_len); + } + + for (; iov_idx > 0; iov_idx--) { + address_space_unmap(&s->dram_as, iov[iov_idx - 1].iov_base, + iov[iov_idx - 1].iov_len, false, + iov[iov_idx - 1].iov_len); + } +} + +static void hash_execute_non_acc_mode(AspeedHACEState *s, int algo, + struct iovec *iov, int iov_idx) +{ + g_autofree uint8_t *digest_buf = NULL; + Error *local_err = NULL; + size_t digest_len = 0; + + if (qcrypto_hash_bytesv(algo, iov, iov_idx, &digest_buf, + &digest_len, &local_err) < 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: qcrypto hash bytesv failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); + return; + } + + hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len); +} + +static void hash_execute_acc_mode(AspeedHACEState *s, int algo, + struct iovec *iov, int iov_idx, + bool final_request) +{ + g_autofree uint8_t *digest_buf = NULL; + Error *local_err = NULL; + size_t 
digest_len = 0; + + trace_aspeed_hace_hash_execute_acc_mode(final_request); + + if (s->hash_ctx == NULL) { + s->hash_ctx = qcrypto_hash_new(algo, &local_err); + if (s->hash_ctx == NULL) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash new failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); return; } - iov[0].iov_base = haddr; - iov[0].iov_len = len; - i = 1; - - if (s->iov_count) { - /* - * In aspeed sdk kernel driver, sg_mode is disabled in hash_final(). - * Thus if we received a request with sg_mode disabled, it is - * required to check whether cache is empty. If no, we should - * combine cached iov and the current iov. - */ - s->total_req_len += len; - if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) { - i = reconstruct_iov(s, iov, 0, &pad_offset); - } - } } - if (acc_mode) { - if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) { - qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed : %s", - error_get_pretty(local_err)); + if (qcrypto_hash_updatev(s->hash_ctx, iov, iov_idx, &local_err) < 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto hash updatev failed : %s", + __func__, error_get_pretty(local_err)); + error_free(local_err); + return; + } + + if (final_request) { + if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf, + &digest_len, &local_err)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: qcrypto hash finalize bytes failed : %s", + __func__, error_get_pretty(local_err)); error_free(local_err); - return; + local_err = NULL; } - if (sg_acc_mode_final_request) { - if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf, - &digest_len, &local_err)) { - qemu_log_mask(LOG_GUEST_ERROR, - "qcrypto hash finalize failed : %s", - error_get_pretty(local_err)); - error_free(local_err); - local_err = NULL; - } + qcrypto_hash_free(s->hash_ctx); - qcrypto_hash_free(s->hash_ctx); + s->hash_ctx = NULL; + s->total_req_len = 0; + } - s->hash_ctx = NULL; - s->iov_count = 0; - s->total_req_len = 0; - } - } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, - &digest_len, &local_err) < 0) { - qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed : %s", - error_get_pretty(local_err)); - error_free(local_err); - return; + hash_write_digest_and_unmap_iov(s, iov, iov_idx, digest_buf, digest_len); +} + +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode, + bool acc_mode) +{ + struct iovec iov[ASPEED_HACE_MAX_SG]; + bool acc_final_request = false; + int iov_idx = -1; + + /* Prepares the iov for hashing operations based on the selected mode */ + if (sg_mode) { + iov_idx = hash_prepare_sg_iov(s, iov, acc_mode, &acc_final_request); + } else { + iov_idx = hash_prepare_direct_iov(s, iov, acc_mode, + &acc_final_request); } - if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST], - MEMTXATTRS_UNSPECIFIED, - digest_buf, digest_len)) { + if (iov_idx <= 0) { qemu_log_mask(LOG_GUEST_ERROR, - "aspeed_hace: address space write failed\n"); + "%s: Failed to prepare iov\n", __func__); + return; } - for (; i > 0; i--) { - address_space_unmap(&s->dram_as, iov[i - 1].iov_base, - iov[i - 1].iov_len, false, - iov[i - 1].iov_len); + if (trace_event_get_state_backends(TRACE_ASPEED_HACE_HEXDUMP)) { + hace_iov_hexdump("plaintext", iov, iov_idx); } - /* - * Set status bits to indicate completion. Testing shows hardware sets - * these irrespective of HASH_IRQ_EN. 
- */ - s->regs[R_STATUS] |= HASH_IRQ; + /* Executes the hash operation */ + if (acc_mode) { + hash_execute_acc_mode(s, algo, iov, iov_idx, acc_final_request); + } else { + hash_execute_non_acc_mode(s, algo, iov, iov_idx); + } } static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size) @@ -315,12 +455,7 @@ static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size) addr >>= 2; - if (addr >= ASPEED_HACE_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n", - __func__, addr << 2); - return 0; - } + trace_aspeed_hace_read(addr << 2, s->regs[addr]); return s->regs[addr]; } @@ -333,12 +468,7 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, addr >>= 2; - if (addr >= ASPEED_HACE_NR_REGS) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", - __func__, addr << 2); - return; - } + trace_aspeed_hace_write(addr << 2, data); switch (addr) { case R_STATUS: @@ -362,7 +492,7 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, case R_HASH_SRC: data &= ahc->src_mask; break; - case R_HASH_DEST: + case R_HASH_DIGEST: data &= ahc->dest_mask; break; case R_HASH_KEY_BUFF: @@ -390,10 +520,16 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid hash algorithm selection 0x%"PRIx64"\n", __func__, data & ahc->hash_mask); - break; + } else { + do_hash_operation(s, algo, data & HASH_SG_EN, + ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM)); } - do_hash_operation(s, algo, data & HASH_SG_EN, - ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM)); + + /* + * Set status bits to indicate completion. Testing shows hardware sets + * these irrespective of HASH_IRQ_EN. 
+ */ + s->regs[R_STATUS] |= HASH_IRQ; if (data & HASH_IRQ_EN) { qemu_irq_raise(s->irq); @@ -410,6 +546,15 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data, } } break; + case R_HASH_SRC_HI: + data &= ahc->src_hi_mask; + break; + case R_HASH_DIGEST_HI: + data &= ahc->dest_hi_mask; + break; + case R_HASH_KEY_BUFF_HI: + data &= ahc->key_hi_mask; + break; default: break; } @@ -430,14 +575,14 @@ static const MemoryRegionOps aspeed_hace_ops = { static void aspeed_hace_reset(DeviceState *dev) { struct AspeedHACEState *s = ASPEED_HACE(dev); + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); if (s->hash_ctx != NULL) { qcrypto_hash_free(s->hash_ctx); s->hash_ctx = NULL; } - memset(s->regs, 0, sizeof(s->regs)); - s->iov_count = 0; + memset(s->regs, 0, ahc->nr_regs << 2); s->total_req_len = 0; } @@ -445,11 +590,13 @@ static void aspeed_hace_realize(DeviceState *dev, Error **errp) { AspeedHACEState *s = ASPEED_HACE(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s); sysbus_init_irq(sbd, &s->irq); + s->regs = g_new(uint32_t, ahc->nr_regs); memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s, - TYPE_ASPEED_HACE, 0x1000); + TYPE_ASPEED_HACE, ahc->nr_regs << 2); if (!s->dram_mr) { error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set"); @@ -469,21 +616,28 @@ static const Property aspeed_hace_properties[] = { static const VMStateDescription vmstate_aspeed_hace = { .name = TYPE_ASPEED_HACE, - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .fields = (const VMStateField[]) { - VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS), VMSTATE_UINT32(total_req_len, AspeedHACEState), - VMSTATE_UINT32(iov_count, AspeedHACEState), VMSTATE_END_OF_LIST(), } }; +static void aspeed_hace_unrealize(DeviceState *dev) +{ + AspeedHACEState *s = ASPEED_HACE(dev); + + g_free(s->regs); + s->regs = NULL; +} + static void aspeed_hace_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = aspeed_hace_realize; + dc->unrealize = aspeed_hace_unrealize; device_class_set_legacy_reset(dc, aspeed_hace_reset); device_class_set_props(dc, aspeed_hace_properties); dc->vmsd = &vmstate_aspeed_hace; @@ -504,6 +658,7 @@ static void aspeed_ast2400_hace_class_init(ObjectClass *klass, const void *data) dc->desc = "AST2400 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x0FFFFFFF; ahc->dest_mask = 0x0FFFFFF8; ahc->key_mask = 0x0FFFFFC0; @@ -523,6 +678,7 @@ static void aspeed_ast2500_hace_class_init(ObjectClass *klass, const void *data) dc->desc = "AST2500 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x3fffffff; ahc->dest_mask = 0x3ffffff8; ahc->key_mask = 0x3FFFFFC0; @@ -542,6 +698,7 @@ static void aspeed_ast2600_hace_class_init(ObjectClass *klass, const void *data) dc->desc = "AST2600 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x7FFFFFFF; ahc->dest_mask = 0x7FFFFFF8; ahc->key_mask = 0x7FFFFFF8; @@ -561,6 +718,7 @@ static void aspeed_ast1030_hace_class_init(ObjectClass *klass, const void *data) dc->desc = "AST1030 Hash and Crypto Engine"; + ahc->nr_regs = 0x64 >> 2; ahc->src_mask = 0x7FFFFFFF; ahc->dest_mask = 0x7FFFFFF8; ahc->key_mask = 0x7FFFFFF8; @@ -580,17 +738,36 @@ static void aspeed_ast2700_hace_class_init(ObjectClass *klass, const void *data) dc->desc = "AST2700 Hash and Crypto Engine"; + ahc->nr_regs = 0x9C >> 2; ahc->src_mask = 0x7FFFFFFF; ahc->dest_mask = 0x7FFFFFF8; ahc->key_mask = 
0x7FFFFFF8; ahc->hash_mask = 0x00147FFF; /* + * The AST2700 supports a maximum DRAM size of 8 GB, with a DRAM + * addressable range from 0x0_0000_0000 to 0x1_FFFF_FFFF. Since this range + * fits within 34 bits, only bits [33:0] are needed to store the DRAM + * offset. To optimize address storage, the high physical address bits + * [1:0] of the source, digest and key buffer addresses are stored as + * dram_offset bits [33:32]. + * + * This approach eliminates the need to reduce the high part of the DRAM + * physical address for DMA operations. Previously, this was calculated as + * (high physical address bits [7:0] - 4), since the DRAM start address is + * 0x4_00000000, making the high part address [7:0] - 4. + */ + ahc->src_hi_mask = 0x00000003; + ahc->dest_hi_mask = 0x00000003; + ahc->key_hi_mask = 0x00000003; + + /* * Currently, it does not support the CRYPT command. Instead, it only * sends an interrupt to notify the firmware that the crypt command * has completed. It is a temporary workaround. */ ahc->raise_crypt_interrupt_workaround = true; + ahc->has_dma64 = true; } static const TypeInfo aspeed_ast2700_hace_info = { diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 4383808..e3f64c0 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -302,6 +302,14 @@ aspeed_peci_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" aspeed_peci_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 aspeed_peci_raise_interrupt(uint32_t ctrl, uint32_t status) "ctrl 0x%" PRIx32 " status 0x%" PRIx32 +# aspeed_hace.c +aspeed_hace_read(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_hace_write(uint64_t offset, uint64_t data) "offset 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_hace_hash_sg(int index, uint64_t list_addr, uint64_t buf_addr, uint32_t len) "%d: list_addr 0x%" PRIx64 " buf_addr 0x%" PRIx64 " len 0x%" PRIx32 +aspeed_hace_hash_addr(const char *s, uint64_t addr) "%s: 0x%" PRIx64 +aspeed_hace_hash_execute_acc_mode(bool final_request) "final request: %d" +aspeed_hace_hexdump(const char *desc, uint32_t offset, char *s) "%s: 0x%08x: %s" + # bcm2835_property.c bcm2835_mbox_property(uint32_t tag, uint32_t bufsize, size_t resplen) "mbox property tag:0x%08x in_sz:%u out_sz:%zu" diff --git a/hw/s390x/cpu-topology.c b/hw/s390x/cpu-topology.c index 7d4e1f5..b513f89 100644 --- a/hw/s390x/cpu-topology.c +++ b/hw/s390x/cpu-topology.c @@ -23,8 +23,8 @@ #include "target/s390x/cpu.h" #include "hw/s390x/s390-virtio-ccw.h" #include "hw/s390x/cpu-topology.h" -#include "qapi/qapi-commands-machine-target.h" -#include "qapi/qapi-events-machine-target.h" +#include "qapi/qapi-commands-machine-s390x.h" +#include "qapi/qapi-events-machine-s390x.h" /* * s390_topology is used to keep the topology information. 
diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index aedb62b..8eeecfd 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -17,7 +17,6 @@ #include "hw/s390x/storage-keys.h" #include "qapi/error.h" #include "qapi/qapi-commands-machine.h" -#include "qapi/qapi-commands-misc-target.h" #include "qobject/qdict.h" #include "qemu/error-report.h" #include "system/memory_mapping.h" diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index cb4af1b..b4782c6 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -74,7 +74,7 @@ struct SCSIDiskClass { */ DMAIOFunc *dma_readv; DMAIOFunc *dma_writev; - bool (*need_fua_emulation)(SCSICommand *cmd); + bool (*need_fua)(SCSICommand *cmd); void (*update_sense)(SCSIRequest *r); }; @@ -85,7 +85,7 @@ typedef struct SCSIDiskReq { uint32_t sector_count; uint32_t buflen; bool started; - bool need_fua_emulation; + bool need_fua; struct iovec iov; QEMUIOVector qiov; BlockAcctCookie acct; @@ -389,24 +389,6 @@ static bool scsi_is_cmd_fua(SCSICommand *cmd) } } -static void scsi_write_do_fua(SCSIDiskReq *r) -{ - SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - - assert(r->req.aiocb == NULL); - assert(!r->req.io_canceled); - - if (r->need_fua_emulation) { - block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, - BLOCK_ACCT_FLUSH); - r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); - return; - } - - scsi_req_complete(&r->req, GOOD); - scsi_req_unref(&r->req); -} - static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) { assert(r->req.aiocb == NULL); @@ -416,12 +398,7 @@ static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) r->sector += r->sector_count; r->sector_count = 0; - if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { - scsi_write_do_fua(r); - return; - } else { - scsi_req_complete(&r->req, GOOD); - } + scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); @@ -564,7 +541,7 @@ static void scsi_read_data(SCSIRequest *req) first = !r->started; r->started = true; - if (first && r->need_fua_emulation) { + if (first && r->need_fua) { block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, BLOCK_ACCT_FLUSH); r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); @@ -589,8 +566,7 @@ static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) r->sector += n; r->sector_count -= n; if (r->sector_count == 0) { - scsi_write_do_fua(r); - return; + scsi_req_complete(&r->req, GOOD); } else { scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); @@ -623,6 +599,7 @@ static void scsi_write_data(SCSIRequest *req) SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); + BlockCompletionFunc *cb; /* No data transfer may already be in progress */ assert(r->req.aiocb == NULL); @@ -648,11 +625,10 @@ static void scsi_write_data(SCSIRequest *req) if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || r->req.cmd.buf[0] == VERIFY_16) { - if (r->req.sg) { - scsi_dma_complete_noio(r, 0); - } else { - scsi_write_complete_noio(r, 0); - } + block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, + BLOCK_ACCT_FLUSH); + cb = r->req.sg ? 
scsi_dma_complete : scsi_write_complete; + r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, cb, r); return; } @@ -2391,7 +2367,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); return 0; } - r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); + r->need_fua = sdc->need_fua(&r->req.cmd); if (r->sector_count == 0) { scsi_req_complete(&r->req, GOOD); } @@ -3137,7 +3113,8 @@ BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, { SCSIDiskReq *r = opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); + int flags = r->need_fua ? BDRV_REQ_FUA : 0; + return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, flags, cb, cb_opaque); } static char *scsi_property_get_loadparm(Object *obj, Error **errp) @@ -3186,7 +3163,7 @@ static void scsi_disk_base_class_initfn(ObjectClass *klass, const void *data) device_class_set_legacy_reset(dc, scsi_disk_reset); sdc->dma_readv = scsi_dma_readv; sdc->dma_writev = scsi_dma_writev; - sdc->need_fua_emulation = scsi_is_cmd_fua; + sdc->need_fua = scsi_is_cmd_fua; } static const TypeInfo scsi_disk_base_info = { @@ -3215,7 +3192,7 @@ static const Property scsi_hd_properties[] = { DEFINE_PROP_BIT("removable", SCSIDiskState, features, SCSI_DISK_F_REMOVABLE, false), DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, - SCSI_DISK_F_DPOFUA, false), + SCSI_DISK_F_DPOFUA, true), DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), @@ -3338,7 +3315,7 @@ static void scsi_block_class_initfn(ObjectClass *klass, const void *data) sdc->dma_readv = scsi_block_dma_readv; sdc->dma_writev = scsi_block_dma_writev; sdc->update_sense = scsi_block_update_sense; - sdc->need_fua_emulation = scsi_block_no_fua; + sdc->need_fua = scsi_block_no_fua; dc->desc = "SCSI block device passthrough"; device_class_set_props(dc, scsi_block_properties); dc->vmsd = &vmstate_scsi_disk_state; |
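
For context on the hw/core/machine.c hunk above: the new { "scsi-hd", "dpofua", "off" } entry in hw_compat_10_0 only takes effect on machine types that register the 10.0 compat array, so the flipped dpofua default in scsi-disk.c applies to new machine types while older ones keep the previous behaviour. Below is a minimal sketch of that registration pattern, not part of this series; the foo_machine_* board and function names are hypothetical, while compat_props_add(), MachineClass::compat_props, hw_compat_10_0 and hw_compat_10_0_len are the existing QEMU interfaces touched here.

/*
 * Sketch only (hypothetical board): how a versioned machine type
 * re-applies the 10.0 device defaults so that "scsi-hd" keeps
 * dpofua=off on pre-10.1 machine types.
 */
#include "qemu/osdep.h"
#include "hw/boards.h"      /* MachineClass, compat_props_add(), hw_compat_10_0 */

static void foo_machine_10_1_class_init(ObjectClass *oc, const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->desc = "hypothetical board (10.1 machine type)";
    /* Current machine type: no compat entries, scsi-hd dpofua defaults to on. */
}

static void foo_machine_10_0_class_init(ObjectClass *oc, const void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    foo_machine_10_1_class_init(oc, data);
    mc->desc = "hypothetical board (10.0 machine type)";
    /* Restore the 10.0-era defaults, including scsi-hd dpofua=off. */
    compat_props_add(mc->compat_props, hw_compat_10_0, hw_compat_10_0_len);
}

Machine types introduced at or after 10.1 simply omit the compat_props_add() call and pick up the new default.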