author     Richard Henderson <richard.henderson@linaro.org>   2024-07-26 15:10:45 +1000
committer  Richard Henderson <richard.henderson@linaro.org>   2024-07-26 15:10:45 +1000
commit     93b799fafd9170da3a79a533ea6f73a18de82e22 (patch)
tree       9fe0972bbff5c2ec6e899ae4dc76cfcc349575f9 /hw
parent     8e466dd092469e5ab0f355775c571ea96f3a8e23 (diff)
parent     d741ecffd2ca260ce7875a4596f17736b5ccb7c3 (diff)
Merge tag 'pull-ppc-for-9.1-2-20240726-1' of https://gitlab.com/npiggin/qemu into staging

fixes

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEETkN92lZhb0MpsKeVZ7MCdqhiHK4FAmai5TsACgkQZ7MCdqhi
# HK4rgA//eh0ax3JnBGma1rVEDL5n5cdEYV+ATFYGc529CUZFUar3IMqSw3in8bJy
# uvQ6Cr/7IuusNEtoiYtdN1yNasqsm3fZB/hZ/Ekz32TsbpBRdkJW3ucavAu2rGM/
# EKRo7Y8gciy/Mj9y2JlIZqsDqYe+gribfGQvIg27DX+caAW/lKQdAdt4oJMTSdmr
# XR8JjtMdhUazKrI+bc/4EG6tIQyUdp+S1/z1q6Wthqt58dNRElTjkD9op4AsUWMu
# CE4a8ALCZoj3P3m+xf7xi7fT2JC2xgmNRCi3KbbhVEHdbFB6ViNYNuEYRS6GmpdC
# C6J/ZR6QXs6KB1KO7EyB+vsuxLX4Eb8aeCFxwMlzJ9Fo4g8JudABXOFzYTKX1xBn
# DUIGX91YACV43M2MvP/KuEU4zWpREO+U8MbQs/6s6fYsnCO2eKVJt/0Aaf1hmk37
# gY5Ak2DRx5TBvxlFy87zgHxHWTh/dGZodpN3IvCIDzVLnHGFlfluJbFRaoZSOecb
# 1vxDHORjIruLcAxNVEGkJ/6MxOrnjjoUzSPUQcbgJ5BpFZOdeGLiMAULu/HBLBd9
# 7dvVw+PeNEPJttYumljOD6nYc/jENhLQsvkc3++bwGNc/rpi4YngtB4jhT1HV2Cl
# oLool2ooKZgV4qx6IzeYo9feElvWVNK5XPzqDpSDlt9MaI+yTYM=
# =FxPm
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 26 Jul 2024 09:52:27 AM AEST
# gpg: using RSA key 4E437DDA56616F4329B0A79567B30276A8621CAE
# gpg: Good signature from "Nicholas Piggin <npiggin@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 4E43 7DDA 5661 6F43 29B0 A795 67B3 0276 A862 1CAE

* tag 'pull-ppc-for-9.1-2-20240726-1' of https://gitlab.com/npiggin/qemu: (96 commits)
  target/ppc: Remove includes from mmu-book3s-v3.h
  target/ppc/mmu-radix64: Remove externally unused parts from header
  target/ppc: Unexport some functions from mmu-book3s-v3.h
  target/ppc/mmu-hash32.c: Move get_pteg_offset32() to the header
  target/ppc/mmu-hash32.c: Inline and remove ppc_hash32_pte_raddr()
  target/ppc/mmu_common.c: Remove mmu_ctx_t
  target/ppc/mmu_common.c: Stop using ctx in get_bat_6xx_tlb()
  target/ppc: Remove bat_size_prot()
  target/ppc/mmu_common.c: Use defines instead of numeric constants
  target/ppc/mmu_common.c: Rename function parameter
  target/ppc/mmu_common.c: Stop using ctx in ppc6xx_tlb_check()
  target/ppc/mmu_common.c: Remove key field from mmu_ctx_t
  target/ppc/mmu_common.c: Init variable in function that relies on it
  target/ppc/mmu-hash32.c: Inline and remove ppc_hash32_pte_prot()
  target/ppc: Add function to get protection key for hash32 MMU
  target/ppc/mmu_common.c: Remove ptem field from mmu_ctx_t
  target/ppc/mmu_common.c: Inline and remove ppc6xx_tlb_pte_check()
  target/ppc/mmu_common.c: Simplify a switch statement
  target/ppc/mmu_common.c: Remove single use local variable
  target/ppc/mmu_common.c: Convert local variable to bool
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--  hw/block/m25p80.c         |    3
-rw-r--r--  hw/intc/pnv_xive2.c       |  566
-rw-r--r--  hw/intc/pnv_xive2_regs.h  |  108
-rw-r--r--  hw/intc/xive.c            |   12
-rw-r--r--  hw/intc/xive2.c           |   33
-rw-r--r--  hw/ppc/Kconfig            |    3
-rw-r--r--  hw/ppc/meson.build        |    1
-rw-r--r--  hw/ppc/pnv.c              |  389
-rw-r--r--  hw/ppc/pnv_adu.c          |  206
-rw-r--r--  hw/ppc/pnv_chiptod.c      |    7
-rw-r--r--  hw/ppc/pnv_core.c         |  127
-rw-r--r--  hw/ppc/pnv_lpc.c          |  162
-rw-r--r--  hw/ppc/pnv_xscom.c        |    9
-rw-r--r--  hw/ppc/spapr.c            |    1
-rw-r--r--  hw/ppc/spapr_caps.c       |    1
-rw-r--r--  hw/ppc/spapr_cpu_core.c   |   16
-rw-r--r--  hw/ppc/spapr_vhyp_mmu.c   |   21
-rw-r--r--  hw/ppc/spapr_vof.c        |    2
-rw-r--r--  hw/ppc/trace-events       |    4
-rw-r--r--  hw/ppc/vof.c              |    2
-rw-r--r--  hw/ssi/Kconfig            |    4
-rw-r--r--  hw/ssi/meson.build        |    1
-rw-r--r--  hw/ssi/pnv_spi.c          | 1268
-rw-r--r--  hw/ssi/trace-events       |   21
24 files changed, 2730 insertions, 237 deletions
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 9e99107..0b94af3 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -357,6 +357,9 @@ static const FlashPartInfo known_devices[] = {
.sfdp_read = m25p80_sfdp_w25q512jv },
{ INFO("w25q01jvq", 0xef4021, 0, 64 << 10, 2048, ER_4K),
.sfdp_read = m25p80_sfdp_w25q01jvq },
+
+ /* Microchip */
+ { INFO("25csm04", 0x29cc00, 0x100, 64 << 10, 8, 0) },
};
typedef enum {
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index 2fb4fa2..7860910 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -25,6 +25,7 @@
#include "hw/ppc/ppc.h"
#include "hw/qdev-properties.h"
#include "sysemu/reset.h"
+#include "sysemu/qtest.h"
#include <libfdt.h>
@@ -32,6 +33,16 @@
#undef XIVE2_DEBUG
+/* XIVE Sync or Flush Notification Block */
+typedef struct XiveSfnBlock {
+ uint8_t bytes[32];
+} XiveSfnBlock;
+
+/* XIVE Thread Sync or Flush Notification Area */
+typedef struct XiveThreadNA {
+ XiveSfnBlock topo[16];
+} XiveThreadNA;
+
/*
* Virtual structures table (VST)
*/
@@ -45,16 +56,16 @@ typedef struct XiveVstInfo {
static const XiveVstInfo vst_infos[] = {
- [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
- [VST_ESB] = { "ESB", 1, 16 },
- [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
+ [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
+ [VST_ESB] = { "ESB", 1, 16 },
+ [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
- [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
- [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
- [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
+ [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
+ [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
+ [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
- [VST_IC] = { "IC", 1 /* ? */ , 16 }, /* Topology # */
- [VST_SYNC] = { "SYNC", 1 /* ? */ , 16 }, /* Topology # */
+ [VST_IC] = { "IC", 1, /* ? */ 16 }, /* Topology # */
+ [VST_SYNC] = { "SYNC", sizeof(XiveThreadNA), 16 }, /* Topology # */
/*
* This table contains the backing store pages for the interrupt
@@ -206,6 +217,20 @@ static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
+static uint8_t pnv_xive2_nvc_table_compress_shift(PnvXive2 *xive)
+{
+ uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS,
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
+ return shift > 8 ? 0 : shift;
+}
+
+static uint8_t pnv_xive2_nvg_table_compress_shift(PnvXive2 *xive)
+{
+ uint8_t shift = GETFIELD(PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS,
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
+ return shift > 8 ? 0 : shift;
+}
+
static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
uint32_t idx)
{
@@ -219,6 +244,11 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
}
vsd = xive->vsds[type][blk];
+ if (vsd == 0) {
+ xive2_error(xive, "VST: vsd == 0 block id %d for VST %s %d !?",
+ blk, info->name, idx);
+ return 0;
+ }
/* Remote VST access */
if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
@@ -227,6 +257,12 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
}
+ if (type == VST_NVG) {
+ idx >>= pnv_xive2_nvg_table_compress_shift(xive);
+ } else if (type == VST_NVC) {
+ idx >>= pnv_xive2_nvc_table_compress_shift(xive);
+ }
+
if (VSD_INDIRECT & vsd) {
return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
}
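
The compression shift applied just above scales the NVG/NVC index down by a power of two before the backing-store lookup. A minimal sketch of the arithmetic, assuming a shift value of 2 (in the model the shift is read from PC_NXC_PROC_CONFIG):

    /* Sketch only: remapping an NVG index under an assumed compress shift. */
    uint8_t shift = 2;                /* assumed NVG_TABLE_COMPRESS value */
    uint32_t idx = 0x123;
    uint32_t vst_idx = idx >> shift;  /* 0x48: index used for the VST lookup */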
@@ -329,40 +365,115 @@ static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
word_number);
}
-static int pnv_xive2_end_update(PnvXive2 *xive)
+static inline int pnv_xive2_get_current_pir(PnvXive2 *xive)
{
- uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
- int i;
+ if (!qtest_enabled()) {
+ PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
+ return ppc_cpu_pir(cpu);
+ }
+ return 0;
+}
+
+/*
+ * After SW injects a Queue Sync or Cache Flush operation, HW will notify
+ * SW of the completion of the operation by writing a byte of all 1's (0xff)
+ * to a specific memory location. The memory location is calculated by first
+ * looking up a base address in the SYNC VSD using the Topology ID of the
+ * originating thread as the "block" number. This points to a
+ * 64k block of memory that is further divided into 128 512 byte chunks of
+ * memory, which is indexed by the thread id of the requesting thread.
+ * Finally, this 512 byte chunk of memory is divided into 16 32 byte
+ * chunks which are indexed by the topology id of the targeted IC's chip.
+ * The values below are the offsets into that 32 byte chunk of memory for
+ * each type of cache flush or queue sync operation.
+ */
+#define PNV_XIVE2_QUEUE_IPI 0x00
+#define PNV_XIVE2_QUEUE_HW 0x01
+#define PNV_XIVE2_QUEUE_NXC 0x02
+#define PNV_XIVE2_QUEUE_INT 0x03
+#define PNV_XIVE2_QUEUE_OS 0x04
+#define PNV_XIVE2_QUEUE_POOL 0x05
+#define PNV_XIVE2_QUEUE_HARD 0x06
+#define PNV_XIVE2_CACHE_ENDC 0x08
+#define PNV_XIVE2_CACHE_ESBC 0x09
+#define PNV_XIVE2_CACHE_EASC 0x0a
+#define PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO 0x10
+#define PNV_XIVE2_QUEUE_NXC_LD_LCL_CO 0x11
+#define PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI 0x12
+#define PNV_XIVE2_QUEUE_NXC_ST_LCL_CI 0x13
+#define PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI 0x14
+#define PNV_XIVE2_QUEUE_NXC_ST_RMT_CI 0x15
+#define PNV_XIVE2_CACHE_NXC 0x18
+
+static int pnv_xive2_inject_notify(PnvXive2 *xive, int type)
+{
+ uint64_t addr;
+ int pir = pnv_xive2_get_current_pir(xive);
+ int thread_nr = PNV10_PIR2THREAD(pir);
+ int thread_topo_id = PNV10_PIR2CHIP(pir);
+ int ic_topo_id = xive->chip->chip_id;
+ uint64_t offset = ic_topo_id * sizeof(XiveSfnBlock);
+ uint8_t byte = 0xff;
+ MemTxResult result;
+
+ /* Retrieve the address of requesting thread's notification area */
+ addr = pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr);
+
+ if (!addr) {
+ xive2_error(xive, "VST: no SYNC entry %x/%x !?",
+ thread_topo_id, thread_nr);
+ return -1;
+ }
+
+ address_space_stb(&address_space_memory, addr + offset + type, byte,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ assert(result == MEMTX_OK);
+
+ return 0;
+}
+
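
The comment above reduces to simple address arithmetic once the SYNC VSD lookup has resolved the requesting thread's 512-byte notification area. A sketch, assuming sync_area is what pnv_xive2_vst_addr(xive, VST_SYNC, thread_topo_id, thread_nr) returns:

    /* Sketch only: completion-byte address per the layout described above. */
    static uint64_t sfn_byte_addr(uint64_t sync_area, int ic_topo_id, int type)
    {
        /* one XiveSfnBlock (32 bytes) per IC topology ID, plus the
         * per-operation offset (PNV_XIVE2_QUEUE_* / PNV_XIVE2_CACHE_*) */
        return sync_area + ic_topo_id * sizeof(XiveSfnBlock) + type;
    }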
+static int pnv_xive2_end_update(PnvXive2 *xive, uint8_t watch_engine)
+{
+ uint8_t blk;
+ uint32_t idx;
+ int i, spec_reg, data_reg;
uint64_t endc_watch[4];
+ assert(watch_engine < ARRAY_SIZE(endc_watch));
+
+ spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
+ idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
+
for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
- endc_watch[i] =
- cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
+ endc_watch[i] = cpu_to_be64(xive->vc_regs[data_reg + i]);
}
return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
XIVE_VST_WORD_ALL);
}
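
Each cache-watch engine occupies a 0x40-byte stripe of register space, so the engine number simply scales the WATCH0 offsets. A quick check against the register definitions (illustrative values):

    int watch_engine = 2;
    int spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
    /* byte offset 0x500 + 2 * 0x40 = 0x580, i.e. VC_ENDC_WATCH2_SPEC */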
-static void pnv_xive2_end_cache_load(PnvXive2 *xive)
+static void pnv_xive2_end_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
- uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
- xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
+ uint8_t blk;
+ uint32_t idx;
uint64_t endc_watch[4] = { 0 };
- int i;
+ int i, spec_reg, data_reg;
+
+ assert(watch_engine < ARRAY_SIZE(endc_watch));
+
+ spec_reg = (VC_ENDC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (VC_ENDC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID, xive->vc_regs[spec_reg]);
+ idx = GETFIELD(VC_ENDC_WATCH_INDEX, xive->vc_regs[spec_reg]);
if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
}
for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
- xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
- be64_to_cpu(endc_watch[i]);
+ xive->vc_regs[data_reg + i] = be64_to_cpu(endc_watch[i]);
}
}
@@ -379,40 +490,75 @@ static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
word_number);
}
-static int pnv_xive2_nvp_update(PnvXive2 *xive)
+static int pnv_xive2_nxc_to_table_type(uint8_t nxc_type, uint32_t *table_type)
{
- uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
- int i;
+ switch (nxc_type) {
+ case PC_NXC_WATCH_NXC_NVP:
+ *table_type = VST_NVP;
+ break;
+ case PC_NXC_WATCH_NXC_NVG:
+ *table_type = VST_NVG;
+ break;
+ case PC_NXC_WATCH_NXC_NVC:
+ *table_type = VST_NVC;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: invalid table type for nxc operation\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int pnv_xive2_nxc_update(PnvXive2 *xive, uint8_t watch_engine)
+{
+ uint8_t blk, nxc_type;
+ uint32_t idx, table_type = -1;
+ int i, spec_reg, data_reg;
uint64_t nxc_watch[4];
+ assert(watch_engine < ARRAY_SIZE(nxc_watch));
+
+ spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
+ blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
+ idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
+
+ assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
+
for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
- nxc_watch[i] =
- cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
+ nxc_watch[i] = cpu_to_be64(xive->pc_regs[data_reg + i]);
}
- return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
+ return pnv_xive2_vst_write(xive, table_type, blk, idx, nxc_watch,
XIVE_VST_WORD_ALL);
}
-static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
+static void pnv_xive2_nxc_cache_load(PnvXive2 *xive, uint8_t watch_engine)
{
- uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
- uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
- xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
+ uint8_t blk, nxc_type;
+ uint32_t idx, table_type = -1;
uint64_t nxc_watch[4] = { 0 };
- int i;
+ int i, spec_reg, data_reg;
+
+ assert(watch_engine < ARRAY_SIZE(nxc_watch));
+
+ spec_reg = (PC_NXC_WATCH0_SPEC + watch_engine * 0x40) >> 3;
+ data_reg = (PC_NXC_WATCH0_DATA0 + watch_engine * 0x40) >> 3;
+ nxc_type = GETFIELD(PC_NXC_WATCH_NXC_TYPE, xive->pc_regs[spec_reg]);
+ blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID, xive->pc_regs[spec_reg]);
+ idx = GETFIELD(PC_NXC_WATCH_INDEX, xive->pc_regs[spec_reg]);
- if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
- xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
+ assert(!pnv_xive2_nxc_to_table_type(nxc_type, &table_type));
+
+ if (pnv_xive2_vst_read(xive, table_type, blk, idx, nxc_watch)) {
+ xive2_error(xive, "VST: no NXC entry %x/%x in %s table!?",
+ blk, idx, vst_infos[table_type].name);
}
for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
- xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
- be64_to_cpu(nxc_watch[i]);
+ xive->pc_regs[data_reg + i] = be64_to_cpu(nxc_watch[i]);
}
}
@@ -581,6 +727,7 @@ static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
case CQ_TAR_NVPG:
case CQ_TAR_ESB:
case CQ_TAR_END:
+ case CQ_TAR_NVC:
xive->tables[tsel][entry] = val;
break;
default:
@@ -641,6 +788,9 @@ static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
* entries provisioned by FW (such as skiboot) and resize the
* ESB window accordingly.
*/
+ if (memory_region_is_mapped(&xsrc->esb_mmio)) {
+ memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
+ }
if (!(VSD_INDIRECT & vsd)) {
memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
* (1ull << xsrc->esb_shift));
@@ -656,6 +806,9 @@ static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
/*
* Backing store pages for the END.
*/
+ if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
+ memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
+ }
if (!(VSD_INDIRECT & vsd)) {
memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
* (1ull << end_xsrc->esb_shift));
@@ -680,13 +833,10 @@ static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
* Both PC and VC sub-engines are configured as each use the Virtual
* Structure Tables
*/
-static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd,
+ uint8_t type, uint8_t blk)
{
uint8_t mode = GETFIELD(VSD_MODE, vsd);
- uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
- xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
- uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
- xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
if (type > VST_ERQ) {
@@ -721,6 +871,16 @@ static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
}
}
+static void pnv_xive2_vc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+{
+ uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
+ xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
+ uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
+ xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
+
+ pnv_xive2_vst_set_data(xive, vsd, type, blk);
+}
+
/*
* MMIO handlers
*/
@@ -964,12 +1124,70 @@ static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
},
};
+static uint8_t pnv_xive2_cache_watch_assign(uint64_t engine_mask,
+ uint64_t *state)
+{
+ uint8_t val = 0xFF;
+ int i;
+
+ for (i = 3; i >= 0; i--) {
+ if (BIT(i) & engine_mask) {
+ if (!(BIT(i) & *state)) {
+ *state |= BIT(i);
+ val = 3 - i;
+ break;
+ }
+ }
+ }
+ return val;
+}
+
+static void pnv_xive2_cache_watch_release(uint64_t *state, uint8_t watch_engine)
+{
+ uint8_t engine_bit = 3 - watch_engine;
+
+ if (*state & BIT(engine_bit)) {
+ *state &= ~BIT(engine_bit);
+ }
+}
+
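
Note the reversed numbering in these helpers: engine ID n maps to bit (3 - n) in both the availability mask and the busy state. For instance, with the reset-time mask of 0b0111 (bit 3, i.e. engine 0, left out) and all engines idle, the first assignment would go like this (illustrative call, not part of the patch):

    uint64_t state = 0;
    uint8_t id = pnv_xive2_cache_watch_assign(0b0111, &state);
    /* id == 1 (bit 2 is the highest eligible bit), state == BIT(2) */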
+static uint8_t pnv_xive2_endc_cache_watch_assign(PnvXive2 *xive)
+{
+ uint64_t engine_mask = GETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN,
+ xive->vc_regs[VC_ENDC_CFG >> 3]);
+ uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
+ uint8_t val;
+
+ /*
+ * We keep track of which engines are currently busy in the
+ * VC_ENDC_WATCH_ASSIGN register directly. When the firmware reads
+ * the register, we don't return its value but the ID of an engine
+ * it can use.
+ * There are 4 engines. 0xFF means no engine is available.
+ */
+ val = pnv_xive2_cache_watch_assign(engine_mask, &state);
+ if (val != 0xFF) {
+ xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
+ }
+ return val;
+}
+
+static void pnv_xive2_endc_cache_watch_release(PnvXive2 *xive,
+ uint8_t watch_engine)
+{
+ uint64_t state = xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3];
+
+ pnv_xive2_cache_watch_release(&state, watch_engine);
+ xive->vc_regs[VC_ENDC_WATCH_ASSIGN >> 3] = state;
+}
+
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint64_t val = 0;
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
@@ -1000,24 +1218,44 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
val = xive->vc_regs[reg];
break;
+ case VC_ENDC_WATCH_ASSIGN:
+ val = pnv_xive2_endc_cache_watch_assign(xive);
+ break;
+
+ case VC_ENDC_CFG:
+ val = xive->vc_regs[reg];
+ break;
+
/*
* END cache updates
*/
case VC_ENDC_WATCH0_SPEC:
+ case VC_ENDC_WATCH1_SPEC:
+ case VC_ENDC_WATCH2_SPEC:
+ case VC_ENDC_WATCH3_SPEC:
+ watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
+ pnv_xive2_endc_cache_watch_release(xive, watch_engine);
val = xive->vc_regs[reg];
break;
case VC_ENDC_WATCH0_DATA0:
+ case VC_ENDC_WATCH1_DATA0:
+ case VC_ENDC_WATCH2_DATA0:
+ case VC_ENDC_WATCH3_DATA0:
/*
* Load DATA registers from cache with data requested by the
* SPEC register
*/
- pnv_xive2_end_cache_load(xive);
+ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
+ pnv_xive2_end_cache_load(xive, watch_engine);
val = xive->vc_regs[reg];
break;
case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
+ case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
+ case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
+ case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
val = xive->vc_regs[reg];
break;
@@ -1063,6 +1301,7 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
@@ -1071,7 +1310,7 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
case VC_VSD_TABLE_ADDR:
break;
case VC_VSD_TABLE_DATA:
- pnv_xive2_vst_set_data(xive, val);
+ pnv_xive2_vc_vst_set_data(xive, val);
break;
/*
@@ -1083,6 +1322,10 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/* ESB update */
break;
+ case VC_ESBC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ESBC);
+ break;
+
case VC_ESBC_CFG:
break;
@@ -1095,19 +1338,36 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/* EAS update */
break;
+ case VC_EASC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_EASC);
+ break;
+
+ case VC_ENDC_CFG:
+ break;
+
/*
* END cache updates
*/
case VC_ENDC_WATCH0_SPEC:
+ case VC_ENDC_WATCH1_SPEC:
+ case VC_ENDC_WATCH2_SPEC:
+ case VC_ENDC_WATCH3_SPEC:
val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
break;
case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
+ case VC_ENDC_WATCH1_DATA1 ... VC_ENDC_WATCH1_DATA3:
+ case VC_ENDC_WATCH2_DATA1 ... VC_ENDC_WATCH2_DATA3:
+ case VC_ENDC_WATCH3_DATA1 ... VC_ENDC_WATCH3_DATA3:
break;
case VC_ENDC_WATCH0_DATA0:
+ case VC_ENDC_WATCH1_DATA0:
+ case VC_ENDC_WATCH2_DATA0:
+ case VC_ENDC_WATCH3_DATA0:
/* writing to DATA0 triggers the cache write */
+ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
xive->vc_regs[reg] = val;
- pnv_xive2_end_update(xive);
+ pnv_xive2_end_update(xive, watch_engine);
break;
@@ -1116,6 +1376,10 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
break;
+ case VC_ENDC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_ENDC);
+ break;
+
/*
* Indirect invalidation
*/
@@ -1157,12 +1421,43 @@ static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
},
};
+static uint8_t pnv_xive2_nxc_cache_watch_assign(PnvXive2 *xive)
+{
+ uint64_t engine_mask = GETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN,
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3]);
+ uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
+ uint8_t val;
+
+ /*
+ * We keep track of which engines are currently busy in the
+ * PC_NXC_WATCH_ASSIGN register directly. When the firmware reads
+ * the register, we don't return its value but the ID of an engine
+ * it can use.
+ * There are 4 engines. 0xFF means no engine is available.
+ */
+ val = pnv_xive2_cache_watch_assign(engine_mask, &state);
+ if (val != 0xFF) {
+ xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
+ }
+ return val;
+}
+
+static void pnv_xive2_nxc_cache_watch_release(PnvXive2 *xive,
+ uint8_t watch_engine)
+{
+ uint64_t state = xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3];
+
+ pnv_xive2_cache_watch_release(&state, watch_engine);
+ xive->pc_regs[PC_NXC_WATCH_ASSIGN >> 3] = state;
+}
+
static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint64_t val = -1;
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
@@ -1173,24 +1468,44 @@ static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
val = xive->pc_regs[reg];
break;
+ case PC_NXC_WATCH_ASSIGN:
+ val = pnv_xive2_nxc_cache_watch_assign(xive);
+ break;
+
+ case PC_NXC_PROC_CONFIG:
+ val = xive->pc_regs[reg];
+ break;
+
/*
* cache updates
*/
case PC_NXC_WATCH0_SPEC:
+ case PC_NXC_WATCH1_SPEC:
+ case PC_NXC_WATCH2_SPEC:
+ case PC_NXC_WATCH3_SPEC:
+ watch_engine = (offset - PC_NXC_WATCH0_SPEC) >> 6;
xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
+ pnv_xive2_nxc_cache_watch_release(xive, watch_engine);
val = xive->pc_regs[reg];
break;
case PC_NXC_WATCH0_DATA0:
+ case PC_NXC_WATCH1_DATA0:
+ case PC_NXC_WATCH2_DATA0:
+ case PC_NXC_WATCH3_DATA0:
/*
* Load DATA registers from cache with data requested by the
* SPEC register
*/
- pnv_xive2_nvp_cache_load(xive);
+ watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
+ pnv_xive2_nxc_cache_load(xive, watch_engine);
val = xive->pc_regs[reg];
break;
case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
+ case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
+ case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
+ case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
val = xive->pc_regs[reg];
break;
@@ -1214,36 +1529,66 @@ static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
return val;
}
+static void pnv_xive2_pc_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+{
+ uint8_t type = GETFIELD(PC_VSD_TABLE_SELECT,
+ xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
+ uint8_t blk = GETFIELD(PC_VSD_TABLE_ADDRESS,
+ xive->pc_regs[PC_VSD_TABLE_ADDR >> 3]);
+
+ pnv_xive2_vst_set_data(xive, vsd, type, blk);
+}
+
static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
uint32_t reg = offset >> 3;
+ uint8_t watch_engine;
switch (offset) {
/*
- * VSD table settings. Only taken into account in the VC
- * sub-engine because the Xive2Router model combines both VC and PC
- * sub-engines
+ * VSD table settings.
+ * The Xive2Router model combines both VC and PC sub-engines. We
+ * allow configuring the tables through both, for the rare cases
+ * where a table only really needs to be configured for one of
+ * them (e.g. the NVG table for the presenter). This assumes that
+ * firmware passes the same address to the VC and PC when tables
+ * are defined for both, which seems acceptable.
*/
case PC_VSD_TABLE_ADDR:
+ break;
case PC_VSD_TABLE_DATA:
+ pnv_xive2_pc_vst_set_data(xive, val);
+ break;
+
+ case PC_NXC_PROC_CONFIG:
break;
/*
* cache updates
*/
case PC_NXC_WATCH0_SPEC:
+ case PC_NXC_WATCH1_SPEC:
+ case PC_NXC_WATCH2_SPEC:
+ case PC_NXC_WATCH3_SPEC:
val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
break;
case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
+ case PC_NXC_WATCH1_DATA1 ... PC_NXC_WATCH1_DATA3:
+ case PC_NXC_WATCH2_DATA1 ... PC_NXC_WATCH2_DATA3:
+ case PC_NXC_WATCH3_DATA1 ... PC_NXC_WATCH3_DATA3:
break;
case PC_NXC_WATCH0_DATA0:
+ case PC_NXC_WATCH1_DATA0:
+ case PC_NXC_WATCH2_DATA0:
+ case PC_NXC_WATCH3_DATA0:
/* writing to DATA0 triggers the cache write */
+ watch_engine = (offset - PC_NXC_WATCH0_DATA0) >> 6;
xive->pc_regs[reg] = val;
- pnv_xive2_nvp_update(xive);
+ pnv_xive2_nxc_update(xive, watch_engine);
break;
/* case PC_NXC_FLUSH_CTRL: */
@@ -1251,6 +1596,10 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
break;
+ case PC_NXC_FLUSH_INJECT:
+ pnv_xive2_inject_notify(xive, PNV_XIVE2_CACHE_NXC);
+ break;
+
/*
* Indirect invalidation
*/
@@ -1547,13 +1896,19 @@ static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
/*
* Sync MMIO page (write only)
*/
-#define PNV_XIVE2_SYNC_IPI 0x000
-#define PNV_XIVE2_SYNC_HW 0x080
-#define PNV_XIVE2_SYNC_NxC 0x100
-#define PNV_XIVE2_SYNC_INT 0x180
-#define PNV_XIVE2_SYNC_OS_ESC 0x200
-#define PNV_XIVE2_SYNC_POOL_ESC 0x280
-#define PNV_XIVE2_SYNC_HARD_ESC 0x300
+#define PNV_XIVE2_SYNC_IPI 0x000
+#define PNV_XIVE2_SYNC_HW 0x080
+#define PNV_XIVE2_SYNC_NxC 0x100
+#define PNV_XIVE2_SYNC_INT 0x180
+#define PNV_XIVE2_SYNC_OS_ESC 0x200
+#define PNV_XIVE2_SYNC_POOL_ESC 0x280
+#define PNV_XIVE2_SYNC_HARD_ESC 0x300
+#define PNV_XIVE2_SYNC_NXC_LD_LCL_NCO 0x800
+#define PNV_XIVE2_SYNC_NXC_LD_LCL_CO 0x880
+#define PNV_XIVE2_SYNC_NXC_ST_LCL_NCI 0x900
+#define PNV_XIVE2_SYNC_NXC_ST_LCL_CI 0x980
+#define PNV_XIVE2_SYNC_NXC_ST_RMT_NCI 0xA00
+#define PNV_XIVE2_SYNC_NXC_ST_RMT_CI 0xA80
static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
unsigned size)
@@ -1565,22 +1920,72 @@ static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
return -1;
}
+/*
+ * The sync MMIO space spans two pages. The lower page is used for
+ * queue sync "poll" requests while the upper page is used for queue
+ * sync "inject" requests. Inject requests require the HW to write
+ * a byte of all 1's to a predetermined location in memory in order
+ * to signal completion of the request. Both pages have the same
+ * layout, so it is easiest to handle both with a single function.
+ */
static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
uint64_t val, unsigned size)
{
PnvXive2 *xive = PNV_XIVE2(opaque);
+ int inject_type;
+ hwaddr pg_offset_mask = (1ull << xive->ic_shift) - 1;
- switch (offset) {
+ /* adjust offset for inject page */
+ hwaddr adj_offset = offset & pg_offset_mask;
+
+ switch (adj_offset) {
case PNV_XIVE2_SYNC_IPI:
+ inject_type = PNV_XIVE2_QUEUE_IPI;
+ break;
case PNV_XIVE2_SYNC_HW:
+ inject_type = PNV_XIVE2_QUEUE_HW;
+ break;
case PNV_XIVE2_SYNC_NxC:
+ inject_type = PNV_XIVE2_QUEUE_NXC;
+ break;
case PNV_XIVE2_SYNC_INT:
+ inject_type = PNV_XIVE2_QUEUE_INT;
+ break;
case PNV_XIVE2_SYNC_OS_ESC:
+ inject_type = PNV_XIVE2_QUEUE_OS;
+ break;
case PNV_XIVE2_SYNC_POOL_ESC:
+ inject_type = PNV_XIVE2_QUEUE_POOL;
+ break;
case PNV_XIVE2_SYNC_HARD_ESC:
+ inject_type = PNV_XIVE2_QUEUE_HARD;
+ break;
+ case PNV_XIVE2_SYNC_NXC_LD_LCL_NCO:
+ inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_NCO;
+ break;
+ case PNV_XIVE2_SYNC_NXC_LD_LCL_CO:
+ inject_type = PNV_XIVE2_QUEUE_NXC_LD_LCL_CO;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_LCL_NCI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_NCI;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_LCL_CI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_LCL_CI;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_RMT_NCI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_NCI;
+ break;
+ case PNV_XIVE2_SYNC_NXC_ST_RMT_CI:
+ inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
break;
default:
xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
+ return;
+ }
+
+ /* Write Queue Sync notification byte if writing to sync inject page */
+ if ((offset & ~pg_offset_mask) != 0) {
+ pnv_xive2_inject_notify(xive, inject_type);
}
}
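
To illustrate the page split, assume the 64k IC page size set at reset (ic_shift = 16); a write landing at byte offset 0x10080 would be handled as follows (hypothetical values):

    hwaddr offset = 0x10080;                       /* upper (inject) page */
    hwaddr pg_offset_mask = (1ull << 16) - 1;
    hwaddr adj_offset = offset & pg_offset_mask;   /* 0x080 -> PNV_XIVE2_SYNC_HW */
    bool inject = (offset & ~pg_offset_mask) != 0; /* true: completion byte written */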
@@ -1814,6 +2219,12 @@ static void pnv_xive2_reset(void *dev)
xive->cq_regs[CQ_XIVE_CFG >> 3] |=
SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);
+ /* VC and PC cache watch assign mechanism */
+ xive->vc_regs[VC_ENDC_CFG >> 3] =
+ SETFIELD(VC_ENDC_CFG_CACHE_WATCH_ASSIGN, 0ull, 0b0111);
+ xive->pc_regs[PC_NXC_PROC_CONFIG >> 3] =
+ SETFIELD(PC_NXC_PROC_CONFIG_WATCH_ASSIGN, 0ull, 0b0111);
+
/* Set default page size to 64k */
xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;
@@ -2025,33 +2436,6 @@ static void pnv_xive2_register_types(void)
type_init(pnv_xive2_register_types)
-static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
- GString *buf)
-{
- uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
- uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
-
- if (!xive2_nvp_is_valid(nvp)) {
- return;
- }
-
- g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x",
- nvp_idx, eq_blk, eq_idx,
- xive_get_field32(NVP2_W2_IPB, nvp->w2));
- /*
- * When the NVP is HW controlled, more fields are updated
- */
- if (xive2_nvp_is_hw(nvp)) {
- g_string_append_printf(buf, " CPPR:%02x",
- xive_get_field32(NVP2_W2_CPPR, nvp->w2));
- if (xive2_nvp_is_co(nvp)) {
- g_string_append_printf(buf, " CO:%04x",
- xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
- }
- }
- g_string_append_c(buf, '\n');
-}
-
/*
* If the table is direct, we can compute the number of PQ entries
* provisioned by FW.
diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h
index 7165dc8..e8b87b3 100644
--- a/hw/intc/pnv_xive2_regs.h
+++ b/hw/intc/pnv_xive2_regs.h
@@ -232,6 +232,10 @@
#define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+/* ESBC cache flush inject register */
+#define X_VC_ESBC_FLUSH_INJECT 0x142
+#define VC_ESBC_FLUSH_INJECT 0x210
+
/* ESBC configuration */
#define X_VC_ESBC_CFG 0x148
#define VC_ESBC_CFG 0x240
@@ -250,6 +254,10 @@
#define VC_EASC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
#define VC_EASC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+/* EASC flush inject register */
+#define X_VC_EASC_FLUSH_INJECT 0x162
+#define VC_EASC_FLUSH_INJECT 0x310
+
/*
* VC2
*/
@@ -270,6 +278,10 @@
#define VC_ENDC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
#define VC_ENDC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
+/* ENDC flush inject register */
+#define X_VC_ENDC_FLUSH_INJECT 0x182
+#define VC_ENDC_FLUSH_INJECT 0x410
+
/* ENDC Sync done */
#define X_VC_ENDC_SYNC_DONE 0x184
#define VC_ENDC_SYNC_DONE 0x420
@@ -283,6 +295,15 @@
#define VC_ENDC_SYNC_QUEUE_HARD PPC_BIT(6)
#define VC_QUEUE_COUNT 7
+/* ENDC cache watch assign */
+#define X_VC_ENDC_WATCH_ASSIGN 0x186
+#define VC_ENDC_WATCH_ASSIGN 0x430
+
+/* ENDC configuration register */
+#define X_VC_ENDC_CFG 0x188
+#define VC_ENDC_CFG 0x440
+#define VC_ENDC_CFG_CACHE_WATCH_ASSIGN PPC_BITMASK(32, 35)
+
/* ENDC cache watch specification 0 */
#define X_VC_ENDC_WATCH0_SPEC 0x1A0
#define VC_ENDC_WATCH0_SPEC 0x500
@@ -302,6 +323,42 @@
#define VC_ENDC_WATCH0_DATA2 0x530
#define VC_ENDC_WATCH0_DATA3 0x538
+/* ENDC cache watch 1 */
+#define X_VC_ENDC_WATCH1_SPEC 0x1A8
+#define VC_ENDC_WATCH1_SPEC 0x540
+#define X_VC_ENDC_WATCH1_DATA0 0x1AC
+#define X_VC_ENDC_WATCH1_DATA1 0x1AD
+#define X_VC_ENDC_WATCH1_DATA2 0x1AE
+#define X_VC_ENDC_WATCH1_DATA3 0x1AF
+#define VC_ENDC_WATCH1_DATA0 0x560
+#define VC_ENDC_WATCH1_DATA1 0x568
+#define VC_ENDC_WATCH1_DATA2 0x570
+#define VC_ENDC_WATCH1_DATA3 0x578
+
+/* ENDC cache watch 2 */
+#define X_VC_ENDC_WATCH2_SPEC 0x1B0
+#define VC_ENDC_WATCH2_SPEC 0x580
+#define X_VC_ENDC_WATCH2_DATA0 0x1B4
+#define X_VC_ENDC_WATCH2_DATA1 0x1B5
+#define X_VC_ENDC_WATCH2_DATA2 0x1B6
+#define X_VC_ENDC_WATCH2_DATA3 0x1B7
+#define VC_ENDC_WATCH2_DATA0 0x5A0
+#define VC_ENDC_WATCH2_DATA1 0x5A8
+#define VC_ENDC_WATCH2_DATA2 0x5B0
+#define VC_ENDC_WATCH2_DATA3 0x5B8
+
+/* ENDC cache watch 3 */
+#define X_VC_ENDC_WATCH3_SPEC 0x1B8
+#define VC_ENDC_WATCH3_SPEC 0x5C0
+#define X_VC_ENDC_WATCH3_DATA0 0x1BC
+#define X_VC_ENDC_WATCH3_DATA1 0x1BD
+#define X_VC_ENDC_WATCH3_DATA2 0x1BE
+#define X_VC_ENDC_WATCH3_DATA3 0x1BF
+#define VC_ENDC_WATCH3_DATA0 0x5E0
+#define VC_ENDC_WATCH3_DATA1 0x5E8
+#define VC_ENDC_WATCH3_DATA2 0x5F0
+#define VC_ENDC_WATCH3_DATA3 0x5F8
+
/*
* PC LSB1
*/
@@ -358,6 +415,21 @@
#define PC_NXC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
#define PC_NXC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
+/* NxC Cache flush inject */
+#define X_PC_NXC_FLUSH_INJECT 0x282
+#define PC_NXC_FLUSH_INJECT 0x410
+
+/* NxC Cache watch assign */
+#define X_PC_NXC_WATCH_ASSIGN 0x286
+#define PC_NXC_WATCH_ASSIGN 0x430
+
+/* NxC Proc config */
+#define X_PC_NXC_PROC_CONFIG 0x28A
+#define PC_NXC_PROC_CONFIG 0x450
+#define PC_NXC_PROC_CONFIG_WATCH_ASSIGN PPC_BITMASK(0, 3)
+#define PC_NXC_PROC_CONFIG_NVG_TABLE_COMPRESS PPC_BITMASK(32, 35)
+#define PC_NXC_PROC_CONFIG_NVC_TABLE_COMPRESS PPC_BITMASK(36, 39)
+
/* NxC Cache Watch 0 Specification */
#define X_PC_NXC_WATCH0_SPEC 0x2A0
#define PC_NXC_WATCH0_SPEC 0x500
@@ -381,6 +453,42 @@
#define PC_NXC_WATCH0_DATA2 0x530
#define PC_NXC_WATCH0_DATA3 0x538
+/* NxC Cache Watch 1 */
+#define X_PC_NXC_WATCH1_SPEC 0x2A8
+#define PC_NXC_WATCH1_SPEC 0x540
+#define X_PC_NXC_WATCH1_DATA0 0x2AC
+#define X_PC_NXC_WATCH1_DATA1 0x2AD
+#define X_PC_NXC_WATCH1_DATA2 0x2AE
+#define X_PC_NXC_WATCH1_DATA3 0x2AF
+#define PC_NXC_WATCH1_DATA0 0x560
+#define PC_NXC_WATCH1_DATA1 0x568
+#define PC_NXC_WATCH1_DATA2 0x570
+#define PC_NXC_WATCH1_DATA3 0x578
+
+/* NxC Cache Watch 2 */
+#define X_PC_NXC_WATCH2_SPEC 0x2B0
+#define PC_NXC_WATCH2_SPEC 0x580
+#define X_PC_NXC_WATCH2_DATA0 0x2B4
+#define X_PC_NXC_WATCH2_DATA1 0x2B5
+#define X_PC_NXC_WATCH2_DATA2 0x2B6
+#define X_PC_NXC_WATCH2_DATA3 0x2B7
+#define PC_NXC_WATCH2_DATA0 0x5A0
+#define PC_NXC_WATCH2_DATA1 0x5A8
+#define PC_NXC_WATCH2_DATA2 0x5B0
+#define PC_NXC_WATCH2_DATA3 0x5B8
+
+/* NxC Cache Watch 3 */
+#define X_PC_NXC_WATCH3_SPEC 0x2B8
+#define PC_NXC_WATCH3_SPEC 0x5C0
+#define X_PC_NXC_WATCH3_DATA0 0x2BC
+#define X_PC_NXC_WATCH3_DATA1 0x2BD
+#define X_PC_NXC_WATCH3_DATA2 0x2BE
+#define X_PC_NXC_WATCH3_DATA3 0x2BF
+#define PC_NXC_WATCH3_DATA0 0x5E0
+#define PC_NXC_WATCH3_DATA1 0x5E8
+#define PC_NXC_WATCH3_DATA2 0x5F0
+#define PC_NXC_WATCH3_DATA3 0x5F8
+
/*
* TCTXT Registers
*/
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 70f11f9..5a02dd8 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -692,9 +692,15 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf)
}
}
- g_string_append_printf(buf, "CPU[%04x]: "
- "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR W2\n",
- cpu_index);
+ if (xive_presenter_get_config(tctx->xptr) & XIVE_PRESENTER_GEN1_TIMA_OS) {
+ g_string_append_printf(buf, "CPU[%04x]: "
+ "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
+ " W2\n", cpu_index);
+ } else {
+ g_string_append_printf(buf, "CPU[%04x]: "
+ "QW NSR CPPR IPB LSMFB - LGS T PIPR"
+ " W2\n", cpu_index);
+ }
for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index 3e7238c..1f15068 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -89,7 +89,7 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
pq = xive_get_field32(END2_W1_ESn, end->w1);
g_string_append_printf(buf,
- " %08x %c%c %c%c%c%c%c%c%c%c%c%c "
+ " %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
"prio:%d nvp:%02x/%04x",
end_idx,
pq & XIVE_ESB_VAL_P ? 'P' : '-',
@@ -98,12 +98,15 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
xive2_end_is_enqueue(end) ? 'q' : '-',
xive2_end_is_notify(end) ? 'n' : '-',
xive2_end_is_backlog(end) ? 'b' : '-',
+ xive2_end_is_precluded_escalation(end) ? 'p' : '-',
xive2_end_is_escalate(end) ? 'e' : '-',
xive2_end_is_escalate_end(end) ? 'N' : '-',
xive2_end_is_uncond_escalation(end) ? 'u' : '-',
xive2_end_is_silent_escalation(end) ? 's' : '-',
xive2_end_is_firmware1(end) ? 'f' : '-',
xive2_end_is_firmware2(end) ? 'F' : '-',
+ xive2_end_is_ignore(end) ? 'i' : '-',
+ xive2_end_is_crowd(end) ? 'c' : '-',
priority, nvp_blk, nvp_idx);
if (qaddr_base) {
@@ -137,6 +140,32 @@ void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
(uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
+void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
+{
+ uint8_t eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
+ uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
+
+ if (!xive2_nvp_is_valid(nvp)) {
+ return;
+ }
+
+ g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x",
+ nvp_idx, eq_blk, eq_idx,
+ xive_get_field32(NVP2_W2_IPB, nvp->w2));
+ /*
+ * When the NVP is HW controlled, more fields are updated
+ */
+ if (xive2_nvp_is_hw(nvp)) {
+ g_string_append_printf(buf, " CPPR:%02x",
+ xive_get_field32(NVP2_W2_CPPR, nvp->w2));
+ if (xive2_nvp_is_co(nvp)) {
+ g_string_append_printf(buf, " CO:%04x",
+ xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
+ }
+ }
+ g_string_append_c(buf, '\n');
+}
+
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
uint64_t qaddr_base = xive2_end_qaddr(end);
@@ -650,7 +679,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
}
found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
- xive_get_field32(END2_W6_IGNORE, end.w7),
+ xive2_end_is_ignore(&end),
priority,
xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));
diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig
index 347212f..c235519 100644
--- a/hw/ppc/Kconfig
+++ b/hw/ppc/Kconfig
@@ -39,6 +39,9 @@ config POWERNV
select PCI_POWERNV
select PCA9552
select PCA9554
+ select SSI
+ select SSI_M25P80
+ select PNV_SPI
config PPC405
bool
diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build
index 3ebbf32..7cd9189 100644
--- a/hw/ppc/meson.build
+++ b/hw/ppc/meson.build
@@ -42,6 +42,7 @@ endif
ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files(
'pnv.c',
'pnv_xscom.c',
+ 'pnv_adu.c',
'pnv_core.c',
'pnv_i2c.c',
'pnv_lpc.c',
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 6b41d1d..3526852 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -141,9 +141,9 @@ static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
PnvChipClass *pnv_cc = PNV_CHIP_GET_CLASS(chip);
- g_autofree uint32_t *servers_prop = g_new(uint32_t, smt_threads);
+ uint32_t *servers_prop;
int i;
- uint32_t pir;
+ uint32_t pir, tir;
uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
0xffffffff, 0xffffffff};
uint32_t tbfreq = PNV_TIMEBASE_FREQ;
@@ -154,7 +154,10 @@ static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
char *nodename;
int cpus_offset = get_cpus_node(fdt);
- pir = pnv_cc->chip_pir(chip, pc->hwid, 0);
+ pnv_cc->get_pir_tir(chip, pc->hwid, 0, &pir, &tir);
+
+ /* Only one DT node per (big) core */
+ g_assert(tir == 0);
nodename = g_strdup_printf("%s@%x", dc->fw_name, pir);
offset = fdt_add_subnode(fdt, cpus_offset, nodename);
@@ -235,11 +238,28 @@ static int pnv_dt_core(PnvChip *chip, PnvCore *pc, void *fdt)
}
/* Build interrupt servers properties */
- for (i = 0; i < smt_threads; i++) {
- servers_prop[i] = cpu_to_be32(pnv_cc->chip_pir(chip, pc->hwid, i));
+ if (pc->big_core) {
+ servers_prop = g_new(uint32_t, smt_threads * 2);
+ for (i = 0; i < smt_threads; i++) {
+ pnv_cc->get_pir_tir(chip, pc->hwid, i, &pir, NULL);
+ servers_prop[i * 2] = cpu_to_be32(pir);
+
+ pnv_cc->get_pir_tir(chip, pc->hwid + 1, i, &pir, NULL);
+ servers_prop[i * 2 + 1] = cpu_to_be32(pir);
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
+ servers_prop, sizeof(*servers_prop) * smt_threads
+ * 2)));
+ } else {
+ servers_prop = g_new(uint32_t, smt_threads);
+ for (i = 0; i < smt_threads; i++) {
+ pnv_cc->get_pir_tir(chip, pc->hwid, i, &pir, NULL);
+ servers_prop[i] = cpu_to_be32(pir);
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
+ servers_prop, sizeof(*servers_prop) * smt_threads)));
}
- _FDT((fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
- servers_prop, sizeof(*servers_prop) * smt_threads)));
+ g_free(servers_prop);
return offset;
}
@@ -248,14 +268,17 @@ static void pnv_dt_icp(PnvChip *chip, void *fdt, uint32_t hwid,
uint32_t nr_threads)
{
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
- uint32_t pir = pcc->chip_pir(chip, hwid, 0);
- uint64_t addr = PNV_ICP_BASE(chip) | (pir << 12);
+ uint32_t pir;
+ uint64_t addr;
char *name;
const char compat[] = "IBM,power8-icp\0IBM,ppc-xicp";
uint32_t irange[2], i, rsize;
uint64_t *reg;
int offset;
+ pcc->get_pir_tir(chip, hwid, 0, &pir, NULL);
+ addr = PNV_ICP_BASE(chip) | (pir << 12);
+
irange[0] = cpu_to_be32(pir);
irange[1] = cpu_to_be32(nr_threads);
@@ -385,6 +408,10 @@ static void pnv_chip_power9_dt_populate(PnvChip *chip, void *fdt)
_FDT((fdt_setprop(fdt, offset, "ibm,pa-features",
pa_features_300, sizeof(pa_features_300))));
+
+ if (pnv_core->big_core) {
+ i++; /* Big-core groups two QEMU cores */
+ }
}
if (chip->ram_size) {
@@ -446,6 +473,10 @@ static void pnv_chip_power10_dt_populate(PnvChip *chip, void *fdt)
_FDT((fdt_setprop(fdt, offset, "ibm,pa-features",
pa_features_31, sizeof(pa_features_31))));
+
+ if (pnv_core->big_core) {
+ i++; /* Big-core groups two QEMU cores */
+ }
}
if (chip->ram_size) {
@@ -727,7 +758,8 @@ static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp)
Pnv8Chip *chip8 = PNV8_CHIP(chip);
qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_EXTERNAL);
- qdev_connect_gpio_out(DEVICE(&chip8->lpc), 0, irq);
+ qdev_connect_gpio_out_named(DEVICE(&chip8->lpc), "LPCHC", 0, irq);
+
return pnv_lpc_isa_create(&chip8->lpc, true, errp);
}
@@ -736,25 +768,48 @@ static ISABus *pnv_chip_power8nvl_isa_create(PnvChip *chip, Error **errp)
Pnv8Chip *chip8 = PNV8_CHIP(chip);
qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip8->psi), PSIHB_IRQ_LPC_I2C);
- qdev_connect_gpio_out(DEVICE(&chip8->lpc), 0, irq);
+ qdev_connect_gpio_out_named(DEVICE(&chip8->lpc), "LPCHC", 0, irq);
+
return pnv_lpc_isa_create(&chip8->lpc, false, errp);
}
static ISABus *pnv_chip_power9_isa_create(PnvChip *chip, Error **errp)
{
Pnv9Chip *chip9 = PNV9_CHIP(chip);
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPCHC);
+ qemu_irq irq;
+
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPCHC);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "LPCHC", 0, irq);
+
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ0);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 0, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ1);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 1, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ2);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 2, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip9->psi), PSIHB9_IRQ_LPC_SIRQ3);
+ qdev_connect_gpio_out_named(DEVICE(&chip9->lpc), "SERIRQ", 3, irq);
- qdev_connect_gpio_out(DEVICE(&chip9->lpc), 0, irq);
return pnv_lpc_isa_create(&chip9->lpc, false, errp);
}
static ISABus *pnv_chip_power10_isa_create(PnvChip *chip, Error **errp)
{
Pnv10Chip *chip10 = PNV10_CHIP(chip);
- qemu_irq irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPCHC);
+ qemu_irq irq;
+
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPCHC);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "LPCHC", 0, irq);
+
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ0);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 0, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ1);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 1, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ2);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 2, irq);
+ irq = qdev_get_gpio_in(DEVICE(&chip10->psi), PSIHB9_IRQ_LPC_SIRQ3);
+ qdev_connect_gpio_out_named(DEVICE(&chip10->lpc), "SERIRQ", 3, irq);
- qdev_connect_gpio_out(DEVICE(&chip10->lpc), 0, irq);
return pnv_lpc_isa_create(&chip10->lpc, false, errp);
}
@@ -875,6 +930,7 @@ static void pnv_init(MachineState *machine)
PnvMachineState *pnv = PNV_MACHINE(machine);
MachineClass *mc = MACHINE_GET_CLASS(machine);
PnvMachineClass *pmc = PNV_MACHINE_GET_CLASS(machine);
+ int max_smt_threads = pmc->max_smt_threads;
char *fw_filename;
long fw_size;
uint64_t chip_ram_start = 0;
@@ -970,20 +1026,52 @@ static void pnv_init(MachineState *machine)
exit(1);
}
+ /* Set lpar-per-core mode if lpar-per-thread is not supported */
+ if (!pmc->has_lpar_per_thread) {
+ pnv->lpar_per_core = true;
+ }
+
pnv->num_chips =
machine->smp.max_cpus / (machine->smp.cores * machine->smp.threads);
- if (machine->smp.threads > 8) {
- error_report("Cannot support more than 8 threads/core "
- "on a powernv machine");
+ if (pnv->big_core) {
+ if (machine->smp.threads % 2 == 1) {
+ error_report("Cannot support %d threads with big-core option "
+ "because it must be an even number",
+ machine->smp.threads);
+ exit(1);
+ }
+ max_smt_threads *= 2;
+ }
+
+ if (machine->smp.threads > max_smt_threads) {
+ error_report("Cannot support more than %d threads/core "
+ "on %s machine", max_smt_threads, mc->desc);
+ if (pmc->max_smt_threads == 4) {
+ error_report("(use big-core=on for 8 threads per core)");
+ }
exit(1);
}
+
+ if (pnv->big_core) {
+ /*
+ * powernv models PnvCore as an SMT4 core. Big-core requires 2xPnvCore
+ * per core, so adjust the topology here. The pnv_dt_core() processor
+ * device-tree and TCG SMT code make the 2 cores appear as one big core
+ * from the software point of view. pnv pervasive models and xscoms tend
+ * to see the big core as 2 small-core halves.
+ */
+ machine->smp.cores *= 2;
+ machine->smp.threads /= 2;
+ }
+
if (!is_power_of_2(machine->smp.threads)) {
- error_report("Cannot support %d threads/core on a powernv"
+ error_report("Cannot support %d threads/core on a powernv "
"machine because it must be a power of 2",
machine->smp.threads);
exit(1);
}
+
/*
* TODO: should we decide on how many chips we can create based
* on #cores and Venice vs. Murano vs. Naples chip type etc...,
@@ -1017,6 +1105,10 @@ static void pnv_init(MachineState *machine)
&error_fatal);
object_property_set_int(chip, "nr-threads", machine->smp.threads,
&error_fatal);
+ object_property_set_bool(chip, "big-core", pnv->big_core,
+ &error_fatal);
+ object_property_set_bool(chip, "lpar-per-core", pnv->lpar_per_core,
+ &error_fatal);
/*
* The POWER8 machine use the XICS interrupt interface.
* Propagate the XICS fabric to the chip and its controllers.
@@ -1079,10 +1171,16 @@ static void pnv_init(MachineState *machine)
* 25:28 Core number
* 29:31 Thread ID
*/
-static uint32_t pnv_chip_pir_p8(PnvChip *chip, uint32_t core_id,
- uint32_t thread_id)
+static void pnv_get_pir_tir_p8(PnvChip *chip,
+ uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir)
{
- return (chip->chip_id << 7) | (core_id << 3) | thread_id;
+ if (pir) {
+ *pir = (chip->chip_id << 7) | (core_id << 3) | thread_id;
+ }
+ if (tir) {
+ *tir = thread_id;
+ }
}
static void pnv_chip_power8_intc_create(PnvChip *chip, PowerPCCPU *cpu,
@@ -1134,14 +1232,26 @@ static void pnv_chip_power8_intc_print_info(PnvChip *chip, PowerPCCPU *cpu,
*
* We only care about the lower bits. uint32_t is fine for the moment.
*/
-static uint32_t pnv_chip_pir_p9(PnvChip *chip, uint32_t core_id,
- uint32_t thread_id)
-{
- if (chip->nr_threads == 8) {
- return (chip->chip_id << 8) | ((thread_id & 1) << 2) | (core_id << 3) |
- (thread_id >> 1);
+static void pnv_get_pir_tir_p9(PnvChip *chip,
+ uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir)
+{
+ if (chip->big_core) {
+ /* Big-core interleaves thread ID between small-cores */
+ thread_id <<= 1;
+ thread_id |= core_id & 1;
+ core_id >>= 1;
+
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 3) | thread_id;
+ }
} else {
- return (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ }
+ }
+ if (tir) {
+ *tir = thread_id;
}
}
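
A worked example of the big-core interleave (illustrative numbers): on chip 0 with big_core set, small core 5, thread 2 becomes thread (2 << 1) | (5 & 1) = 5 on core 5 >> 1 = 2:

    uint32_t pir, tir;
    pnv_get_pir_tir_p9(chip, 5, 2, &pir, &tir);
    /* pir == (0 << 8) | (2 << 3) | 5 == 0x15, tir == 5 */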
@@ -1156,14 +1266,26 @@ static uint32_t pnv_chip_pir_p9(PnvChip *chip, uint32_t core_id,
*
* We only care about the lower bits. uint32_t is fine for the moment.
*/
-static uint32_t pnv_chip_pir_p10(PnvChip *chip, uint32_t core_id,
- uint32_t thread_id)
-{
- if (chip->nr_threads == 8) {
- return (chip->chip_id << 8) | ((core_id / 4) << 4) |
- ((core_id % 2) << 3) | thread_id;
+static void pnv_get_pir_tir_p10(PnvChip *chip,
+ uint32_t core_id, uint32_t thread_id,
+ uint32_t *pir, uint32_t *tir)
+{
+ if (chip->big_core) {
+ /* Big-core interleaves thread ID between small-cores */
+ thread_id <<= 1;
+ thread_id |= core_id & 1;
+ core_id >>= 1;
+
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 3) | thread_id;
+ }
} else {
- return (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ if (pir) {
+ *pir = (chip->chip_id << 8) | (core_id << 2) | thread_id;
+ }
+ }
+ if (tir) {
+ *tir = thread_id;
}
}
@@ -1343,8 +1465,11 @@ static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp)
int core_hwid = CPU_CORE(pnv_core)->core_id;
for (j = 0; j < CPU_CORE(pnv_core)->nr_threads; j++) {
- uint32_t pir = pcc->chip_pir(chip, core_hwid, j);
- PnvICPState *icp = PNV_ICP(xics_icp_get(chip8->xics, pir));
+ uint32_t pir;
+ PnvICPState *icp;
+
+ pcc->get_pir_tir(chip, core_hwid, j, &pir, NULL);
+ icp = PNV_ICP(xics_icp_get(chip8->xics, pir));
memory_region_add_subregion(&chip8->icp_mmio, pir << 12,
&icp->mmio);
@@ -1456,7 +1581,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */
k->cores_mask = POWER8E_CORE_MASK;
k->num_phbs = 3;
- k->chip_pir = pnv_chip_pir_p8;
+ k->get_pir_tir = pnv_get_pir_tir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->intc_destroy = pnv_chip_power8_intc_destroy;
@@ -1480,7 +1605,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */
k->cores_mask = POWER8_CORE_MASK;
k->num_phbs = 3;
- k->chip_pir = pnv_chip_pir_p8;
+ k->get_pir_tir = pnv_get_pir_tir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->intc_destroy = pnv_chip_power8_intc_destroy;
@@ -1504,7 +1629,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */
k->cores_mask = POWER8_CORE_MASK;
k->num_phbs = 4;
- k->chip_pir = pnv_chip_pir_p8;
+ k->get_pir_tir = pnv_get_pir_tir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
k->intc_destroy = pnv_chip_power8_intc_destroy;
@@ -1527,6 +1652,7 @@ static void pnv_chip_power9_instance_init(Object *obj)
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
int i;
+ object_initialize_child(obj, "adu", &chip9->adu, TYPE_PNV_ADU);
object_initialize_child(obj, "xive", &chip9->xive, TYPE_PNV_XIVE);
object_property_add_alias(obj, "xive-fabric", OBJECT(&chip9->xive),
"xive-fabric");
@@ -1637,6 +1763,15 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
return;
}
+ /* ADU */
+ object_property_set_link(OBJECT(&chip9->adu), "lpc", OBJECT(&chip9->lpc),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip9->adu), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV9_XSCOM_ADU_BASE,
+ &chip9->adu.xscom_regs);
+
pnv_chip_quad_realize(chip9, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1777,7 +1912,7 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
k->chip_cfam_id = 0x220d104900008000ull; /* P9 Nimbus DD2.0 */
k->cores_mask = POWER9_CORE_MASK;
- k->chip_pir = pnv_chip_pir_p9;
+ k->get_pir_tir = pnv_get_pir_tir_p9;
k->intc_create = pnv_chip_power9_intc_create;
k->intc_reset = pnv_chip_power9_intc_reset;
k->intc_destroy = pnv_chip_power9_intc_destroy;
@@ -1803,6 +1938,7 @@ static void pnv_chip_power10_instance_init(Object *obj)
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
int i;
+ object_initialize_child(obj, "adu", &chip10->adu, TYPE_PNV_ADU);
object_initialize_child(obj, "xive", &chip10->xive, TYPE_PNV_XIVE2);
object_property_add_alias(obj, "xive-fabric", OBJECT(&chip10->xive),
"xive-fabric");
@@ -1826,6 +1962,11 @@ static void pnv_chip_power10_instance_init(Object *obj)
for (i = 0; i < pcc->i2c_num_engines; i++) {
object_initialize_child(obj, "i2c[*]", &chip10->i2c[i], TYPE_PNV_I2C);
}
+
+ for (i = 0; i < PNV10_CHIP_MAX_PIB_SPIC; i++) {
+ object_initialize_child(obj, "pib_spic[*]", &chip10->pib_spic[i],
+ TYPE_PNV_SPI);
+ }
}
static void pnv_chip_power10_quad_realize(Pnv10Chip *chip10, Error **errp)
@@ -1895,6 +2036,15 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
return;
}
+ /* ADU */
+ object_property_set_link(OBJECT(&chip10->adu), "lpc", OBJECT(&chip10->lpc),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip10->adu), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_ADU_BASE,
+ &chip10->adu.xscom_regs);
+
pnv_chip_power10_quad_realize(chip10, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -2040,7 +2190,21 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
qdev_get_gpio_in(DEVICE(&chip10->psi),
PSIHB9_IRQ_SBE_I2C));
}
-
+ /* PIB SPI Controller */
+ for (i = 0; i < PNV10_CHIP_MAX_PIB_SPIC; i++) {
+ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "spic_num",
+ i, &error_fatal);
+ /* pib_spic[2] connected to 25csm04 which implements 1 byte transfer */
+ object_property_set_int(OBJECT(&chip10->pib_spic[i]), "transfer_len",
+ (i == 2) ? 1 : 4, &error_fatal);
+ if (!sysbus_realize(SYS_BUS_DEVICE(OBJECT
+ (&chip10->pib_spic[i])), errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_PIB_SPIC_BASE +
+ i * PNV10_XSCOM_PIB_SPIC_SIZE,
+ &chip10->pib_spic[i].xscom_spic_regs);
+ }
}
static void pnv_rainier_i2c_init(PnvMachineState *pnv)
@@ -2087,9 +2251,9 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data)
PnvChipClass *k = PNV_CHIP_CLASS(klass);
static const int i2c_ports_per_engine[PNV10_CHIP_MAX_I2C] = {14, 14, 2, 16};
- k->chip_cfam_id = 0x120da04900008000ull; /* P10 DD1.0 (with NX) */
+ k->chip_cfam_id = 0x220da04980000000ull; /* P10 DD2.0 (with NX) */
k->cores_mask = POWER10_CORE_MASK;
- k->chip_pir = pnv_chip_pir_p10;
+ k->get_pir_tir = pnv_get_pir_tir_p10;
k->intc_create = pnv_chip_power10_intc_create;
k->intc_reset = pnv_chip_power10_intc_reset;
k->intc_destroy = pnv_chip_power10_intc_destroy;
@@ -2108,7 +2272,8 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data)
&k->parent_realize);
}
-static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
+static void pnv_chip_core_sanitize(PnvMachineState *pnv, PnvChip *chip,
+ Error **errp)
{
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
int cores_max;
@@ -2129,6 +2294,17 @@ static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
}
chip->cores_mask &= pcc->cores_mask;
+ /* Ensure small cores are paired up in big-core mode */
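+ /*
+ * Worked example (illustrative): cores_mask 0b0011 pairs cores 0 and 1
+ * (even = 0b0001, odd = 0b0010, even ^ (odd >> 1) == 0), while 0b0110
+ * leaves cores 1 and 2 unpaired and fails the check below.
+ */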
+ if (pnv->big_core) {
+ uint64_t even_cores = chip->cores_mask & 0x5555555555555555ULL;
+ uint64_t odd_cores = chip->cores_mask & 0xaaaaaaaaaaaaaaaaULL;
+
+ if (even_cores ^ (odd_cores >> 1)) {
+ error_setg(errp, "warning: unpaired cores in big-core mode !");
+ return;
+ }
+ }
+
/* now that we have a sane layout, let's check the number of cores */
cores_max = ctpop64(chip->cores_mask);
if (chip->nr_cores > cores_max) {
@@ -2140,11 +2316,12 @@ static void pnv_chip_core_sanitize(PnvChip *chip, Error **errp)
static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
{
+ PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
+ PnvMachineClass *pmc = PNV_MACHINE_GET_CLASS(pnv);
Error *error = NULL;
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
const char *typename = pnv_chip_core_typename(chip);
int i, core_hwid;
- PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
if (!object_class_by_name(typename)) {
error_setg(errp, "Unable to find PowerNV CPU Core '%s'", typename);
@@ -2152,7 +2329,7 @@ static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
}
/* Cores */
- pnv_chip_core_sanitize(chip, &error);
+ pnv_chip_core_sanitize(pnv, chip, &error);
if (error) {
error_propagate(errp, error);
return;
@@ -2183,8 +2360,15 @@ static void pnv_chip_core_realize(PnvChip *chip, Error **errp)
&error_fatal);
object_property_set_int(OBJECT(pnv_core), "hrmor", pnv->fw_load_addr,
&error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), "big-core", chip->big_core,
+ &error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), "quirk-tb-big-core",
+ pmc->quirk_tb_big_core, &error_fatal);
+ object_property_set_bool(OBJECT(pnv_core), "lpar-per-core",
+ chip->lpar_per_core, &error_fatal);
object_property_set_link(OBJECT(pnv_core), "chip", OBJECT(chip),
&error_abort);
+
qdev_realize(DEVICE(pnv_core), NULL, &error_fatal);
/* Each core has an XSCOM MMIO region */
@@ -2216,6 +2400,8 @@ static Property pnv_chip_properties[] = {
DEFINE_PROP_UINT32("nr-cores", PnvChip, nr_cores, 1),
DEFINE_PROP_UINT64("cores-mask", PnvChip, cores_mask, 0x0),
DEFINE_PROP_UINT32("nr-threads", PnvChip, nr_threads, 1),
+ DEFINE_PROP_BOOL("big-core", PnvChip, big_core, false),
+ DEFINE_PROP_BOOL("lpar-per-core", PnvChip, lpar_per_core, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2424,6 +2610,46 @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
return total_count;
}
+static bool pnv_machine_get_big_core(Object *obj, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ return pnv->big_core;
+}
+
+static void pnv_machine_set_big_core(Object *obj, bool value, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ pnv->big_core = value;
+}
+
+static bool pnv_machine_get_lpar_per_core(Object *obj, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ return pnv->lpar_per_core;
+}
+
+static void pnv_machine_set_lpar_per_core(Object *obj, bool value, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+ pnv->lpar_per_core = value;
+}
+
+static bool pnv_machine_get_hb(Object *obj, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+
+ return !!pnv->fw_load_addr;
+}
+
+static void pnv_machine_set_hb(Object *obj, bool value, Error **errp)
+{
+ PnvMachineState *pnv = PNV_MACHINE(obj);
+
+ if (value) {
+ pnv->fw_load_addr = 0x8000000;
+ }
+}
+
static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -2446,6 +2672,9 @@ static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
+ pmc->max_smt_threads = 8;
+ /* POWER8 is always lpar-per-core mode */
+ pmc->has_lpar_per_thread = false;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
}
@@ -2470,9 +2699,23 @@ static void pnv_machine_power9_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
+ pmc->max_smt_threads = 4;
+ pmc->has_lpar_per_thread = true;
pmc->dt_power_mgt = pnv_dt_power_mgt;
machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB);
+
+ object_class_property_add_bool(oc, "big-core",
+ pnv_machine_get_big_core,
+ pnv_machine_set_big_core);
+ object_class_property_set_description(oc, "big-core",
+ "Use big-core (aka fused-core) mode");
+
+ object_class_property_add_bool(oc, "lpar-per-core",
+ pnv_machine_get_lpar_per_core,
+ pnv_machine_set_lpar_per_core);
+ object_class_property_set_description(oc, "lpar-per-core",
+ "Use 1 LPAR per core mode");
}
static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data)
@@ -2494,6 +2737,9 @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
+ pmc->max_smt_threads = 4;
+ pmc->has_lpar_per_thread = true;
+ pmc->quirk_tb_big_core = true;
pmc->dt_power_mgt = pnv_dt_power_mgt;
xfc->match_nvt = pnv10_xive_match_nvt;
@@ -2507,6 +2753,23 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
pnv_machine_p10_common_class_init(oc, data);
mc->desc = "IBM PowerNV (Non-Virtualized) POWER10";
+
+ /*
+ * This is the parent of the POWER10 Rainier class, so properties go
+ * here rather than in the common init (which would add them to both
+ * parent and child, which is invalid).
+ */
+ object_class_property_add_bool(oc, "big-core",
+ pnv_machine_get_big_core,
+ pnv_machine_set_big_core);
+ object_class_property_set_description(oc, "big-core",
+ "Use big-core (aka fused-core) mode");
+
+ object_class_property_add_bool(oc, "lpar-per-core",
+ pnv_machine_get_lpar_per_core,
+ pnv_machine_set_lpar_per_core);
+ object_class_property_set_description(oc, "lpar-per-core",
+ "Use 1 LPAR per core mode");
}
static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, void *data)
@@ -2519,22 +2782,6 @@ static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, void *data)
pmc->i2c_init = pnv_rainier_i2c_init;
}
-static bool pnv_machine_get_hb(Object *obj, Error **errp)
-{
- PnvMachineState *pnv = PNV_MACHINE(obj);
-
- return !!pnv->fw_load_addr;
-}
-
-static void pnv_machine_set_hb(Object *obj, bool value, Error **errp)
-{
- PnvMachineState *pnv = PNV_MACHINE(obj);
-
- if (value) {
- pnv->fw_load_addr = 0x8000000;
- }
-}
-
static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
CPUPPCState *env = cpu_env(cs);
@@ -2561,11 +2808,23 @@ static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
*/
env->spr[SPR_SRR1] |= SRR1_WAKESCOM;
}
+ if (arg.host_int == 1) {
+ cpu_resume(cs);
+ }
+}
+
+/*
+ * Send a SRESET (NMI) interrupt to the CPU, and resume execution if it was
+ * paused.
+ */
+void pnv_cpu_do_nmi_resume(CPUState *cs)
+{
+ async_run_on_cpu(cs, pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_HOST_INT(1));
}
static void pnv_cpu_do_nmi(PnvChip *chip, PowerPCCPU *cpu, void *opaque)
{
- async_run_on_cpu(CPU(cpu), pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
+ async_run_on_cpu(CPU(cpu), pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_HOST_INT(0));
}
static void pnv_nmi(NMIState *n, int cpu_index, Error **errp)
diff --git a/hw/ppc/pnv_adu.c b/hw/ppc/pnv_adu.c
new file mode 100644
index 0000000..81b7d6e
--- /dev/null
+++ b/hw/ppc/pnv_adu.c
@@ -0,0 +1,206 @@
+/*
+ * QEMU PowerPC PowerNV ADU unit
+ *
+ * The ADU unit actually implements XSCOM, which is the bridge between MMIO
+ * and PIB. However, it also includes control and status registers and other
+ * functions that are exposed as PIB (xscom) registers.
+ *
+ * To keep things simple, pnv_xscom.c remains the XSCOM bridge
+ * implementation, and pnv_adu.c implements the ADU registers and other
+ * functions.
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+
+#include "hw/qdev-properties.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_adu.h"
+#include "hw/ppc/pnv_chip.h"
+#include "hw/ppc/pnv_lpc.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "trace.h"
+
+#define ADU_LPC_BASE_REG 0x40
+#define ADU_LPC_CMD_REG 0x41
+#define ADU_LPC_DATA_REG 0x42
+#define ADU_LPC_STATUS_REG 0x43
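+
+/*
+ * Assumed programming model, inferred from the handlers below: firmware
+ * writes ADU_LPC_CMD_REG with bit 0 set for a read (clear for a write),
+ * the LPC address in bits 32-63 and the size in bits 5-11; data is then
+ * returned in, or taken from, ADU_LPC_DATA_REG.
+ */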
+
+static uint64_t pnv_adu_xscom_read(void *opaque, hwaddr addr, unsigned width)
+{
+ PnvADU *adu = PNV_ADU(opaque);
+ uint32_t offset = addr >> 3;
+ uint64_t val = 0;
+
+ switch (offset) {
+ case 0x18: /* Receive status reg */
+ case 0x12: /* log register */
+ case 0x13: /* error register */
+ break;
+ case ADU_LPC_BASE_REG:
+ /*
+ * LPC Address Map in Pervasive ADU Workbook
+ *
+ * return PNV10_LPCM_BASE(chip) & PPC_BITMASK(8, 31);
+ * XXX: implement as class property, or get from LPC?
+ */
+ qemu_log_mask(LOG_UNIMP, "ADU: LPC_BASE_REG is not implemented\n");
+ break;
+ case ADU_LPC_CMD_REG:
+ val = adu->lpc_cmd_reg;
+ break;
+ case ADU_LPC_DATA_REG:
+ val = adu->lpc_data_reg;
+ break;
+ case ADU_LPC_STATUS_REG:
+ val = PPC_BIT(0); /* ack / done */
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "ADU Unimplemented read register: Ox%08x\n",
+ offset);
+ }
+
+ trace_pnv_adu_xscom_read(addr, val);
+
+ return val;
+}
+
+static bool lpc_cmd_read(PnvADU *adu)
+{
+ return !!(adu->lpc_cmd_reg & PPC_BIT(0));
+}
+
+static bool lpc_cmd_write(PnvADU *adu)
+{
+ return !lpc_cmd_read(adu);
+}
+
+static uint32_t lpc_cmd_addr(PnvADU *adu)
+{
+ return (adu->lpc_cmd_reg & PPC_BITMASK(32, 63)) >> PPC_BIT_NR(63);
+}
+
+static uint32_t lpc_cmd_size(PnvADU *adu)
+{
+ return (adu->lpc_cmd_reg & PPC_BITMASK(5, 11)) >> PPC_BIT_NR(11);
+}
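+
+/*
+ * Illustrative note on the extraction above: IBM bit numbering counts
+ * from the MSB, so PPC_BITMASK(5, 11) selects a 7-bit field and
+ * PPC_BIT_NR(11) == 63 - 11 == 52 is the right shift that brings it
+ * down to the least significant bits, i.e. size = (cmd_reg >> 52) & 0x7f.
+ */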
+
+static void pnv_adu_xscom_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ PnvADU *adu = PNV_ADU(opaque);
+ uint32_t offset = addr >> 3;
+
+ trace_pnv_adu_xscom_write(addr, val);
+
+ switch (offset) {
+ case 0x18: /* Receive status reg */
+ case 0x12: /* log register */
+ case 0x13: /* error register */
+ break;
+
+ case ADU_LPC_BASE_REG:
+ qemu_log_mask(LOG_UNIMP,
+ "ADU: Changing LPC_BASE_REG is not implemented\n");
+ break;
+
+ case ADU_LPC_CMD_REG:
+ adu->lpc_cmd_reg = val;
+ if (lpc_cmd_read(adu)) {
+ uint32_t lpc_addr = lpc_cmd_addr(adu);
+ uint32_t lpc_size = lpc_cmd_size(adu);
+ uint64_t data = 0;
+
+ pnv_lpc_opb_read(adu->lpc, lpc_addr, (void *)&data, lpc_size);
+
+ /*
+ * ADU access is performed within 8-byte aligned sectors. Smaller
+ * access sizes don't get formatted to the least significant byte,
+ * but rather appear in the data reg at the same offset as the
+ * address in memory. This shifts them into that position.
+ */
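+ /*
+ * Worked example (illustrative): a 1-byte read at an address with
+ * (lpc_addr & 7) == 5 is shifted right by 5 * 8 = 40 bits, so the byte
+ * appears in byte lane 5 (counting from the most significant byte) of
+ * the data register, matching its offset within the 8-byte sector.
+ */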
+ adu->lpc_data_reg = be64_to_cpu(data) >> ((lpc_addr & 7) * 8);
+ }
+ break;
+
+ case ADU_LPC_DATA_REG:
+ adu->lpc_data_reg = val;
+ if (lpc_cmd_write(adu)) {
+ uint32_t lpc_addr = lpc_cmd_addr(adu);
+ uint32_t lpc_size = lpc_cmd_size(adu);
+ uint64_t data;
+
+ data = cpu_to_be64(val) >> ((lpc_addr & 7) * 8); /* See above */
+ pnv_lpc_opb_write(adu->lpc, lpc_addr, (void *)&data, lpc_size);
+ }
+ break;
+
+ case ADU_LPC_STATUS_REG:
+ qemu_log_mask(LOG_UNIMP,
+ "ADU: Changing LPC_STATUS_REG is not implemented\n");
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "ADU Unimplemented write register: Ox%08x\n",
+ offset);
+ }
+}
+
+const MemoryRegionOps pnv_adu_xscom_ops = {
+ .read = pnv_adu_xscom_read,
+ .write = pnv_adu_xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void pnv_adu_realize(DeviceState *dev, Error **errp)
+{
+ PnvADU *adu = PNV_ADU(dev);
+
+ assert(adu->lpc);
+
+ /* XScom regions for ADU registers */
+ pnv_xscom_region_init(&adu->xscom_regs, OBJECT(dev),
+ &pnv_adu_xscom_ops, adu, "xscom-adu",
+ PNV9_XSCOM_ADU_SIZE);
+}
+
+static Property pnv_adu_properties[] = {
+ DEFINE_PROP_LINK("lpc", PnvADU, lpc, TYPE_PNV_LPC, PnvLpcController *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_adu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = pnv_adu_realize;
+ dc->desc = "PowerNV ADU";
+ device_class_set_props(dc, pnv_adu_properties);
+ dc->user_creatable = false;
+}
+
+static const TypeInfo pnv_adu_type_info = {
+ .name = TYPE_PNV_ADU,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(PnvADU),
+ .class_init = pnv_adu_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { } },
+};
+
+static void pnv_adu_register_types(void)
+{
+ type_register_static(&pnv_adu_type_info);
+}
+
+type_init(pnv_adu_register_types);
diff --git a/hw/ppc/pnv_chiptod.c b/hw/ppc/pnv_chiptod.c
index 3831a72..1e41fe5 100644
--- a/hw/ppc/pnv_chiptod.c
+++ b/hw/ppc/pnv_chiptod.c
@@ -364,8 +364,7 @@ static void pnv_chiptod_xscom_write(void *opaque, hwaddr addr,
qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg"
" TOD_MOVE_TOD_TO_TB_REG with no slave target\n");
} else {
- PowerPCCPU *cpu = chiptod->slave_pc_target->threads[0];
- CPUPPCState *env = &cpu->env;
+ PnvCore *pc = chiptod->slave_pc_target;
/*
* Moving TOD to TB will set the TB of all threads in a
@@ -377,8 +376,8 @@ static void pnv_chiptod_xscom_write(void *opaque, hwaddr addr,
* thread 0.
*/
- if (env->pnv_tod_tbst.tb_ready_for_tod) {
- env->pnv_tod_tbst.tod_sent_to_tb = 1;
+ if (pc->tod_state.tb_ready_for_tod) {
+ pc->tod_state.tod_sent_to_tb = 1;
} else {
qemu_log_mask(LOG_GUEST_ERROR, "pnv_chiptod: xscom write reg"
" TOD_MOVE_TOD_TO_TB_REG with TB not ready to"
diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c
index f40ab72..a306939 100644
--- a/hw/ppc/pnv_core.c
+++ b/hw/ppc/pnv_core.c
@@ -58,6 +58,10 @@ static void pnv_core_cpu_reset(PnvCore *pc, PowerPCCPU *cpu)
env->nip = 0x10;
env->msr |= MSR_HVB; /* Hypervisor mode */
env->spr[SPR_HRMOR] = pc->hrmor;
+ if (pc->big_core) {
+ /* Clear "small core" bit on Power9/10 (this is set in default PVR) */
+ env->spr[SPR_PVR] &= ~PPC_BIT(51);
+ }
hreg_compute_hflags(env);
ppc_maybe_interrupt(env);
@@ -181,16 +185,43 @@ static const MemoryRegionOps pnv_core_power9_xscom_ops = {
*/
#define PNV10_XSCOM_EC_CORE_THREAD_STATE 0x412
+#define PNV10_XSCOM_EC_CORE_THREAD_INFO 0x413
+#define PNV10_XSCOM_EC_CORE_DIRECT_CONTROLS 0x449
+#define PNV10_XSCOM_EC_CORE_RAS_STATUS 0x454
static uint64_t pnv_core_power10_xscom_read(void *opaque, hwaddr addr,
unsigned int width)
{
+ PnvCore *pc = PNV_CORE(opaque);
+ int nr_threads = CPU_CORE(pc)->nr_threads;
+ int i;
uint32_t offset = addr >> 3;
uint64_t val = 0;
switch (offset) {
case PNV10_XSCOM_EC_CORE_THREAD_STATE:
- val = 0;
+ for (i = 0; i < nr_threads; i++) {
+ PowerPCCPU *cpu = pc->threads[i];
+ CPUState *cs = CPU(cpu);
+
+ if (cs->halted) {
+ val |= PPC_BIT(56 + i);
+ }
+ }
+ if (pc->lpar_per_core) {
+ val |= PPC_BIT(62);
+ }
+ break;
+ case PNV10_XSCOM_EC_CORE_THREAD_INFO:
+ break;
+ case PNV10_XSCOM_EC_CORE_RAS_STATUS:
+ for (i = 0; i < nr_threads; i++) {
+ PowerPCCPU *cpu = pc->threads[i];
+ CPUState *cs = CPU(cpu);
+ if (cs->stopped) {
+ val |= PPC_BIT(0 + 8 * i) | PPC_BIT(1 + 8 * i);
+ }
+ }
break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp read 0x%08x\n", __func__,
@@ -203,9 +234,46 @@ static uint64_t pnv_core_power10_xscom_read(void *opaque, hwaddr addr,
static void pnv_core_power10_xscom_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int width)
{
+ PnvCore *pc = PNV_CORE(opaque);
+ int nr_threads = CPU_CORE(pc)->nr_threads;
+ int i;
uint32_t offset = addr >> 3;
switch (offset) {
+ case PNV10_XSCOM_EC_CORE_DIRECT_CONTROLS:
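+ /*
+ * Bit layout, per the handler below: each thread i owns one byte of
+ * DIRECT_CONTROLS, with PPC_BIT(3 + 8 * i) = clear maint,
+ * PPC_BIT(4 + 8 * i) = sreset, PPC_BIT(6 + 8 * i) = start and
+ * PPC_BIT(7 + 8 * i) = stop.
+ */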
+ for (i = 0; i < nr_threads; i++) {
+ PowerPCCPU *cpu = pc->threads[i];
+ CPUState *cs = CPU(cpu);
+
+ if (val & PPC_BIT(7 + 8 * i)) { /* stop */
+ val &= ~PPC_BIT(7 + 8 * i);
+ cpu_pause(cs);
+ }
+ if (val & PPC_BIT(6 + 8 * i)) { /* start */
+ val &= ~PPC_BIT(6 + 8 * i);
+ cpu_resume(cs);
+ }
+ if (val & PPC_BIT(4 + 8 * i)) { /* sreset */
+ val &= ~PPC_BIT(4 + 8 * i);
+ pnv_cpu_do_nmi_resume(cs);
+ }
+ if (val & PPC_BIT(3 + 8 * i)) { /* clear maint */
+ /*
+ * Hardware has very particular cases for where clear maint
+ * must be used and where start must be used to resume a
+ * thread. These are not modelled exactly; just treat
+ * clear maint the same as start.
+ */
+ val &= ~PPC_BIT(3 + 8 * i);
+ cpu_resume(cs);
+ }
+ }
+ if (val) {
+ qemu_log_mask(LOG_UNIMP, "%s: unimp bits in DIRECT_CONTROLS "
+ "0x%016" PRIx64 "\n", __func__, val);
+ }
+ break;
+
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp write 0x%08x\n", __func__,
offset);
@@ -227,8 +295,9 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp,
{
CPUPPCState *env = &cpu->env;
int core_hwid;
- ppc_spr_t *pir = &env->spr_cb[SPR_PIR];
- ppc_spr_t *tir = &env->spr_cb[SPR_TIR];
+ ppc_spr_t *pir_spr = &env->spr_cb[SPR_PIR];
+ ppc_spr_t *tir_spr = &env->spr_cb[SPR_TIR];
+ uint32_t pir, tir;
Error *local_err = NULL;
PnvChipClass *pcc = PNV_CHIP_GET_CLASS(pc->chip);
@@ -244,8 +313,20 @@ static void pnv_core_cpu_realize(PnvCore *pc, PowerPCCPU *cpu, Error **errp,
core_hwid = object_property_get_uint(OBJECT(pc), "hwid", &error_abort);
- tir->default_value = thread_index;
- pir->default_value = pcc->chip_pir(pc->chip, core_hwid, thread_index);
+ pcc->get_pir_tir(pc->chip, core_hwid, thread_index, &pir, &tir);
+ pir_spr->default_value = pir;
+ tir_spr->default_value = tir;
+
+ if (pc->big_core) {
+ /* 2 "small cores" get the same core index for SMT operations */
+ env->core_index = core_hwid >> 1;
+ } else {
+ env->core_index = core_hwid;
+ }
+
+ if (pc->lpar_per_core) {
+ cpu_ppc_set_1lpar(cpu);
+ }
/* Set time-base frequency to 512 MHz */
cpu_ppc_tb_init(env, PNV_TIMEBASE_FREQ);
@@ -278,16 +359,22 @@ static void pnv_core_realize(DeviceState *dev, Error **errp)
pc->threads = g_new(PowerPCCPU *, cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
PowerPCCPU *cpu;
+ PnvCPUState *pnv_cpu;
obj = object_new(typename);
cpu = POWERPC_CPU(obj);
pc->threads[i] = POWERPC_CPU(obj);
+ if (cc->nr_threads > 1) {
+ cpu->env.has_smt_siblings = true;
+ }
snprintf(name, sizeof(name), "thread[%d]", i);
object_property_add_child(OBJECT(pc), name, obj);
cpu->machine_data = g_new0(PnvCPUState, 1);
+ pnv_cpu = pnv_cpu_state(cpu);
+ pnv_cpu->pnv_core = pc;
object_unref(obj);
}
@@ -344,6 +431,10 @@ static void pnv_core_unrealize(DeviceState *dev)
static Property pnv_core_properties[] = {
DEFINE_PROP_UINT32("hwid", PnvCore, hwid, 0),
DEFINE_PROP_UINT64("hrmor", PnvCore, hrmor, 0),
+ DEFINE_PROP_BOOL("big-core", PnvCore, big_core, false),
+ DEFINE_PROP_BOOL("quirk-tb-big-core", PnvCore, tod_state.big_core_quirk,
+ false),
+ DEFINE_PROP_BOOL("lpar-per-core", PnvCore, lpar_per_core, false),
DEFINE_PROP_LINK("chip", PnvCore, chip, TYPE_PNV_CHIP, PnvChip *),
DEFINE_PROP_END_OF_LIST(),
};
@@ -504,6 +595,7 @@ static const MemoryRegionOps pnv_quad_power10_xscom_ops = {
static uint64_t pnv_qme_power10_xscom_read(void *opaque, hwaddr addr,
unsigned int width)
{
+ PnvQuad *eq = PNV_QUAD(opaque);
uint32_t offset = addr >> 3;
uint64_t val = -1;
@@ -511,10 +603,14 @@ static uint64_t pnv_qme_power10_xscom_read(void *opaque, hwaddr addr,
* Fourth nibble selects the core within a quad, mask it to process reads
* for any core.
*/
- switch (offset & ~0xf000) {
- case P10_QME_SPWU_HYP:
+ switch (offset & ~PPC_BITMASK32(16, 19)) {
case P10_QME_SSH_HYP:
- return 0;
+ val = 0;
+ if (eq->special_wakeup_done) {
+ val |= PPC_BIT(1); /* SPWU DONE */
+ val |= PPC_BIT(4); /* SSH SPWU DONE */
+ }
+ break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp read 0x%08x\n", __func__,
offset);
@@ -526,9 +622,22 @@ static uint64_t pnv_qme_power10_xscom_read(void *opaque, hwaddr addr,
static void pnv_qme_power10_xscom_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int width)
{
+ PnvQuad *eq = PNV_QUAD(opaque);
uint32_t offset = addr >> 3;
+ bool set;
+ int i;
- switch (offset) {
+ switch (offset & ~PPC_BITMASK32(16, 19)) {
+ case P10_QME_SPWU_HYP:
+ set = !!(val & PPC_BIT(0));
+ eq->special_wakeup_done = set;
+ for (i = 0; i < 4; i++) {
+ /* These bits select cores in the quad */
+ if (offset & PPC_BIT32(16 + i)) {
+ eq->special_wakeup[i] = set;
+ }
+ }
+ break;
default:
qemu_log_mask(LOG_UNIMP, "%s: unimp write 0x%08x\n", __func__,
offset);
diff --git a/hw/ppc/pnv_lpc.c b/hw/ppc/pnv_lpc.c
index d692858..f8aad95 100644
--- a/hw/ppc/pnv_lpc.c
+++ b/hw/ppc/pnv_lpc.c
@@ -64,6 +64,7 @@ enum {
#define LPC_HC_IRQSER_START_4CLK 0x00000000
#define LPC_HC_IRQSER_START_6CLK 0x01000000
#define LPC_HC_IRQSER_START_8CLK 0x02000000
+#define LPC_HC_IRQSER_AUTO_CLEAR 0x00800000
#define LPC_HC_IRQMASK 0x34 /* same bit defs as LPC_HC_IRQSTAT */
#define LPC_HC_IRQSTAT 0x38
#define LPC_HC_IRQ_SERIRQ0 0x80000000 /* all bits down to ... */
@@ -235,16 +236,16 @@ int pnv_dt_lpc(PnvChip *chip, void *fdt, int root_offset, uint64_t lpcm_addr,
* TODO: rework to use address_space_stq() and address_space_ldq()
* instead.
*/
-static bool opb_read(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
- int sz)
+bool pnv_lpc_opb_read(PnvLpcController *lpc, uint32_t addr,
+ uint8_t *data, int sz)
{
/* XXX Handle access size limits and FW read caching here */
return !address_space_read(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED,
data, sz);
}
-static bool opb_write(PnvLpcController *lpc, uint32_t addr, uint8_t *data,
- int sz)
+bool pnv_lpc_opb_write(PnvLpcController *lpc, uint32_t addr,
+ uint8_t *data, int sz)
{
/* XXX Handle access size limits here */
return !address_space_write(&lpc->opb_as, addr, MEMTXATTRS_UNSPECIFIED,
@@ -276,7 +277,7 @@ static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd)
}
if (cmd & ECCB_CTL_READ) {
- success = opb_read(lpc, opb_addr, data, sz);
+ success = pnv_lpc_opb_read(lpc, opb_addr, data, sz);
if (success) {
lpc->eccb_stat_reg = ECCB_STAT_OP_DONE |
(((uint64_t)data[0]) << 24 |
@@ -293,7 +294,7 @@ static void pnv_lpc_do_eccb(PnvLpcController *lpc, uint64_t cmd)
data[2] = lpc->eccb_data_reg >> 8;
data[3] = lpc->eccb_data_reg;
- success = opb_write(lpc, opb_addr, data, sz);
+ success = pnv_lpc_opb_write(lpc, opb_addr, data, sz);
lpc->eccb_stat_reg = ECCB_STAT_OP_DONE;
}
/* XXX Which error bit (if any) to signal OPB error ? */
@@ -420,32 +421,90 @@ static const MemoryRegionOps pnv_lpc_mmio_ops = {
.endianness = DEVICE_BIG_ENDIAN,
};
-static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
+/* Program the POWER9 LPC irq to PSI serirq routing table */
+static void pnv_lpc_eval_serirq_routes(PnvLpcController *lpc)
{
- bool lpc_to_opb_irq = false;
+ int irq;
- /* Update LPC controller to OPB line */
- if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) {
- uint32_t irqs;
+ if (!lpc->psi_has_serirq) {
+ if ((lpc->opb_irq_route0 & PPC_BITMASK(8, 13)) ||
+ (lpc->opb_irq_route1 & PPC_BITMASK(4, 31))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "OPB: setting serirq routing on POWER8 system, ignoring.\n");
+ }
+ return;
+ }
- irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
- lpc_to_opb_irq = (irqs != 0);
+ for (irq = 0; irq <= 13; irq++) {
+ int serirq = (lpc->opb_irq_route1 >> (31 - 5 - (irq * 2))) & 0x3;
+ lpc->irq_to_serirq_route[irq] = serirq;
}
- /* We don't honor the polarity register, it's pointless and unused
- * anyway
- */
- if (lpc_to_opb_irq) {
- lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC;
- } else {
- lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC;
+ for (irq = 14; irq < ISA_NUM_IRQS; irq++) {
+ int serirq = (lpc->opb_irq_route0 >> (31 - 9 - (irq * 2))) & 0x3;
+ lpc->irq_to_serirq_route[irq] = serirq;
}
+}
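+
+/*
+ * Illustrative: for irq 0 the route is taken from opb_irq_route1
+ * bits 4:5 (IBM numbering, i.e. (route1 >> 26) & 0x3), selecting one
+ * of the four SERIRQ output lines.
+ */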
- /* Update OPB internal latch */
- lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask;
+static void pnv_lpc_eval_irqs(PnvLpcController *lpc)
+{
+ uint32_t active_irqs = 0;
+
+ if (lpc->lpc_hc_irqstat & PPC_BITMASK32(16, 31)) {
+ qemu_log_mask(LOG_UNIMP, "LPC HC Unimplemented irqs in IRQSTAT: "
+ "0x%08"PRIx32"\n", lpc->lpc_hc_irqstat);
+ }
+
+ if (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_EN) {
+ active_irqs = lpc->lpc_hc_irqstat & lpc->lpc_hc_irqmask;
+ }
/* Reflect the interrupt */
- qemu_set_irq(lpc->psi_irq, lpc->opb_irq_stat != 0);
+ if (!lpc->psi_has_serirq) {
+ /*
+ * POWER8 ORs all irqs together (also with LPCHC internal interrupt
+ * sources) and outputs a single line that raises the PSI LPCHC irq
+ * which then latches an OPB IRQ status register that sends the irq
+ * to PSI.
+ *
+ * We don't honor the polarity register, it's pointless and unused
+ * anyway
+ */
+ if (active_irqs) {
+ lpc->opb_irq_input |= OPB_MASTER_IRQ_LPC;
+ } else {
+ lpc->opb_irq_input &= ~OPB_MASTER_IRQ_LPC;
+ }
+
+ /* Update OPB internal latch */
+ lpc->opb_irq_stat |= lpc->opb_irq_input & lpc->opb_irq_mask;
+
+ qemu_set_irq(lpc->psi_irq_lpchc, lpc->opb_irq_stat != 0);
+ } else {
+ /*
+ * POWER9 and POWER10 have routing fields in OPB master registers that
+ * send LPC irqs to 4 output lines that raise the PSI SERIRQ irqs.
+ * These don't appear to get latched into an OPB register like the
+ * LPCHC irqs.
+ *
+ * POWER9 LPC controller internal irqs still go via the OPB
+ * and LPCHC PSI irqs like P8, but we have no such internal sources
+ * modelled yet.
+ */
+ bool serirq_out[4] = { false, false, false, false };
+ int irq;
+
+ for (irq = 0; irq < ISA_NUM_IRQS; irq++) {
+ if (active_irqs & (LPC_HC_IRQ_SERIRQ0 >> irq)) {
+ serirq_out[lpc->irq_to_serirq_route[irq]] = true;
+ }
+ }
+
+ qemu_set_irq(lpc->psi_irq_serirq[0], serirq_out[0]);
+ qemu_set_irq(lpc->psi_irq_serirq[1], serirq_out[1]);
+ qemu_set_irq(lpc->psi_irq_serirq[2], serirq_out[2]);
+ qemu_set_irq(lpc->psi_irq_serirq[3], serirq_out[3]);
+ }
}
static uint64_t lpc_hc_read(void *opaque, hwaddr addr, unsigned size)
@@ -505,7 +564,14 @@ static void lpc_hc_write(void *opaque, hwaddr addr, uint64_t val,
pnv_lpc_eval_irqs(lpc);
break;
case LPC_HC_IRQSTAT:
- lpc->lpc_hc_irqstat &= ~val;
+ /*
+ * This register is write-to-clear for the IRQSER (LPC device IRQ)
+ * status. However if the device has not de-asserted its interrupt
+ * that will just raise this IRQ status bit again. Model this by
+ * keeping track of the inputs and only clearing if the inputs are
+ * deasserted.
+ */
+ lpc->lpc_hc_irqstat &= ~(val & ~lpc->lpc_hc_irq_inputs);
pnv_lpc_eval_irqs(lpc);
break;
case LPC_HC_ERROR_ADDRESS:
@@ -536,10 +602,10 @@ static uint64_t opb_master_read(void *opaque, hwaddr addr, unsigned size)
uint64_t val = 0xfffffffffffffffful;
switch (addr) {
- case OPB_MASTER_LS_ROUTE0: /* TODO */
+ case OPB_MASTER_LS_ROUTE0:
val = lpc->opb_irq_route0;
break;
- case OPB_MASTER_LS_ROUTE1: /* TODO */
+ case OPB_MASTER_LS_ROUTE1:
val = lpc->opb_irq_route1;
break;
case OPB_MASTER_LS_IRQ_STAT:
@@ -568,11 +634,15 @@ static void opb_master_write(void *opaque, hwaddr addr,
PnvLpcController *lpc = opaque;
switch (addr) {
- case OPB_MASTER_LS_ROUTE0: /* TODO */
+ case OPB_MASTER_LS_ROUTE0:
lpc->opb_irq_route0 = val;
+ pnv_lpc_eval_serirq_routes(lpc);
+ pnv_lpc_eval_irqs(lpc);
break;
- case OPB_MASTER_LS_ROUTE1: /* TODO */
+ case OPB_MASTER_LS_ROUTE1:
lpc->opb_irq_route1 = val;
+ pnv_lpc_eval_serirq_routes(lpc);
+ pnv_lpc_eval_irqs(lpc);
break;
case OPB_MASTER_LS_IRQ_STAT:
lpc->opb_irq_stat &= ~val;
@@ -657,6 +727,8 @@ static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp)
PnvLpcClass *plc = PNV_LPC_GET_CLASS(dev);
Error *local_err = NULL;
+ object_property_set_bool(OBJECT(lpc), "psi-serirq", true, &error_abort);
+
plc->parent_realize(dev, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -666,6 +738,9 @@ static void pnv_lpc_power9_realize(DeviceState *dev, Error **errp)
/* P9 uses a MMIO region */
memory_region_init_io(&lpc->xscom_regs, OBJECT(lpc), &pnv_lpc_mmio_ops,
lpc, "lpcm", PNV9_LPCM_SIZE);
+
+ /* P9 LPC routes ISA irqs to 4 PSI SERIRQ lines */
+ qdev_init_gpio_out_named(dev, lpc->psi_irq_serirq, "SERIRQ", 4);
}
static void pnv_lpc_power9_class_init(ObjectClass *klass, void *data)
@@ -744,13 +819,19 @@ static void pnv_lpc_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion(&lpc->opb_mr, LPC_HC_REGS_OPB_ADDR,
&lpc->lpc_hc_regs);
- qdev_init_gpio_out(dev, &lpc->psi_irq, 1);
+ qdev_init_gpio_out_named(dev, &lpc->psi_irq_lpchc, "LPCHC", 1);
}
+static Property pnv_lpc_properties[] = {
+ DEFINE_PROP_BOOL("psi-serirq", PnvLpcController, psi_has_serirq, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void pnv_lpc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ device_class_set_props(dc, pnv_lpc_properties);
dc->realize = pnv_lpc_realize;
dc->desc = "PowerNV LPC Controller";
dc->user_creatable = false;
@@ -796,18 +877,34 @@ static void pnv_lpc_isa_irq_handler_cpld(void *opaque, int n, int level)
}
if (pnv->cpld_irqstate != old_state) {
- qemu_set_irq(lpc->psi_irq, pnv->cpld_irqstate != 0);
+ qemu_set_irq(lpc->psi_irq_lpchc, pnv->cpld_irqstate != 0);
}
}
static void pnv_lpc_isa_irq_handler(void *opaque, int n, int level)
{
PnvLpcController *lpc = PNV_LPC(opaque);
+ uint32_t irq_bit = LPC_HC_IRQ_SERIRQ0 >> n;
- /* The Naples HW latches the 1 levels, clearing is done by SW */
if (level) {
- lpc->lpc_hc_irqstat |= LPC_HC_IRQ_SERIRQ0 >> n;
+ lpc->lpc_hc_irq_inputs |= irq_bit;
+
+ /*
+ * The LPC HC in Naples and later latches LPC IRQ into a bit field in
+ * the IRQSTAT register, and that drives the PSI IRQ to the IC.
+ * Software clears this bit manually (see LPC_HC_IRQSTAT handler).
+ */
+ lpc->lpc_hc_irqstat |= irq_bit;
pnv_lpc_eval_irqs(lpc);
+ } else {
+ lpc->lpc_hc_irq_inputs &= ~irq_bit;
+
+ /* POWER9 adds an auto-clear mode that clears IRQSTAT bits on EOI */
+ if (lpc->psi_has_serirq &&
+ (lpc->lpc_hc_irqser_ctrl & LPC_HC_IRQSER_AUTO_CLEAR)) {
+ lpc->lpc_hc_irqstat &= ~irq_bit;
+ pnv_lpc_eval_irqs(lpc);
+ }
}
}
@@ -838,6 +935,7 @@ ISABus *pnv_lpc_isa_create(PnvLpcController *lpc, bool use_cpld, Error **errp)
handler = pnv_lpc_isa_irq_handler;
}
+ /* POWER has a 17th irq; QEMU only implements the 16 regular device irqs */
irqs = qemu_allocate_irqs(handler, lpc, ISA_NUM_IRQS);
isa_bus_register_input_irqs(isa_bus, irqs);
diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c
index a17816d..d192bbe 100644
--- a/hw/ppc/pnv_xscom.c
+++ b/hw/ppc/pnv_xscom.c
@@ -75,11 +75,6 @@ static uint64_t xscom_read_default(PnvChip *chip, uint32_t pcba)
case PRD_P9_IPOLL_REG_MASK:
case PRD_P9_IPOLL_REG_STATUS:
- /* P9 xscom reset */
- case 0x0090018: /* Receive status reg */
- case 0x0090012: /* log register */
- case 0x0090013: /* error register */
-
/* P8 xscom reset */
case 0x2020007: /* ADU stuff, log register */
case 0x2020009: /* ADU stuff, error register */
@@ -119,10 +114,6 @@ static bool xscom_write_default(PnvChip *chip, uint32_t pcba, uint64_t val)
case 0x1010c03: /* PIBAM FIR MASK */
case 0x1010c04: /* PIBAM FIR MASK */
case 0x1010c05: /* PIBAM FIR MASK */
- /* P9 xscom reset */
- case 0x0090018: /* Receive status reg */
- case 0x0090012: /* log register */
- case 0x0090013: /* error register */
/* P8 xscom reset */
case 0x2020007: /* ADU stuff, log register */
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 98fa3aa..370d7c3 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -2195,6 +2195,7 @@ static const VMStateDescription vmstate_spapr = {
&vmstate_spapr_cap_fwnmi,
&vmstate_spapr_fwnmi,
&vmstate_spapr_cap_rpt_invalidate,
+ &vmstate_spapr_cap_ail_mode_3,
&vmstate_spapr_cap_nested_papr,
NULL
}
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index 0a15415..2f74923 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -974,6 +974,7 @@ SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER);
SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST);
SPAPR_CAP_MIG_STATE(fwnmi, SPAPR_CAP_FWNMI);
SPAPR_CAP_MIG_STATE(rpt_invalidate, SPAPR_CAP_RPT_INVALIDATE);
+SPAPR_CAP_MIG_STATE(ail_mode_3, SPAPR_CAP_AIL_MODE_3);
void spapr_caps_init(SpaprMachineState *spapr)
{
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index e7c9edd..56090ab 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -300,11 +300,13 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp)
g_autofree char *id = NULL;
CPUState *cs;
PowerPCCPU *cpu;
+ CPUPPCState *env;
obj = object_new(scc->cpu_type);
cs = CPU(obj);
cpu = POWERPC_CPU(obj);
+ env = &cpu->env;
/*
* All CPUs start halted. CPU0 is unhalted from the machine level reset code
* and the rest are explicitly started up by the guest using an RTAS call.
@@ -315,6 +317,8 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp)
return NULL;
}
+ env->core_index = cc->core_id;
+
cpu->node_id = sc->node_id;
id = g_strdup_printf("thread[%d]", i);
@@ -345,9 +349,15 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
qemu_register_reset(spapr_cpu_core_reset_handler, sc);
sc->threads = g_new0(PowerPCCPU *, cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
- sc->threads[i] = spapr_create_vcpu(sc, i, errp);
- if (!sc->threads[i] ||
- !spapr_realize_vcpu(sc->threads[i], spapr, sc, i, errp)) {
+ PowerPCCPU *cpu;
+
+ cpu = spapr_create_vcpu(sc, i, errp);
+ sc->threads[i] = cpu;
+ if (cpu && cc->nr_threads > 1) {
+ cpu->env.has_smt_siblings = true;
+ }
+
+ if (!cpu || !spapr_realize_vcpu(cpu, spapr, sc, i, errp)) {
spapr_cpu_core_unrealize(dev);
return;
}
diff --git a/hw/ppc/spapr_vhyp_mmu.c b/hw/ppc/spapr_vhyp_mmu.c
index b3dd8b3..2d41d7f 100644
--- a/hw/ppc/spapr_vhyp_mmu.c
+++ b/hw/ppc/spapr_vhyp_mmu.c
@@ -15,19 +15,6 @@
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
-#include "mmu-book3s-v3.h"
-
-
-static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
-{
- /*
- * hash value/pteg group index is normalized by HPT mask
- */
- if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
- return false;
- }
- return true;
-}
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
target_ulong opcode, target_ulong *args)
@@ -70,7 +57,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
pteh &= ~0x60ULL;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
@@ -119,7 +106,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return REMOVE_PARM;
}
@@ -250,7 +237,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
const ppc_hash_pte64_t *hptes;
target_ulong v, r;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
@@ -287,7 +274,7 @@ static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
int i, ridx, n_entries = 1;
const ppc_hash_pte64_t *hptes;
- if (!valid_ptex(cpu, ptex)) {
+ if (!ppc_hash64_valid_ptex(cpu, ptex)) {
return H_PARAMETER;
}
diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c
index 09f29be..c02eaac 100644
--- a/hw/ppc/spapr_vof.c
+++ b/hw/ppc/spapr_vof.c
@@ -28,7 +28,7 @@ target_ulong spapr_h_vof_client(PowerPCCPU *cpu, SpaprMachineState *spapr,
void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt)
{
- char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
+ g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
vof_build_dt(fdt, spapr->vof);
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index bf29bbf..1f125ce 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -95,6 +95,10 @@ vof_write(uint32_t ih, unsigned cb, const char *msg) "ih=0x%x [%u] \"%s\""
vof_avail(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64
vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx64" size=0x%"PRIx64
+# pnv_adu.c
+pnv_adu_xscom_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+pnv_adu_xscom_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+
# pnv_chiptod.c
pnv_chiptod_xscom_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
pnv_chiptod_xscom_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c
index e3b430a..b5b6514 100644
--- a/hw/ppc/vof.c
+++ b/hw/ppc/vof.c
@@ -646,7 +646,7 @@ static void vof_dt_memory_available(void *fdt, GArray *claimed, uint64_t base)
mem0_reg = fdt_getprop(fdt, offset, "reg", &proplen);
g_assert(mem0_reg && proplen == sizeof(uint32_t) * (ac + sc));
if (sc == 2) {
- mem0_end = be64_to_cpu(*(uint64_t *)(mem0_reg + sizeof(uint32_t) * ac));
+ mem0_end = ldq_be_p(mem0_reg + sizeof(uint32_t) * ac);
} else {
mem0_end = be32_to_cpu(*(uint32_t *)(mem0_reg + sizeof(uint32_t) * ac));
}
diff --git a/hw/ssi/Kconfig b/hw/ssi/Kconfig
index 83ee53c..8d180de 100644
--- a/hw/ssi/Kconfig
+++ b/hw/ssi/Kconfig
@@ -24,3 +24,7 @@ config STM32F2XX_SPI
config BCM2835_SPI
bool
select SSI
+
+config PNV_SPI
+ bool
+ select SSI
diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build
index b999aeb..b7ad7fc 100644
--- a/hw/ssi/meson.build
+++ b/hw/ssi/meson.build
@@ -12,3 +12,4 @@ system_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c'))
system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c'))
system_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c'))
system_ss.add(when: 'CONFIG_BCM2835_SPI', if_true: files('bcm2835_spi.c'))
+system_ss.add(when: 'CONFIG_PNV_SPI', if_true: files('pnv_spi.c'))
diff --git a/hw/ssi/pnv_spi.c b/hw/ssi/pnv_spi.c
new file mode 100644
index 0000000..c1297ab
--- /dev/null
+++ b/hw/ssi/pnv_spi.c
@@ -0,0 +1,1268 @@
+/*
+ * QEMU PowerPC SPI model
+ *
+ * Copyright (c) 2024, IBM Corporation.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "hw/qdev-properties.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "hw/ssi/pnv_spi.h"
+#include "hw/ssi/pnv_spi_regs.h"
+#include "hw/ssi/ssi.h"
+#include <libfdt.h>
+#include "hw/irq.h"
+#include "trace.h"
+
+#define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
+#define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)
+
+/*
+ * Macro from include/hw/ppc/fdt.h
+ * fdt.h cannot be included here as it contains PPC target-specific dependencies.
+ */
+#define _FDT(exp) \
+ do { \
+ int _ret = (exp); \
+ if (_ret < 0) { \
+ qemu_log_mask(LOG_GUEST_ERROR, \
+ "error creating device tree: %s: %s", \
+ #exp, fdt_strerror(_ret)); \
+ exit(1); \
+ } \
+ } while (0)
+
+/* PnvXferBuffer */
+typedef struct PnvXferBuffer {
+
+ uint32_t len;
+ uint8_t *data;
+
+} PnvXferBuffer;
+
+/* pnv_spi_xfer_buffer_methods */
+static PnvXferBuffer *pnv_spi_xfer_buffer_new(void)
+{
+ PnvXferBuffer *payload = g_malloc0(sizeof(*payload));
+
+ return payload;
+}
+
+static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload)
+{
+ g_free(payload->data);
+ g_free(payload);
+}
+
+static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload,
+ uint32_t offset, uint32_t length)
+{
+ if (payload->len < (offset + length)) {
+ payload->len = offset + length;
+ payload->data = g_realloc(payload->data, payload->len);
+ }
+ return &payload->data[offset];
+}
+
+static bool does_rdr_match(PnvSpi *s)
+{
+ /*
+ * According to the spec, the mask bits that are 0 are compared and the
+ * bits that are 1 are ignored.
+ */
+ uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK,
+ s->regs[SPI_MM_REG]);
+ uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL,
+ s->regs[SPI_MM_REG]);
+
+ if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
+ GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
+ return true;
+ }
+ return false;
+}
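+
+/*
+ * Illustrative: with a match mask of 0xff00 only the low byte is
+ * compared, so a match value of 0x0042 matches any received word
+ * ending in 0x42.
+ */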
+
+static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
+{
+ uint8_t byte;
+
+ /*
+ * Offset is an index between 0 and PNV_SPI_REG_SIZE - 1
+ * Check the offset before using it.
+ */
+ if (offset < PNV_SPI_REG_SIZE) {
+ byte = (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
+ } else {
+ /*
+ * Log an error and return 0xFF since we have to assign something
+ * to byte before returning.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
+ "from TDR\n", offset);
+ byte = 0xff;
+ }
+ return byte;
+}
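+
+/*
+ * Illustrative: offset 0 selects the most significant byte of the
+ * TDR (shift by 56) and offset 7 the least significant (shift by 0).
+ */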
+
+static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes,
+ uint8_t ecc_count, uint8_t shift_in_count)
+{
+ uint8_t byte;
+ int count = 0;
+
+ while (count < nr_bytes) {
+ shift_in_count++;
+ if ((ecc_count != 0) &&
+ (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
+ shift_in_count = 0;
+ } else {
+ byte = read_buf[count];
+ trace_pnv_spi_shift_rx(byte, count);
+ s->regs[SPI_RCV_DATA_REG] = (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
+ }
+ count++;
+ } /* end of while */
+ return shift_in_count;
+}
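+
+/*
+ * Illustrative: with ecc_count == 1, every (PNV_SPI_REG_SIZE + 1)th
+ * byte shifted in (the ECC byte following each 8 data bytes) is
+ * skipped rather than shifted into the RDR.
+ */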
+
+static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
+{
+ uint8_t ecc_count;
+ uint8_t shift_in_count;
+
+ /*
+ * Processing here must handle:
+ * - Which bytes in the payload we should move to the RDR
+ * - Explicit mode counter configuration settings
+ * - RDR full and RDR overrun status
+ */
+
+ /*
+ * First check that the response payload is the exact same
+ * number of bytes as the request payload was
+ */
+ if (rsp_payload->len != (s->N1_bytes + s->N2_bytes)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
+ "bytes, expected %d, got %d\n",
+ (s->N1_bytes + s->N2_bytes), rsp_payload->len);
+ } else {
+ uint8_t ecc_control;
+ trace_pnv_spi_rx_received(rsp_payload->len);
+ trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
+ s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
+ /*
+ * Adding an ECC count lets us know when we have found a payload byte
+ * that was shifted in but cannot be loaded into RDR. Bits 29-30 of
+ * the clock_config_reset_control register equal to either 0b00 or 0b10
+ * indicate that we are taking in data with ECC and either applying
+ * the ECC or discarding it.
+ */
+ ecc_count = 0;
+ ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
+ if (ecc_control == 0 || ecc_control == 2) {
+ ecc_count = 1;
+ }
+ /*
+ * Use the N1_rx and N2_rx counts to control shifting data from the
+ * payload into the RDR. Keep an overall count of the number of bytes
+ * shifted into RDR so we can discard every 9th byte when ECC is
+ * enabled.
+ */
+ shift_in_count = 0;
+ /* Handle the N1 portion of the frame first */
+ if (s->N1_rx != 0) {
+ trace_pnv_spi_rx_read_N1frame();
+ shift_in_count = read_from_frame(s, &rsp_payload->data[0],
+ s->N1_bytes, ecc_count, shift_in_count);
+ }
+ /* Handle the N2 portion of the frame */
+ if (s->N2_rx != 0) {
+ trace_pnv_spi_rx_read_N2frame();
+ shift_in_count = read_from_frame(s,
+ &rsp_payload->data[s->N1_bytes], s->N2_bytes,
+ ecc_count, shift_in_count);
+ }
+ if ((s->N1_rx + s->N2_rx) > 0) {
+ /*
+ * Data was received so handle RDR status.
+ * It is easier to handle RDR_full and RDR_overrun status here
+ * since the RDR register's shift_byte_in method is called
+ * multiple times in a row. Controlling RDR status is done here
+ * instead of in the RDR scoped methods for that reason.
+ */
+ if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
+ /*
+ * Data was shifted into the RDR before having been read
+ * causing previous data to have been overrun.
+ */
+ s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
+ } else {
+ /*
+ * Set status to indicate that the received data register is
+ * full. This flag is only cleared once the RDR is unloaded.
+ */
+ s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
+ }
+ }
+ } /* end of else */
+} /* end of spi_response() */
+
+static void transfer(PnvSpi *s, PnvXferBuffer *payload)
+{
+ uint32_t tx;
+ uint32_t rx;
+ PnvXferBuffer *rsp_payload = NULL;
+
+ rsp_payload = pnv_spi_xfer_buffer_new();
+ for (int offset = 0; offset < payload->len; offset += s->transfer_len) {
+ tx = 0;
+ for (int i = 0; i < s->transfer_len; i++) {
+ if ((offset + i) >= payload->len) {
+ tx <<= 8;
+ } else {
+ tx = (tx << 8) | payload->data[offset + i];
+ }
+ }
+ rx = ssi_transfer(s->ssi_bus, tx);
+ for (int i = 0; i < s->transfer_len; i++) {
+ if ((offset + i) >= payload->len) {
+ break;
+ }
+ *(pnv_spi_xfer_buffer_write_ptr(rsp_payload, rsp_payload->len, 1)) =
+ (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
+ }
+ }
+ if (rsp_payload != NULL) {
+ spi_response(s, s->N1_bits, rsp_payload);
+ }
+}
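+
+/*
+ * Illustrative: with transfer_len == 4, a 6-byte payload goes out as
+ * two ssi_transfer() words; the second word is padded with two zero
+ * bytes and only its two valid response bytes are kept.
+ */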
+
+static inline uint8_t get_seq_index(PnvSpi *s)
+{
+ return GETFIELD(SPI_STS_SEQ_INDEX, s->status);
+}
+
+static inline void next_sequencer_fsm(PnvSpi *s)
+{
+ uint8_t seq_index = get_seq_index(s);
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, (seq_index + 1));
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
+}
+
+/*
+ * Calculate the N1 counters based on passed in opcode and
+ * internal register values.
+ * The method assumes that the opcode is a Shift_N1 opcode
+ * and doesn't test it.
+ * The counters returned are:
+ * N1 bits: Number of bits in the payload data that are significant
+ * to the responder.
+ * N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
+ * N1_tx: Total number of bytes taken from TDR for N1
+ * N1_rx: Total number of bytes taken from the payload for N1
+ */
+static void calculate_N1(PnvSpi *s, uint8_t opcode)
+{
+ /*
+ * Shift_N1 opcode form: 0x3M
+ * Implicit mode:
+ * If M != 0 the shift count is M bytes and M is the number of tx bytes.
+ * Forced Implicit mode:
+ * M is the shift count but tx and rx is determined by the count control
+ * register fields. Note that we only check for forced Implicit mode when
+ * M != 0 since the mode doesn't make sense when M = 0.
+ * Explicit mode:
+ * If M == 0 then shift count is number of bits defined in the
+ * Counter Configuration Register's shift_count_N1 field.
+ */
+ if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
+ /* Explicit mode */
+ s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
+ s->N1_bytes = (s->N1_bits + 7) / 8;
+ s->N1_tx = 0;
+ s->N1_rx = 0;
+ /* If tx count control for N1 is set, load the tx value */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N1_tx = s->N1_bytes;
+ }
+ /* If rx count control for N1 is set, load the rx value */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N1_rx = s->N1_bytes;
+ }
+ } else {
+ /* Implicit mode/Forced Implicit mode, use M field from opcode */
+ s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ s->N1_bits = s->N1_bytes * 8;
+ /*
+ * Assume that we are going to transmit the count
+ * (pure Implicit only)
+ */
+ s->N1_tx = s->N1_bytes;
+ s->N1_rx = 0;
+ /* Let Forced Implicit mode have an effect on the counts */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ /*
+ * If Forced Implicit mode and count control doesn't
+ * indicate transmit then reset the tx count to 0
+ */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2,
+ s->regs[SPI_CTR_CFG_REG]) == 0) {
+ s->N1_tx = 0;
+ }
+ /* If rx count control for N1 is set, load the rx value */
+ if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3,
+ s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N1_rx = s->N1_bytes;
+ }
+ }
+ }
+ /*
+ * Enforce an upper limit on the size of N1 that is equal to the known size
+ * of the shift register, 64 bits or 72 bits if ECC is enabled.
+ * If the size exceeds 72 bits it is a user error so log an error,
+ * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
+ * error bit.
+ */
+ uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
+ s->regs[SPI_CLK_CFG_REG]);
+ if (ecc_control == 0 || ecc_control == 2) {
+ if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
+ "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
+ s->N1_bytes, s->N1_bits);
+ s->N1_bytes = PNV_SPI_REG_SIZE + 1;
+ s->N1_bits = s->N1_bytes * 8;
+ }
+ } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
+ "bytes = 0x%x, bits = 0x%x\n",
+ s->N1_bytes, s->N1_bits);
+ s->N1_bytes = PNV_SPI_REG_SIZE;
+ s->N1_bits = s->N1_bytes * 8;
+ }
+} /* end of calculate_N1 */
+
+/*
+ * Shift_N1 operation handler method
+ */
+static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
+ PnvXferBuffer **payload, bool send_n1_alone)
+{
+ uint8_t n1_count;
+ bool stop = false;
+
+ /*
+ * If there isn't a current payload left over from a stopped sequence
+ * create a new one.
+ */
+ if (*payload == NULL) {
+ *payload = pnv_spi_xfer_buffer_new();
+ }
+ /*
+ * Use a combination of N1 counters to build the N1 portion of the
+ * transmit payload.
+ * We only care about transmit at this time since the request payload
+ * only represents data going out on the controller output line.
+ * Leave mode specific considerations in the calculate function since
+ * all we really care about are counters that tell us exactly how
+ * many bytes are in the payload and how many of those bytes to
+ * include from the TDR into the payload.
+ */
+ calculate_N1(s, opcode);
+ trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
+ s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
+ /*
+ * Zero out the N2 counters here in case there is no N2 operation following
+ * the N1 operation in the sequencer. This keeps leftover N2 information
+ * from interfering with spi_response logic.
+ */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ /*
+ * N1_bytes is the overall size of the N1 portion of the frame regardless of
+ * whether N1 is used for tx, rx or both. Loop over the size to build a
+ * payload that is N1_bytes long.
+ * N1_tx is the count of bytes to take from the TDR and "shift" into the
+ * frame which means append those bytes to the payload for the N1 portion
+ * of the frame.
+ * If N1_tx is 0, or if the count exceeds the size of the TDR, append 0xFF to
+ * the frame until the overall N1 count is reached.
+ */
+ n1_count = 0;
+ while (n1_count < s->N1_bytes) {
+ /*
+ * Assuming that if N1_tx is not equal to 0 then it is the same as
+ * N1_bytes.
+ */
+ if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {
+
+ if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
+ /*
+ * Note that we are only appending to the payload IF the TDR
+ * is full otherwise we don't touch the payload because we are
+ * going to NOT send the payload and instead tell the sequencer
+ * that called us to stop and wait for a TDR write so we have
+ * data to load into the payload.
+ */
+ uint8_t n1_byte = 0x00;
+ n1_byte = get_from_offset(s, n1_count);
+ trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
+ *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) =
+ n1_byte;
+ } else {
+ /*
+ * We hit a shift_n1 opcode TX but the TDR is empty, tell the
+ * sequencer to stop and break this loop.
+ */
+ trace_pnv_spi_sequencer_stop_requested("Shift N1"
+ "set for transmit but TDR is empty");
+ stop = true;
+ break;
+ }
+ } else {
+ /*
+ * Cases here:
+ * - we are receiving during the N1 frame segment and the RDR
+ * is full so we need to stop until the RDR is read
+ * - we are transmitting and we don't care about RDR status
+ * since we won't be loading RDR during the frame segment.
+ * - we are receiving and the RDR is empty so we allow the operation
+ * to proceed.
+ */
+ if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL,
+ s->status) == 1)) {
+ trace_pnv_spi_sequencer_stop_requested("shift N1"
+ "set for receive but RDR is full");
+ stop = true;
+ break;
+ } else {
+ trace_pnv_spi_tx_append_FF("n1_byte");
+ *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
+ = 0xff;
+ }
+ }
+ n1_count++;
+ } /* end of while */
+ /*
+ * If we are not stopping due to an empty TDR and we are doing an N1 TX
+ * and the TDR is full we need to clear the TDR_full status.
+ * Do this here instead of up in the loop above so we don't log the message
+ * in every loop iteration.
+ * Ignore the send_n1_alone flag; all that does is defer the TX until the N2
+ * operation, which was found immediately after the current opcode. The TDR
+ * was unloaded and will be shifted so we have to clear the TDR_full status.
+ */
+ if (!stop && (s->N1_tx != 0) &&
+ (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
+ s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
+ }
+ /*
+ * There are other reasons why the shifter would stop, such as a TDR empty
+ * or RDR full condition with N1 set to receive. If we haven't stopped due
+ * to either one of those conditions then check if the send_n1_alone flag is
+ * equal to False, indicating the next opcode is an N2 operation, AND if
+ * the N2 counter reload switch (bit 0 of the N2 count control field) is
+ * set. This condition requires a pacing write to "kick" off the N2
+ * shift which includes the N1 shift as well when send_n1_alone is False.
+ */
+ if (!stop && !send_n1_alone &&
+ (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
+ trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
+ "active, stop N1 shift, TDR_underrun set to 1");
+ stop = true;
+ s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
+ }
+ /*
+ * If send_n1_alone is set AND we have a full TDR then this is the first and
+ * last payload to send and we don't have an N2 frame segment to add to the
+ * payload.
+ */
+ if (send_n1_alone && !stop) {
+ /* We have a TX and a full TDR or an RX and an empty RDR */
+ trace_pnv_spi_tx_request("Shifting N1 frame", (*payload)->len);
+ transfer(s, *payload);
+ /* The N1 frame shift is complete so reset the N2 counters */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ pnv_spi_xfer_buffer_free(*payload);
+ *payload = NULL;
+ }
+ return stop;
+} /* end of operation_shiftn1() */
+
+/*
+ * Calculate the N2 counters based on passed in opcode and
+ * internal register values.
+ * The method assumes that the opcode is a Shift_N2 opcode
+ * and doesn't test it.
+ * The counters returned are:
+ * N2 bits: Number of bits in the payload data that are significant
+ * to the responder.
+ * N2_bytes: Total count of payload bytes for the N2 frame.
+ * N2_tx: Total number of bytes taken from TDR for N2
+ * N2_rx: Total number of bytes taken from the payload for N2
+ */
+static void calculate_N2(PnvSpi *s, uint8_t opcode)
+{
+ /*
+ * Shift_N2 opcode form: 0x4M
+ * Implicit mode:
+ * If M!=0 the shift count is M bytes and M is the number of rx bytes.
+ * Forced Implicit mode:
+ * M is the shift count but tx and rx is determined by the count control
+ * register fields. Note that we only check for Forced Implicit mode when
+ * M != 0 since the mode doesn't make sense when M = 0.
+ * Explicit mode:
+ * If M==0 then shift count is number of bits defined in the
+ * Counter Configuration Register's shift_count_N2 field.
+ */
+ if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
+ /* Explicit mode */
+ s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
+ s->N2_bytes = (s->N2_bits + 7) / 8;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ /* If tx count control for N2 is set, load the tx value */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N2_tx = s->N2_bytes;
+ }
+ /* If rx count control for N2 is set, load the rx value */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N2_rx = s->N2_bytes;
+ }
+ } else {
+ /* Implicit mode/Forced Implicit mode, use M field from opcode */
+ s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ s->N2_bits = s->N2_bytes * 8;
+ /* Assume that we are going to receive the count */
+ s->N2_rx = s->N2_bytes;
+ s->N2_tx = 0;
+ /* Let Forced Implicit mode have an effect on the counts */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
+ /*
+ * If Forced Implicit mode and count control doesn't
+ * indicate a receive then reset the rx count to 0
+ */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3,
+ s->regs[SPI_CTR_CFG_REG]) == 0) {
+ s->N2_rx = 0;
+ }
+ /* If tx count control for N2 is set, load the tx value */
+ if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2,
+ s->regs[SPI_CTR_CFG_REG]) == 1) {
+ s->N2_tx = s->N2_bytes;
+ }
+ }
+ }
+ /*
+ * Enforce an upper limit on the size of N2 that is equal to the
+ * known size of the shift register, 64 bits or 72 bits if ECC
+ * is enabled.
+ * If the size exceeds 72 bits it is a user error so log an error,
+ * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
+ * error bit.
+ */
+ uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
+ s->regs[SPI_CLK_CFG_REG]);
+ if (ecc_control == 0 || ecc_control == 2) {
+ if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
+ /* Unsupported N2 shift size when ECC enabled */
+ s->N2_bytes = PNV_SPI_REG_SIZE + 1;
+ s->N2_bits = s->N2_bytes * 8;
+ }
+ } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
+ /* Unsupported N2 shift size */
+ s->N2_bytes = PNV_SPI_REG_SIZE;
+ s->N2_bits = s->N2_bytes * 8;
+ }
+} /* end of calculate_N2 */
+
+/*
+ * Shift_N2 operation handler method
+ */
+
+static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
+ PnvXferBuffer **payload)
+{
+ uint8_t n2_count;
+ bool stop = false;
+
+ /*
+ * If there isn't a current payload left over from a stopped sequence
+ * create a new one.
+ */
+ if (*payload == NULL) {
+ *payload = pnv_spi_xfer_buffer_new();
+ }
+ /*
+ * Use a combination of N2 counters to build the N2 portion of the
+ * transmit payload.
+ */
+ calculate_N2(s, opcode);
+ trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
+ s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
+ /*
+ * The only difference between this code and the code for shift N1 is
+ * that this code has to account for the possible presence of N1 transmit
+ * bytes already taken from the TDR.
+ * If there are bytes to be transmitted for the N2 portion of the frame
+ * and there are still bytes in TDR that have not been copied into the
+ * TX data of the payload, this code will handle transmitting those
+ * remaining bytes.
+ * If, for some reason, the transmit counts add up to more than the
+ * size of the TDR, we just append 0xFF to the transmit payload data
+ * until the payload is N1 + N2 bytes long.
+ */
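+    /*
+     * Sketch of the assembly done by the loop below, with illustrative
+     * values: if N1_tx = 2 and N2_tx = 4, the N2 payload bytes come from
+     * TDR offsets 2..5; any bytes beyond the 8-byte TDR are padded with
+     * 0xFF.
+     */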
+ n2_count = 0;
+ while (n2_count < s->N2_bytes) {
+ /*
+ * If the RDR is full and we need to RX just bail out, letting the
+ * code continue will end up building the payload twice in the same
+ * buffer since RDR full causes a sequence stop and restart.
+ */
+ if ((s->N2_rx != 0) &&
+ (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
+ trace_pnv_spi_sequencer_stop_requested("shift N2 set"
+ "for receive but RDR is full");
+ stop = true;
+ break;
+ }
+ if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) <
+ PNV_SPI_REG_SIZE)) {
+ /* Always append data for the N2 segment if it is set for TX */
+ uint8_t n2_byte = 0x00;
+ n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
+ trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
+ *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
+ = n2_byte;
+ } else {
+ /*
+ * Regardless of whether or not N2 is set for TX or RX, we need
+ * the number of bytes in the payload to match the overall length
+ * of the operation.
+ */
+ trace_pnv_spi_tx_append_FF("n2_byte");
+ *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
+ = 0xff;
+ }
+ n2_count++;
+ } /* end of while */
+ if (!stop) {
+ /* We have a TX and a full TDR or an RX and an empty RDR */
+ trace_pnv_spi_tx_request("Shifting N2 frame", (*payload)->len);
+ transfer(s, *payload);
+ /*
+ * If we are doing an N2 TX and the TDR is full we need to clear the
+ * TDR_full status. Do this here instead of up in the loop above so we
+ * don't log the message in every loop iteration.
+ */
+ if ((s->N2_tx != 0) &&
+ (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
+ s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
+ }
+ /*
+ * The N2 frame shift is complete so reset the N2 counters.
+ * Reset the N1 counters also in case the frame was a combination of
+ * N1 and N2 segments.
+ */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ s->N1_bits = 0;
+ s->N1_bytes = 0;
+ s->N1_tx = 0;
+ s->N1_rx = 0;
+ pnv_spi_xfer_buffer_free(*payload);
+ *payload = NULL;
+ }
+ return stop;
+} /* end of operation_shiftn2() */
+
+static void operation_sequencer(PnvSpi *s)
+{
+    /*
+     * Loop through each sequencer operation ID and perform the requested
+     * operations.
+     */
+    /*
+     * Flag indicating whether we should send the N1 frame alone or wait
+     * to combine it with a following N2 frame.
+     */
+    bool send_n1_alone = true;
+ bool stop = false; /* Flag to stop the sequencer */
+ uint8_t opcode = 0;
+ uint8_t masked_opcode = 0;
+
+    /*
+     * PnvXferBuffer for containing the payload of the SPI frame.
+     * This is static because there are cases where a sequence has to stop
+     * and wait for the target application to unload the RDR. That can
+     * happen during a sequence where N1 is not sent alone but combined
+     * with N2, since the N1 tx length plus the N2 tx length is less than
+     * the size of the TDR.
+     */
+ static PnvXferBuffer *payload;
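+    /* Note: being static, this buffer is shared by all PnvSpi instances. */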
+
+ if (payload == NULL) {
+ payload = pnv_spi_xfer_buffer_new();
+ }
+ /*
+ * Clear the sequencer FSM error bit - general_SPI_status[3]
+ * before starting a sequence.
+ */
+ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
+ /*
+ * If the FSM is idle set the sequencer index to 0
+ * (new/restarted sequence)
+ */
+ if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
+ }
+ /*
+     * There are only 8 possible operation IDs to iterate through, though
+     * some operations may cause more than one frame to be sequenced.
+ */
+ while (get_seq_index(s) < NUM_SEQ_OPS) {
+ opcode = s->seq_op[get_seq_index(s)];
+ /* Set sequencer state to decode */
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
+ /*
+ * Only the upper nibble of the operation ID is needed to know what
+ * kind of operation is requested.
+ */
+ masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
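+        /*
+         * For example, a Shift_N2 opcode of 0x43 masks to SEQ_OP_SHIFT_N2
+         * (upper nibble 0x4), with M = 3 left in the lower nibble.
+         */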
+ switch (masked_opcode) {
+ /*
+ * Increment the operation index in each case instead of just
+ * once at the end in case an operation like the branch
+ * operation needs to change the index.
+ */
+ case SEQ_OP_STOP:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ /* A stop operation in any position stops the sequencer */
+ trace_pnv_spi_sequencer_op("STOP", get_seq_index(s));
+
+ stop = true;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
+ s->loop_counter_1 = 0;
+ s->loop_counter_2 = 0;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
+ break;
+
+ case SEQ_OP_SELECT_SLAVE:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("SELECT_SLAVE", get_seq_index(s));
+            /*
+             * This device currently only supports a single responder
+             * connection at position 0 (cs_line[0]), selected with a lower
+             * nibble value of 1. De-selecting a responder (lower nibble 0)
+             * is fine and expected at the end of a sequence, but selecting
+             * any responder other than 1 should cause an error.
+             */
+ s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
+ if (s->responder_select == 0) {
+ trace_pnv_spi_shifter_done();
+ qemu_set_irq(s->cs_line[0], 1);
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
+ (get_seq_index(s) + 1));
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
+ } else if (s->responder_select != 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
+ "not supported, select = 0x%x\n",
+ s->responder_select);
+ trace_pnv_spi_sequencer_stop_requested("invalid "
+ "responder select");
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
+ stop = true;
+ } else {
+ /*
+ * Only allow an FSM_START state when a responder is
+ * selected
+ */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
+                trace_pnv_spi_shifter_starting();
+ qemu_set_irq(s->cs_line[0], 0);
+                /*
+                 * A Shift_N2 operation is only valid after a Shift_N1
+                 * according to the spec. The spec doesn't say whether that
+                 * means immediately after or just at any point after. We
+                 * track the occurrence of a Shift_N1 to enforce this
+                 * requirement in the most generic way possible, by assuming
+                 * that the rule applies once a valid responder select has
+                 * occurred.
+                 */
+ s->shift_n1_done = false;
+ next_sequencer_fsm(s);
+ }
+ break;
+
+ case SEQ_OP_SHIFT_N1:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("SHIFT_N1", get_seq_index(s));
+            /*
+             * Only allow a shift_n1 when the shifter state is not IDLE or
+             * DONE. In either of those two cases the shifter is not in a
+             * proper state to perform shift operations because it has:
+             * - processed a responder deselect (DONE)
+             * - processed a stop opcode (IDLE)
+             * - encountered an error (IDLE)
+             */
+ if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
+ (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
+ "shifter state = 0x%llx", GETFIELD(
+ SPI_STS_SHIFTER_FSM, s->status));
+ /*
+ * Set sequencer FSM error bit 3 (general_SPI_status[3])
+ * in status reg.
+ */
+ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
+ trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
+ stop = true;
+ } else {
+ /*
+ * Look for the special case where there is a shift_n1 set for
+ * transmit and it is followed by a shift_n2 set for transmit
+ * AND the combined transmit length of the two operations is
+ * less than or equal to the size of the TDR register. In this
+ * case we want to use both this current shift_n1 opcode and the
+ * following shift_n2 opcode to assemble the frame for
+ * transmission to the responder without requiring a refill of
+ * the TDR between the two operations.
+ */
+                /* Guard the look-ahead so we stay within seq_op bounds */
+                if ((get_seq_index(s) + 1) < NUM_SEQ_OPS &&
+                    PNV_SPI_MASKED_OPCODE(s->seq_op[get_seq_index(s) + 1])
+                    == SEQ_OP_SHIFT_N2) {
+                    send_n1_alone = false;
+                }
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
+ FSM_SHIFT_N1);
+ stop = operation_shiftn1(s, opcode, &payload, send_n1_alone);
+ if (stop) {
+                    /*
+                     * The operation code says to stop. This can occur if:
+                     * (1) RDR is full and the N1 shift is set for receive
+                     * (2) TDR was empty at the time of the N1 shift so we
+                     * need to wait for data.
+                     * (3) Neither 1 nor 2 is occurring, we aren't sending
+                     * N1 alone, and N2 counter reload is set (bit 0 of the
+                     * N2 counter reload field). In this case TDR_underrun
+                     * will be set and the payload has been loaded, so it
+                     * is ok to advance the sequencer.
+                     */
+ if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
+ s->shift_n1_done = true;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
+ FSM_SHIFT_N2);
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
+ (get_seq_index(s) + 1));
+ } else {
+ /*
+ * This is case (1) or (2) so the sequencer needs to
+ * wait and NOT go to the next sequence yet.
+ */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
+ FSM_WAIT);
+ }
+ } else {
+ /* Ok to move on to the next index */
+ s->shift_n1_done = true;
+ next_sequencer_fsm(s);
+ }
+ }
+ break;
+
+ case SEQ_OP_SHIFT_N2:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("SHIFT_N2", get_seq_index(s));
+ if (!s->shift_n1_done) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
+ "Shift_N1 is not done, shifter state = 0x%llx",
+ GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
+                /*
+                 * The sequencer stops when an N2 shift is requested before
+                 * any N1 shift is done. Set sequencer FSM error bit 3
+                 * (general_SPI_status[3]) in the status reg.
+                 */
+ s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
+ trace_pnv_spi_sequencer_stop_requested("shift_n2 "
+ "w/no shift_n1 done");
+ stop = true;
+ } else {
+ /* Ok to do a Shift_N2 */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
+ FSM_SHIFT_N2);
+ stop = operation_shiftn2(s, opcode, &payload);
+ /*
+ * If the operation code says to stop set the shifter state to
+ * wait and stop
+ */
+ if (stop) {
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
+ FSM_WAIT);
+ } else {
+ /* Ok to move on to the next index */
+ next_sequencer_fsm(s);
+ }
+ }
+ break;
+
+ case SEQ_OP_BRANCH_IFNEQ_RDR:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", get_seq_index(s));
+            /*
+             * The memory mapping register RDR match value is compared
+             * against the 16 rightmost bits of the RDR (potentially with
+             * masking). Since this comparison is performed against the
+             * contents of the RDR, a receive must have previously occurred;
+             * otherwise there is no data to compare, the operation cannot
+             * be completed, and the sequencer stops until RDR full is set
+             * to 1.
+             */
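+            /*
+             * For example, opcode 0x65 re-runs the operation at sequencer
+             * index 5 whenever the RDR contents fail to match.
+             */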
+ if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
+ bool rdr_matched = false;
+ rdr_matched = does_rdr_match(s);
+ if (rdr_matched) {
+ trace_pnv_spi_RDR_match("success");
+ /* A match occurred, increment the sequencer index. */
+ next_sequencer_fsm(s);
+ } else {
+ trace_pnv_spi_RDR_match("failed");
+ /*
+ * Branch the sequencer to the index coded into the op
+ * code.
+ */
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
+ PNV_SPI_OPCODE_LO_NIBBLE(opcode));
+ }
+ /*
+ * Regardless of where the branch ended up we want the
+ * sequencer to continue shifting so we have to clear
+ * RDR_full.
+ */
+ s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
+ } else {
+ trace_pnv_spi_sequencer_stop_requested("RDR not"
+ "full for 0x6x opcode");
+ stop = true;
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
+ }
+ break;
+
+ case SEQ_OP_TRANSFER_TDR:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
+ next_sequencer_fsm(s);
+ break;
+
+ case SEQ_OP_BRANCH_IFNEQ_INC_1:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", get_seq_index(s));
+            /*
+             * The spec says the loop should execute count compare + 1
+             * times. However, we learned from engineering that we really
+             * only loop count_compare times; count compare = 0 makes this
+             * op code a no-op.
+             */
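+            /*
+             * For example, opcode 0x72 branches back to index 2 and
+             * increments loop_counter_1 until it equals the CMP1 field.
+             */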
+ if (s->loop_counter_1 !=
+ GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
+                /*
+                 * Next index is the lower nibble of the branch operation
+                 * ID; only values 0-7 are meaningful since the
+                 * sequencer_operation_reg holds eight operations.
+                 */
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
+ PNV_SPI_OPCODE_LO_NIBBLE(opcode));
+ s->loop_counter_1++;
+ } else {
+ /* Continue to next index if loop counter is reached */
+ next_sequencer_fsm(s);
+ }
+ break;
+
+ case SEQ_OP_BRANCH_IFNEQ_INC_2:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", get_seq_index(s));
+ uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
+ s->regs[SPI_CTR_CFG_REG]);
+            /*
+             * The spec says the loop should execute count compare + 1
+             * times. However, we learned from engineering that we really
+             * only loop count_compare times; count compare = 0 makes this
+             * op code a no-op.
+             */
+ if (s->loop_counter_2 != condition2) {
+                /*
+                 * Next index is the lower nibble of the branch operation
+                 * ID; only values 0-7 are meaningful since the
+                 * sequencer_operation_reg holds eight operations.
+                 */
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX,
+ s->status, PNV_SPI_OPCODE_LO_NIBBLE(opcode));
+ s->loop_counter_2++;
+ } else {
+ /* Continue to next index if loop counter is reached */
+ next_sequencer_fsm(s);
+ }
+ break;
+
+ default:
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
+ /* Ignore unsupported operations. */
+ next_sequencer_fsm(s);
+ break;
+ } /* end of switch */
+        /*
+         * If we used all 8 opcodes without seeing a 00 - STOP in the
+         * sequence, end things as if there was a STOP at the end.
+         */
+ if (get_seq_index(s) == NUM_SEQ_OPS) {
+ /* All 8 opcodes completed, sequencer idling */
+ s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
+ s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
+ s->loop_counter_1 = 0;
+ s->loop_counter_2 = 0;
+ s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
+ break;
+ }
+ /* Break the loop if a stop was requested */
+ if (stop) {
+ break;
+ }
+ } /* end of while */
+ return;
+} /* end of operation_sequencer() */
+
+/*
+ * The SPIC engine and its internal sequencer can be interrupted and reset by
+ * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
+ * Miscellaneous Register of sbe_register_bo device.
+ * Reset immediately aborts any SPI transaction in progress and returns the
+ * sequencer and state machines to idle state.
+ * The configuration register values are not changed. The status register is
+ * not reset. The engine registers are not reset.
+ * The SPIC engine reset does not have any effect on the attached devices.
+ * Reset handling of any attached devices is beyond the scope of the engine.
+ */
+static void do_reset(DeviceState *dev)
+{
+ PnvSpi *s = PNV_SPI(dev);
+ DeviceState *ssi_dev;
+
+ trace_pnv_spi_reset();
+
+ /* Connect cs irq */
+ ssi_dev = ssi_get_cs(s->ssi_bus, 0);
+ if (ssi_dev) {
+ qemu_irq cs_line = qdev_get_gpio_in_named(ssi_dev, SSI_GPIO_CS, 0);
+ qdev_connect_gpio_out_named(DEVICE(s), "cs", 0, cs_line);
+ }
+
+    /* Reset all N1 and N2 counters, and other state */
+ s->N2_bits = 0;
+ s->N2_bytes = 0;
+ s->N2_tx = 0;
+ s->N2_rx = 0;
+ s->N1_bits = 0;
+ s->N1_bytes = 0;
+ s->N1_tx = 0;
+ s->N1_rx = 0;
+ s->loop_counter_1 = 0;
+ s->loop_counter_2 = 0;
+ /* Disconnected from responder */
+ qemu_set_irq(s->cs_line[0], 1);
+}
+
+static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
+{
+ PnvSpi *s = PNV_SPI(opaque);
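+    /* Registers are 64-bit; map the 8-byte-aligned address to an index */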
+ uint32_t reg = addr >> 3;
+ uint64_t val = ~0ull;
+
+ switch (reg) {
+ case ERROR_REG:
+ case SPI_CTR_CFG_REG:
+ case CONFIG_REG1:
+ case SPI_CLK_CFG_REG:
+ case SPI_MM_REG:
+ case SPI_XMIT_DATA_REG:
+ val = s->regs[reg];
+ break;
+ case SPI_RCV_DATA_REG:
+ val = s->regs[reg];
+ trace_pnv_spi_read_RDR(val);
+ s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
+ if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
+ trace_pnv_spi_start_sequencer();
+ operation_sequencer(s);
+ }
+ break;
+ case SPI_SEQ_OP_REG:
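+        /* Reassemble the eight opcode bytes, MSB first, into one value */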
+ val = 0;
+ for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
+ val = (val << 8) | s->seq_op[i];
+ }
+ break;
+ case SPI_STS_REG:
+ val = s->status;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
+ "read at 0x%" PRIx32 "\n", reg);
+ }
+
+ trace_pnv_spi_read(addr, val);
+ return val;
+}
+
+static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ PnvSpi *s = PNV_SPI(opaque);
+ uint32_t reg = addr >> 3;
+
+ trace_pnv_spi_write(addr, val);
+
+ switch (reg) {
+ case ERROR_REG:
+ case SPI_CTR_CFG_REG:
+ case CONFIG_REG1:
+ case SPI_MM_REG:
+ case SPI_RCV_DATA_REG:
+ s->regs[reg] = val;
+ break;
+ case SPI_CLK_CFG_REG:
+ /*
+ * To reset the SPI controller write the sequence 0x5 0xA to
+ * reset_control field
+ */
+ if ((GETFIELD(SPI_CLK_CFG_RST_CTRL, s->regs[SPI_CLK_CFG_REG]) == 0x5)
+ && (GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA)) {
+ /* SPI controller reset sequence completed, resetting */
+ s->regs[reg] = SPI_CLK_CFG_HARD_RST;
+ } else {
+ s->regs[reg] = val;
+ }
+ break;
+ case SPI_XMIT_DATA_REG:
+ /*
+ * Writing to the transmit data register causes the transmit data
+ * register full status bit in the status register to be set. Writing
+ * when the transmit data register full status bit is already set
+ * causes a "Resource Not Available" condition. This is not possible
+ * in the model since writes to this register are not asynchronous to
+ * the operation sequence like it would be in hardware.
+ */
+ s->regs[reg] = val;
+ trace_pnv_spi_write_TDR(val);
+ s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
+ s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
+ trace_pnv_spi_start_sequencer();
+ operation_sequencer(s);
+ break;
+ case SPI_SEQ_OP_REG:
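+        /* Unpack the 64-bit value, MSB first, into the eight opcodes */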
+ for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
+ s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
+ }
+ break;
+ case SPI_STS_REG:
+        /* Other fields are ignored on write */
+ s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
+ GETFIELD(SPI_STS_RDR, val));
+ s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
+ GETFIELD(SPI_STS_TDR, val));
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
+ "write at 0x%" PRIx32 "\n", reg);
+ }
+ return;
+}
+
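+/* The SPI controller XSCOM space accepts aligned 8-byte accesses only */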
+static const MemoryRegionOps pnv_spi_xscom_ops = {
+ .read = pnv_spi_xscom_read,
+ .write = pnv_spi_xscom_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static Property pnv_spi_properties[] = {
+ DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
+ DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void pnv_spi_realize(DeviceState *dev, Error **errp)
+{
+ PnvSpi *s = PNV_SPI(dev);
+ g_autofree char *name = g_strdup_printf(TYPE_PNV_SPI_BUS ".%d",
+ s->spic_num);
+ s->ssi_bus = ssi_create_bus(dev, name);
+ s->cs_line = g_new0(qemu_irq, 1);
+ qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);
+
+ /* spi scoms */
+ pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
+ s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
+}
+
+static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt,
+ int offset)
+{
+ PnvSpi *s = PNV_SPI(dev);
+    g_autofree char *name = NULL;
+ int s_offset;
+ const char compat[] = "ibm,power10-spi";
+ uint32_t spic_pcba = PNV10_XSCOM_PIB_SPIC_BASE +
+ s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
+ uint32_t reg[] = {
+ cpu_to_be32(spic_pcba),
+ cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
+ };
+ name = g_strdup_printf("pnv_spi@%x", spic_pcba);
+ s_offset = fdt_add_subnode(fdt, offset, name);
+ _FDT(s_offset);
+
+ _FDT(fdt_setprop(fdt, s_offset, "reg", reg, sizeof(reg)));
+ _FDT(fdt_setprop(fdt, s_offset, "compatible", compat, sizeof(compat)));
+ _FDT((fdt_setprop_cell(fdt, s_offset, "spic_num#", s->spic_num)));
+ return 0;
+}
+
+static void pnv_spi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);
+
+ xscomc->dt_xscom = pnv_spi_dt_xscom;
+
+ dc->desc = "PowerNV SPI";
+ dc->realize = pnv_spi_realize;
+ dc->reset = do_reset;
+ device_class_set_props(dc, pnv_spi_properties);
+}
+
+static const TypeInfo pnv_spi_info = {
+ .name = TYPE_PNV_SPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PnvSpi),
+ .class_init = pnv_spi_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { }
+ }
+};
+
+static void pnv_spi_register_types(void)
+{
+ type_register_static(&pnv_spi_info);
+}
+
+type_init(pnv_spi_register_types);
diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events
index 7b5ad6a..089d269 100644
--- a/hw/ssi/trace-events
+++ b/hw/ssi/trace-events
@@ -32,3 +32,24 @@ ibex_spi_host_reset(const char *msg) "%s"
ibex_spi_host_transfer(uint32_t tx_data, uint32_t rx_data) "tx_data: 0x%" PRIx32 " rx_data: @0x%" PRIx32
ibex_spi_host_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64
ibex_spi_host_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size %u:"
+
+# pnv_spi.c
+pnv_spi_read(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+pnv_spi_write(uint64_t addr, uint64_t val) "addr 0x%" PRIx64 " val 0x%" PRIx64
+pnv_spi_read_RDR(uint64_t val) "data extracted = 0x%" PRIx64
+pnv_spi_write_TDR(uint64_t val) "being written, data written = 0x%" PRIx64
+pnv_spi_start_sequencer(void) ""
+pnv_spi_reset(void) "spic engine sequencer configuration and spi communication"
+pnv_spi_sequencer_op(const char* op, uint8_t index) "%s at index = 0x%x"
+pnv_spi_shifter_stating(void) "pull CS line low"
+pnv_spi_shifter_done(void) "pull the CS line high"
+pnv_spi_log_Ncounts(uint8_t N1_bits, uint8_t N1_bytes, uint8_t N1_tx, uint8_t N1_rx, uint8_t N2_bits, uint8_t N2_bytes, uint8_t N2_tx, uint8_t N2_rx) "N1_bits = %d, N1_bytes = %d, N1_tx = %d, N1_rx = %d, N2_bits = %d, N2_bytes = %d, N2_tx = %d, N2_rx = %d"
+pnv_spi_tx_append(const char* frame, uint8_t byte, uint8_t tdr_index) "%s = 0x%2.2x to payload from TDR at index %d"
+pnv_spi_tx_append_FF(const char* frame) "%s to Payload"
+pnv_spi_tx_request(const char* frame, uint32_t payload_len) "%s, payload len = %d"
+pnv_spi_rx_received(uint32_t payload_len) "payload len = %d"
+pnv_spi_rx_read_N1frame(void) ""
+pnv_spi_rx_read_N2frame(void) ""
+pnv_spi_shift_rx(uint8_t byte, uint32_t index) "byte = 0x%2.2x into RDR from payload index %d"
+pnv_spi_sequencer_stop_requested(const char* reason) "due to %s"
+pnv_spi_RDR_match(const char* result) "%s"