-rw-r--r--  asm/head.S                   55
-rw-r--r--  asm/misc.S                    4
-rw-r--r--  core/affinity.c               2
-rw-r--r--  core/chip.c                  40
-rw-r--r--  core/cpu.c                   32
-rw-r--r--  core/direct-controls.c      363
-rw-r--r--  core/hmi.c                  221
-rw-r--r--  core/init.c                   2
-rw-r--r--  core/mce.c                  129
-rw-r--r--  core/test/run-timer.c         2
-rw-r--r--  doc/platforms-and-cpus.rst    1
-rw-r--r--  hw/chiptod.c                 30
-rw-r--r--  hw/dts.c                      7
-rw-r--r--  hw/lpc.c                      7
-rw-r--r--  hw/xscom.c                   25
-rw-r--r--  include/chip.h               49
-rw-r--r--  include/opal-api.h            1
-rw-r--r--  include/processor.h          52
-rw-r--r--  include/skiboot.h             1
-rw-r--r--  include/xscom-p10-regs.h     54
-rw-r--r--  include/xscom.h              85
21 files changed, 1067 insertions, 95 deletions
diff --git a/asm/head.S b/asm/head.S
index d773bde..f85b0fe 100644
--- a/asm/head.S
+++ b/asm/head.S
@@ -324,7 +324,7 @@ boot_offset:
* r28 : PVR
* r27 : DTB pointer (or NULL)
* r26 : PIR thread mask
- * r25 : P9 fused core flag
+ * r25 : P9/10 fused core flag
*/
.global boot_entry
boot_entry:
@@ -342,6 +342,8 @@ boot_entry:
beq 3f
cmpwi cr0,%r3,PVR_TYPE_P9P
beq 3f
+ cmpwi cr0,%r3,PVR_TYPE_P10
+ beq 4f
attn /* Unsupported CPU type... what do we do ? */
b . /* loop here, just in case attn is disabled */
@@ -352,8 +354,17 @@ boot_entry:
mfspr %r3, SPR_SPRD
andi. %r25, %r3, 1
beq 1f
+ b 2f
- /* P8 or P9 fused -> 8 threads */
+4: /*
+ * P10 fused core check (SPRC/SPRD method does not work).
+ * PVR bit 12 set = normal core
+ */
+ andi. %r3, %r28, 0x1000
+ bne 1f
+ li %r25, 1
+
+ /* P8 or P9 fused or P10 fused -> 8 threads */
2: li %r26,7
@@ -730,6 +741,8 @@ init_shared_sprs:
beq 4f
cmpwi cr0,%r3,PVR_TYPE_P9P
beq 4f
+ cmpwi cr0,%r3,PVR_TYPE_P10
+ beq 5f
/* Unsupported CPU type... what do we do ? */
b 9f
@@ -806,6 +819,32 @@ init_shared_sprs:
LOAD_IMM64(%r3,0x00000103070F1F3F)
mtspr SPR_RPR,%r3
+ b 9f
+
+5: /* P10 */
+ /* TSCR: UM recommended value */
+ LOAD_IMM32(%r3,0x80287880)
+ mtspr SPR_TSCR, %r3
+
+ /* HID0:
+ * Boot with PPC_BIT(5) set (dis_recovery).
+ * Clear bit 5 to enable recovery.
+ */
+ LOAD_IMM64(%r3, 0)
+ sync
+ mtspr SPR_HID0,%r3
+ isync
+
+ LOAD_IMM64(%r4,SPR_HMEER_P10_HMI_ENABLE_MASK)
+ mfspr %r3,SPR_HMEER
+ or %r3,%r3,%r4
+ sync
+ mtspr SPR_HMEER,%r3
+ isync
+
+ LOAD_IMM64(%r3,0x00000103070F1F3F)
+ mtspr SPR_RPR,%r3
+
9: blr
.global init_replicated_sprs
@@ -822,6 +861,8 @@ init_replicated_sprs:
beq 4f
cmpwi cr0,%r3,PVR_TYPE_P9P
beq 4f
+ cmpwi cr0,%r3,PVR_TYPE_P10
+ beq 5f
/* Unsupported CPU type... what do we do ? */
b 9f
@@ -845,6 +886,16 @@ init_replicated_sprs:
LOAD_IMM64(%r3,0x0000000000000010)
mtspr SPR_DSCR,%r3
+5: /* P10 */
+ /* LPCR: sane value */
+ LOAD_IMM64(%r3,0x0040000000000000)
+ mtspr SPR_LPCR, %r3
+ sync
+ isync
+ /* DSCR: Stride-N Stream Enable */
+ LOAD_IMM64(%r3,0x0000000000000010)
+ mtspr SPR_DSCR,%r3
+
9: blr
.global enter_nap
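
A minimal C sketch of the fused-core test the boot path above performs, assuming only the PVR-bit-12 convention described in the new comment; the helper names are illustrative, not skiboot code.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PVR_TYPE_P10	0x0080

/*
 * Mirrors the asm above: on P10 the SPRC/SPRD probe is not usable, so the
 * fused-core decision comes from PVR bit 12 (mask 0x1000): set means a
 * normal (small) core, clear means a fused (big) core.
 */
static bool p10_pvr_is_fused(uint32_t pvr)
{
	if (((pvr >> 16) & 0xffff) != PVR_TYPE_P10)
		return false;
	return !(pvr & 0x1000);
}

int main(void)
{
	uint32_t pvr = 0x00800100;	/* hypothetical P10 PVR, bit 12 clear */

	/* Fused cores get an 8-thread PIR mask (7), normal cores 4 (3). */
	printf("fused=%d thread mask=%d\n",
	       p10_pvr_is_fused(pvr), p10_pvr_is_fused(pvr) ? 7 : 3);
	return 0;
}
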
diff --git a/asm/misc.S b/asm/misc.S
index 0334489..ea43763 100644
--- a/asm/misc.S
+++ b/asm/misc.S
@@ -99,13 +99,15 @@ cleanup_local_tlb:
.global cleanup_global_tlb
cleanup_global_tlb:
- /* Only supported on P9 for now */
+ /* Only supported on P9, P10 for now */
mfspr %r3,SPR_PVR
srdi %r3,%r3,16
cmpwi cr0,%r3,PVR_TYPE_P9
beq cr0,1f
cmpwi cr0,%r3,PVR_TYPE_P9P
beq cr0,1f
+ cmpwi cr0,%r3,PVR_TYPE_P10
+ beq cr0,1f
blr
/* Sync out previous updates */
diff --git a/core/affinity.c b/core/affinity.c
index 47ba33c..0209d3c 100644
--- a/core/affinity.c
+++ b/core/affinity.c
@@ -111,6 +111,8 @@ void add_core_associativity(struct cpu_thread *cpu)
core_id = (cpu->pir >> 3) & 0xf;
else if (proc_gen == proc_gen_p9)
core_id = (cpu->pir >> 2) & 0x1f;
+ else if (proc_gen == proc_gen_p10)
+ core_id = (cpu->pir >> 2) & 0x1f;
else
return;
diff --git a/core/chip.c b/core/chip.c
index f1269d3..f79e8cd 100644
--- a/core/chip.c
+++ b/core/chip.c
@@ -13,7 +13,9 @@ enum proc_chip_quirks proc_chip_quirks;
uint32_t pir_to_chip_id(uint32_t pir)
{
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ return P10_PIR2GCID(pir);
+ else if (proc_gen == proc_gen_p9)
return P9_PIR2GCID(pir);
else if (proc_gen == proc_gen_p8)
return P8_PIR2GCID(pir);
@@ -23,41 +25,59 @@ uint32_t pir_to_chip_id(uint32_t pir)
uint32_t pir_to_core_id(uint32_t pir)
{
- if (proc_gen == proc_gen_p9) {
+ if (proc_gen == proc_gen_p10) {
+ if (this_cpu()->is_fused_core)
+ return P10_PIRFUSED2NORMALCOREID(pir);
+ else
+ return P10_PIR2COREID(pir);
+ } else if (proc_gen == proc_gen_p9) {
if (this_cpu()->is_fused_core)
return P9_PIRFUSED2NORMALCOREID(pir);
else
return P9_PIR2COREID(pir);
- } else if (proc_gen == proc_gen_p8)
+ } else if (proc_gen == proc_gen_p8) {
return P8_PIR2COREID(pir);
- else
+ } else {
assert(false);
+ }
}
uint32_t pir_to_fused_core_id(uint32_t pir)
{
- if (proc_gen == proc_gen_p9) {
+ if (proc_gen == proc_gen_p10) {
+ if (this_cpu()->is_fused_core)
+ return P10_PIR2FUSEDCOREID(pir);
+ else
+ return P10_PIR2COREID(pir);
+ } else if (proc_gen == proc_gen_p9) {
if (this_cpu()->is_fused_core)
return P9_PIR2FUSEDCOREID(pir);
else
return P9_PIR2COREID(pir);
- } else if (proc_gen == proc_gen_p8)
+ } else if (proc_gen == proc_gen_p8) {
return P8_PIR2COREID(pir);
- else
+ } else {
assert(false);
+ }
}
uint32_t pir_to_thread_id(uint32_t pir)
{
- if (proc_gen == proc_gen_p9) {
+ if (proc_gen == proc_gen_p10) {
+ if (this_cpu()->is_fused_core)
+ return P10_PIRFUSED2NORMALTHREADID(pir);
+ else
+ return P10_PIR2THREADID(pir);
+ } else if (proc_gen == proc_gen_p9) {
if (this_cpu()->is_fused_core)
return P9_PIRFUSED2NORMALTHREADID(pir);
else
return P9_PIR2THREADID(pir);
- } else if (proc_gen == proc_gen_p8)
+ } else if (proc_gen == proc_gen_p8) {
return P8_PIR2THREADID(pir);
- else
+ } else {
assert(false);
+ }
}
struct proc_chip *next_chip(struct proc_chip *chip)
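
A self-contained sketch of the thread-ID decode that pir_to_thread_id() now dispatches for P10, using only the bit positions visible in the macros this patch adds to include/chip.h; it is an illustration, not the skiboot implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Small-core mode: thread ID is the low 2 bits of the PIR (P10_PIR2THREADID).
 * Fused-core mode reuses the P9 scheme: the "normal" thread ID within the
 * big core is ((pir >> 1) & 0x3) (P10_PIRFUSED2NORMALTHREADID).
 */
static uint32_t p10_thread_id(uint32_t pir, int is_fused_core)
{
	if (is_fused_core)
		return (pir >> 1) & 0x3;
	return pir & 0x3;
}

int main(void)
{
	uint32_t pir = 0x25;	/* hypothetical PIR */

	printf("small-core thread %u, fused-core thread %u\n",
	       p10_thread_id(pir, 0), p10_thread_id(pir, 1));
	return 0;
}
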
diff --git a/core/cpu.c b/core/cpu.c
index dbc1ff4..f58aeb2 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -100,7 +100,7 @@ static void cpu_wake(struct cpu_thread *cpu)
if (proc_gen == proc_gen_p8) {
/* Poke IPI */
icp_kick_cpu(cpu);
- } else if (proc_gen == proc_gen_p9) {
+ } else if (proc_gen == proc_gen_p9 || proc_gen == proc_gen_p10) {
p9_dbell_send(cpu->pir);
}
}
@@ -507,6 +507,9 @@ static void cpu_idle_pm(enum cpu_wake_cause wake_on)
case proc_gen_p9:
vec = cpu_idle_p9(wake_on);
break;
+ case proc_gen_p10:
+ vec = cpu_idle_p9(wake_on);
+ break;
default:
vec = 0;
prlog_once(PR_DEBUG, "cpu_idle_pm called with bad processor type\n");
@@ -605,7 +608,7 @@ static void cpu_pm_disable(void)
cpu_relax();
}
}
- } else if (proc_gen == proc_gen_p9) {
+ } else if (proc_gen == proc_gen_p9 || proc_gen == proc_gen_p10) {
for_each_available_cpu(cpu) {
if (cpu->in_sleep || cpu->in_idle)
p9_dbell_send(cpu->pir);
@@ -648,7 +651,7 @@ void cpu_set_sreset_enable(bool enabled)
pm_enabled = true;
}
- } else if (proc_gen == proc_gen_p9) {
+ } else if (proc_gen == proc_gen_p9 || proc_gen == proc_gen_p10) {
sreset_enabled = enabled;
sync();
/*
@@ -676,7 +679,7 @@ void cpu_set_ipi_enable(bool enabled)
pm_enabled = true;
}
- } else if (proc_gen == proc_gen_p9) {
+ } else if (proc_gen == proc_gen_p9 || proc_gen == proc_gen_p10) {
ipi_enabled = enabled;
sync();
if (!enabled)
@@ -1014,6 +1017,13 @@ void init_boot_cpu(void)
hid0_hile = SPR_HID0_POWER9_HILE;
hid0_attn = SPR_HID0_POWER9_ENABLE_ATTN;
break;
+ case PVR_TYPE_P10:
+ proc_gen = proc_gen_p10;
+ hile_supported = true;
+ radix_supported = true;
+ hid0_hile = SPR_HID0_POWER10_HILE;
+ hid0_attn = SPR_HID0_POWER10_ENABLE_ATTN;
+ break;
default:
proc_gen = proc_gen_unknown;
}
@@ -1033,6 +1043,14 @@ void init_boot_cpu(void)
prlog(PR_INFO, "CPU: P9 generation processor"
" (max %d threads/core)\n", cpu_thread_count);
break;
+ case proc_gen_p10:
+ if (is_fused_core(pvr))
+ cpu_thread_count = 8;
+ else
+ cpu_thread_count = 4;
+ prlog(PR_INFO, "CPU: P10 generation processor"
+ " (max %d threads/core)\n", cpu_thread_count);
+ break;
default:
prerror("CPU: Unknown PVR, assuming 1 thread\n");
cpu_thread_count = 1;
@@ -1535,7 +1553,8 @@ void cpu_fast_reboot_complete(void)
current_hile_mode = HAVE_LITTLE_ENDIAN;
/* and set HID0:RADIX */
- current_radix_mode = true;
+ if (proc_gen == proc_gen_p9)
+ current_radix_mode = true;
}
static int64_t opal_reinit_cpus(uint64_t flags)
@@ -1616,7 +1635,8 @@ static int64_t opal_reinit_cpus(uint64_t flags)
flags &= ~(OPAL_REINIT_CPUS_MMU_HASH |
OPAL_REINIT_CPUS_MMU_RADIX);
- if (radix != current_radix_mode) {
+
+ if (proc_gen == proc_gen_p9 && radix != current_radix_mode) {
if (radix)
req.set_bits |= SPR_HID0_POWER9_RADIX;
else
diff --git a/core/direct-controls.c b/core/direct-controls.c
index 0274367..f7509dd 100644
--- a/core/direct-controls.c
+++ b/core/direct-controls.c
@@ -12,6 +12,7 @@
#include <xscom.h>
#include <xscom-p8-regs.h>
#include <xscom-p9-regs.h>
+#include <xscom-p10-regs.h>
#include <timebase.h>
#include <chip.h>
@@ -268,6 +269,25 @@ static int p8_sreset_thread(struct cpu_thread *cpu)
* using scom registers.
*/
+static int p9_core_is_gated(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t sshhyp_addr;
+ uint64_t val;
+
+ sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP);
+
+ if (xscom_read(chip_id, sshhyp_addr, &val)) {
+ prlog(PR_ERR, "Could not query core gated on %u:%u:"
+ " Unable to read PPM_SSHHYP.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ }
+
+ return !!(val & P9_CORE_GATED);
+}
+
static int p9_core_set_special_wakeup(struct cpu_thread *cpu)
{
uint32_t chip_id = pir_to_chip_id(cpu->pir);
@@ -301,7 +321,7 @@ static int p9_core_set_special_wakeup(struct cpu_thread *cpu)
* out of stop state. If CORE_GATED is still set then
* raise error.
*/
- if (dctl_core_is_gated(cpu)) {
+ if (p9_core_is_gated(cpu)) {
/* Deassert spwu for this strange error */
xscom_write(chip_id, swake_addr, 0);
prlog(PR_ERR, "Failed special wakeup on %u:%u"
@@ -517,6 +537,295 @@ static int p9_sreset_thread(struct cpu_thread *cpu)
return 0;
}
+/**************** POWER10 direct controls ****************/
+
+/* Long running instructions may take time to complete. Timeout 100ms */
+#define P10_QUIESCE_POLL_INTERVAL 100
+#define P10_QUIESCE_TIMEOUT 100000
+
+/* Waking may take up to 5ms for deepest sleep states. Set timeout to 100ms */
+#define P10_SPWU_POLL_INTERVAL 100
+#define P10_SPWU_TIMEOUT 100000
+
+/*
+ * This implements direct control facilities of processor cores and threads
+ * using scom registers.
+ */
+static int p10_core_is_gated(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t ssh_addr;
+ uint64_t val;
+
+ ssh_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SSH_HYP);
+
+ if (xscom_read(chip_id, ssh_addr, &val)) {
+ prlog(PR_ERR, "Could not query core gated on %u:%u:"
+ " Unable to read QME_SSH_HYP.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ }
+
+ return !!(val & P10_SSH_CORE_GATED);
+}
+
+
+static int p10_core_set_special_wakeup(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t spwu_addr, ssh_addr;
+ uint64_t val;
+ int i;
+
+ /* P10 could use SPWU_HYP done bit instead of SSH? */
+ spwu_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SPWU_HYP);
+ ssh_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SSH_HYP);
+
+ if (xscom_write(chip_id, spwu_addr, P10_SPWU_REQ)) {
+ prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
+ " Unable to write QME_SPWU_HYP.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ }
+
+ for (i = 0; i < P10_SPWU_TIMEOUT / P10_SPWU_POLL_INTERVAL; i++) {
+ if (xscom_read(chip_id, ssh_addr, &val)) {
+ prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
+ " Unable to read QME_SSH_HYP.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ }
+ if (val & P10_SSH_SPWU_DONE) {
+ /*
+ * CORE_GATED will be unset on a successful special
+ * wakeup of the core which indicates that the core is
+ * out of stop state. If CORE_GATED is still set then
+ * raise error.
+ */
+ if (p10_core_is_gated(cpu)) {
+ /* Deassert spwu for this strange error */
+ xscom_write(chip_id, spwu_addr, 0);
+ prlog(PR_ERR, "Failed special wakeup on %u:%u"
+ " core remains gated.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ } else {
+ return 0;
+ }
+ }
+ time_wait_us(P10_SPWU_POLL_INTERVAL);
+ }
+
+ prlog(PR_ERR, "Could not set special wakeup on %u:%u:"
+ " operation timeout.\n",
+ chip_id, core_id);
+ /*
+ * As per the special wakeup protocol we should not de-assert
+ * the special wakeup on the core until WAKEUP_DONE is set.
+ * So even on error do not de-assert.
+ */
+
+ return OPAL_HARDWARE;
+}
+
+static int p10_core_clear_special_wakeup(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t spwu_addr;
+
+ spwu_addr = XSCOM_ADDR_P10_QME_CORE(core_id, P10_QME_SPWU_HYP);
+
+	/* Add a small delay here (time_wait_us(1)) if spwu causes problems */
+ if (xscom_write(chip_id, spwu_addr, 0)) {
+ prlog(PR_ERR, "Could not clear special wakeup on %u:%u:"
+ " Unable to write QME_SPWU_HYP.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ }
+
+ return 0;
+}
+
+static int p10_thread_quiesced(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t thread_id = pir_to_thread_id(cpu->pir);
+ uint32_t ras_addr;
+ uint64_t ras_status;
+
+ ras_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_RAS_STATUS);
+ if (xscom_read(chip_id, ras_addr, &ras_status)) {
+ prlog(PR_ERR, "Could not check thread state on %u:%u:"
+ " Unable to read EC_RAS_STATUS.\n",
+ chip_id, core_id);
+ return OPAL_HARDWARE;
+ }
+
+ /*
+	 * p10_stop_thread for the purpose of sreset wants QUIESCED
+ * and MAINT bits set. Step, RAM, etc. need more, but we don't
+ * use those in skiboot.
+ *
+ * P10 could try wait for more here in case of errors.
+ */
+ if (!(ras_status & P10_THREAD_QUIESCED(thread_id)))
+ return 0;
+
+ if (!(ras_status & P10_THREAD_MAINT(thread_id)))
+ return 0;
+
+ return 1;
+}
+
+static int p10_cont_thread(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t thread_id = pir_to_thread_id(cpu->pir);
+ uint32_t cts_addr;
+ uint32_t ti_addr;
+ uint32_t dctl_addr;
+ uint64_t core_thread_state;
+ uint64_t thread_info;
+ bool active, stop;
+ int rc;
+ int i;
+
+ rc = p10_thread_quiesced(cpu);
+ if (rc < 0)
+ return rc;
+ if (!rc) {
+ prlog(PR_ERR, "Could not cont thread %u:%u:%u:"
+ " Thread is not quiesced.\n",
+ chip_id, core_id, thread_id);
+ return OPAL_BUSY;
+ }
+
+ cts_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_CORE_THREAD_STATE);
+ ti_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_THREAD_INFO);
+ dctl_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_DIRECT_CONTROLS);
+
+ if (xscom_read(chip_id, cts_addr, &core_thread_state)) {
+ prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
+ " Unable to read EC_CORE_THREAD_STATE.\n",
+ chip_id, core_id, thread_id);
+ return OPAL_HARDWARE;
+ }
+ if (core_thread_state & P10_THREAD_STOPPED(thread_id))
+ stop = true;
+ else
+ stop = false;
+
+ if (xscom_read(chip_id, ti_addr, &thread_info)) {
+ prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
+ " Unable to read EC_THREAD_INFO.\n",
+ chip_id, core_id, thread_id);
+ return OPAL_HARDWARE;
+ }
+ if (thread_info & P10_THREAD_ACTIVE(thread_id))
+ active = true;
+ else
+ active = false;
+
+ if (!active || stop) {
+ if (xscom_write(chip_id, dctl_addr, P10_THREAD_CLEAR_MAINT(thread_id))) {
+ prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
+ " Unable to write EC_DIRECT_CONTROLS.\n",
+ chip_id, core_id, thread_id);
+ }
+ } else {
+ if (xscom_write(chip_id, dctl_addr, P10_THREAD_START(thread_id))) {
+ prlog(PR_ERR, "Could not resume thread %u:%u:%u:"
+ " Unable to write EC_DIRECT_CONTROLS.\n",
+ chip_id, core_id, thread_id);
+ }
+ }
+
+ for (i = 0; i < P10_QUIESCE_TIMEOUT / P10_QUIESCE_POLL_INTERVAL; i++) {
+ int rc = p10_thread_quiesced(cpu);
+ if (rc < 0)
+ break;
+ if (!rc)
+ return 0;
+
+ time_wait_us(P10_QUIESCE_POLL_INTERVAL);
+ }
+
+ prlog(PR_ERR, "Could not start thread %u:%u:%u:"
+ " Unable to start thread.\n",
+ chip_id, core_id, thread_id);
+
+ return OPAL_HARDWARE;
+}
+
+static int p10_stop_thread(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t thread_id = pir_to_thread_id(cpu->pir);
+ uint32_t dctl_addr;
+ int rc;
+ int i;
+
+ dctl_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_DIRECT_CONTROLS);
+
+ rc = p10_thread_quiesced(cpu);
+ if (rc < 0)
+ return rc;
+ if (rc) {
+ prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
+ " Thread is quiesced already.\n",
+ chip_id, core_id, thread_id);
+ return OPAL_BUSY;
+ }
+
+ if (xscom_write(chip_id, dctl_addr, P10_THREAD_STOP(thread_id))) {
+ prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
+ " Unable to write EC_DIRECT_CONTROLS.\n",
+ chip_id, core_id, thread_id);
+ return OPAL_HARDWARE;
+ }
+
+ for (i = 0; i < P10_QUIESCE_TIMEOUT / P10_QUIESCE_POLL_INTERVAL; i++) {
+ int rc = p10_thread_quiesced(cpu);
+ if (rc < 0)
+ break;
+ if (rc)
+ return 0;
+
+ time_wait_us(P10_QUIESCE_POLL_INTERVAL);
+ }
+
+ prlog(PR_ERR, "Could not stop thread %u:%u:%u:"
+ " Unable to quiesce thread.\n",
+ chip_id, core_id, thread_id);
+
+ return OPAL_HARDWARE;
+}
+
+static int p10_sreset_thread(struct cpu_thread *cpu)
+{
+ uint32_t chip_id = pir_to_chip_id(cpu->pir);
+ uint32_t core_id = pir_to_core_id(cpu->pir);
+ uint32_t thread_id = pir_to_thread_id(cpu->pir);
+ uint32_t dctl_addr;
+
+ dctl_addr = XSCOM_ADDR_P10_EC(core_id, P10_EC_DIRECT_CONTROLS);
+
+ if (xscom_write(chip_id, dctl_addr, P10_THREAD_SRESET(thread_id))) {
+ prlog(PR_ERR, "Could not sreset thread %u:%u:%u:"
+ " Unable to write EC_DIRECT_CONTROLS.\n",
+ chip_id, core_id, thread_id);
+ return OPAL_HARDWARE;
+ }
+
+ return 0;
+}
+
/**************** generic direct controls ****************/
int dctl_set_special_wakeup(struct cpu_thread *t)
@@ -529,7 +838,9 @@ int dctl_set_special_wakeup(struct cpu_thread *t)
lock(&c->dctl_lock);
if (c->special_wakeup_count == 0) {
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ rc = p10_core_set_special_wakeup(c);
+ else if (proc_gen == proc_gen_p9)
rc = p9_core_set_special_wakeup(c);
else /* (proc_gen == proc_gen_p8) */
rc = p8_core_set_special_wakeup(c);
@@ -553,7 +864,9 @@ int dctl_clear_special_wakeup(struct cpu_thread *t)
if (!c->special_wakeup_count)
goto out;
if (c->special_wakeup_count == 1) {
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ rc = p10_core_clear_special_wakeup(c);
+ else if (proc_gen == proc_gen_p9)
rc = p9_core_clear_special_wakeup(c);
else /* (proc_gen == proc_gen_p8) */
rc = p8_core_clear_special_wakeup(c);
@@ -569,24 +882,13 @@ out:
int dctl_core_is_gated(struct cpu_thread *t)
{
struct cpu_thread *c = t->primary;
- uint32_t chip_id = pir_to_chip_id(c->pir);
- uint32_t core_id = pir_to_core_id(c->pir);
- uint32_t sshhyp_addr;
- uint64_t val;
- if (proc_gen != proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ return p10_core_is_gated(c);
+ else if (proc_gen == proc_gen_p9)
+ return p9_core_is_gated(c);
+ else
return OPAL_UNSUPPORTED;
-
- sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP);
-
- if (xscom_read(chip_id, sshhyp_addr, &val)) {
- prlog(PR_ERR, "Could not query core gated on %u:%u:"
- " Unable to read PPM_SSHHYP.\n",
- chip_id, core_id);
- return OPAL_HARDWARE;
- }
-
- return !!(val & P9_CORE_GATED);
}
static int dctl_stop(struct cpu_thread *t)
@@ -599,7 +901,9 @@ static int dctl_stop(struct cpu_thread *t)
unlock(&c->dctl_lock);
return OPAL_BUSY;
}
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ rc = p10_stop_thread(t);
+ else if (proc_gen == proc_gen_p9)
rc = p9_stop_thread(t);
else /* (proc_gen == proc_gen_p8) */
rc = p8_stop_thread(t);
@@ -615,7 +919,7 @@ static int dctl_cont(struct cpu_thread *t)
struct cpu_thread *c = t->primary;
int rc;
- if (proc_gen != proc_gen_p9)
+ if (proc_gen != proc_gen_p10 && proc_gen != proc_gen_p9)
return OPAL_UNSUPPORTED;
lock(&c->dctl_lock);
@@ -623,7 +927,10 @@ static int dctl_cont(struct cpu_thread *t)
unlock(&c->dctl_lock);
return OPAL_BUSY;
}
- rc = p9_cont_thread(t);
+ if (proc_gen == proc_gen_p10)
+ rc = p10_cont_thread(t);
+ else /* (proc_gen == proc_gen_p9) */
+ rc = p9_cont_thread(t);
if (!rc)
t->dctl_stopped = false;
unlock(&c->dctl_lock);
@@ -647,7 +954,9 @@ static int dctl_sreset(struct cpu_thread *t)
unlock(&c->dctl_lock);
return OPAL_BUSY;
}
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ rc = p10_sreset_thread(t);
+ else if (proc_gen == proc_gen_p9)
rc = p9_sreset_thread(t);
else /* (proc_gen == proc_gen_p8) */
rc = p8_sreset_thread(t);
@@ -752,7 +1061,7 @@ int sreset_all_others(void)
* Then sreset the target thread, which resumes execution on that thread.
* Then de-assert special wakeup on the core.
*/
-static int64_t p9_sreset_cpu(struct cpu_thread *cpu)
+static int64_t do_sreset_cpu(struct cpu_thread *cpu)
{
int rc;
@@ -792,7 +1101,7 @@ int64_t opal_signal_system_reset(int cpu_nr)
struct cpu_thread *cpu;
int64_t ret;
- if (proc_gen != proc_gen_p9)
+ if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p10)
return OPAL_UNSUPPORTED;
/*
@@ -811,7 +1120,7 @@ int64_t opal_signal_system_reset(int cpu_nr)
}
lock(&sreset_lock);
- ret = p9_sreset_cpu(cpu);
+ ret = do_sreset_cpu(cpu);
unlock(&sreset_lock);
return ret;
@@ -822,7 +1131,7 @@ void direct_controls_init(void)
if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
return;
- if (proc_gen != proc_gen_p9)
+ if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p10)
return;
opal_register(OPAL_SIGNAL_SYSTEM_RESET, opal_signal_system_reset, 1);
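
The special-wakeup and quiesce paths above share one pattern: write a request, then poll a status SCOM until a done bit appears or a timeout expires, leaving the request asserted on timeout. A runnable sketch of that pattern follows, with a fake SCOM read standing in for xscom_read() and the register constant reduced to a placeholder; it is a simulation, not skiboot code.

#include <stdint.h>
#include <stdio.h>

#define POLL_INTERVAL_US	100
#define TIMEOUT_US		100000
#define SPWU_DONE		(1ull << 62)	/* placeholder for P10_SSH_SPWU_DONE */

/* Fake SCOM read: pretends the done bit appears on the third poll. */
static int fake_scom_read(uint64_t *val)
{
	static int polls;

	*val = (++polls >= 3) ? SPWU_DONE : 0;
	return 0;
}

/*
 * Same shape as p10_core_set_special_wakeup(): poll SSH until SPWU_DONE,
 * and on timeout return an error without deasserting the request, as the
 * wakeup protocol requires.
 */
static int wait_spwu_done(void)
{
	int i;

	for (i = 0; i < TIMEOUT_US / POLL_INTERVAL_US; i++) {
		uint64_t val;

		if (fake_scom_read(&val))
			return -1;	/* SCOM error */
		if (val & SPWU_DONE)
			return 0;	/* caller then checks CORE_GATED */
		/* time_wait_us(POLL_INTERVAL_US) in the real code */
	}
	return -2;			/* timed out, request left asserted */
}

int main(void)
{
	printf("wait_spwu_done() = %d\n", wait_spwu_done());
	return 0;
}
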
diff --git a/core/hmi.c b/core/hmi.c
index 120fe4b..35b6090 100644
--- a/core/hmi.c
+++ b/core/hmi.c
@@ -15,6 +15,7 @@
#include <xscom.h>
#include <xscom-p8-regs.h>
#include <xscom-p9-regs.h>
+#include <xscom-p10-regs.h>
#include <pci.h>
#include <cpu.h>
#include <chip.h>
@@ -27,7 +28,7 @@
#include <cpu.h>
/*
- * HMER register layout:
+ * P9 HMER register layout:
* +===+==========+============================+========+===================+
* |Bit|Name |Description |PowerKVM|Action |
* | | | |HMI | |
@@ -147,6 +148,78 @@
* NOTE: Per Dave Larson, never enable 8,9,21-23
*/
+/*
+ * P10 HMER register layout:
+ * Bit Name Description
+ * 0 malfunction_alert A processor core in the system has checkstopped
+ * (failed recovery). This is broadcasted to every
+ * processor in the system
+ *
+ * 1 reserved reserved
+ *
+ * 2 proc_rcvy_done Processor recovery occurred error-bit in fir not
+ * masked (see bit 11)
+ *
+ * 3 reserved reserved
+ *
+ * 4 tfac_error Timer facility experienced an error. TB, DEC,
+ * HDEC, PURR or SPURR may be corrupted (details in
+ * TFMR)
+ *
+ * 5 tfx_error Error occurred on transfer from tfac shadow to
+ * core
+ *
+ * 6 spurr_scale_limit Nominal frequency exceeded 399 percent
+ *
+ * 7 reserved reserved
+ *
+ * 8 xscom_fail An XSCOM operation caused by a cache inhibited
+ * load/store from this thread failed. A trap
+ * register is available.
+ *
+ * 9 xscom_done An XSCOM operation caused by a cache inhibited
+ * load/store from this thread completed. If
+ * hypervisor intends to use this bit, it is
+ * responsible for clearing it before performing the
+ * xscom operation. NOTE: this bit should always be
+ * masked in HMEER
+ *
+ * 10 reserved reserved
+ *
+ * 11 proc_rcvy_again Processor recovery occurred again before bit 2
+ * was cleared
+ *
+ * 12-15 reserved reserved
+ *
+ * 16 scom_fir_hmi An error inject to PC FIR has occurred to set HMI.
+ * This error inject can also set FIR(61) to cause
+ * recovery.
+ *
+ * 17 reserved reserved
+ *
+ * 18 trig_fir_hmi Debug trigger has occurred to set HMI. This
+ * trigger can also set FIR(60) to cause recovery
+ *
+ * 19-20 reserved reserved
+ *
+ * 21-23 xscom_status If bit 8 is active, the reason will be detailed in
+ * these bits. These bits are information only and
+ *                    always masked (mask = '0'). If hypervisor intends
+ * to use this field, it is responsible for clearing
+ * it before performing the xscom operation.
+ *
+ * 24:63 Not implemented Not implemented.
+ *
+ * P10 HMEER enabled bits:
+ * Name Action
+ * malfunction_alert Decode and log FIR bits.
+ * proc_rcvy_done Log and continue.
+ * tfac_error Log and attempt to recover time facilities.
+ * tfx_error Log and attempt to recover time facilities.
+ * spurr_scale_limit Log and continue. XXX?
+ * proc_rcvy_again Log and continue.
+ */
+
/* Used for tracking cpu threads inside hmi handling. */
#define HMI_STATE_CLEANUP_DONE 0x100
#define CORE_THREAD_MASK 0x0ff
@@ -174,13 +247,17 @@
(SPR_TFMR_TBST_CORRUPT | SPR_TFMR_TB_MISSING_SYNC | \
SPR_TFMR_TB_MISSING_STEP | SPR_TFMR_FW_CONTROL_ERR | \
SPR_TFMR_TFMR_CORRUPT | SPR_TFMR_TB_RESIDUE_ERR | \
- SPR_TFMR_HDEC_PARITY_ERROR)
+ SPR_TFMR_HDEC_PARITY_ERROR | SPR_TFMR_TFAC_XFER_ERROR)
/* TFMR "thread" errors */
#define SPR_TFMR_THREAD_ERRORS \
(SPR_TFMR_PURR_PARITY_ERR | SPR_TFMR_SPURR_PARITY_ERR | \
SPR_TFMR_DEC_PARITY_ERR)
+/*
+ * Starting from P9, core inits are set up to escalate all core-local
+ * checkstops to system checkstops. Review this list when that changes.
+ */
static const struct core_xstop_bit_info {
uint8_t bit; /* CORE FIR bit number */
enum OpalHMI_CoreXstopReason reason;
@@ -203,10 +280,12 @@ static const struct core_xstop_bit_info {
{ 63, CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ },
};
-static const struct core_recoverable_bit_info {
+struct core_fir_bit_info {
uint8_t bit; /* CORE FIR bit number */
const char *reason;
-} recoverable_bits[] = {
+};
+
+static const struct core_fir_bit_info p9_recoverable_bits[] = {
{ 0, "IFU - SRAM (ICACHE parity, etc)" },
{ 2, "IFU - RegFile" },
{ 4, "IFU - Logic" },
@@ -226,6 +305,58 @@ static const struct core_recoverable_bit_info {
{ 43, "PC - Thread hang recovery" },
};
+static const struct core_fir_bit_info p10_core_fir_bits[] = {
+ { 0, "IFU - SRAM recoverable error (ICACHE parity error, etc.)" },
+ { 1, "PC - TC checkstop" },
+ { 2, "IFU - RegFile recoverable error" },
+ { 3, "IFU - RegFile core checkstop" },
+ { 4, "IFU - Logic recoverable error" },
+ { 5, "IFU - Logic core checkstop" },
+ { 7, "VSU - Inference accumulator recoverable error" },
+ { 8, "PC - Recovery core checkstop" },
+ { 9, "VSU - Slice Target File (STF) recoverable error" },
+ { 11, "ISU - Logic recoverable error" },
+ { 12, "ISU - Logic core checkstop" },
+ { 14, "ISU - Machine check received while ME=0 checkstop" },
+ { 15, "ISU - UE from L2" },
+ { 16, "ISU - Number of UEs from L2 above threshold" },
+ { 17, "ISU - UE on CI load" },
+ { 18, "MMU - TLB recoverable error" },
+ { 19, "MMU - SLB error" },
+ { 21, "MMU - CXT recoverable error" },
+ { 22, "MMU - Logic core checkstop" },
+ { 23, "MMU - MMU system checkstop" },
+ { 24, "VSU - Logic recoverable error" },
+ { 25, "VSU - Logic core checkstop" },
+ { 26, "PC - In maint mode and recovery in progress" },
+ { 28, "PC - PC system checkstop" },
+ { 29, "LSU - SRAM recoverable error (DCACHE parity error, etc.)" },
+ { 30, "LSU - Set deleted" },
+ { 31, "LSU - RegFile recoverable error" },
+ { 32, "LSU - RegFile core checkstop" },
+ { 33, "MMU - TLB multi hit error occurred" },
+ { 34, "MMU - SLB multi hit error occurred" },
+ { 35, "LSU - ERAT multi hit error occurred" },
+ { 36, "PC - Forward progress error" },
+ { 37, "LSU - Logic recoverable error" },
+ { 38, "LSU - Logic core checkstop" },
+ { 41, "LSU - System checkstop" },
+ { 43, "PC - Thread hang recoverable error" },
+ { 45, "PC - Logic core checkstop" },
+ { 47, "PC - TimeBase facility checkstop" },
+ { 52, "PC - Hang recovery failed core checkstop" },
+ { 53, "PC - Core internal hang detected" },
+ { 55, "PC - Nest hang detected" },
+ { 56, "PC - Other core chiplet recoverable error" },
+ { 57, "PC - Other core chiplet core checkstop" },
+ { 58, "PC - Other core chiplet system checkstop" },
+ { 59, "PC - SCOM satellite error detected" },
+ { 60, "PC - Debug trigger error inject" },
+ { 61, "PC - SCOM or firmware recoverable error inject" },
+ { 62, "PC - Firmware checkstop error inject" },
+ { 63, "PC - Firmware SPRC / SPRD checkstop" },
+};
+
static const struct nx_xstop_bit_info {
uint8_t bit; /* NX FIR bit number */
enum OpalHMI_NestAccelXstopReason reason;
@@ -270,6 +401,12 @@ static int setup_scom_addresses(void)
nx_dma_engine_fir = P9_NX_DMA_ENGINE_FIR;
nx_pbi_fir = P9_NX_PBI_FIR;
return 1;
+ case proc_gen_p10:
+ malf_alert_scom = P10_MALFUNC_ALERT;
+ nx_status_reg = P10_NX_STATUS_REG;
+ nx_dma_engine_fir = P10_NX_DMA_ENGINE_FIR;
+ nx_pbi_fir = P10_NX_PBI_FIR;
+ return 1;
default:
prerror("%s: Unknown CPU type\n", __func__);
break;
@@ -320,6 +457,10 @@ static int read_core_fir(uint32_t chip_id, uint32_t core_id, uint64_t *core_fir)
rc = xscom_read(chip_id,
XSCOM_ADDR_P9_EC(core_id, P9_CORE_FIR), core_fir);
break;
+ case proc_gen_p10:
+ rc = xscom_read(chip_id,
+ XSCOM_ADDR_P10_EC(core_id, P10_CORE_FIR), core_fir);
+ break;
default:
rc = OPAL_HARDWARE;
}
@@ -335,6 +476,10 @@ static int read_core_wof(uint32_t chip_id, uint32_t core_id, uint64_t *core_wof)
rc = xscom_read(chip_id,
XSCOM_ADDR_P9_EC(core_id, P9_CORE_WOF), core_wof);
break;
+ case proc_gen_p10:
+ rc = xscom_read(chip_id,
+ XSCOM_ADDR_P10_EC(core_id, P10_CORE_WOF), core_wof);
+ break;
default:
rc = OPAL_HARDWARE;
}
@@ -394,6 +539,13 @@ static bool decode_core_fir(struct cpu_thread *cpu,
loc ? loc : "Not Available",
cpu->chip_id, core_id, core_fir);
+ if (proc_gen == proc_gen_p10) {
+ for (i = 0; i < ARRAY_SIZE(p10_core_fir_bits); i++) {
+ if (core_fir & PPC_BIT(p10_core_fir_bits[i].bit))
+ prlog(PR_INFO, " %s\n", p10_core_fir_bits[i].reason);
+ }
+ }
+
/* Check CORE FIR bits and populate HMI event with error info. */
for (i = 0; i < ARRAY_SIZE(xstop_bits); i++) {
if (core_fir & PPC_BIT(xstop_bits[i].bit)) {
@@ -910,6 +1062,7 @@ static void hmi_print_debug(const uint8_t *msg, uint64_t hmer)
if (!loc)
loc = "Not Available";
+ /* Also covers P10 SPR_HMER_TFAC_SHADOW_XFER_ERROR */
if (hmer & (SPR_HMER_TFAC_ERROR | SPR_HMER_TFMR_PARITY_ERROR)) {
prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: TFMR(%016lx) %s\n",
loc, this_cpu()->chip_id, core_id, thread_index,
@@ -1231,10 +1384,16 @@ static int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt,
int i;
prlog(PR_DEBUG, "Core WOF = 0x%016llx recovered error:\n", core_wof);
- for (i = 0; i < ARRAY_SIZE(recoverable_bits); i++) {
- if (core_wof & PPC_BIT(recoverable_bits[i].bit))
- prlog(PR_DEBUG, "%s\n",
- recoverable_bits[i].reason);
+ if (proc_gen <= proc_gen_p9) {
+ for (i = 0; i < ARRAY_SIZE(p9_recoverable_bits); i++) {
+ if (core_wof & PPC_BIT(p9_recoverable_bits[i].bit))
+ prlog(PR_DEBUG, " %s\n", p9_recoverable_bits[i].reason);
+ }
+ } else if (proc_gen == proc_gen_p10) {
+ for (i = 0; i < ARRAY_SIZE(p10_core_fir_bits); i++) {
+ if (core_wof & PPC_BIT(p10_core_fir_bits[i].bit))
+ prlog(PR_DEBUG, " %s\n", p10_core_fir_bits[i].reason);
+ }
}
}
@@ -1245,7 +1404,8 @@ static int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt,
queue_hmi_event(hmi_evt, recover, out_flags);
}
}
- if (hmer & SPR_HMER_PROC_RECV_ERROR_MASKED) {
+
+ if ((proc_gen <= proc_gen_p9) && (hmer & SPR_HMER_PROC_RECV_ERROR_MASKED)) {
handled |= SPR_HMER_PROC_RECV_ERROR_MASKED;
if (cpu_is_thread0(cpu) && hmi_evt) {
hmi_evt->severity = OpalHMI_SEV_NO_ERROR;
@@ -1254,6 +1414,7 @@ static int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt,
}
hmi_print_debug("Processor recovery Done (masked).", hmer);
}
+
if (hmer & SPR_HMER_PROC_RECV_AGAIN) {
handled |= SPR_HMER_PROC_RECV_AGAIN;
if (cpu_is_thread0(cpu) && hmi_evt) {
@@ -1264,17 +1425,30 @@ static int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt,
hmi_print_debug("Processor recovery occurred again before"
"bit2 was cleared\n", hmer);
}
+
+ /* XXX: what to do with this? */
+ if (hmer & SPR_HMER_SPURR_SCALE_LIMIT) {
+ handled |= SPR_HMER_SPURR_SCALE_LIMIT;
+ if (cpu_is_thread0(cpu) && hmi_evt) {
+ hmi_evt->severity = OpalHMI_SEV_NO_ERROR;
+ hmi_evt->type = OpalHMI_ERROR_PROC_RECOV_DONE;
+ queue_hmi_event(hmi_evt, recover, out_flags);
+ }
+ hmi_print_debug("Turbo versus nominal frequency exceeded limit.", hmer);
+ }
+
/* Assert if we see malfunction alert, we can not continue. */
if (hmer & SPR_HMER_MALFUNCTION_ALERT) {
handled |= SPR_HMER_MALFUNCTION_ALERT;
hmi_print_debug("Malfunction Alert", hmer);
+ recover = 0;
if (hmi_evt)
decode_malfunction(hmi_evt, out_flags);
}
/* Assert if we see Hypervisor resource error, we can not continue. */
- if (hmer & SPR_HMER_HYP_RESOURCE_ERR) {
+ if ((proc_gen <= proc_gen_p9) && (hmer & SPR_HMER_HYP_RESOURCE_ERR)) {
handled |= SPR_HMER_HYP_RESOURCE_ERR;
hmi_print_debug("Hypervisor resource error", hmer);
@@ -1285,7 +1459,21 @@ static int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt,
queue_hmi_event(hmi_evt, recover, out_flags);
}
}
- if (hmer & SPR_HMER_TRIG_FIR_HMI) {
+
+ /* XXX: what to do with this? */
+ if ((proc_gen <= proc_gen_p9) && (hmer & SPR_HMER_THD_WAKE_BLOCKED_TM_SUSPEND)) {
+ handled |= SPR_HMER_THD_WAKE_BLOCKED_TM_SUSPEND;
+ hmer &= ~SPR_HMER_THD_WAKE_BLOCKED_TM_SUSPEND;
+
+ hmi_print_debug("Attempted to wake thread when threads in TM suspend mode.", hmer);
+ if (hmi_evt) {
+ hmi_evt->severity = OpalHMI_SEV_NO_ERROR;
+			hmi_evt->type = OpalHMI_ERROR_PROC_RECOV_DONE;
+ queue_hmi_event(hmi_evt, recover, out_flags);
+ }
+ }
+
+ if ((proc_gen <= proc_gen_p9) && (hmer & SPR_HMER_TRIG_FIR_HMI)) {
handled |= SPR_HMER_TRIG_FIR_HMI;
hmer &= ~SPR_HMER_TRIG_FIR_HMI;
@@ -1296,6 +1484,17 @@ static int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt,
queue_hmi_event(hmi_evt, recover, out_flags);
}
}
+ if ((proc_gen == proc_gen_p10) && (hmer & SPR_HMER_P10_TRIG_FIR_HMI)) {
+ handled |= SPR_HMER_P10_TRIG_FIR_HMI;
+ hmer &= ~SPR_HMER_P10_TRIG_FIR_HMI;
+
+ hmi_print_debug("Clearing unknown debug trigger", hmer);
+ if (hmi_evt) {
+ hmi_evt->severity = OpalHMI_SEV_NO_ERROR;
+			hmi_evt->type = OpalHMI_ERROR_DEBUG_TRIG_FIR;
+ queue_hmi_event(hmi_evt, recover, out_flags);
+ }
+ }
if (recover == 0)
disable_fast_reboot("Unrecoverable HMI");
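
A reduced, runnable version of the scan that decode_core_fir() now performs over p10_core_fir_bits, with only a few table entries copied in; the FIR value is made up for the example.

#include <stdint.h>
#include <stdio.h>

#define PPC_BIT(b)	(0x8000000000000000ull >> (b))	/* IBM bit numbering */

static const struct {
	uint8_t bit;
	const char *reason;
} fir_bits[] = {
	{  0, "IFU - SRAM recoverable error (ICACHE parity error, etc.)" },
	{ 19, "MMU - SLB error" },
	{ 37, "LSU - Logic recoverable error" },
};

int main(void)
{
	/* Hypothetical core FIR with bits 0 and 37 set. */
	uint64_t core_fir = PPC_BIT(0) | PPC_BIT(37);
	unsigned int i;

	for (i = 0; i < sizeof(fir_bits) / sizeof(fir_bits[0]); i++)
		if (core_fir & PPC_BIT(fir_bits[i].bit))
			printf("  %s\n", fir_bits[i].reason);
	return 0;
}
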
diff --git a/core/init.c b/core/init.c
index 09749f4..65f136d 100644
--- a/core/init.c
+++ b/core/init.c
@@ -1167,7 +1167,7 @@ void __noreturn __nomcount main_cpu_entry(const void *fdt)
/* Initialize the rest of the cpu thread structs */
init_all_cpus();
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p9 || proc_gen == proc_gen_p10)
cpu_set_ipi_enable(true);
/* Add the /opal node to the device-tree */
diff --git a/core/mce.c b/core/mce.c
index 3f50916..47674ab 100644
--- a/core/mce.c
+++ b/core/mce.c
@@ -65,6 +65,42 @@ static const struct mce_ierror_table mce_p9_ierror_table[] = {
"instruction fetch page table access to foreign address", },
{ 0 } };
+static const struct mce_ierror_table mce_p10_ierror_table[] = {
+{ 0x00000000081c0000, 0x0000000000040000,
+ MCE_INSNFETCH | MCE_MEMORY_ERROR | MCE_INVOLVED_EA,
+ "instruction fetch memory uncorrectable error", },
+{ 0x00000000081c0000, 0x0000000000080000,
+ MCE_INSNFETCH | MCE_SLB_ERROR | MCE_INVOLVED_EA,
+ "instruction fetch SLB parity error", },
+{ 0x00000000081c0000, 0x00000000000c0000,
+ MCE_INSNFETCH | MCE_SLB_ERROR | MCE_INVOLVED_EA,
+ "instruction fetch SLB multi-hit error", },
+{ 0x00000000081c0000, 0x0000000000100000,
+ MCE_INSNFETCH | MCE_INVOLVED_EA | MCE_ERAT_ERROR,
+ "instruction fetch ERAT multi-hit error", },
+{ 0x00000000081c0000, 0x0000000000140000,
+ MCE_INSNFETCH | MCE_INVOLVED_EA | MCE_TLB_ERROR,
+ "instruction fetch TLB multi-hit error", },
+{ 0x00000000081c0000, 0x0000000000180000,
+ MCE_INSNFETCH | MCE_MEMORY_ERROR | MCE_TABLE_WALK | MCE_INVOLVED_EA,
+ "instruction fetch page table access memory uncorrectable error", },
+{ 0x00000000081c0000, 0x00000000001c0000,
+ MCE_INSNFETCH | MCE_INVOLVED_EA,
+ "instruction fetch to control real address", },
+{ 0x00000000081c0000, 0x00000000080c0000,
+ MCE_INSNFETCH | MCE_INVOLVED_EA,
+ "instruction fetch real address error", },
+{ 0x00000000081c0000, 0x0000000008100000,
+ MCE_INSNFETCH | MCE_TABLE_WALK | MCE_INVOLVED_EA,
+ "instruction fetch page table access real address error", },
+{ 0x00000000081c0000, 0x0000000008140000,
+ MCE_LOADSTORE | MCE_IMPRECISE,
+ "store real address asynchronous error", },
+{ 0x00000000081c0000, 0x00000000081c0000,
+ MCE_INSNFETCH | MCE_TABLE_WALK | MCE_INVOLVED_EA,
+ "instruction fetch page table access to control real address", },
+{ 0 } };
+
struct mce_derror_table {
unsigned long dsisr_value;
uint64_t type;
@@ -113,6 +149,42 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
"load/store to foreign address", },
{ 0 } };
+static const struct mce_derror_table mce_p10_derror_table[] = {
+{ 0x00008000,
+ MCE_LOADSTORE | MCE_MEMORY_ERROR,
+ "load/store memory uncorrectable error", },
+{ 0x00004000,
+ MCE_LOADSTORE | MCE_MEMORY_ERROR | MCE_TABLE_WALK | MCE_INVOLVED_EA,
+ "load/store page table access memory uncorrectable error", },
+{ 0x00000800,
+ MCE_LOADSTORE | MCE_INVOLVED_EA | MCE_ERAT_ERROR,
+ "load/store ERAT multi-hit error", },
+{ 0x00000400,
+ MCE_LOADSTORE | MCE_INVOLVED_EA | MCE_TLB_ERROR,
+ "load/store TLB multi-hit error", },
+{ 0x00000200,
+ MCE_TLBIE_ERROR,
+ "TLBIE or TLBIEL instruction programming error", },
+{ 0x00000100,
+ MCE_LOADSTORE | MCE_INVOLVED_EA | MCE_SLB_ERROR,
+ "load/store SLB parity error", },
+{ 0x00000080,
+ MCE_LOADSTORE | MCE_INVOLVED_EA | MCE_SLB_ERROR,
+ "load/store SLB multi-hit error", },
+{ 0x00000040,
+ MCE_LOADSTORE | MCE_INVOLVED_EA,
+ "load real address error", },
+{ 0x00000020,
+ MCE_LOADSTORE | MCE_TABLE_WALK,
+ "load/store page table access real address error", },
+{ 0x00000010,
+ MCE_LOADSTORE | MCE_TABLE_WALK,
+ "load/store page table access to control real address", },
+{ 0x00000008,
+ MCE_LOADSTORE,
+ "load/store to control real address", },
+{ 0 } };
+
static void decode_ierror(const struct mce_ierror_table table[],
uint64_t srr1,
uint64_t *type,
@@ -145,20 +217,11 @@ static void decode_derror(const struct mce_derror_table table[],
}
}
-void decode_mce(uint64_t srr0, uint64_t srr1,
+static void decode_mce_p9(uint64_t srr0, uint64_t srr1,
uint32_t dsisr, uint64_t dar,
uint64_t *type, const char **error_str,
uint64_t *address)
{
- *type = MCE_UNKNOWN;
- *error_str = "unknown error";
- *address = 0;
-
- if (proc_gen != proc_gen_p9) {
- *error_str = "unknown error (processor not supported)";
- return;
- }
-
/*
* On POWER9 DD2.1 and below, it's possible to get a machine check
* caused by a paste instruction where only DSISR bit 25 is set. This
@@ -198,3 +261,49 @@ void decode_mce(uint64_t srr0, uint64_t srr1,
*address = srr0;
}
}
+
+static void decode_mce_p10(uint64_t srr0, uint64_t srr1,
+ uint32_t dsisr, uint64_t dar,
+ uint64_t *type, const char **error_str,
+ uint64_t *address)
+{
+ /*
+ * Async machine check due to bad real address from store or foreign
+ * link time out comes with the load/store bit (PPC bit 42) set in
+ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
+ * directed to the ierror table so it will find the cause (which
+ * describes it correctly as a store error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ (srr1 & 0x081c0000) == 0x08140000) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ if (SRR1_MC_LOADSTORE(srr1)) {
+ decode_derror(mce_p10_derror_table, dsisr, type, error_str);
+ if (*type & MCE_INVOLVED_EA)
+ *address = dar;
+ } else {
+ decode_ierror(mce_p10_ierror_table, srr1, type, error_str);
+ if (*type & MCE_INVOLVED_EA)
+ *address = srr0;
+ }
+}
+
+void decode_mce(uint64_t srr0, uint64_t srr1,
+ uint32_t dsisr, uint64_t dar,
+ uint64_t *type, const char **error_str,
+ uint64_t *address)
+{
+ *type = MCE_UNKNOWN;
+ *error_str = "unknown error";
+ *address = 0;
+
+ if (proc_gen == proc_gen_p9) {
+ decode_mce_p9(srr0, srr1, dsisr, dar, type, error_str, address);
+ } else if (proc_gen == proc_gen_p10) {
+ decode_mce_p10(srr0, srr1, dsisr, dar, type, error_str, address);
+ } else {
+ *error_str = "unknown error (processor not supported)";
+ }
+}
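
The MCE tables above are consumed by a simple first-match scan (decode_ierror()/decode_derror() in the existing code). A standalone sketch of that scan, reusing the first two P10 ierror rows; the struct field names are assumptions here, not copied from skiboot.

#include <stdint.h>
#include <stdio.h>

struct ierror_entry {
	uint64_t srr1_mask;	/* assumed field names */
	uint64_t srr1_value;
	const char *error_str;
};

static const struct ierror_entry p10_ierrors[] = {
	{ 0x00000000081c0000, 0x0000000000040000,
	  "instruction fetch memory uncorrectable error" },
	{ 0x00000000081c0000, 0x0000000000080000,
	  "instruction fetch SLB parity error" },
	{ 0, 0, NULL }
};

/* First (mask, value) pair that matches SRR1 wins. */
static const char *decode(uint64_t srr1)
{
	const struct ierror_entry *e;

	for (e = p10_ierrors; e->srr1_mask; e++)
		if ((srr1 & e->srr1_mask) == e->srr1_value)
			return e->error_str;
	return "unknown error";
}

int main(void)
{
	printf("%s\n", decode(0x0000000000080000));
	return 0;
}
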
diff --git a/core/test/run-timer.c b/core/test/run-timer.c
index fef5648..8f8b20e 100644
--- a/core/test/run-timer.c
+++ b/core/test/run-timer.c
@@ -16,7 +16,7 @@
#define smt_lowest()
#define smt_medium()
-enum proc_gen proc_gen = proc_gen_p9;
+enum proc_gen proc_gen = proc_gen_unknown;
static uint64_t stamp, last;
struct lock;
diff --git a/doc/platforms-and-cpus.rst b/doc/platforms-and-cpus.rst
index 658e00e..2f5e943 100644
--- a/doc/platforms-and-cpus.rst
+++ b/doc/platforms-and-cpus.rst
@@ -17,6 +17,7 @@ Power9N 0x004e1xxx Nimbus 24 small core
Power9C 0x004e2xxx Cumulus 12 small core
Power9C 0x004e3xxx Cumulus 24 small core
Power9P 0x004fxxxx Axone
+Power10 0x0080xxxx
=============== =============== =====================
Platforms
diff --git a/hw/chiptod.c b/hw/chiptod.c
index f445fd4..4e62fd7 100644
--- a/hw/chiptod.c
+++ b/hw/chiptod.c
@@ -959,6 +959,30 @@ bool chiptod_wakeup_resync(void)
return false;
}
+/*
+ * Fixup for p10 TOD bug workaround.
+ *
+ * The TOD may fail to start if all clocks in the system are derived from
+ * the same reference oscillator.
+ *
+ * Avoiding this is pretty easy: Whenever we clear/reset the TOD registers,
+ * make sure to init bits 26:31 of TOD_SLAVE_PATH_CTRL (0x40005) to 0b111111
+ * instead of 0b000000. The value 0 in TOD_S_PATH_CTRL_REG(26:31) must be
+ * avoided, and if it does get written it must be followed up by writing a
+ * value of all ones to clean up the resulting bad state before the (nonzero)
+ * final value can be written.
+ */
+static void fixup_tod_reg_value(struct chiptod_tod_regs *treg_entry)
+{
+ int32_t chip_id = this_cpu()->chip_id;
+
+ if (proc_gen != proc_gen_p10)
+ return;
+
+ if (treg_entry->xscom_addr == TOD_SLAVE_PATH_CTRL)
+ treg_entry->val[chip_id].data |= PPC_BITMASK(26,31);
+}
+
static int __chiptod_recover_tod_errors(void)
{
uint64_t terr;
@@ -997,8 +1021,12 @@ static int __chiptod_recover_tod_errors(void)
return 0;
}
+ fixup_tod_reg_value(&chiptod_tod_regs[i]);
+
prlog(PR_DEBUG, "Parity error, Restoring TOD register: "
- "%08llx\n", chiptod_tod_regs[i].xscom_addr);
+ "%08llx = %016llx\n",
+ chiptod_tod_regs[i].xscom_addr,
+ chiptod_tod_regs[i].val[chip_id].data);
if (xscom_writeme(chiptod_tod_regs[i].xscom_addr,
chiptod_tod_regs[i].val[chip_id].data)) {
prerror("XSCOM error writing 0x%08llx reg.\n",
diff --git a/hw/dts.c b/hw/dts.c
index b72516a..d8831e4 100644
--- a/hw/dts.c
+++ b/hw/dts.c
@@ -171,7 +171,11 @@ static void dts_async_read_temp(struct timer *t __unused, void *data,
swkup_rc = dctl_set_special_wakeup(cpu);
- rc = dts_read_core_temp_p9(cpu->pir, &dts);
+ if (proc_gen == proc_gen_p9)
+ rc = dts_read_core_temp_p9(cpu->pir, &dts);
+ else /* (proc_gen == proc_gen_p10) */
+ rc = OPAL_UNSUPPORTED; /* XXX P10 */
+
if (!rc) {
if (cpu->sensor_attr == SENSOR_DTS_ATTR_TEMP_MAX)
*cpu->sensor_data = cpu_to_be64(dts.temp);
@@ -219,6 +223,7 @@ static int dts_read_core_temp(u32 pir, struct dts *dts, u8 attr,
rc = OPAL_ASYNC_COMPLETION;
unlock(&cpu->dts_lock);
break;
+ case proc_gen_p10: /* XXX P10 */
default:
rc = OPAL_UNSUPPORTED;
}
diff --git a/hw/lpc.c b/hw/lpc.c
index c2a07a0..bf3ab1f 100644
--- a/hw/lpc.c
+++ b/hw/lpc.c
@@ -915,7 +915,8 @@ void lpc_finalize_interrupts(void)
if (chip->lpc && chip->psi &&
(chip->type == PROC_CHIP_P9_NIMBUS ||
chip->type == PROC_CHIP_P9_CUMULUS ||
- chip->type == PROC_CHIP_P9P))
+ chip->type == PROC_CHIP_P9P ||
+ chip->type == PROC_CHIP_P10))
lpc_create_int_map(chip->lpc, chip->psi->node);
}
}
@@ -959,6 +960,7 @@ static void lpc_init_interrupts_one(struct proc_chip *chip)
case PROC_CHIP_P9_NIMBUS:
case PROC_CHIP_P9_CUMULUS:
case PROC_CHIP_P9P:
+ case PROC_CHIP_P10:
/* On P9, we additionally setup the routing. */
lpc->has_serirq = true;
for (i = 0; i < LPC_NUM_SERIRQ; i++) {
@@ -1377,7 +1379,8 @@ void lpc_register_client(uint32_t chip_id,
has_routes =
chip->type == PROC_CHIP_P9_NIMBUS ||
chip->type == PROC_CHIP_P9_CUMULUS ||
- chip->type == PROC_CHIP_P9P;
+ chip->type == PROC_CHIP_P9P ||
+ chip->type == PROC_CHIP_P10;
if (policy != IRQ_ATTR_TARGET_OPAL && !has_routes) {
prerror("Chip doesn't support OS interrupt policy\n");
diff --git a/hw/xscom.c b/hw/xscom.c
index c97740a..3474572 100644
--- a/hw/xscom.c
+++ b/hw/xscom.c
@@ -94,7 +94,11 @@ static void xscom_reset(uint32_t gcid, bool need_delay)
mtspr(SPR_HMER, HMER_CLR_MASK);
/* Setup local and target scom addresses */
- if (proc_gen == proc_gen_p9) {
+ if (proc_gen == proc_gen_p10) {
+ recv_status_reg = 0x00090018;
+ log_reg = 0x0090012;
+ err_reg = 0x0090013;
+ } else if (proc_gen == proc_gen_p9) {
recv_status_reg = 0x00090018;
log_reg = 0x0090012;
err_reg = 0x0090013;
@@ -497,7 +501,7 @@ static int xscom_indirect_read(uint32_t gcid, uint64_t pcb_addr, uint64_t *val)
{
uint64_t form = xscom_indirect_form(pcb_addr);
- if ((proc_gen == proc_gen_p9) && (form == 1))
+ if ((proc_gen >= proc_gen_p9) && (form == 1))
return OPAL_UNSUPPORTED;
return xscom_indirect_read_form0(gcid, pcb_addr, val);
@@ -565,7 +569,7 @@ static int xscom_indirect_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val)
{
uint64_t form = xscom_indirect_form(pcb_addr);
- if ((proc_gen == proc_gen_p9) && (form == 1))
+ if ((proc_gen >= proc_gen_p9) && (form == 1))
return xscom_indirect_write_form1(gcid, pcb_addr, val);
return xscom_indirect_write_form0(gcid, pcb_addr, val);
@@ -576,7 +580,7 @@ static uint32_t xscom_decode_chiplet(uint32_t partid, uint64_t *pcb_addr)
uint32_t gcid = (partid & 0x0fffffff) >> 4;
uint32_t core = partid & 0xf;
- if (proc_gen == proc_gen_p9) {
+ if (proc_gen >= proc_gen_p9) {
/* XXX Not supported */
*pcb_addr = 0;
} else {
@@ -821,7 +825,9 @@ int64_t xscom_read_cfam_chipid(uint32_t partid, uint32_t *chip_id)
* something up
*/
if (chip_quirk(QUIRK_NO_F000F)) {
- if (proc_gen == proc_gen_p9)
+ if (proc_gen == proc_gen_p10)
+ val = 0x120DA04980000000UL; /* P10 DD1.0 */
+ else if (proc_gen == proc_gen_p9)
val = 0x203D104980000000UL; /* P9 Nimbus DD2.3 */
else
val = 0x221EF04980000000UL; /* P8 Murano DD2.1 */
@@ -873,6 +879,10 @@ static void xscom_init_chip_info(struct proc_chip *chip)
chip->type = PROC_CHIP_P9P;
assert(proc_gen == proc_gen_p9);
break;
+ case 0xda:
+ chip->type = PROC_CHIP_P10;
+ assert(proc_gen == proc_gen_p10);
+ break;
default:
printf("CHIP: Unknown chip type 0x%02x !!!\n",
(unsigned char)(val & 0xff));
@@ -911,7 +921,7 @@ static void xscom_init_chip_info(struct proc_chip *chip)
prlog(PR_INFO,"P9 DD%i.%i%d detected\n", 0xf & (chip->ec_level >> 4),
chip->ec_level & 0xf, rev);
chip->ec_rev = rev;
- }
+ } /* XXX P10 */
}
/*
@@ -949,7 +959,8 @@ void xscom_init(void)
struct proc_chip *chip;
const char *chip_name;
static const char *chip_names[] = {
- "UNKNOWN", "P8E", "P8", "P8NVL", "P9N", "P9C", "P9P"
+ "UNKNOWN", "P8E", "P8", "P8NVL", "P9N", "P9C", "P9P",
+ "P10",
};
chip = get_chip(gcid);
diff --git a/include/chip.h b/include/chip.h
index 4deb961..8bc48ba 100644
--- a/include/chip.h
+++ b/include/chip.h
@@ -100,10 +100,58 @@
#define P9_PIRFUSED2NORMALTHREADID(pir) (((pir) >> 1) & 0x3)
+#define P10_PIR2FUSEDCOREID(pir) P9_PIR2FUSEDCOREID(pir)
+#define P10_PIRFUSED2NORMALCOREID(pir) P9_PIRFUSED2NORMALCOREID(pir)
+#define P10_PIRFUSED2NORMALTHREADID(pir) P9_PIRFUSED2NORMALTHREADID(pir)
+
/* P9 specific ones mostly used by XIVE */
#define P9_PIR2LOCALCPU(pir) ((pir) & 0xff)
#define P9_PIRFROMLOCALCPU(chip, cpu) (((chip) << 8) | (cpu))
+/*
+ * P10 PIR
+ * -------
+ *
+ * PIR layout:
+ *
+ * | 49| 50| 51| 52| 53| 54| 55| 56| 57| 58| 59| 60| 61| 62| 63|
+ * |Spare ID   |Topology ID    |Sp.|Quad ID    |Core ID|Thread ID|
+ *
+ * Bit 56 is a spare quad ID. In big-core mode, thread ID extends to bit 61.
+ *
+ * P10 GCID
+ * --------
+ *
+ * - Global chip ID is also called Topology ID.
+ * - Node ID is called Group ID (? XXX P10).
+ *
+ * Global chip ID is a 4 bit number.
+ *
+ * There is a topology mode bit that can be 0 or 1, which changes GCID mapping.
+ *
+ * Topology mode 0 (NodeID = GCID bits 0:2, ChipID = GCID bit 3):
+ * |____|____|____|____|
+ *
+ * Topology mode 1 (NodeID = GCID bits 0:1, ChipID = GCID bits 2:3):
+ * |____|____|____|____|
+ */
+#define P10_PIR2GCID(pir) (((pir) >> 8) & 0xf)
+
+#define P10_PIR2COREID(pir) (((pir) >> 2) & 0x3f)
+
+#define P10_PIR2THREADID(pir) ((pir) & 0x3)
+
+// XXX P10 These depend on the topology mode, how to get that (system type?)
+#define P10_GCID2NODEID(gcid, mode) ((mode) == 0 ? ((gcid) >> 1) & 0x7 : ((gcid) >> 2) & 0x3)
+#define P10_GCID2CHIPID(gcid, mode) ((mode) == 0 ? (gcid) & 0x1 : (gcid) & 0x3)
+
+/* P10 specific ones mostly used by XIVE */
+#define P10_PIR2LOCALCPU(pir) ((pir) & 0xff)
+#define P10_PIRFROMLOCALCPU(chip, cpu) (((chip) << 8) | (cpu))
struct dt_node;
struct centaur_chip;
@@ -123,6 +171,7 @@ enum proc_chip_type {
PROC_CHIP_P9_NIMBUS,
PROC_CHIP_P9_CUMULUS,
PROC_CHIP_P9P,
+ PROC_CHIP_P10,
};
/* Simulator quirks */
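
A worked example of the new P10 PIR/GCID macros, restated as plain functions so it compiles on its own; the PIR value and topology mode are invented for illustration.

#include <stdint.h>
#include <stdio.h>

static uint32_t gcid(uint32_t pir)	{ return (pir >> 8) & 0xf; }	/* P10_PIR2GCID */
static uint32_t coreid(uint32_t pir)	{ return (pir >> 2) & 0x3f; }	/* P10_PIR2COREID */
static uint32_t threadid(uint32_t pir)	{ return pir & 0x3; }		/* P10_PIR2THREADID */

/* Topology mode 0: node is the upper 3 bits of the GCID, chip the low bit. */
static uint32_t nodeid(uint32_t g, int mode) { return mode ? (g >> 2) & 0x3 : (g >> 1) & 0x7; }
static uint32_t chipid(uint32_t g, int mode) { return mode ? g & 0x3 : g & 0x1; }

int main(void)
{
	uint32_t pir = 0x0234;	/* hypothetical small-core PIR */
	int mode = 0;		/* hypothetical topology mode */

	printf("gcid %u (node %u chip %u) core %u thread %u\n",
	       gcid(pir), nodeid(gcid(pir), mode), chipid(gcid(pir), mode),
	       coreid(pir), threadid(pir));
	/* 0x234 -> gcid 2 (node 1, chip 0), core 13, thread 0 */
	return 0;
}
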
diff --git a/include/opal-api.h b/include/opal-api.h
index e90cab1..9cba35c 100644
--- a/include/opal-api.h
+++ b/include/opal-api.h
@@ -731,6 +731,7 @@ enum OpalHMI_CoreXstopReason {
CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED = 0x00004000,
CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ = 0x00008000,
CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ = 0x00010000,
+ CORE_CHECKSTOP_MMU_SYSTEM = 0x00020000,
};
enum OpalHMI_NestAccelXstopReason {
diff --git a/include/processor.h b/include/processor.h
index 70e749f..973d7e7 100644
--- a/include/processor.h
+++ b/include/processor.h
@@ -27,6 +27,7 @@
#define MSR_LE PPC_BIT(63) /* Little Endian */
/* PIR */
+#define SPR_PIR_P10_MASK 0x7fff /* Mask of implemented bits */
#define SPR_PIR_P9_MASK 0x7fff /* Mask of implemented bits */
#define SPR_PIR_P8_MASK 0x1fff /* Mask of implemented bits */
@@ -114,6 +115,7 @@
#define SPR_TFMR_MOVE_CHIP_TOD_TO_TB PPC_BIT(18)
#define SPR_TFMR_CLEAR_TB_ERRORS PPC_BIT(24)
/* Bits in TFMR - thread indep. status bits */
+#define SPR_TFMR_TFAC_XFER_ERROR PPC_BIT(25)
#define SPR_TFMR_HDEC_PARITY_ERROR PPC_BIT(26)
#define SPR_TFMR_TBST_CORRUPT PPC_BIT(27)
#define SPR_TFMR_TBST_ENCODED PPC_BITMASK(28,31)
@@ -140,17 +142,21 @@
/* Bits in HMER/HMEER */
#define SPR_HMER_MALFUNCTION_ALERT PPC_BIT(0)
#define SPR_HMER_PROC_RECV_DONE PPC_BIT(2)
-#define SPR_HMER_PROC_RECV_ERROR_MASKED PPC_BIT(3)
+#define SPR_HMER_PROC_RECV_ERROR_MASKED PPC_BIT(3) /* Not P10 */
#define SPR_HMER_TFAC_ERROR PPC_BIT(4)
-#define SPR_HMER_TFMR_PARITY_ERROR PPC_BIT(5)
+#define SPR_HMER_TFMR_PARITY_ERROR PPC_BIT(5) /* P9 */
+#define SPR_HMER_TFAC_SHADOW_XFER_ERROR PPC_BIT(5) /* P10 */
+#define SPR_HMER_SPURR_SCALE_LIMIT PPC_BIT(6) /* P10 */
#define SPR_HMER_XSCOM_FAIL PPC_BIT(8)
#define SPR_HMER_XSCOM_DONE PPC_BIT(9)
#define SPR_HMER_PROC_RECV_AGAIN PPC_BIT(11)
-#define SPR_HMER_WARN_RISE PPC_BIT(14)
-#define SPR_HMER_WARN_FALL PPC_BIT(15)
+#define SPR_HMER_WARN_RISE PPC_BIT(14) /* Not P10 */
+#define SPR_HMER_WARN_FALL PPC_BIT(15) /* Not P10 */
#define SPR_HMER_SCOM_FIR_HMI PPC_BIT(16)
-#define SPR_HMER_TRIG_FIR_HMI PPC_BIT(17)
-#define SPR_HMER_HYP_RESOURCE_ERR PPC_BIT(20)
+#define SPR_HMER_TRIG_FIR_HMI PPC_BIT(17) /* Not P10 */
+#define SPR_HMER_THD_WAKE_BLOCKED_TM_SUSPEND PPC_BIT(17) /* Not P10 */
+#define SPR_HMER_P10_TRIG_FIR_HMI PPC_BIT(18)
+#define SPR_HMER_HYP_RESOURCE_ERR PPC_BIT(20) /* Not P10 */
#define SPR_HMER_XSCOM_STATUS PPC_BITMASK(21,23)
/*
@@ -165,14 +171,23 @@
SPR_HMER_TFMR_PARITY_ERROR |\
SPR_HMER_PROC_RECV_AGAIN)
+#define SPR_HMEER_P10_HMI_ENABLE_MASK (SPR_HMER_MALFUNCTION_ALERT |\
+ SPR_HMER_PROC_RECV_DONE |\
+ SPR_HMER_TFAC_ERROR |\
+ SPR_HMER_TFAC_SHADOW_XFER_ERROR |\
+ SPR_HMER_SPURR_SCALE_LIMIT |\
+ SPR_HMER_PROC_RECV_AGAIN)
+
/* Bits in HID0 */
#define SPR_HID0_POWER8_4LPARMODE PPC_BIT(2)
#define SPR_HID0_POWER8_2LPARMODE PPC_BIT(6)
#define SPR_HID0_POWER8_DYNLPARDIS PPC_BIT(15)
#define SPR_HID0_POWER8_HILE PPC_BIT(19)
#define SPR_HID0_POWER9_HILE PPC_BIT(4)
+#define SPR_HID0_POWER10_HILE PPC_BIT(4)
#define SPR_HID0_POWER8_ENABLE_ATTN PPC_BIT(31)
#define SPR_HID0_POWER9_ENABLE_ATTN (PPC_BIT(2) | PPC_BIT(3))
+#define SPR_HID0_POWER10_ENABLE_ATTN (PPC_BIT(2) | PPC_BIT(3))
#define SPR_HID0_POWER9_RADIX PPC_BIT(8)
/* PVR bits */
@@ -192,6 +207,7 @@
#define PVR_TYPE_P8NVL 0x004c /* Naples */
#define PVR_TYPE_P9 0x004e
#define PVR_TYPE_P9P 0x004f /* Axone */
+#define PVR_TYPE_P10 0x0080
#ifdef __ASSEMBLY__
@@ -236,16 +252,22 @@ static inline bool is_power9n(uint32_t version)
static inline bool is_fused_core(uint32_t version)
{
- if (PVR_TYPE(version) != PVR_TYPE_P9)
- return false;
-
- switch(PVR_CHIP_TYPE(version)) {
- case 0:
- case 2:
- return true;
- default:
+ if (PVR_TYPE(version) == PVR_TYPE_P9) {
+ switch(PVR_CHIP_TYPE(version)) {
+ case 0:
+ case 2:
+ return true;
+ default:
+ return false;
+ }
+
+ } else if(PVR_TYPE(version) == PVR_TYPE_P10) {
+ if(PVR_CHIP_TYPE(version) & 0x01)
return false;
- }
+ else
+ return true;
+ } else
+ return false;
}
static inline bool is_power9c(uint32_t version)
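
The value that SPR_HMEER_P10_HMI_ENABLE_MASK resolves to can be checked with a few lines of C; this just re-evaluates the PPC_BIT positions listed above (0, 2, 4, 5, 6 and 11) and is not additional skiboot code.

#include <stdint.h>
#include <stdio.h>

#define PPC_BIT(b)	(0x8000000000000000ull >> (b))

int main(void)
{
	/* malfunction_alert, proc_rcvy_done, tfac_error, tfx_error,
	 * spurr_scale_limit, proc_rcvy_again */
	uint64_t mask = PPC_BIT(0) | PPC_BIT(2) | PPC_BIT(4) |
			PPC_BIT(5) | PPC_BIT(6) | PPC_BIT(11);

	printf("P10 HMEER enable mask = 0x%016llx\n",
	       (unsigned long long)mask);	/* 0xae10000000000000 */
	return 0;
}
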
diff --git a/include/skiboot.h b/include/skiboot.h
index d33c025..f3378ec 100644
--- a/include/skiboot.h
+++ b/include/skiboot.h
@@ -97,6 +97,7 @@ enum proc_gen {
proc_gen_unknown,
proc_gen_p8,
proc_gen_p9,
+ proc_gen_p10,
};
extern enum proc_gen proc_gen;
diff --git a/include/xscom-p10-regs.h b/include/xscom-p10-regs.h
new file mode 100644
index 0000000..8096b2f
--- /dev/null
+++ b/include/xscom-p10-regs.h
@@ -0,0 +1,54 @@
+#ifndef __XSCOM_P10_REGS_H__
+#define __XSCOM_P10_REGS_H__
+
+/* Core FIR (Fault Isolation Register) */
+#define P10_CORE_FIR 0x440
+
+/* Core WOF (Whose On First) */
+#define P10_CORE_WOF 0x448
+
+#define P10_MALFUNC_ALERT 0x00090022
+
+#define P10_NX_STATUS_REG 0x02011040 /* NX status register */
+#define P10_NX_DMA_ENGINE_FIR 0x02011100 /* DMA & Engine FIR Data Register */
+#define P10_NX_PBI_FIR 0x02011080 /* PowerBus Interface FIR Register */
+
+#define P10_EC_CORE_THREAD_STATE 0x412 /* XXX P10 is this right? */
+#define P10_THREAD_STOPPED(t) PPC_BIT(56 + (t))
+
+#define P10_EC_THREAD_INFO 0x413
+#define P10_THREAD_ACTIVE(t) PPC_BIT(t)
+
+#define P10_EC_RAS_STATUS 0x454
+#define P10_THREAD_MAINT(t) PPC_BIT(0 + 8*(t))
+#define P10_THREAD_QUIESCED(t) PPC_BIT(1 + 8*(t))
+#define P10_THREAD_ICT_EMPTY(t) PPC_BIT(2 + 8*(t))
+
+#define P10_EC_DIRECT_CONTROLS 0x449
+#define P10_THREAD_STOP(t) PPC_BIT(7 + 8*(t))
+#define P10_THREAD_START(t) PPC_BIT(6 + 8*(t))
+#define P10_THREAD_SRESET(t) PPC_BIT(4 + 8*(t))
+#define P10_THREAD_CLEAR_MAINT(t) PPC_BIT(3 + 8*(t))
+#define P10_THREAD_PWR(t) PPC_BIT(32 + 8*(t))
+
+#define P10_QME_FIR 0x000
+
+#define P10_QME_SPWU_HYP 0x83c
+#define P10_SPWU_REQ PPC_BIT(0)
+#define P10_SPWU_DONE PPC_BIT(4)
+
+#define P10_QME_SSH_HYP 0x82c
+#define P10_SSH_CORE_GATED PPC_BIT(0)
+#define P10_SSH_SPWU_DONE PPC_BIT(1)
+
+#define P10_NCU_STATUS_REG 0x64f
+#define P10_NCU_SPEC_BAR 0x650
+#define P10_NCU_SPEC_BAR_ENABLE PPC_BIT(0)
+#define P10_NCU_SPEC_BAR_256K PPC_BIT(1)
+#define P10_NCU_SPEC_BAR_ADDRMSK 0x000fffffffffc000ull /* 16k aligned */
+
+#define P10_NCU_DARN_BAR 0x651
+#define P10_NCU_DARN_BAR_EN PPC_BIT(0)
+#define P10_NCU_DARN_BAR_ADDRMSK 0x000ffffffffff000ull /* 4k aligned */
+
+#endif /* __XSCOM_P10_REGS_H__ */
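
The per-thread P10_THREAD_* accessors above carve EC_DIRECT_CONTROLS and EC_RAS_STATUS into 8-bit slices, one per thread. A quick check of the resulting bit positions, as an illustration only:

#include <stdint.h>
#include <stdio.h>

#define PPC_BIT(b)		(0x8000000000000000ull >> (b))

/* Same definitions as above, repeated so this compiles stand-alone. */
#define THREAD_SRESET(t)	PPC_BIT(4 + 8 * (t))
#define THREAD_STOP(t)		PPC_BIT(7 + 8 * (t))
#define THREAD_QUIESCED(t)	PPC_BIT(1 + 8 * (t))

int main(void)
{
	int t;

	for (t = 0; t < 4; t++)
		printf("thread %d: sreset %016llx stop %016llx quiesced %016llx\n",
		       t,
		       (unsigned long long)THREAD_SRESET(t),
		       (unsigned long long)THREAD_STOP(t),
		       (unsigned long long)THREAD_QUIESCED(t));
	return 0;
}
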
diff --git a/include/xscom.h b/include/xscom.h
index db6d3fc..a6bb7e4 100644
--- a/include/xscom.h
+++ b/include/xscom.h
@@ -137,6 +137,91 @@
#define XSCOM_ADDR_P9_EC_SLAVE(core, addr) \
XSCOM_ADDR_P9_EC(core, (addr) | 0xf0000)
+/*
+ * Additional useful definitions for P10
+ */
+
+/*
+ * POWER10 pervasive structure
+ * Chip has 8 EQ chiplets (aka super-chiplets), and other nest chiplets.
+ * Each EQ contains 4 EX regions.
+ * Each EX contains an ECL2, L3, MMA.
+ * Each ECL2 contains an EC (core), L2, and NCU.
+ *
+ * Each EQ has a Quad Management Engine (QME), responsible for power management
+ * for the cores, among other things.
+ *
+ * POWER10 XSCOM address format:
+ *
+ * | 0| 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16-31|
+ * MC=0 |WR|MC|SLAVE ADDR |PIB MASTER |PORT NUMBER|LOCAL|
+ * MC=1 |WR|MC|MC TYPE |MC GROUP|PIB MASTER |PORT NUMBER|LOCAL|
+ *
+ * * Port is also known as PSCOM endpoint.
+ *
+ * WR is set by the xscom access functions (XSCOM_DATA_IND_READ bit)
+ * MC is always 0 (skiboot does not use multicast scoms).
+ *
+ * For unicast:
+ * EQ0-7 are addressed from 0x20 to 0x27 in the top 8 bits.
+ * L3 is on port 1
+ * NCU is on port 1
+ * ECL2 (core+L2) is on port 2 (XXX P10 scoms html doc suggests port 1?)
+ * QME is on port E.
+ *
+ * EQ chiplets (aka super chiplet) local address format:
+ *
+ * | 0| 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|
+ * |C0|C1|C2|C3|RING ID |SAT ID |REGISTER ID |
+ *
+ * EX0-3 are selected with one-hot encoding (C0-3)
+ *
+ * QME per-core register access address format:
+ * | 0| 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|
+ * |C0|C1|C2|C3| 1| 0| 0| 0|PER-CORE REGISTER ID |
+ *
+ * NCU - ring 6 (port 1)
+ * L3 - ring 3 (port 1) (XXX P10 scoms html doc suggests ring 6)
+ * L2 - ring 0 (port 2) (XXX P10 scoms html doc suggests ring 4)
+ * EC (PC unit) - rings 2-5 (port 2)
+ *
+ * Other chiplets:
+ *
+ * | 0| 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|
+ * | 1|RING ID |SAT ID |REGISTER ID |
+ */
+
+#define P10_CORE_EQ_CHIPLET(core) (0x20 + ((core) >> 2))
+#define P10_CORE_PROC(core) ((core) & 0x3)
+
+#define XSCOM_P10_EQ(chiplet) ((chiplet) << 24)
+
+#define XSCOM_P10_QME(chiplet) \
+ (XSCOM_P10_EQ(chiplet) | (0xE << 16))
+
+#define XSCOM_P10_QME_CORE(chiplet, proc) \
+ (XSCOM_P10_QME(chiplet) | ((1 << (3 - proc)) << 12))
+
+#define XSCOM_P10_EC(chiplet, proc) \
+ (XSCOM_P10_EQ(chiplet) | (0x2 << 16) | ((1 << (3 - proc)) << 12))
+
+#define XSCOM_P10_NCU(chiplet, proc) \
+ (XSCOM_P10_EQ(chiplet) | (0x1 << 16) | ((1 << (3 - proc)) << 12))
+
+#define XSCOM_ADDR_P10_EQ(core, addr) \
+ (XSCOM_P10_EQ(P10_CORE_EQ_CHIPLET(core)) | (addr))
+
+#define XSCOM_ADDR_P10_QME(core, addr) \
+ (XSCOM_P10_QME(P10_CORE_EQ_CHIPLET(core)) | (addr))
+
+#define XSCOM_ADDR_P10_QME_CORE(core, addr) \
+ (XSCOM_P10_QME_CORE(P10_CORE_EQ_CHIPLET(core), P10_CORE_PROC(core)) | (addr))
+
+#define XSCOM_ADDR_P10_EC(core, addr) \
+ (XSCOM_P10_EC(P10_CORE_EQ_CHIPLET(core), P10_CORE_PROC(core)) | (addr))
+
+#define XSCOM_ADDR_P10_NCU(core, addr) \
+ (XSCOM_P10_NCU(P10_CORE_EQ_CHIPLET(core), P10_CORE_PROC(core)) | (addr))
/* Definitions relating to indirect XSCOMs shared with centaur */
#define XSCOM_ADDR_IND_FLAG PPC_BIT(0)
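
A worked example of the address helpers above, restated locally so the arithmetic can be run: for a hypothetical core number 9 (EQ2, slot 1 in the quad), the QME and EC addresses used by the direct-controls code come out as follows.

#include <stdint.h>
#include <stdio.h>

/* Local restatement of the macros above, for illustration only. */
#define EQ_CHIPLET(core)	(0x20 + ((core) >> 2))
#define CORE_PROC(core)		((core) & 0x3)
#define ADDR_EQ(chiplet)	((uint32_t)(chiplet) << 24)
#define ADDR_QME_CORE(core, a)	(ADDR_EQ(EQ_CHIPLET(core)) | (0xE << 16) | \
				 ((1 << (3 - CORE_PROC(core))) << 12) | (a))
#define ADDR_EC(core, a)	(ADDR_EQ(EQ_CHIPLET(core)) | (0x2 << 16) | \
				 ((1 << (3 - CORE_PROC(core))) << 12) | (a))

int main(void)
{
	uint32_t core = 9;	/* hypothetical core: EQ2, slot 1 in the quad */

	printf("QME_SPWU_HYP (0x83c)       -> 0x%08x\n",
	       ADDR_QME_CORE(core, 0x83c));	/* 0x220e483c */
	printf("EC_DIRECT_CONTROLS (0x449) -> 0x%08x\n",
	       ADDR_EC(core, 0x449));		/* 0x22024449 */
	return 0;
}
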