author    Pratik Rajesh Sampat <psampat@linux.ibm.com>   2021-08-04 12:51:34 +0530
committer Vasant Hegde <hegdevasant@linux.vnet.ibm.com>  2021-08-06 12:30:46 +0530
commit    545391ffd6b791474e5e7b1231738a1cb19a6cf8 (patch)
tree      fad65ff1b9de883201bb9629b4c1a181b1d65690 /libpore
parent    5232a9038a0a95f8d23549038ab791ac97c6a4ff (diff)
download  skiboot-545391ffd6b791474e5e7b1231738a1cb19a6cf8.zip
          skiboot-545391ffd6b791474e5e7b1231738a1cb19a6cf8.tar.gz
          skiboot-545391ffd6b791474e5e7b1231738a1cb19a6cf8.tar.bz2
libpore: P10 stop-api support
Update libpore with the P10 STOP API, and add minor changes so that the P9 and P10 stop-api implementations co-exist in OPAL. These calls are required for STOP11 support on P10. STOP0/2/3 on P10 do not lose full core state or SCOMs; stop-api based restore of SPRs or xscoms is required only for STOP11 on P10. STOP11 on P10 will be a limited lab test/stress feature and not a product feature (same as on P9).

Co-authored-by: Pratik Rajesh Sampat <psampat@linux.ibm.com>
Signed-off-by: Pratik Rajesh Sampat <psampat@linux.ibm.com>
Co-authored-by: Vaidyanathan Srinivasan <svaidy@linux.ibm.com>
Signed-off-by: Vaidyanathan Srinivasan <svaidy@linux.ibm.com>
Co-authored-by: Ryan Grimm <grimm@linux.ibm.com>
Signed-off-by: Ryan Grimm <grimm@linux.ibm.com>
Signed-off-by: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
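To illustrate the coexistence point above, here is a minimal, hypothetical sketch of how an OPAL-side caller could gate the new P10 entry points on the chip generation. The call site, the chip_is_p10 flag, and the exact proc_stop_save_cpureg_control() signature (inferred from the argument validator in p10_stop_api.C below) are assumptions for illustration, not part of this patch:

#include <stdint.h>

/* Assumed prototype, mirroring the parameters checked by
 * validateArgumentSaveRegMask() in p10_stop_api.C; the real API
 * returns a StopReturnCode_t. */
extern int proc_stop_save_cpureg_control(void *i_pImage, uint32_t i_coreId,
                                         uint32_t i_threadId,
                                         uint64_t i_saveMaskVector);

/* Hypothetical chip-generation flag; OPAL keeps its own notion of this. */
extern int chip_is_p10;

/* STOP0/2/3 retain full core state on P10, so the stop-api save path is
 * only needed when the deep STOP11 state is being enabled. */
static int save_sprs_for_stop11(void *homer, uint32_t core, uint32_t thread,
                                uint64_t spr_mask)
{
        if (!chip_is_p10)
                return 0; /* P9 continues to use the p9_stop_api entry points */

        return proc_stop_save_cpureg_control(homer, core, thread, spr_mask);
}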
Diffstat (limited to 'libpore')
-rw-r--r--  libpore/Makefile.inc                          2
-rw-r--r--  libpore/p10_cpu_reg_restore_instruction.H    88
-rw-r--r--  libpore/p10_hcd_header_defs.H               152
-rw-r--r--  libpore/p10_hcd_memmap_base.H               463
-rw-r--r--  libpore/p10_hcd_memmap_homer.H               94
-rw-r--r--  libpore/p10_hcd_memmap_occ_sram.H           174
-rw-r--r--  libpore/p10_hcode_image_defines.H           462
-rw-r--r--  libpore/p10_stop_api.C                     1816
-rw-r--r--  libpore/p10_stop_api.H                      238
-rw-r--r--  libpore/p10_stop_data_struct.H              162
-rw-r--r--  libpore/p10_stop_util.C                     190
-rw-r--r--  libpore/p10_stop_util.H                     123
12 files changed, 3963 insertions, 1 deletion
diff --git a/libpore/Makefile.inc b/libpore/Makefile.inc
index 1060a04..06d9c89 100644
--- a/libpore/Makefile.inc
+++ b/libpore/Makefile.inc
@@ -1,4 +1,4 @@
-LIBPORE_SRCS = p8_pore_table_gen_api_fixed.C p9_stop_api.C p9_stop_util.C
+LIBPORE_SRCS = p8_pore_table_gen_api_fixed.C p9_stop_api.C p9_stop_util.C p10_stop_api.C p10_stop_util.C
LIBPORE_SRCS += p8_pore_table_static_data.c sbe_xip_image.c pore_inline_assembler.c
LIBPORE_OBJS_1 = $(LIBPORE_SRCS:%.c=%.o)
LIBPORE_OBJS = $(LIBPORE_OBJS_1:%.C=%.o)
diff --git a/libpore/p10_cpu_reg_restore_instruction.H b/libpore/p10_cpu_reg_restore_instruction.H
new file mode 100644
index 0000000..4da194d
--- /dev/null
+++ b/libpore/p10_cpu_reg_restore_instruction.H
@@ -0,0 +1,88 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/utils/stopreg/p10_cpu_reg_restore_instruction.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+///
+/// @file p10_cpu_reg_restore_instruction.H
+/// @brief enumerates all the opcodes used for SPR restoration.
+///
+// *HWP HW Owner : Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com>
+// *HWP Team : PM
+// *HWP Level : 2
+// *HWP Consumed by : HB:HYP
+
+#ifndef __REG_RESTORE_INSTRUCTION_H
+#define __REG_RESTORE_INSTRUCTION_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+
+namespace stopImageSection
+{
+#endif
+
+/**
+ * @brief enumerates opcodes for a few instructions.
+ */
+enum
+{
+ ORI_OPCODE = 24,
+ RFI_OPCODE = 19,
+ RFI_CONST = 50,
+ MFMSR_CONST = 83,
+ ORIS_OPCODE = 25,
+ OPCODE_31 = 31,
+ XOR_CONST = 316,
+ RLDICR_OPCODE = 30,
+ RLDICR_CONST = 1,
+ MTSPR_CONST1 = 467,
+ MTMSRD_CONST1 = 178,
+ MR_R0_TO_R10 = 0x7c0a0378, //mr r10, r0
+ MR_R0_TO_R21 = 0x7c150378, //mr r21, r0
+ MR_R0_TO_R9 = 0x7c090378, //mr r9, r0
+ URMOR_CORRECTION = 0x7d397ba6,
+ MFSPR_CONST = 339,
+ BLR_INST = 0x4e800020,
+ MTSPR_BASE_OPCODE = 0x7c0003a6,
+ MFSPR_BASE_OPCODE = 0x7c0002a6,
+ ATTN_OPCODE = 0x00000200,
+ OPCODE_18 = 18,
+ SELF_SAVE_FUNC_ADD = 0x2300,
+ SELF_SAVE_OFFSET = 0x180,
+ SKIP_SPR_REST_INST = 0x4800001c, //b . +0x01c
+ MFLR_R30 = 0x7fc802a6,
+ SKIP_SPR_SELF_SAVE = 0x3bff0020, //addi r31 r31, 0x20
+ MTLR_INST = 0x7fc803a6 //mtlr r30
+};
+
+#ifdef __cplusplus
+} // namespace stopImageSection ends
+
+} // extern "C"
+#endif //__cplusplus
+
+#endif //__REG_RESTORE_INSTRUCTION_H
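For orientation, a small self-contained sketch (not part of the patch) of how these enumerators compose into an instruction word; the D-form layout below is the same one getOriInstruction() in p10_stop_api.C uses further down in this commit (that function additionally byte-swizzles the result, which is omitted here):

#include <assert.h>
#include <stdint.h>

enum { EX_ORI_OPCODE = 24 };   /* same value as ORI_OPCODE in the header above */

/* Power ISA D-form: primary opcode | RS | RA | 16-bit immediate */
static uint32_t build_ori(uint16_t rs, uint16_t ra, uint16_t imm)
{
    return ((uint32_t)EX_ORI_OPCODE << 26) | ((uint32_t)rs << 21) |
           ((uint32_t)ra << 16) | imm;
}

int main(void)
{
    /* ori r0,r0,0 is the canonical PowerPC no-op, 0x60000000 */
    assert(build_ori(0, 0, 0) == 0x60000000u);
    return 0;
}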
diff --git a/libpore/p10_hcd_header_defs.H b/libpore/p10_hcd_header_defs.H
new file mode 100644
index 0000000..d02a725
--- /dev/null
+++ b/libpore/p10_hcd_header_defs.H
@@ -0,0 +1,152 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/hwp/lib/p10_hcd_header_defs.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2016,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p10_hcd_header_defs.H
+/// @brief defines header constants based on file types
+///
+/// This header contains those cpp manifest constants required for processing
+/// the linker scripts used to generate OCC code images. As these are used
+/// by linker scripts as well as by C++ code, they cannot solely be put
+/// into a namespace. Prefixing them with the region name is an attempt
+/// to make these globally unique when this header is included in C++ code.
+///
+// *HWP HWP Owner: David Du <daviddu@us.ibm.com>
+// *HWP Backup HWP Owner: Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner: Prem Jha <premjha2@in.ibm.com>
+// *HWP Team: PM
+// *HWP Level: 2
+// *HWP Consumed by: PM
+//
+
+#ifndef __HCD_HEADER_DEFS_H__
+#define __HCD_HEADER_DEFS_H__
+
+/// Macros for generating an Hcode header section
+///
+/// The CPP macros HCD_HDR_UINTxx generate equivalent code depending on
+/// whether they are being called from assembler (where they actually
+/// create the header section data) or from C (where they specify a
+/// C-structure form of the contents of the header section).
+///
+/// In assembler each invocation also creates space in the header section
+
+#ifdef __ASSEMBLER__
+
+// *INDENT-OFF*
+ .macro hcd_header_uint64, symbol:req, value = 0
+ .global \symbol
+\symbol\():
+ .quad (\value)
+ .endm
+
+ .macro hcd_header_uint32, symbol:req, value = 0
+ .global \symbol
+ \symbol\():
+ .long (\value)
+ .endm
+
+ .macro hcd_header_uint16, symbol:req, value = 0
+ .global \symbol
+\symbol\():
+ .short (\value)
+ .endm
+
+ .macro hcd_header_uint8, symbol:req, value = 0
+ .global \symbol
+\symbol\():
+ .byte (\value)
+ .endm
+
+ .macro hcd_header_uint8_vec, symbol:req, number:req, value = 0
+ .global \symbol
+\symbol\():
+ .rept (\number)
+ .byte (\value)
+ .endr
+ .endm
+
+ .macro hcd_header_attn, symbol:req, number = 1
+ .global \symbol
+\symbol\():
+ .rept (\number)
+ .long 0x00000200
+ .endr
+ .endm
+
+ .macro hcd_header_attn_pad, align:req
+ .balignl (\align), 0x00000200
+ .endm
+
+ .macro hcd_header_pad, align:req
+ .balignl (\align), 0
+ .endm
+// *INDENT-ON*
+
+#define ULL(x) x
+#define HCD_CONST(name, expr) .set name, expr;
+#define HCD_CONST64(name, expr) .set name, expr;
+
+#define HCD_HDR_UINT64(symbol, value) hcd_header_uint64 symbol value
+#define HCD_HDR_UINT32(symbol, value) hcd_header_uint32 symbol value
+#define HCD_HDR_UINT16(symbol, value) hcd_header_uint16 symbol value
+#define HCD_HDR_UINT8(symbol, value) hcd_header_uint8 symbol value
+#define HCD_HDR_UINT8_VEC(symbol, number, value) hcd_header_uint8_vec symbol number value
+#define HCD_HDR_ATTN(symbol, number) hcd_header_attn symbol number
+#define HCD_HDR_ATTN_PAD(align) hcd_header_attn_pad align
+#define HCD_HDR_PAD(align) hcd_header_pad align
+
+#else // NOT __ASSEMBLER__
+
+#ifdef __LINKERSCRIPT__
+
+ #define ULL(x) x
+ #define POUND_DEFINE #define
+ #define HCD_CONST(name, expr) POUND_DEFINE name expr
+ #define HCD_CONST64(name, expr) POUND_DEFINE name expr
+
+#else
+
+ #define ULL(x) x##ull
+ #define HCD_CONST(name, expr) enum { name = expr };
+ #define HCD_CONST64(name, expr) enum { name = expr };
+
+ #define HCD_HDR_UINT64(symbol, value) uint64_t symbol
+ #define HCD_HDR_UINT32(symbol, value) uint32_t symbol
+ #define HCD_HDR_UINT16(symbol, value) uint16_t symbol
+ #define HCD_HDR_UINT8(symbol, value) uint8_t symbol
+ #define HCD_HDR_UINT8_VEC(symbol, number, value) uint8_t symbol[number]
+ #define HCD_HDR_ATTN(symbol, number) uint32_t symbol[number]
+ #define HCD_HDR_ATTN_PAD(align)
+ #define HCD_HDR_PAD(align)
+
+#endif // __LINKERSCRIPT__
+#endif // __ASSEMBLER__
+
+// Stringification
+
+#define STR_HELPER(x) #x
+#define STR(x) STR_HELPER(x)
+
+#endif // __HCD_HEADER_DEFS_H__
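A minimal illustration (not part of the patch) of how the HCD_* macros defined above are consumed from a C/C++ translation unit, where HCD_CONST() becomes an enum and HCD_HDR_UINTxx() become plain field declarations; the type name, field names, and EXAMPLE_HEADER_SIZE below are made up for the example, and p10_hcd_header_defs.H is assumed to be on the include path:

#include <stdint.h>
#include "p10_hcd_header_defs.H"

HCD_CONST(EXAMPLE_HEADER_SIZE, 128)     /* -> enum { EXAMPLE_HEADER_SIZE = 128 }; */

typedef struct
{
    HCD_HDR_UINT64(iv_magic, 0);        /* -> uint64_t iv_magic;                  */
    HCD_HDR_UINT32(iv_offset, 0);       /* -> uint32_t iv_offset;                 */
    HCD_HDR_PAD(EXAMPLE_HEADER_SIZE);   /* -> nothing in C; pads in assembler     */
} ExampleHeader_t;

When the same source is run through the assembler (__ASSEMBLER__ defined), the HCD_HDR_UINTxx invocations instead expand to the hcd_header_uintNN macros, which emit a global label plus storage (.quad/.long/.short/.byte), so one description serves both the image build and the C view of the header.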
diff --git a/libpore/p10_hcd_memmap_base.H b/libpore/p10_hcd_memmap_base.H
new file mode 100644
index 0000000..4dac9c9
--- /dev/null
+++ b/libpore/p10_hcd_memmap_base.H
@@ -0,0 +1,463 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/hwp/lib/p10_hcd_memmap_base.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2019,2020 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p10_hcd_memmap_base.H
+/// @brief defines region constants shared by different memory components.
+///
+
+// *HWP HWP Owner: David Du <daviddu@us.ibm.com>
+// *HWP Backup HWP Owner: Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner: Prem S Jha <premjha2@in.ibm.com>
+// *HWP Team: PM
+// *HWP Level: 2
+// *HWP Consumed by: PM:Hostboot:Phyp
+
+#ifndef __HCD_MEMMAP_BASE_H__
+#define __HCD_MEMMAP_BASE_H__
+
+#include <p10_hcd_header_defs.H>
+
+// -------------------------------------------------------------------
+// Note: There can be NO semicolons(";") at end of macros in this file
+// There can ONLY be HCD_CONST/HCD_CONST64 macros in this file
+// -------------------------------------------------------------------
+
+/// Image Magic Numbers
+
+HCD_CONST64(CPMR_MAGIC_NUMBER, ULL(0x43504d525f312e30)) // CPMR_1.0
+HCD_CONST64(QME_MAGIC_NUMBER , ULL(0x514d455f5f312e30)) // QME__1.0
+
+HCD_CONST64(XPMR_MAGIC_NUMBER, ULL(0x58504d525f312e30)) // XPMR_1.0
+HCD_CONST64(XGPE_MAGIC_NUMBER, ULL(0x584750455f312e30)) // XGPE_1.0
+
+HCD_CONST64(PPMR_MAGIC_NUMBER, ULL(0x50504d525f312e30)) // PPMR_1.0
+HCD_CONST64(PGPE_MAGIC_NUMBER, ULL(0x504750455F312E30)) // PGPE_1.0
+
+HCD_CONST(QME_BUILD_VERSION, 0x001) // QME__1.0
+HCD_CONST(XGPE_BUILD_VERSION, 0x001) // XGPE_1.0
+HCD_CONST(PGPE_BUILD_VERSION, 0x001) // PGPE_1.0
+
+
+HCD_CONST(CPMR_REGION_CHECK_WORD, (0x43504d52)) // CPMR
+HCD_CONST(SCOM_REST_MAGIC_WORD, (0x5343))//SC
+HCD_CONST(CPMR_BUILD_VER, 1)
+
+
+/// Size constants
+
+HCD_CONST(HALF_KB, 512)
+HCD_CONST(ONE_KB, 1024)
+HCD_CONST(HALF_MB, (1024 * 512))
+HCD_CONST(ONE_MB, (1024 * 1024))
+HCD_CONST(TWO_MB, (2 * 1024 * 1024))
+
+/// Memory constants
+
+HCD_CONST(QME_SRAM_SIZE, (64 * ONE_KB))
+
+HCD_CONST(HOMER_MEMORY_SIZE, (4 * ONE_MB))
+HCD_CONST(HOMER_OPMR_REGION_NUM, 0)
+HCD_CONST(HOMER_XPMR_REGION_NUM, 1)
+HCD_CONST(HOMER_CPMR_REGION_NUM, 2)
+HCD_CONST(HOMER_PPMR_REGION_NUM, 3)
+
+/// Chip constants
+HCD_CONST(OCC_HOST_AREA_SIZE, ONE_MB)
+
+HCD_CONST(MAX_THREADS_PER_CORE, 4)
+HCD_CONST(MAX_CORES_PER_CHIP, 32)
+
+HCD_CONST(MAX_QMES_PER_CHIP, 8)
+HCD_CONST(MAX_EXES_PER_CHIP, 16)
+
+HCD_CONST(MAX_QUADS_PER_CHIP, 8)
+HCD_CONST(MAX_CACHES_PER_CHIP, 32)
+
+HCD_CONST(MAX_CORES_PER_QME, 4)
+HCD_CONST(MAX_CORES_PER_EX, 2)
+
+HCD_CONST(MAX_QMES_PER_QUAD, 1)
+HCD_CONST(MAX_EXES_PER_QUAD, 2)
+HCD_CONST(MAX_CORES_PER_QUAD, 4)
+HCD_CONST(MAX_L3_PER_QUAD, 4)
+
+HCD_CONST(MAX_QUAD_ID_SUPPORTED, 7)
+HCD_CONST(MAX_CORE_ID_SUPPORTED, 31)
+HCD_CONST(MAX_THREAD_ID_SUPPORTED, 3)
+
+/// Image build constants
+
+HCD_CONST(HARDWARE_IMG_SIZE, ONE_MB)
+
+HCD_CONST(FUSED_CORE_MODE, 0xBB)
+HCD_CONST(NONFUSED_CORE_MODE, 0xAA)
+
+HCD_CONST(SELF_RESTORE_BLR_INST, 0x4e800020)
+HCD_CONST(CORE_RESTORE_PAD_OPCODE, 0x00000200) //ATTN Opcode
+
+HCD_CONST(SCOM_RESTORE_PAD_OPCODE, 0x00000000) //zero pads
+HCD_CONST(SCOM_RESTORE_ENTRY_SIZE, 12) //4B address,8B data
+
+HCD_CONST(QME_BLOCK_READ_LEN, 32)
+HCD_CONST(QME_BLK_SIZE_SHIFT, 0x05)
+
+HCD_CONST(RING_ALIGN_BOUNDARY, 0x08)
+HCD_CONST64(DARN_BAR_EN_POS, ULL(0x8000000000000000))
+
+//FFDC Region
+HCD_CONST(FFDC_REGION_XPMR_BASE_OFFSET, 0xE0000) //Offset wrt XPMR base
+HCD_CONST(FFDC_REGION_SIZE, (80 * ONE_KB))
+//end offset of FFDC region wrt XPMR base
+HCD_CONST(FFDC_REGION_XPMR_END_OFFSET, (FFDC_REGION_XPMR_BASE_OFFSET +
+ FFDC_REGION_SIZE ))
+//---------------------------------------------------------------------------------------
+
+//XPMR Header
+HCD_CONST(XGPE_BUILD_VER, 1)
+HCD_CONST(XPMR_BUILD_VER, 1)
+HCD_CONST(XPMR_HEADER_SIZE, 512)
+HCD_CONST(XGPE_INT_VECTOR_SIZE, 384)
+HCD_CONST(XGPE_HEADER_IMAGE_OFFSET, XGPE_INT_VECTOR_SIZE)
+HCD_CONST(XGPE_BOOT_COPIER_OFFSET, 512)
+HCD_CONST(XGPE_BOOT_COPIER_LENGTH, ONE_KB)
+HCD_CONST(XGPE_BOOT_LOADER_OFFSET,
+ XGPE_BOOT_COPIER_OFFSET + XGPE_BOOT_COPIER_LENGTH )
+HCD_CONST(XGPE_BOOT_LOADER_LENGTH, ONE_KB)
+HCD_CONST(XGPE_HCODE_OFFSET,
+ XGPE_BOOT_LOADER_OFFSET + XGPE_BOOT_LOADER_LENGTH )
+HCD_CONST(XGPE_SRAM_SIZE, (64 * ONE_KB))
+HCD_CONST(XGPE_HCODE_SIZE, (64 * ONE_KB))
+HCD_CONST(XPMR_BOOT_REGION, (XPMR_HEADER_SIZE + XGPE_BOOT_COPIER_LENGTH +
+ XGPE_BOOT_LOADER_LENGTH ))
+
+HCD_CONST(XGPE_HCODE_RESET_ADDR_VAL, 0x40)
+HCD_CONST(XGPE_DBG_PTR_AREA_SIZE, 64)
+
+HCD_CONST(XPMR_MAGIC_WORD_BYTE, 0x00)
+HCD_CONST(XPMR_BOOT_COPIER_OFFSET_BYTE, 0x08)
+HCD_CONST(XPMR_BOOT_LOADER_OFFSET_BYTE, 0x10)
+HCD_CONST(XPMR_BOOT_LOADER_LENGTH_BYTE, 0x14)
+HCD_CONST(XPMR_BUILD_DATE_BYTE, 0x18)
+HCD_CONST(XPMR_BUILD_VER_BYTE, 0x1c)
+HCD_CONST(XPMR_XGPE_HCODE_OFFSET_BYTE, 0x28)
+HCD_CONST(XPMR_XGPE_HCODE_LENGTH_BYTE, 0x2c)
+HCD_CONST(XPMR_XGPE_BOOT_PROG_CODE_BYTE, 0x30)
+HCD_CONST(XPMR_XGPE_SRAM_IMAGE_SIZE_BYTE, 0x34)
+HCD_CONST(XGPE_IMAGE_XPMR_OFFSET,
+ (XGPE_BOOT_LOADER_OFFSET + XGPE_BOOT_LOADER_LENGTH))
+
+//---------------------------------------------------------------------------------------
+
+/// CPMR Header
+
+HCD_CONST(CPMR_HOMER_OFFSET, (HOMER_CPMR_REGION_NUM* ONE_MB))
+HCD_CONST(CPMR_HEADER_SIZE, 256)
+
+HCD_CONST(CPMR_ATTN_WORD0_BYTE, 0x00)
+HCD_CONST(CPMR_ATTN_WORD1_BYTE, 0x04)
+HCD_CONST(CPMR_MAGIC_NUMBER_BYTE, 0x08)
+HCD_CONST(CPMR_BUILD_DATE_BYTE, 0x10)
+HCD_CONST(CPMR_BUILD_VER_BYTE, 0x14)
+HCD_CONST(CPMR_SELF_RESTORE_VER_BYTE, 0x1C)
+HCD_CONST(CPMR_STOP_API_VER_BYTE, 0x1D)
+HCD_CONST(CPMR_FUSED_CORE_FLAG, 0x1F)
+HCD_CONST(CPMR_QME_HCODE_OFFSET_BYTE, 0x20)
+HCD_CONST(CPMR_QME_HCODE_LENGTH_BYTE, 0x24)
+HCD_CONST(CPMR_CORE_COMMON_RING_OFFSET_BYTE, 0x28)
+HCD_CONST(CPMR_CORE_COMMON_RING_LENGTH_BYTE, 0x2C)
+HCD_CONST(CPMR_QME_LOCAL_PSTATE_OFFSET_BYTE, 0x30)
+HCD_CONST(CPMR_QME_LOCAL_PSTATE_LENGTH_BYTE, 0x34)
+HCD_CONST(CPMR_CORE_SPECIFIC_RING_OFFSET_BYTE, 0x38)
+HCD_CONST(CPMR_CORE_SPECIFIC_RING_LENGTH_BYTE, 0x3C)
+HCD_CONST(CPMR_CORE_SCOM_RESTORE_OFFSET_BYTE, 0x40)
+HCD_CONST(CPMR_CORE_SCOM_RESTORE_LENGTH_BYTE, 0x44)
+HCD_CONST(CPMR_SELF_RESTORE_OFFSET_BYTE, 0x48)
+HCD_CONST(CPMR_SELF_RESTORE_LENGTH_BYTE, 0x4C)
+HCD_CONST(CPMR_MAX_CORE_L2_SCOM_ENTRIES, 0x50)
+HCD_CONST(CPMR_MAX_QUAD_L3_SCOM_ENTRIES, 0x54)
+HCD_CONST(CPMR_MAX_CORE_L2_SCOM_OFFSET, 0x58)
+HCD_CONST(CPMR_MAX_CORE_L2_SCOM_LENGTH, 0x5C)
+HCD_CONST(CPMR_MAX_QUAD_SCOM_OFFSET, 0x60)
+HCD_CONST(CPMR_MAX_QUAD_SCOM_LENGTH, 0x64)
+
+/// Self Restore without SMF Support
+
+HCD_CONST(SELF_RESTORE_CPMR_OFFSET, CPMR_HEADER_SIZE)
+HCD_CONST(SELF_RESTORE_INT_SIZE, (8 * ONE_KB))
+HCD_CONST(SELF_RESTORE_FFDC_OFFSET, (224 * ONE_KB))
+HCD_CONST(SELF_RESTORE_FFDC_LENGTH, (32 * ONE_KB))
+HCD_CONST(SELF_RESTORE_FFDC_PER_CORE, 864)
+HCD_CONST(SELF_RESTORE_FFDC_PER_CORE_IN_HOMER, 1024)
+HCD_CONST(SELF_RESTORE_FFDC_PER_QUAD_IN_HOMER, (SELF_RESTORE_FFDC_PER_CORE_IN_HOMER * 4))
+HCD_CONST(SELF_RESTORE_FFDC_BLK_CNT, 27)
+
+// Self Restore Region With SMF Support
+HCD_CONST(SMF_THREAD_LAUNCHER_SIZE, 1024)
+HCD_CONST(SMF_SELF_RESTORE_CODE_SIZE,
+ (SELF_RESTORE_INT_SIZE + SMF_THREAD_LAUNCHER_SIZE))
+
+HCD_CONST(SMF_CORE_RESTORE_THREAD_AREA_SIZE, HALF_KB)
+HCD_CONST(SMF_SELF_SAVE_THREAD_AREA_SIZE, 256)
+HCD_CONST(SMF_CORE_RESTORE_CORE_AREA_SIZE, HALF_KB)
+HCD_CONST(SMF_CORE_SAVE_CORE_AREA_SIZE, HALF_KB)
+
+HCD_CONST(SMF_SELF_RESTORE_CORE_REGS_SIZE,
+ MAX_CORES_PER_CHIP * ((SMF_CORE_RESTORE_THREAD_AREA_SIZE* MAX_THREADS_PER_CORE ) +
+ (SMF_SELF_SAVE_THREAD_AREA_SIZE* MAX_THREADS_PER_CORE ) +
+ SMF_CORE_RESTORE_CORE_AREA_SIZE +
+ SMF_CORE_SAVE_CORE_AREA_SIZE ))
+
+HCD_CONST(SMF_SELF_RESTORE_SIZE_TOTAL,
+ (SMF_SELF_RESTORE_CODE_SIZE + SMF_SELF_RESTORE_CORE_REGS_SIZE))
+/// Core Scom
+
+HCD_CONST(SELF_SAVE_RESTORE_REGION_SIZE, (256 * ONE_KB))
+HCD_CONST(SCOM_RESTORE_CPMR_OFFSET, (256 * ONE_KB))
+HCD_CONST(SCOM_RESTORE_HOMER_OFFSET,
+ (SCOM_RESTORE_CPMR_OFFSET + CPMR_HOMER_OFFSET))
+
+HCD_CONST(MAX_CORE_SCOM_ENTRIES, 16)
+HCD_CONST(MAX_L2_SCOM_ENTRIES, 32)
+HCD_CONST(MAX_L3_SCOM_ENTRIES, 64)
+HCD_CONST(MAX_EQ_SCOM_ENTRIES, 16)
+HCD_CONST(MAX_SCOM_RESTORE_ENTRIES_PER_CORE, (MAX_CORE_SCOM_ENTRIES +
+ MAX_L2_SCOM_ENTRIES + MAX_L3_SCOM_ENTRIES +
+ MAX_EQ_SCOM_ENTRIES))
+
+
+HCD_CONST(SCOM_RESTORE_SIZE_PER_CORE,
+ (SCOM_RESTORE_ENTRY_SIZE* MAX_SCOM_RESTORE_ENTRIES_PER_CORE)) // 128 * 16
+HCD_CONST(SCOM_RESTORE_SIZE_PER_QME,
+ (SCOM_RESTORE_SIZE_PER_CORE* MAX_CORES_PER_QME)) // 128 * 16 * 4
+
+HCD_CONST(SCOM_RESTORE_SIZE_TOTAL, (96 * ONE_KB))
+
+HCD_CONST(SCOM_RESTORE_EL_AREA,
+ MAX_CORE_SCOM_ENTRIES* SCOM_RESTORE_ENTRY_SIZE)
+HCD_CONST(SCOM_RESTORE_L2_AREA,
+ MAX_L2_SCOM_ENTRIES* SCOM_RESTORE_ENTRY_SIZE)
+HCD_CONST(SCOM_RESTORE_L3_AREA,
+ MAX_L3_SCOM_ENTRIES* SCOM_RESTORE_ENTRY_SIZE)
+HCD_CONST(SCOM_RESTORE_EQ_AREA,
+ MAX_EQ_SCOM_ENTRIES* SCOM_RESTORE_ENTRY_SIZE)
+HCD_CONST(SCOM_RESTORE_VER, 1)
+HCD_CONST(SCOM_RESTORE_L2_CORE,
+ (MAX_CORE_SCOM_ENTRIES + MAX_L2_SCOM_ENTRIES))
+HCD_CONST(SCOM_RESTORE_L3_CACHE,
+ (MAX_EQ_SCOM_ENTRIES + MAX_L3_SCOM_ENTRIES))
+/// QME Image
+
+HCD_CONST(QME_IMAGE_CPMR_OFFSET, 0x58000) // assumes SCOMs take up the first 96KB of second 256KB
+//HCD_CONST(QME_IMAGE_SIZE, 0)
+HCD_CONST(QME_INT_VECTOR_SIZE, 384) // 0x180
+HCD_CONST(QME_HCODE_OFFSET, (SELF_SAVE_RESTORE_REGION_SIZE + SCOM_RESTORE_SIZE_TOTAL))
+
+/// QME Header
+
+HCD_CONST(QME_HEADER_CPMR_OFFSET,
+ (QME_IMAGE_CPMR_OFFSET + QME_INT_VECTOR_SIZE))
+HCD_CONST(QME_HEADER_IMAGE_OFFSET, QME_INT_VECTOR_SIZE)
+HCD_CONST(QME_HEADER_SIZE, 128) // 0x80, +0x180=0x200
+
+HCD_CONST(QME_MAGIC_NUMBER_BYTE, 0x00)
+HCD_CONST(QME_HCODE_OFFSET_BYTE, 0x08)
+HCD_CONST(QME_HCODE_LENGTH_BYTE, 0x0C)
+HCD_CONST(QME_COMMON_RING_OFFSET_BYTE, 0x10)
+HCD_CONST(QME_OVERRIDE_RING_OFFSET_BYTE, 0x14)
+HCD_CONST(QME_COMMON_RING_LENGTH_BYTE, 0x18)
+HCD_CONST(QME_LOCAL_PSTATE_OFFSET_BYTE, 0x1C)
+HCD_CONST(QME_LOCAL_PSTATE_LENGTH_BYTE, 0x20)
+HCD_CONST(QME_SPECIFIC_RING_OFFSET_BYTE, 0x24)
+HCD_CONST(QME_SPECIFIC_RING_LENGTH_BYTE, 0x28)
+HCD_CONST(QME_QUAD_SCOM_RESTORE_OFFSET_BYTE, 0x2C)
+HCD_CONST(QME_QUAD_SCOM_RESTORE_LENGTH_BYTE, 0x30)
+HCD_CONST(QME_ATTR_TANK_ADDRESS, 0x34)
+HCD_CONST(QME_LOCATION_ID_BYTE, 0x38)
+HCD_CONST(QME_TIME_BASE, 0x3C)
+HCD_CONST(QME_CPMR_HOMER_ADDRESS_BYTE, 0x40)
+
+HCD_CONST(QME_HCODE_OFF_IMAGE_OFFSET, (QME_HEADER_IMAGE_OFFSET + QME_HCODE_OFFSET_BYTE))
+HCD_CONST(QME_HCODE_LEN_IMAGE_OFFSET, (QME_HEADER_IMAGE_OFFSET + QME_HCODE_LENGTH_BYTE))
+
+/// QME Hcode
+
+HCD_CONST(QME_HCODE_IMAGE_OFFSET, (QME_INT_VECTOR_SIZE + QME_HEADER_SIZE)) // 0x200
+HCD_CONST(QME_HCODE_SIZE, (43 * ONE_KB))
+HCD_CONST(QME_COMMON_RING_SIZE, (5 * ONE_KB))
+HCD_CONST(QME_INST_RING_SIZE, (5 * ONE_KB))
+HCD_CONST(QME_DEBUG_PTRS_OFFSET, 0x200)
+HCD_CONST(QME_DEBUG_PTRS_SIZE, 0x10)
+HCD_CONST(QME_DUMP_PTRS_OFFSET, QME_DEBUG_PTRS_OFFSET + QME_DEBUG_PTRS_SIZE)
+HCD_CONST(QME_DUMP_PTRS_SIZE, 0x300)
+HCD_CONST(QME_ATTR_PTRS_OFFSET, QME_DUMP_PTRS_OFFSET + QME_DUMP_PTRS_SIZE)
+HCD_CONST(QME_INSTRUMENTATION_SIZE, HALF_KB)
+HCD_CONST(QME_SRAM_HCODE_OFFSET, 0)
+HCD_CONST(QME_OVERRIDE_RING_SIZE, (2 * ONE_KB))
+
+// QME Hcode + Core Scan + Pstate
+HCD_CONST(QME_REGION_SIZE, (128 * ONE_KB))
+
+// Debug
+
+HCD_CONST(CPMR_TRACE_REGION_OFFSET, (512 * ONE_KB))
+HCD_CONST(QME_TRACE_REGION_SIZE, (16 * ONE_KB))
+HCD_CONST(CPMR_TRACE_REGION_SIZE, (QME_TRACE_REGION_SIZE* MAX_QMES_PER_CHIP)) // 192K
+HCD_CONST(CPMR_DEBUG_REGION_OFFSET, CPMR_TRACE_REGION_OFFSET + CPMR_TRACE_REGION_SIZE)
+HCD_CONST(CPMR_DEBUG_REGION_SIZE, (64 * ONE_KB)) // 192K + 64K = 256K
+
+HCD_CONST(CACHE_CHIPLET_ID_MIN, 0x20 )
+HCD_CONST(CACHE_CHIPLET_ID_MAX, 0x27 )
+
+//---------------------------------------------------------------------------------------
+
+/// PPMR Header
+HCD_CONST(PPMR_BUILD_VERSION, 1)
+HCD_CONST(PPMR_HEADER_SIZE, 512)
+HCD_CONST(PGPE_INT_VECTOR_SIZE, 384)
+HCD_CONST(PGPE_HEADER_IMAGE_OFFSET, PGPE_INT_VECTOR_SIZE)
+HCD_CONST(PGPE_BOOT_COPIER_OFFSET, PPMR_HEADER_SIZE)
+HCD_CONST(PGPE_BOOT_COPIER_LENGTH, ONE_KB)
+HCD_CONST(PGPE_BOOT_LOADER_OFFSET,
+ (PGPE_BOOT_COPIER_OFFSET + PGPE_BOOT_COPIER_LENGTH) )
+
+HCD_CONST(PGPE_BOOT_LOADER_LENGTH, ONE_KB)
+HCD_CONST(PGPE_HCODE_OFFSET,
+ PGPE_BOOT_LOADER_OFFSET + PGPE_BOOT_LOADER_LENGTH )
+HCD_CONST(PPMR_HOMER_OFFSET, (HOMER_PPMR_REGION_NUM* ONE_MB))
+
+HCD_CONST(PPMR_MAGIC_NUMBER_BYTE, 0x00)
+HCD_CONST(PPMR_BOOT_COPIER_OFFSET_BYTE, 0x08)
+HCD_CONST(PPMR_BOOT_LOADER_OFFSET_BYTE, 0x10)
+HCD_CONST(PPMR_BOOT_LOADER_LENGTH_BYTE, 0x14)
+HCD_CONST(PPMR_BUILD_DATE_BYTE, 0x18)
+HCD_CONST(PPMR_BUILD_VER_BYTE, 0x1C)
+HCD_CONST(PPMR_PGPE_HCODE_OFFSET_BYTE, 0x28)
+HCD_CONST(PPMR_PGPE_HCODE_LENGTH_BYTE, 0x2C)
+HCD_CONST(PPMR_GLOBAL_PSTATE_OFFSET_BYTE, 0x30)
+HCD_CONST(PPMR_GLOBAL_PSTATE_LENGTH_BYTE, 0x34)
+HCD_CONST(PPMR_LOCAL_PSTATE_OFFSET_BYTE, 0x38)
+HCD_CONST(PPMR_LOCAL_PSTATE_LENGTH_BYTE, 0x3C)
+HCD_CONST(PPMR_OCC_PSTATE_OFFSET_BYTE, 0x40)
+HCD_CONST(PPMR_OCC_PSTATE_LENGTH_BYTE, 0x44)
+HCD_CONST(PPMR_PSTATE_TABLE_OFFSET_BYTE, 0x48)
+HCD_CONST(PPMR_PSTATE_TABLE_LENGTH_BYTE, 0x4C)
+HCD_CONST(PPMR_PGPE_SRAM_IMAGE_SIZE_BYTE, 0x50)
+HCD_CONST(PPMR_PGPE_BOOT_PROG_CODE_BYTE, 0x54)
+HCD_CONST(PPMR_WOF_TABLE_OFFSET, 0x58)
+HCD_CONST(PPMR_WOF_TABLE_LENGTH, 0x5C)
+HCD_CONST(PPMR_AUX_TASK_OFFSET, 0x60)
+HCD_CONST(PPMR_AUX_TASK_LENGTH, 0x64)
+HCD_CONST(PPMR_DEEP_OP_TRACE_OFFSET, 0x68)
+HCD_CONST(PPMR_DEEP_OP_TRACE_LENGTH, 0x6C)
+
+/// PGPE Boot
+
+HCD_CONST(PGPE_BOOT_COPIER_PPMR_OFFSET, PPMR_HEADER_SIZE)
+HCD_CONST(PGPE_BOOT_COPIER_SIZE, ONE_KB)
+
+HCD_CONST(PGPE_BOOT_LOADER_PPMR_OFFSET,
+ (PGPE_BOOT_COPIER_PPMR_OFFSET + PGPE_BOOT_COPIER_SIZE))
+HCD_CONST(PGPE_BOOT_LOADER_SIZE, ONE_KB)
+HCD_CONST(PGPE_BOOT_LOADER_RESET_ADDR_VAL, 0x40)
+HCD_CONST(XGPE_BOOT_LOADER_RESET_ADDR_VAL, PGPE_BOOT_LOADER_RESET_ADDR_VAL)
+
+HCD_CONST(PGPE_INSTRUMENTATION_SIZE, (2 * ONE_KB))
+/// PGPE Image
+HCD_CONST(PGPE_IMAGE_PPMR_OFFSET,
+ (PGPE_BOOT_LOADER_PPMR_OFFSET + PGPE_BOOT_LOADER_SIZE))
+
+HCD_CONST(PGPE_HCODE_RESET_ADDR_VAL, 0x40)
+HCD_CONST(PGPE_DBG_PTR_AREA_SIZE, 64)
+
+/// PGPE Header
+
+HCD_CONST(PGPE_HEADER_SIZE, 128)
+
+HCD_CONST(PGPE_MAGIC_NUMBER_BYTE, 0x00)
+HCD_CONST(PGPE_SYSTEM_RESET_ADDR_BYTE, 0x08)
+HCD_CONST(PGPE_SHARED_SRAM_ADDR_BYTE, 0x0C)
+HCD_CONST(PGPE_IVPR_ADDR_BYTE, 0x10)
+HCD_CONST(PGPE_SHARED_SRAM_LENGTH_BYTE, 0x14)
+HCD_CONST(PGPE_BUILD_DATE_BYTE, 0x18)
+HCD_CONST(PGPE_BUILD_VER_BYTE, 0x1C)
+HCD_CONST(PGPE_PGPE_FLAGS_BYTE, 0x20)
+HCD_CONST(PGPE_PGPE_TIMEBASE_HZ, 0x24)
+HCD_CONST(PGPE_GLOBAL_PSTATE_SRAM_ADDR_BYTE, 0x28)
+HCD_CONST(PGPE_HCODE_LENGTH_BYTE, 0x2C)
+HCD_CONST(PGPE_GLOBAL_PSTATE_MEM_OFFSET_BYTE, 0x30)
+HCD_CONST(PGPE_GLOBAL_PSTATE_PPB_SIZE_BYTE, 0x34)
+HCD_CONST(PGPE_GEN_PSTATE_TABLE_MEM_OFFSET_BYTE, 0x38)
+HCD_CONST(PGPE_GEN_PSTATE_TABLE_SIZE_BYTE, 0x3C)
+HCD_CONST(PGPE_OCC_PSTATE_TABLE_MEM_OFFSET_BYTE, 0x40)
+HCD_CONST(PGPE_OCC_PSTATE_TABLE_SIZE_BYTE, 0x44)
+HCD_CONST(PGPE_BEACON_ADDR_BYTE, 0x48)
+HCD_CONST(PGPE_RESERVE_1, 0x4C)
+HCD_CONST(PGPE_WOF_STATE_ADDR_BYTE, 0x50)
+HCD_CONST(PGPE_RESERVE_2, 0x54)
+HCD_CONST(PGPE_WOF_TABLE_ADDR_BYTE, 0x58)
+HCD_CONST(PGPE_WOF_TABLE_LENGTH_BYTE, 0x5C)
+HCD_CONST(PGPE_RESERVE_3, 0x60)
+HCD_CONST(PGPE_RESERVE_4, 0x64)
+HCD_CONST(PGPE_RESERVE_5, 0x68)
+HCD_CONST(PGPE_OP_TRACE_PTR_BYTE, 0x6C)
+HCD_CONST(PGPE_DEEP_OP_TRACE_MEM_ADDR_BYTE, 0x70)
+HCD_CONST(PGPE_DEEP_OP_TRACE_LENGTH_BYTE, 0x74)
+
+HCD_CONST(PGPE_RESET_ADDR_IMAGE_OFFSET, (PGPE_HEADER_IMAGE_OFFSET + PGPE_SYSTEM_RESET_ADDR_BYTE))
+HCD_CONST(PGPE_BUILD_DATE_IMAGE_OFFSET, (PGPE_HEADER_IMAGE_OFFSET + PGPE_BUILD_DATE_BYTE))
+HCD_CONST(PGPE_BUILD_VER_IMAGE_OFFSET, (PGPE_HEADER_IMAGE_OFFSET + PGPE_BUILD_VER_BYTE))
+
+//PPMR Misc
+HCD_CONST(PPMR_MEM_MASK, 0x80300000)
+
+/// PGPE Hcode
+HCD_CONST(PPMR_BOOT_REGION, (PPMR_HEADER_SIZE + PGPE_BOOT_COPIER_SIZE + PGPE_BOOT_LOADER_SIZE ))
+HCD_CONST(PGPE_SRAM_BOOT_REGION, (PPMR_HEADER_SIZE + PGPE_BOOT_LOADER_SIZE ))
+HCD_CONST(PGPE_GLOBAL_PSTATE_PARAM_BLOCK_SIZE, (6 * ONE_KB))
+HCD_CONST(PGPE_OCC_SHARED_SRAM_SIZE, (2 * ONE_KB))
+HCD_CONST(PGPE_DEBUG_PTRS_OFFSET, 0x200)
+HCD_CONST(PGPE_DEBUG_PTRS_SIZE, 0x24)
+
+
+/// Pstate Parameter Block + Pstate Table
+
+HCD_CONST(OCC_PSTATE_PARAM_BLOCK_PPMR_OFFSET, (128 * ONE_KB))
+HCD_CONST(OCC_PSTATE_PARAM_BLOCK_SIZE, (8 * ONE_KB)) // this is over allocated
+HCD_CONST(OCC_PSTATE_PARAM_BLOCK_REGION_SIZE, (16 * ONE_KB))
+
+HCD_CONST(PGPE_PSTATE_OUTPUT_TABLES_PPMR_OFFSET, (144 * ONE_KB))
+HCD_CONST(PGPE_PSTATE_OUTPUT_TABLES_SIZE, (8 * ONE_KB)) // this is over allocated
+HCD_CONST(PGPE_PSTATE_OUTPUT_TABLES_REGION_SIZE, (16 * ONE_KB))
+
+HCD_CONST(OCC_WOF_TABLES_PPMR_OFFSET, (768 * ONE_KB))
+HCD_CONST(OCC_WOF_TABLES_SIZE, (256 * ONE_KB))
+HCD_CONST(PPMR_RESERVE_PSTATE_TABLE_TO_WOF,
+ ( OCC_WOF_TABLES_PPMR_OFFSET - ( PGPE_PSTATE_OUTPUT_TABLES_PPMR_OFFSET + PGPE_PSTATE_OUTPUT_TABLES_REGION_SIZE ) ))
+
+HCD_CONST(WOF_TABLE_RESERVE,
+ OCC_WOF_TABLES_PPMR_OFFSET - (PGPE_PSTATE_OUTPUT_TABLES_PPMR_OFFSET + PGPE_PSTATE_OUTPUT_TABLES_REGION_SIZE))
+HCD_CONST(PGPE_AUX_TASK_SIZE, (2 * ONE_KB))
+
+#endif /* __HCD_MEMMAP_BASE_H__ */
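Two of the derived constants above can be spot-checked with a short compile-time sketch (not part of the patch; the EX_* values are transcribed from the HCD_CONSTs above, and C11/C++11 static_assert is assumed):

#include <assert.h>

enum
{
    EX_ONE_KB              = 1024,
    EX_SELF_SAVE_RESTORE   = 256 * EX_ONE_KB,    /* SELF_SAVE_RESTORE_REGION_SIZE */
    EX_SCOM_RESTORE_TOTAL  =  96 * EX_ONE_KB,    /* SCOM_RESTORE_SIZE_TOTAL       */
    EX_SCOM_ENTRY_SIZE     =  12,                /* 4B address + 8B data          */
    EX_ENTRIES_PER_CORE    =  16 + 32 + 64 + 16  /* core + L2 + L3 + EQ entries   */
};

/* QME_IMAGE_CPMR_OFFSET: the QME image starts right after the 256KB
 * self-restore region and the 96KB SCOM restore region, i.e. at 0x58000. */
static_assert(EX_SELF_SAVE_RESTORE + EX_SCOM_RESTORE_TOTAL == 0x58000,
              "QME image offset within CPMR");

/* SCOM_RESTORE_SIZE_PER_QME: 128 entries * 12 bytes * 4 cores = 6KB per QME. */
static_assert(EX_ENTRIES_PER_CORE * EX_SCOM_ENTRY_SIZE * 4 == 6 * EX_ONE_KB,
              "SCOM restore footprint per QME");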
diff --git a/libpore/p10_hcd_memmap_homer.H b/libpore/p10_hcd_memmap_homer.H
new file mode 100644
index 0000000..6338bf2
--- /dev/null
+++ b/libpore/p10_hcd_memmap_homer.H
@@ -0,0 +1,94 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/hwp/lib/p10_hcd_memmap_homer.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p10_hcd_memmap_homer.H
+/// @brief defines region constants of homer.
+///
+
+// *HWP HWP Owner: David Du <daviddu@us.ibm.com>
+// *HWP Backup HWP Owner: Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner: Prem S Jha <premjha2@in.ibm.com>
+// *HWP Team: PM
+// *HWP Level: 2
+// *HWP Consumed by: PM:Hostboot:Phyp
+
+#ifndef __P9_HCD_MEMMAP_HOMER_H__
+#define __P9_HCD_MEMMAP_HOMER_H__
+
+#include <p10_hcd_header_defs.H>
+#include <p10_hcd_memmap_base.H>
+
+// -------------------------------------------------------------------
+// Note: There can be NO semicolons(";") at end of macros in this file
+// There can ONLY be HCD_CONST/HCD_CONST64 macros in this file
+//--------------------------------------------------------------------
+
+/// HOMER
+
+HCD_CONST(HOMER_BASE_ADDR, 0x80000000)
+HCD_CONST(IMG_HDR_ALIGN_SIZE, 128)
+
+/// OPMR
+HCD_CONST(OPMR_REGION_SIZE, ONE_MB )
+
+
+/// XPMR
+HCD_CONST(XPMR_HOMER_OFFSET, (HOMER_XPMR_REGION_NUM* ONE_MB))
+HCD_CONST(HOMER_XPMR_BASE_ADDR, (HOMER_BASE_ADDR + (XPMR_HOMER_OFFSET)))
+HCD_CONST(HOMER_XPMR_HEADER_ADDR, HOMER_XPMR_BASE_ADDR)
+HCD_CONST(HOMER_XGPE_BOOT_COPIER_ADDR, (HOMER_XPMR_HEADER_ADDR + XPMR_HEADER_SIZE))
+HCD_CONST(XGPE_BOOT_COPIER_SIZE, (ONE_KB))
+HCD_CONST(HOMER_XGPE_BOOT_LOADER_OFFSET_ADDR,
+ (HOMER_XPMR_HEADER_ADDR + XPMR_BOOT_LOADER_OFFSET_BYTE))
+HCD_CONST(HOMER_XGPE_BOOT_LOADER_LENGTH_ADDR,
+ (HOMER_XPMR_HEADER_ADDR + XPMR_BOOT_LOADER_LENGTH_BYTE))
+
+/// CPMR
+
+HCD_CONST(HOMER_CPMR_BASE_ADDR, (HOMER_BASE_ADDR + (CPMR_HOMER_OFFSET)))
+HCD_CONST(HOMER_CPMR_HEADER_ADDR, HOMER_CPMR_BASE_ADDR)
+HCD_CONST(HOMER_CPMR_TRACE_ADDR, (HOMER_CPMR_BASE_ADDR + CPMR_TRACE_REGION_OFFSET))
+HCD_CONST(HOMER_CPMR_DEBUG_ADDR, (HOMER_CPMR_BASE_ADDR + CPMR_DEBUG_REGION_OFFSET))
+
+
+/// PPMR
+
+HCD_CONST(HOMER_PPMR_BASE_ADDR, (HOMER_BASE_ADDR + (PPMR_HOMER_OFFSET)))
+HCD_CONST(HOMER_PPMR_HEADER_ADDR, HOMER_PPMR_BASE_ADDR)
+HCD_CONST(HOMER_PGPE_BOOT_LOADER_OFFSET_ADDR,
+ (HOMER_PPMR_HEADER_ADDR + PPMR_BOOT_LOADER_OFFSET_BYTE))
+HCD_CONST(HOMER_PGPE_BOOT_LOADER_LENGTH_ADDR,
+ (HOMER_PPMR_HEADER_ADDR + PPMR_BOOT_LOADER_LENGTH_BYTE))
+HCD_CONST(HOMER_PGPE_BOOT_COPIER_ADDR,
+ (HOMER_PPMR_HEADER_ADDR + PPMR_HEADER_SIZE))
+
+HCD_CONST(HOMER_OCC_PSTATE_PARAM_BLOCK_ADDR,
+ (HOMER_PPMR_BASE_ADDR + OCC_PSTATE_PARAM_BLOCK_PPMR_OFFSET))
+HCD_CONST(HOMER_PGPE_PSTATE_OUTPUT_TABLES_ADDR,
+ (HOMER_PPMR_BASE_ADDR + PGPE_PSTATE_OUTPUT_TABLES_PPMR_OFFSET))
+HCD_CONST(HOMER_OCC_WOF_TABLES_ADDR,
+ (HOMER_PPMR_BASE_ADDR + OCC_WOF_TABLES_PPMR_OFFSET))
+
+#endif /* __P9_HCD_MEMMAP_HOMER_H__ */
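For reference, a small sketch (not part of the patch) of the absolute region bases these constants imply, with HOMER at 0x80000000 and one 1MB slot per HOMER_*_REGION_NUM; the ex_* names are illustrative only:

#include <stdint.h>

#define EX_ONE_MB 0x100000u

static const uint32_t ex_homer_base = 0x80000000u;                 /* HOMER_BASE_ADDR */
static const uint32_t ex_opmr_base  = 0x80000000u + 0 * EX_ONE_MB; /* 0x80000000      */
static const uint32_t ex_xpmr_base  = 0x80000000u + 1 * EX_ONE_MB; /* 0x80100000      */
static const uint32_t ex_cpmr_base  = 0x80000000u + 2 * EX_ONE_MB; /* 0x80200000      */
static const uint32_t ex_ppmr_base  = 0x80000000u + 3 * EX_ONE_MB; /* 0x80300000      */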
diff --git a/libpore/p10_hcd_memmap_occ_sram.H b/libpore/p10_hcd_memmap_occ_sram.H
new file mode 100644
index 0000000..255748b
--- /dev/null
+++ b/libpore/p10_hcd_memmap_occ_sram.H
@@ -0,0 +1,174 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/hwp/lib/p10_hcd_memmap_occ_sram.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2020 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p10_hcd_memmap_occ_sram.H
+/// @brief defines region constants of occ sram.
+///
+
+// *HWP HWP Owner: David Du <daviddu@us.ibm.com>
+// *HWP Backup HWP Owner: Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner: Prem S Jha <premjha2@in.ibm.com>
+// *HWP Team: PM
+// *HWP Level: 2
+// *HWP Consumed by: HB, XGPE,PGPE
+
+#ifndef __HCD_MEMMAP_OCC_SRAM_H__
+#define __HCD_MEMMAP_OCC_SRAM_H__
+
+#include <p10_hcd_header_defs.H>
+#include <p10_hcd_memmap_base.H>
+
+// -------------------------------------------------------------------
+// Note: There can be NO semicolons(";") at end of macros in this file
+// There can ONLY be HCD_CONST/HCD_CONST64 macros in this file
+// -------------------------------------------------------------------
+
+/// OCC SRAM
+
+HCD_CONST(OCC_SRAM_BASE_ADDR, 0xFFF00000)
+HCD_CONST(GPE0_SRAM_BASE_ADDR, 0xFFF01000)
+HCD_CONST(GPE1_SRAM_BASE_ADDR, 0xFFF10000)
+HCD_CONST(PGPE_SRAM_BASE_ADDR, 0xFFF20000)
+HCD_CONST(XGPE_SRAM_BASE_ADDR, 0xFFF30000)
+HCD_CONST(OCC_SRAM_SIZE, ONE_MB)
+HCD_CONST(OCC_SRAM_END_ADDR, ( OCC_SRAM_BASE_ADDR + OCC_SRAM_SIZE))
+
+/// Base Addresses for various debug/trace regions in OCC SRAM
+HCD_CONST(OCC_SRAM_TRACE_BUF_BASE_ERR, 0xFFFB4000)
+HCD_CONST(OCC_SRAM_TRACE_BUF_BASE_INF, 0xFFFB6000)
+HCD_CONST(OCC_SRAM_TRACE_BUF_BASE_IMP, 0xFFFB8000)
+HCD_CONST(OCC_SRAM_TRACE_BUF_BASE_SSX_PTR, 0xFFF40824)
+HCD_CONST(OCC_SRAM_PGPE_REGION_SIZE, (64 * ONE_KB))
+HCD_CONST(OCC_SHARED_SRAM_ADDR_START,
+ ((PGPE_SRAM_BASE_ADDR + OCC_SRAM_PGPE_REGION_SIZE) - PGPE_OCC_SHARED_SRAM_SIZE))
+
+// Offset to trace buf ptr and trace buffer size from base
+HCD_CONST(GPE_DEBUG_PTR_OFFSET, 0x180)
+
+// Size of various traces regions in OCC SRAM
+HCD_CONST(OCC_SRAM_TRACE_BUF_SSX_SIZE_PTR, 0xFFF40828)
+HCD_CONST(OCC_SRAM_TRACE_BUF_ERR_SIZE, (8 * ONE_KB))
+HCD_CONST(OCC_SRAM_TRACE_BUF_INF_SIZE, (8 * ONE_KB))
+HCD_CONST(OCC_SRAM_TRACE_BUF_IMP_SIZE, (8 * ONE_KB))
+
+HCD_CONST(OCC_SRAM_IPC_REGION_SIZE, (4 * ONE_KB))
+HCD_CONST(OCC_SRAM_GPE0_REGION_SIZE, (60 * ONE_KB))
+HCD_CONST(OCC_SRAM_GPE1_REGION_SIZE, (64 * ONE_KB))
+HCD_CONST(OCC_SRAM_OCC_REGION_SIZE, (512 * ONE_KB))
+HCD_CONST(OCC_SRAM_XGPE_REGION_SIZE, (64 * ONE_KB))
+
+
+HCD_CONST(PPE_RESET_VECTOR, 0x40)
+//--------------------------------------------------------------------------------------
+
+/// PGPE Base
+
+HCD_CONST(OCC_SRAM_PGPE_BASE_ADDR, PGPE_SRAM_BASE_ADDR)
+HCD_CONST(OCC_SRAM_PGPE_END_ADDR,
+ (PGPE_SRAM_BASE_ADDR + OCC_SRAM_PGPE_REGION_SIZE))
+HCD_CONST(OCC_SRAM_PGPE_HCODE_RESET_ADDR,
+ (PGPE_SRAM_BASE_ADDR + PGPE_HCODE_RESET_ADDR_VAL))
+HCD_CONST(OCC_SRAM_PGPE_HEADER_ADDR,
+ (OCC_SRAM_PGPE_BASE_ADDR + PGPE_INT_VECTOR_SIZE))
+//PGPE image size is sum of various parts hence located here instead of p10_hcd_memmap_base.H
+HCD_CONST(PGPE_HCODE_SIZE, (OCC_SRAM_PGPE_REGION_SIZE - ( PGPE_OCC_SHARED_SRAM_SIZE +
+ PGPE_GLOBAL_PSTATE_PARAM_BLOCK_SIZE + PGPE_SRAM_BOOT_REGION )))
+HCD_CONST(PGPE_IMAGE_SIZE, (PGPE_HCODE_SIZE + PGPE_GLOBAL_PSTATE_PARAM_BLOCK_SIZE +
+ PGPE_OCC_SHARED_SRAM_SIZE + PGPE_SRAM_BOOT_REGION))
+HCD_CONST(PGPE_IMAGE_RESERVE_SIZE,
+ (OCC_PSTATE_PARAM_BLOCK_PPMR_OFFSET - PGPE_IMAGE_PPMR_OFFSET - PGPE_IMAGE_SIZE))
+
+
+/// PGPE Boot
+
+HCD_CONST(OCC_SRAM_PGPE_COPY_BOOT_LOADER_SIZE, ONE_KB)
+HCD_CONST(OCC_SRAM_PGPE_COPY_PPMR_HEADER_SIZE, 512)
+HCD_CONST(OCC_SRAM_PGPE_BOOT_LOADER_ADDR,
+ (OCC_SRAM_END_ADDR - OCC_SRAM_PGPE_COPY_BOOT_LOADER_SIZE))
+HCD_CONST(OCC_SRAM_PGPE_BOOT_LOADER_RESET_ADDR,
+ (OCC_SRAM_PGPE_BOOT_LOADER_ADDR + PGPE_BOOT_LOADER_RESET_ADDR_VAL))
+HCD_CONST(OCC_SRAM_PGPE_PPMR_HEADER_ADDR,
+ (OCC_SRAM_PGPE_BOOT_LOADER_ADDR - OCC_SRAM_PGPE_COPY_PPMR_HEADER_SIZE))
+HCD_CONST(OCC_SRAM_PGPE_OPTRACE_ADDR, OCC_SRAM_PGPE_BOOT_LOADER_ADDR)
+HCD_CONST(OCC_SRAM_PGPE_OPTRACE_SIZE, OCC_SRAM_PGPE_COPY_BOOT_LOADER_SIZE)
+
+/// PGPE Copy
+
+HCD_CONST(OCC_SRAM_PGPE_HCODE_OFFSET_ADDR,
+ (OCC_SRAM_PGPE_PPMR_HEADER_ADDR + PPMR_PGPE_HCODE_OFFSET_BYTE))
+HCD_CONST(OCC_SRAM_PGPE_HCODE_LENGTH_ADDR,
+ (OCC_SRAM_PGPE_PPMR_HEADER_ADDR + PPMR_PGPE_HCODE_LENGTH_BYTE))
+HCD_CONST(OCC_SRAM_PGPE_IMAGE_LENGTH_ADDR,
+ (OCC_SRAM_PGPE_PPMR_HEADER_ADDR + PPMR_PGPE_SRAM_IMAGE_SIZE_BYTE))
+
+// Misc constants used in PGPE boot loader and boot copier.
+HCD_CONST(PGPE_BOOT_COPY_SUCCESS, 0x42432d53 ) // ASCII code for BC-S
+HCD_CONST(PGPE_BOOT_COPIER_FAIL, 0x42432d46 ) // ASCII code for BC-F
+HCD_CONST(PGPE_BOOT_LOADER_SUCCESS, 0x424c2d53 ) // ASCII code for BL-S
+HCD_CONST(PGPE_BOOT_LOADER_FAIL, 0x424c2d46 ) // ASCII code for BL-F
+
+//--------------------------------------------------------------------------------------
+
+// Misc constants used in XGPE boot loader and boot copier.
+HCD_CONST(DIVDE_BY_8, 3)
+HCD_CONST(DOUBLE_WORD_SIZE, 8)
+HCD_CONST(XGPE_IMG_OFFSET_POS, 40)
+HCD_CONST(BOOT_COPIER_LEN_ZERO, 0)
+HCD_CONST(ENABLE_TRAP, 0)
+HCD_CONST(XGPE_BOOT_COPY_SUCCESS, 0x42432d53 ) // ASCII code for BC-S
+HCD_CONST(XGPE_BOOT_COPIER_FAIL, 0x42432d46 ) // ASCII code for BC-F
+HCD_CONST(XGPE_BOOT_LOADER_SUCCESS, 0x424c2d53 ) // ASCII code for BL-S
+HCD_CONST(XGPE_BOOT_LOADER_FAIL, 0x424c2d46 ) // ASCII code for BL-F
+
+/// XGPE Base
+HCD_CONST(OCC_SRAM_XGPE_SYSTEM_RESET_ADDR,
+ (XGPE_SRAM_BASE_ADDR + XGPE_HCODE_RESET_ADDR_VAL))
+HCD_CONST(OCC_SRAM_XGPE_IVPR_ADDR, XGPE_SRAM_BASE_ADDR)
+HCD_CONST(OCC_SRAM_XGPE_GPPB_ADDR,
+ (PGPE_SRAM_BASE_ADDR + PGPE_HEADER_IMAGE_OFFSET + PGPE_GLOBAL_PSTATE_SRAM_ADDR_BYTE))
+HCD_CONST(OCC_SRAM_XGPE_GPPB_LEN,
+ (PGPE_SRAM_BASE_ADDR + PGPE_HEADER_IMAGE_OFFSET + PGPE_GLOBAL_PSTATE_PPB_SIZE_BYTE))
+
+/// XGPE Boot
+HCD_CONST(OCC_SRAM_XGPE_COPY_BOOT_LOADER_SIZE, ONE_KB)
+HCD_CONST(OCC_SRAM_XGPE_COPY_XPMR_HEADER_SIZE, 512)
+HCD_CONST(OCC_SRAM_XGPE_BOOT_LOADER_ADDR,
+ (OCC_SRAM_END_ADDR - OCC_SRAM_XGPE_COPY_BOOT_LOADER_SIZE))
+HCD_CONST(OCC_SRAM_XGPE_BOOT_LOADER_RESET_ADDR,
+ (OCC_SRAM_XGPE_BOOT_LOADER_ADDR + XGPE_BOOT_LOADER_RESET_ADDR_VAL))
+HCD_CONST(OCC_SRAM_XGPE_XPMR_HEADER_ADDR,
+ (OCC_SRAM_XGPE_BOOT_LOADER_ADDR - OCC_SRAM_XGPE_COPY_XPMR_HEADER_SIZE))
+
+/// XGPE Copy
+HCD_CONST(OCC_SRAM_XGPE_HCODE_OFFSET_ADDR,
+ (OCC_SRAM_XGPE_XPMR_HEADER_ADDR + XPMR_XGPE_HCODE_OFFSET_BYTE))
+HCD_CONST(OCC_SRAM_XGPE_HCODE_LENGTH_ADDR,
+ (OCC_SRAM_XGPE_XPMR_HEADER_ADDR + XPMR_XGPE_HCODE_LENGTH_BYTE))
+HCD_CONST(OCC_SRAM_XGPE_IMAGE_LENGTH_ADDR,
+ (OCC_SRAM_XGPE_XPMR_HEADER_ADDR + XPMR_XGPE_SRAM_IMAGE_SIZE_BYTE))
+HCD_CONST(OCC_SRAM_XGPE_HCODE_RESET_ADDR,
+ (XGPE_SRAM_BASE_ADDR + XGPE_HCODE_RESET_ADDR_VAL))
+
+#endif /* __HCD_MEMMAP_OCC_SRAM_H__ */
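Two of the derived OCC SRAM addresses above, worked out explicitly in a compile-time sketch (not part of the patch; 64-bit arithmetic is used because OCC SRAM ends exactly at the 4GB boundary, and the EX_* values are copied from the constants above):

#include <assert.h>
#include <stdint.h>

#define EX_PGPE_SRAM_BASE        0xFFF20000ull   /* PGPE_SRAM_BASE_ADDR           */
#define EX_PGPE_REGION_SIZE      (64 * 1024ull)  /* OCC_SRAM_PGPE_REGION_SIZE     */
#define EX_SHARED_SRAM_SIZE      ( 2 * 1024ull)  /* PGPE_OCC_SHARED_SRAM_SIZE     */
#define EX_OCC_SRAM_BASE         0xFFF00000ull   /* OCC_SRAM_BASE_ADDR            */
#define EX_OCC_SRAM_SIZE         (1024 * 1024ull)/* OCC_SRAM_SIZE                 */
#define EX_BOOT_LOADER_COPY_SIZE (1024ull)       /* OCC_SRAM_PGPE_COPY_BOOT_LOADER_SIZE */

/* OCC_SHARED_SRAM_ADDR_START: the last 2KB of the PGPE's 64KB region. */
static_assert((EX_PGPE_SRAM_BASE + EX_PGPE_REGION_SIZE) - EX_SHARED_SRAM_SIZE
              == 0xFFF2F800ull, "shared SRAM start");

/* OCC_SRAM_PGPE_BOOT_LOADER_ADDR: the last 1KB of OCC SRAM, which ends at 4GB. */
static_assert((EX_OCC_SRAM_BASE + EX_OCC_SRAM_SIZE) - EX_BOOT_LOADER_COPY_SIZE
              == 0xFFFFFC00ull, "PGPE boot loader copy address");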
diff --git a/libpore/p10_hcode_image_defines.H b/libpore/p10_hcode_image_defines.H
new file mode 100644
index 0000000..6a14cb2
--- /dev/null
+++ b/libpore/p10_hcode_image_defines.H
@@ -0,0 +1,462 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/hwp/lib/p10_hcode_image_defines.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2019,2020 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+///
+/// @file p10_hcode_image_defines.H
+/// @brief defines constants associated with hcode image build.
+///
+// *HWP HWP Owner: Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner: Prem S Jha <premjha2@in.ibm.com>
+// *HWP Team: PM
+// *HWP Level: 2
+// *HWP Consumed by: Hostboot: HBRT
+
+#ifndef __HW_IMG_DEFINE
+#define __HW_IMG_DEFINE
+
+#include <p10_hcd_header_defs.H>
+#include <p10_hcd_memmap_base.H>
+#include <p10_hcd_memmap_homer.H>
+#include <p10_hcd_memmap_occ_sram.H>
+
+//--------------------------------------------------------------------------
+// local structs and constants
+// -------------------------------------------------------------------------
+#ifndef __ASSEMBLER__
+#ifdef __cplusplus
+#ifndef __PPE_PLAT
+
+#define IMG_HDR_ALIGN_SIZE 32
+
+namespace hcodeImageBuild
+{
+#endif //__PPE_PLAT
+#endif //__cplusplus
+#endif //__ASSEMBLER__
+
+/**
+ * CPMR Header
+ *
+ * This header is only consumed by Hcode Image Build and
+ * lab tools, not by PPE code. It is generated with assembler
+ * primitives during QME build and placed in HOMER by
+ * Hcode Image Build.
+ */
+
+#ifdef __ASSEMBLER__
+.macro .cpmr_header
+.section ".cpmr" , "aw"
+.balign 8
+#else
+typedef struct
+{
+#endif
+HCD_HDR_ATTN ( iv_attnOpcodes, 2);
+HCD_HDR_UINT64( iv_cpmrMagicWord, CPMR_MAGIC_NUMBER);
+HCD_HDR_UINT32( iv_buildDate, 0);
+HCD_HDR_UINT32( iv_version, 0);
+HCD_HDR_UINT8_VEC (iv_reserveFlags, 4, 0);
+HCD_HDR_UINT8 ( iv_selfRestoreVer, 0);
+HCD_HDR_UINT8 ( iv_stopApiVer, 0);
+HCD_HDR_UINT8 ( iv_urmorFix, 0);
+HCD_HDR_UINT8 ( iv_fusedMode, 0);
+HCD_HDR_UINT32( iv_qmeImgOffset, 0);
+HCD_HDR_UINT32( iv_qmeImgLength, 0);
+HCD_HDR_UINT32( iv_commonRingOffset, 0);
+HCD_HDR_UINT32( iv_commonRingLength, 0);
+HCD_HDR_UINT32( iv_localPstateOffset, 0);
+HCD_HDR_UINT32( iv_localPstateLength, 0);
+HCD_HDR_UINT32( iv_specRingOffset, 0);
+HCD_HDR_UINT32( iv_specRingLength, 0);
+HCD_HDR_UINT32( iv_scomRestoreOffset, 0);
+HCD_HDR_UINT32( iv_scomRestoreLength, 0);
+HCD_HDR_UINT32( iv_selfRestoreOffset, 0);
+HCD_HDR_UINT32( iv_selfRestoreLength, 0);
+HCD_HDR_UINT32( iv_maxCoreL2ScomEntry, 0);
+HCD_HDR_UINT32( iv_maxEqL3ScomEntry, 0);
+HCD_HDR_UINT32( iv_coreL2ScomOffset, 0);
+HCD_HDR_UINT32( iv_coreL2ScomLength, 0);
+HCD_HDR_UINT32( iv_eqL3ScomOffset, 0);
+HCD_HDR_UINT32( iv_eqL3ScomLength, 0);
+HCD_HDR_PAD(CPMR_HEADER_SIZE);
+#ifdef __ASSEMBLER__
+.endm
+#else
+} __attribute__((packed, aligned(256))) CpmrHeader_t;
+#endif
+
+/**
+ * QME Header
+ *
+ * The QME header is loaded in the QME SRAM, so it is "tight" (little extra space).
+ * Thus, this "structure" is NOT padded to a specific size and is limited to
+ * 64B. Also, structure member names are preceded with "g_" as these become
+ * global variables in the QME Hcode.
+ */
+#ifdef __ASSEMBLER__
+.macro .qme_header
+.section ".qme_image_header" , "aw"
+.balign 8
+#else
+typedef struct
+{
+#endif
+HCD_HDR_UINT64( g_qme_magic_number, QME_MAGIC_NUMBER );
+HCD_HDR_UINT32( g_qme_hcode_offset, 0 );
+HCD_HDR_UINT32( g_qme_hcode_length, 0 );
+HCD_HDR_UINT32( g_qme_common_ring_offset, 0 );
+HCD_HDR_UINT32( g_qme_cmn_ring_ovrd_offset, 0 );
+HCD_HDR_UINT32( g_qme_common_ring_length, 0 );
+HCD_HDR_UINT32( g_qme_pstate_region_offset, 0 );
+HCD_HDR_UINT32( g_qme_pstate_region_length, 0 );
+HCD_HDR_UINT32( g_qme_inst_spec_ring_offset, 0 );
+HCD_HDR_UINT32( g_qme_max_spec_ring_length, 0 );
+HCD_HDR_UINT32( g_qme_scom_offset, 0 );
+HCD_HDR_UINT32( g_qme_scom_length, 0 );
+HCD_HDR_UINT32( g_qme_attr_tank_address, 0 );
+HCD_HDR_UINT16( g_qme_location_id, 0 );
+HCD_HDR_UINT16( g_qme_reserved , 0 );
+HCD_HDR_UINT32( g_qme_timebase_hz, 0 ); //Retain next field at 8B boundary
+HCD_HDR_UINT64( g_qme_cpmr_PhyAddr, 0 );
+HCD_HDR_UINT64( g_qme_unsec_cpmr_PhyAddr, 0 );
+HCD_HDR_UINT32( g_qme_custom_length, 0 );
+HCD_HDR_UINT32( g_qme_elog_addr, 0 );
+HCD_HDR_PAD(IMG_HDR_ALIGN_SIZE);
+#ifdef __ASSEMBLER__
+.endm
+#else
+//QME Header size is 96B
+} __attribute__((packed, aligned(32))) QmeHeader_t;
+#endif
+
+#ifndef __ASSEMBLER__
+
+typedef struct QMEImageFlags
+{
+ uint32_t fused_mode : 1;
+ uint32_t reserved0 : 31;
+} QMEImageFlags_t;
+
+#endif //__ASSEMBLER__
+
+#ifdef __ASSEMBLER__
+.macro .ppmr_header
+.section ".ppmr" , "aw"
+.balign 8
+#else
+typedef struct
+{
+#endif
+HCD_HDR_UINT64( iv_ppmrMagicWord, PPMR_MAGIC_NUMBER);
+HCD_HDR_UINT32( iv_bootCopierOffset, 0);
+HCD_HDR_UINT32( iv_reserved1, 0);
+HCD_HDR_UINT32( iv_bootLoaderOffset, 0);
+HCD_HDR_UINT32( iv_bootLoaderLength, 0);
+HCD_HDR_UINT32( iv_buildDate, 0);
+HCD_HDR_UINT32( iv_buildVer, 0);
+HCD_HDR_UINT64( iv_reserved2, 0);
+HCD_HDR_UINT32( iv_hcodeOffset, 0);
+HCD_HDR_UINT32( iv_hcodeLength, 0);
+HCD_HDR_UINT32( iv_gpspbOffset, 0);
+HCD_HDR_UINT32( iv_gpspbLength, 0);
+HCD_HDR_UINT32( iv_lpspbOffset, 0);
+HCD_HDR_UINT32( iv_lpspbLength, 0);
+HCD_HDR_UINT32( iv_opspbOffset, 0);
+HCD_HDR_UINT32( iv_opspbLength, 0);
+HCD_HDR_UINT32( iv_pstateOffset, 0);
+HCD_HDR_UINT32( iv_pstateLength, 0);
+HCD_HDR_UINT32( iv_sramSize, 0);
+HCD_HDR_UINT32( iv_progCode, 0);
+HCD_HDR_UINT32( iv_wofTableOffset, 0);
+HCD_HDR_UINT32( iv_wofTableLength, 0);
+HCD_HDR_UINT32( iv_deepOptraceOffset, 0);
+HCD_HDR_UINT32( iv_deepOptraceLength, 0);
+
+#ifdef __ASSEMBLER__
+.endm
+#else
+} __attribute__((packed, aligned(32))) PpmrHeader_t;
+#endif
+
+#ifdef __ASSEMBLER__
+.macro .pgpe_header
+.section ".pgpe_hdr" , "aw"
+.balign 8
+#else
+typedef struct
+{
+#endif
+HCD_HDR_UINT64( g_pgpe_magicWord, PGPE_MAGIC_NUMBER);
+HCD_HDR_UINT32( g_pgpe_sysResetAddress, 0);
+HCD_HDR_UINT32( g_pgpe_sharedSramAddress, 0);
+HCD_HDR_UINT32( g_pgpe_ivprAddress, 0);
+HCD_HDR_UINT32( g_pgpe_sharedLength, 0);
+HCD_HDR_UINT32( g_pgpe_buildDate, 0);
+HCD_HDR_UINT32( g_pgpe_buildVer, 0);
+HCD_HDR_UINT32( g_pgpe_reserved0, 0);
+HCD_HDR_UINT32( g_pgpe_timeBaseHz, 0);
+HCD_HDR_UINT32( g_pgpe_gpspbSramAddress, 0);
+HCD_HDR_UINT32( g_pgpe_hcodeLength, 0);
+HCD_HDR_UINT32( g_pgpe_gpspbMemOffset, 0);
+HCD_HDR_UINT32( g_pgpe_gpspbMemLength, 0);
+HCD_HDR_UINT32( g_pgpe_genPsTableMemOffset, 0);
+HCD_HDR_UINT32( g_pgpe_genPsTableMemLength, 0);
+HCD_HDR_UINT32( g_pgpe_opspbTableAddress, 0);
+HCD_HDR_UINT32( g_pgpe_opspbTableLength, 0);
+HCD_HDR_UINT32( g_pgpe_beaconAddress, 0);
+HCD_HDR_UINT32( g_pgpe_reserved1, 0);
+HCD_HDR_UINT32( g_pgpe_pgpeWofStateAddress, 0);
+HCD_HDR_UINT32( g_pgpe_reserved2, 0);
+HCD_HDR_UINT32( g_pgpe_wofTableAddress, 0);
+HCD_HDR_UINT32( g_pgpe_wofTableLength, 0);
+HCD_HDR_UINT32( g_pgpe_reserved3, 0);
+HCD_HDR_UINT32( g_pgpe_reserved4, 0);
+HCD_HDR_UINT32( g_pgpe_reserved5, 0);
+HCD_HDR_UINT32( g_pgpe_opTracePtr, 0);
+HCD_HDR_UINT32( g_pgpe_deepOpTraceMemAddress, 0);
+HCD_HDR_UINT32( g_pgpe_deepOpTraceLength, 0);
+#ifdef __ASSEMBLER__
+.endm
+#else
+} __attribute__((packed, aligned(32))) PgpeHeader_t;
+#endif
+
+#ifdef __ASSEMBLER__
+.macro .xpmr_hdr
+.section ".xpmr" , "aw"
+.balign 8
+#else
+typedef struct
+{
+#endif
+HCD_HDR_UINT64( iv_xpmrMagicWord, XPMR_MAGIC_NUMBER);
+HCD_HDR_UINT32( iv_bootCopierOffset, 0);
+HCD_HDR_UINT32( iv_reserve1, 0);
+HCD_HDR_UINT32( iv_bootLoaderOffset, 0);
+HCD_HDR_UINT32( iv_bootLoaderLength, 0);
+HCD_HDR_UINT32( iv_buildDate, 0);
+HCD_HDR_UINT32( iv_version, 0);
+HCD_HDR_UINT32( iv_reserve2, 0);
+HCD_HDR_UINT32( iv_reserve3, 0);
+HCD_HDR_UINT32( iv_xgpeHcodeOffset, 0);
+HCD_HDR_UINT32( iv_xgpeHcodeLength, 0);
+HCD_HDR_UINT32( iv_xgpeBootProgCode, 0);
+HCD_HDR_UINT32( iv_xgpeSramSize, 0);
+HCD_HDR_PAD(XPMR_HEADER_SIZE);
+#ifdef __ASSEMBLER__
+.endm
+#else
+} __attribute__((packed, aligned(512))) XpmrHeader_t;
+#endif
+
+#ifdef __ASSEMBLER__
+.macro .xgpe_header
+.section ".xgpe_header" , "aw"
+.balign 8
+#else
+typedef struct
+{
+#endif
+HCD_HDR_UINT64( g_xgpe_magicWord, XGPE_MAGIC_NUMBER);
+HCD_HDR_UINT32( g_xgpe_sysResetAddress, 0 ); //FIXME need to add correct address
+HCD_HDR_UINT32( g_xgpe_sharedSramAddress, 0 ); //FIXME need to add correct address
+HCD_HDR_UINT32( g_xgpe_ivprAddress, 0 ); //FIXME need to add correct address
+HCD_HDR_UINT32( g_xgpe_sharedSramLength, 0 );
+HCD_HDR_UINT32( g_xgpe_buildDate, 0 );
+HCD_HDR_UINT32( g_xgpe_buildVer, 0 );
+HCD_HDR_UINT16( g_xgpe_xgpeFlags, 0 );
+HCD_HDR_UINT16( g_xgpe_reserve1, 0 );
+HCD_HDR_UINT32( g_xgpe_timeBaseHz, 0 );
+HCD_HDR_UINT32( g_xgpe_gpspbSramAddress, 0 );
+HCD_HDR_UINT32( g_xgpe_hcodeLength, 0 );
+HCD_HDR_UINT32( g_xgpe_reserve2, 0 );
+HCD_HDR_UINT32( g_xgpe_gpspbLength, 0 );
+HCD_HDR_UINT32( g_xgpe_coreThrottleAssertCnt, 0 );
+HCD_HDR_UINT32( g_xgpe_coreThrottleDeAssertCnt, 0 );
+HCD_HDR_UINT32( g_xgpe_charactControls, 0 );
+HCD_HDR_UINT32( g_xgpe_xgpeOpTracePointer, 0 );
+HCD_HDR_UINT32( g_xgpe_xgpeDeepOpTraceMemAddr, 0 );
+HCD_HDR_UINT32( g_xgpe_xgpeDeepOpTraceLength, 0 );
+HCD_HDR_PAD(IMG_HDR_ALIGN_SIZE);
+#ifdef __ASSEMBLER__
+.endm
+#else
+} __attribute__((packed, aligned(32))) XgpeHeader_t;
+#endif
+
+#ifndef __ASSEMBLER__
+
+/**
+ * @brief enumerates all return codes associated with hcode image build.
+ */
+enum ImgBldRetCode_t
+{
+ IMG_BUILD_SUCCESS = 0,
+ BUILD_FAIL_XGPE_IMAGE = 1,
+ BUILD_FAIL_SELF_REST_IMAGE = 2,
+ BUILD_FAIL_QME_IMAGE = 3,
+ BUILD_FAIL_PGPE_IMAGE = 4,
+ BUILD_FAIL_XGPE_QPMR = 5,
+ BUILD_FAIL_XGPE_BL1 = 6,
+ BUILD_FAIL_XGPE_BL2 = 7,
+ BUILD_FAIL_XGPE_INT_VECT = 8,
+ BUILD_FAIL_XGPE_HDR = 9,
+ BUILD_FAIL_XGPE_HCODE = 10,
+ BUILD_FAIL_XGPE_CMN_RINGS = 11,
+ BUILD_FAIL_XGPE_SPEC_RINGS = 12,
+ BUILD_FAIL_CPMR_HDR = 13,
+ BUILD_FAIL_SRESET_HNDLR = 14,
+ BUILD_FAIL_THRD_LAUNCHER = 15,
+ BUILD_FAIL_SPR_RESTORE = 16,
+ BUILD_FAIL_SCOM_RESTORE = 17,
+ BUILD_FAIL_QME_IMG_HDR = 18,
+ BUILD_FAIL_QME_HCODE = 19,
+ BUILD_FAIL_CMN_RINGS = 20,
+ BUILD_FAIL_QME_QUAD_PSTATE = 21,
+ BUILD_FAIL_SPEC_RINGS = 22,
+ BUILD_FAIL_INT_VECT = 23,
+ BUILD_FAIL_PGPE_BL1 = 24,
+ BUILD_FAIL_PGPE_BL2 = 25,
+ BUILD_FAIL_PGPE_HCODE = 26,
+ BUILD_FAIL_OVERRIDE = 27,
+ BUILD_SEC_SIZE_OVERFLOW = 28,
+ BUILD_FAIL_INVALID_SECTN = 29,
+ BUILD_FAIL_RING_EXTRACTN = 30,
+ QME_SRAM_IMG_SIZE_ERR = 31,
+ XGPE_SRAM_IMG_SIZE_ERR = 32,
+ PGPE_SRAM_IMG_SIZE_ERR = 33,
+ BUILD_FAIL_PGPE_PPMR = 34,
+ BUILD_FAIL_XIP_CUST_ERR = 35,
+ BUILD_ERR_INTERNAL = 0xffff,
+};
+
+/**
+ * @brief models SCOM restore header region.
+ */
+typedef struct
+{
+ uint16_t iv_magicMark;
+ uint8_t iv_version;
+ uint8_t iv_reserved1;
+ uint8_t iv_reserved2[4];
+ uint16_t iv_coreOffset;
+ uint16_t iv_coreLength;
+ uint16_t iv_eqOffset;
+ uint16_t iv_eqLength;
+ uint16_t iv_l2Offset;
+ uint16_t iv_l2Length;
+ uint16_t iv_l3Offset;
+ uint16_t iv_l3Length;
+} ScomRestoreHeader_t;
+
+/**
+ * @brief models a CPU register restoration area in STOP section of homer image.
+ */
+typedef struct
+{
+ uint8_t iv_threadRestoreArea[MAX_THREADS_PER_CORE][SMF_CORE_RESTORE_THREAD_AREA_SIZE];
+ uint8_t iv_threadSaveArea[MAX_THREADS_PER_CORE][SMF_SELF_SAVE_THREAD_AREA_SIZE];
+ uint8_t iv_coreRestoreArea[SMF_CORE_RESTORE_CORE_AREA_SIZE];
+ uint8_t iv_coreSaveArea[SMF_CORE_SAVE_CORE_AREA_SIZE];
+} SmfSprRestoreRegion_t;
+
+/**
+ * @brief models image section of CPMR in HOMER.
+ */
+typedef union CPMRSelfRestoreLayout
+{
+ uint8_t iv_region[SMF_SELF_RESTORE_CODE_SIZE];
+ struct
+ {
+ CpmrHeader_t iv_CPMRHeader;
+ uint8_t iv_exe[SMF_SELF_RESTORE_CODE_SIZE - sizeof(CpmrHeader_t)];
+ } elements;
+} CPMRSelfRestoreLayout_t;
+
+/**
+ * @brief models image section associated with core self restore in HOMER.
+ */
+typedef struct
+{
+ CPMRSelfRestoreLayout_t iv_CPMR_SR;
+ uint8_t iv_coreSelfRestore[SMF_SELF_RESTORE_CORE_REGS_SIZE];
+ uint8_t iv_reserve[SCOM_RESTORE_CPMR_OFFSET - SMF_SELF_RESTORE_SIZE_TOTAL];
+ uint8_t iv_coreScom[SCOM_RESTORE_SIZE_TOTAL];
+} SelfRestoreLayout_t;
+
+typedef struct
+{
+ SelfRestoreLayout_t iv_selfRestoreRegion;
+ uint8_t iv_qmeSramRegion[QME_REGION_SIZE];
+} CPMRLayout_t;
+
+/**
+ * @brief models image section associated with PGPE in HOMER.
+ */
+typedef struct
+{
+ uint8_t iv_ppmrHeader[PPMR_HEADER_SIZE];
+ uint8_t iv_bootCopier[PGPE_BOOT_COPIER_SIZE];
+ uint8_t iv_bootLoader[PGPE_BOOT_LOADER_SIZE];
+ uint8_t iv_pgpeSramRegion[OCC_SRAM_PGPE_REGION_SIZE];
+ uint8_t iv_reserve1[OCC_PSTATE_PARAM_BLOCK_PPMR_OFFSET - (PPMR_BOOT_REGION + OCC_SRAM_PGPE_REGION_SIZE)];
+ uint8_t iv_occPstateParamBlock[OCC_PSTATE_PARAM_BLOCK_REGION_SIZE];
+ uint8_t iv_pstateTable[PGPE_PSTATE_OUTPUT_TABLES_REGION_SIZE];
+ uint8_t iv_reserve2[PPMR_RESERVE_PSTATE_TABLE_TO_WOF];
+ uint8_t iv_wofTable[OCC_WOF_TABLES_SIZE];
+} PPMRLayout_t;
+
+/**
+ * @brief models XPMR in HOMER.
+ */
+typedef struct
+{
+ uint8_t iv_xpmrHeader[XPMR_HEADER_SIZE];
+ uint8_t iv_bootCopier[XGPE_BOOT_COPIER_LENGTH];
+ uint8_t iv_bootLoader[XGPE_BOOT_LOADER_LENGTH];
+ uint8_t iv_xgpeSramRegion[XGPE_SRAM_SIZE];
+} XPMRLayout_t;
+
+/**
+ * @brief models layout of HOMER.
+ */
+typedef struct
+{
+ uint8_t iv_occHostRegion[OCC_HOST_AREA_SIZE];
+ XPMRLayout_t iv_xpmrRegion;
+ uint8_t iv_xpmrReserve[ONE_MB - sizeof( XPMRLayout_t )];
+ CPMRLayout_t iv_cpmrRegion;
+ uint8_t iv_cpmrReserve[ONE_MB - sizeof( CPMRLayout_t )];
+ PPMRLayout_t iv_ppmrRegion;
+ uint8_t iv_ppmrReserve[ONE_MB - sizeof( PPMRLayout_t )];
+} Homerlayout_t;
+
+#ifdef __cplusplus
+#ifndef __PPE_PLAT
+}// namespace hcodeImageBuild ends
+#endif //__PPE_PLAT
+#endif //__cplusplus
+
+#endif //__ASSEMBLER__
+#endif //__HW_IMG_DEFINE
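The iv_*Reserve members above pad each PM region out to a full megabyte, so the complete layout adds up to the 4MB HOMER image. A compile-time sketch of that invariant (not part of the patch; it assumes the p10 headers are on the include path, a C++11 translation unit, and that the compiler introduces no extra struct padding):

#include "p10_hcode_image_defines.H"

using namespace hcodeImageBuild;

// Each region struct, together with its reserve sibling, occupies exactly 1MB.
static_assert(sizeof(XPMRLayout_t) <= ONE_MB, "XPMR fits in its megabyte");
static_assert(sizeof(CPMRLayout_t) <= ONE_MB, "CPMR fits in its megabyte");
static_assert(sizeof(PPMRLayout_t) <= ONE_MB, "PPMR fits in its megabyte");

// OCC host area (1MB) + XPMR + CPMR + PPMR slots = HOMER_MEMORY_SIZE (4MB).
static_assert(sizeof(Homerlayout_t) == HOMER_MEMORY_SIZE, "HOMER is 4MB");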
diff --git a/libpore/p10_stop_api.C b/libpore/p10_stop_api.C
new file mode 100644
index 0000000..4a8efa7
--- /dev/null
+++ b/libpore/p10_stop_api.C
@@ -0,0 +1,1816 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: chips/p10/procedures/utils/stopreg/p10_stop_api.C $ */
+/* */
+/* IBM CONFIDENTIAL */
+/* */
+/* EKB Project */
+/* */
+/* COPYRIGHT 2015,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* The source code for this program is not published or otherwise */
+/* divested of its trade secrets, irrespective of what has been */
+/* deposited with the U.S. Copyright Office. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+///
+/// @file p10_stop_api.C
+/// @brief implements STOP API which create/manipulate STOP image.
+///
+// *HWP HW Owner : Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com>
+// *HWP Team : PM
+// *HWP Level : 2
+// *HWP Consumed by : HB:HYP
+
+// *INDENT-OFF*
+#ifdef PPC_HYP
+ #include <HvPlicModule.H>
+#endif
+
+#include "p10_stop_api.H"
+#include "p10_cpu_reg_restore_instruction.H"
+#include "p10_stop_data_struct.H"
+#include <string.h>
+#include "p10_stop_util.H"
+#include "p10_hcode_image_defines.H"
+#ifdef __cplusplus
+extern "C" {
+
+using namespace hcodeImageBuild;
+namespace stopImageSection
+{
+#endif
+// a true in the table below means the register is of scope thread
+// whereas a false means the register is of scope core.
+
+const StopSprReg_t g_sprRegister_p10[] =
+{
+ { PROC_STOP_SPR_CIABR, true, 0 },
+ { PROC_STOP_SPR_DAWR, true, 1 },
+ { PROC_STOP_SPR_DAWRX, true, 2 },
+ { PROC_STOP_SPR_HSPRG0, true, 3 },
+ { PROC_STOP_SPR_LDBAR, true, 4, },
+ { PROC_STOP_SPR_LPCR, true, 5 },
+ { PROC_STOP_SPR_PSSCR, true, 6 },
+ { PROC_STOP_SPR_MSR, true, 7 },
+ { PROC_STOP_SPR_HRMOR, false, 255 },
+ { PROC_STOP_SPR_HID, false, 21 },
+ { PROC_STOP_SPR_HMEER, false, 22 },
+ { PROC_STOP_SPR_PMCR, false, 23 },
+ { PROC_STOP_SPR_PTCR, false, 24 },
+ { PROC_STOP_SPR_SMFCTRL, true, 28 },
+ { PROC_STOP_SPR_USPRG0, true, 29 },
+ { PROC_STOP_SPR_USPRG1, true, 30 },
+ { PROC_STOP_SPR_URMOR, false, 255 },
+};
+
+const uint32_t MAX_SPR_SUPPORTED_P10 = 17;
+const uint32_t DEFAULT_CORE_SCOM_SUPPORTED = 15;
+const uint32_t DEFAULT_QUAD_SCOM_SUPPORTED = 255;
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief validates input arguments passed to proc_stop_save_cpureg_control.
+ * @param[in] i_pImage point to start of HOMER
+ * @param[in] i_coreId id of the core
+ * @param[in] i_threadId id of the thread
+ * @param[in] i_saveMaskVector SPR save bit mask vector
+ * @return STOP_SAVE_SUCCESS if function succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t validateArgumentSaveRegMask( void* const i_pImage,
+ uint32_t const i_coreId,
+ uint32_t const i_threadId,
+ uint64_t i_saveMaskVector )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+
+ do
+ {
+ if( !i_pImage )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ if( i_coreId > MAX_CORE_ID_SUPPORTED )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_CORE;
+ break;
+ }
+
+ if( i_threadId > MAX_THREAD_ID_SUPPORTED )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_THREAD;
+ break;
+ }
+
+ if( ( 0 == i_saveMaskVector ) || ( BAD_SAVE_MASK & i_saveMaskVector ) )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_REG;
+ break;
+ }
+
+ }
+ while(0);
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief validates input arguments provided by STOP API caller.
+ * @param[in] i_pImage pointer to beginning of chip's HOMER image.
+ * @param[in] i_regId SPR register id
+ * @param[in] i_coreId core id
+ * @param[in|out] i_pThreadId points to thread id
+ * @param[in|out] i_pThreadLevelReg points to scope information of SPR
+ * @return STOP_SAVE_SUCCESS if arguments found valid, error code otherwise.
+ * @note for a register of core scope, the function forces *i_pThreadId to
+ *       zero.
+ */
+STATIC StopReturnCode_t validateSprImageInputs( void* const i_pImage,
+ const CpuReg_t i_regId,
+ const uint32_t i_coreId,
+ uint32_t* i_pThreadId,
+ bool* i_pThreadLevelReg )
+{
+ uint32_t index = 0;
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ bool sprSupported = false;
+ *i_pThreadLevelReg = false;
+
+ do
+ {
+ if( NULL == i_pImage )
+ {
+ // Error: HOMER image start location is not valid
+ // Cannot proceed further. So, let us exit.
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ MY_ERR( "invalid image location " );
+
+ break;
+ }
+
+ // STOP API manages STOP image based on physical core Id. PIR value
+ // is interpreted to calculate the physical core number and virtual
+ // thread number.
+ if( MAX_CORE_ID_SUPPORTED < i_coreId )
+ {
+ // Error: invalid core number. given core number exceeds maximum
+ // cores supported by chip.
+
+ // Physical core number is calculated based on following formula:
+ // core id = 4 * quad id (0..5) + core no within quad ( 0..3)
+ l_rc = STOP_SAVE_ARG_INVALID_CORE;
+ MY_ERR( "invalid core id " );
+ break;
+ }
+
+ if( MAX_THREAD_ID_SUPPORTED < *i_pThreadId )
+ {
+ //Error: invalid core thread. Given core thread exceeds maximum
+ //threads supported in a core.
+
+            // 64 bit PIR value is interpreted to calculate the virtual thread
+            // id. In fused mode, b61 and b62 give the virtual thread id whereas
+            // in non-fused mode, b62 and b63 are read to determine the same.
+
+ l_rc = STOP_SAVE_ARG_INVALID_THREAD;
+ MY_ERR( "invalid thread " );
+ break;
+ }
+
+ for( index = 0; index < MAX_SPR_SUPPORTED_P10; ++index )
+ {
+ if( i_regId == (CpuReg_t )g_sprRegister_p10[index].iv_sprId )
+ {
+ // given register is in the list of register supported
+ sprSupported = true;
+ *i_pThreadLevelReg = g_sprRegister_p10[index].iv_isThreadScope;
+ *i_pThreadId = *i_pThreadLevelReg ? *i_pThreadId : 0;
+ break;
+ }
+ }
+
+ if( !sprSupported )
+ {
+            // Given register is not in the list of supported SPRs.
+            MY_ERR("Register not supported" );
+            // return error code to caller.
+ l_rc = STOP_SAVE_ARG_INVALID_REG;
+ break;
+ }
+
+ }
+ while(0);
+
+ if( l_rc )
+ {
+ MY_ERR( "regId %08d, coreId %d, "
+ "threadId %d return code 0x%08x", i_regId,
+ i_coreId, *i_pThreadId, l_rc );
+ }
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates ori instruction code.
+ * @param[in] i_Rs Source register number
+ * @param[in] i_Ra destination register number
+ * @param[in] i_data 16 bit immediate data
+ * @return returns 32 bit number representing ori instruction.
+ */
+STATIC uint32_t getOriInstruction( const uint16_t i_Rs, const uint16_t i_Ra,
+ const uint16_t i_data )
+{
+ uint32_t oriInstOpcode = 0;
+ oriInstOpcode = 0;
+ oriInstOpcode = ORI_OPCODE << 26;
+ oriInstOpcode |= i_Rs << 21;
+ oriInstOpcode |= i_Ra << 16;
+ oriInstOpcode |= i_data;
+
+ return SWIZZLE_4_BYTE(oriInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates 32 bit key used for SPR lookup in core section.
+ */
+STATIC uint32_t genKeyForSprLookup( const CpuReg_t i_regId )
+{
+ return getOriInstruction( 24, 0, (uint16_t) i_regId );
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates xor instruction code.
+ * @param[in] i_Ra destination register number for xor operation result
+ * @param[in] i_Rs source register number for xor operation
+ * @param[in] i_Rb second source register number for xor operation
+ * @return returns 32 bit number representing xor instruction.
+ */
+STATIC uint32_t getXorInstruction( const uint16_t i_Ra, const uint16_t i_Rs,
+ const uint16_t i_Rb )
+{
+ uint32_t xorRegInstOpcode;
+ xorRegInstOpcode = XOR_CONST << 1;
+ xorRegInstOpcode |= OPCODE_31 << 26;
+ xorRegInstOpcode |= i_Rs << 21;
+ xorRegInstOpcode |= i_Ra << 16;
+ xorRegInstOpcode |= i_Rb << 11;
+
+ return SWIZZLE_4_BYTE(xorRegInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates oris instruction code.
+ * @param[in] i_Rs source register number
+ * @param[in] i_Ra destination register number
+ * @param[in] i_data 16 bit immediate data
+ * @return returns 32 bit number representing oris immediate instruction.
+ */
+STATIC uint32_t getOrisInstruction( const uint16_t i_Rs, const uint16_t i_Ra,
+ const uint16_t i_data )
+{
+ uint32_t orisInstOpcode;
+ orisInstOpcode = 0;
+ orisInstOpcode = ORIS_OPCODE << 26;
+ orisInstOpcode |= ( i_Rs & 0x001F ) << 21 | ( i_Ra & 0x001F ) << 16;
+ orisInstOpcode |= i_data;
+
+ return SWIZZLE_4_BYTE(orisInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates instruction for mtspr
+ * @param[in] i_Rs source register number
+ * @param[in] i_Spr represents spr where data is to be moved.
+ * @return returns 32 bit number representing mtspr instruction.
+ */
+STATIC uint32_t getMtsprInstruction( const uint16_t i_Rs, const uint16_t i_Spr )
+{
+ uint32_t mtsprInstOpcode = 0;
+ uint32_t temp = (( i_Spr & 0x03FF ) << 11);
+ mtsprInstOpcode = (uint8_t)i_Rs << 21;
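+    // mtspr encodes the SPR number with its two 5-bit halves swapped
+    // (spr[5:9] || spr[0:4]); the two masked shifts below perform that swap.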
+ mtsprInstOpcode |= ( temp & 0x0000F800 ) << 5;
+ mtsprInstOpcode |= ( temp & 0x001F0000 ) >> 5;
+ mtsprInstOpcode |= MTSPR_BASE_OPCODE;
+
+ return SWIZZLE_4_BYTE(mtsprInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates instruction for mfmsr
+ * @param[in] i_Rt target register for MSR content.
+ * @return returns 32 bit number representing mfmsr instruction.
+ */
+STATIC uint32_t getMfmsrInstruction( const uint16_t i_Rt )
+{
+ uint32_t mfmsrInstOpcode = ((OPCODE_31 << 26) | (i_Rt << 21) | ((MFMSR_CONST)<< 1));
+
+ return SWIZZLE_4_BYTE(mfmsrInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief generates rldicr instruction.
+ * @param[in] i_Rs source register number
+ * @param[in] i_Ra destination register number
+ * @param[in] i_sh bit position by which contents of i_Rs are to be shifted
+ * @param[in] i_me bit position up to which mask should be 1.
+ * @return returns 32 bit number representing rldicr instruction.
+ */
+STATIC uint32_t getRldicrInstruction( const uint16_t i_Ra, const uint16_t i_Rs,
+ const uint16_t i_sh, uint16_t i_me )
+{
+ uint32_t rldicrInstOpcode = 0;
+ rldicrInstOpcode = ((RLDICR_OPCODE << 26 ) | ( i_Rs << 21 ) | ( i_Ra << 16 ));
+ rldicrInstOpcode |= ( ( i_sh & 0x001F ) << 11 ) | (RLDICR_CONST << 2 );
+ rldicrInstOpcode |= (( i_sh & 0x0020 ) >> 4);
+ rldicrInstOpcode |= (i_me & 0x001F ) << 6;
+ rldicrInstOpcode |= (i_me & 0x0020 );
+ return SWIZZLE_4_BYTE(rldicrInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
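+/**
+ * @brief generates instruction for mfspr.
+ * @param[in] i_Rt target register for SPR content.
+ * @param[in] i_sprNum number of the SPR to be read.
+ * @return returns 32 bit number representing mfspr instruction.
+ */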
+STATIC uint32_t getMfsprInstruction( const uint16_t i_Rt, const uint16_t i_sprNum )
+{
+ uint32_t mfsprInstOpcode = 0;
+ uint32_t temp = (( i_sprNum & 0x03FF ) << 11);
+ mfsprInstOpcode = (uint8_t)i_Rt << 21;
+ mfsprInstOpcode |= (( temp & 0x0000F800 ) << 5);
+ mfsprInstOpcode |= (( temp & 0x001F0000 ) >> 5);
+ mfsprInstOpcode |= MFSPR_BASE_OPCODE;
+
+ return SWIZZLE_4_BYTE(mfsprInstOpcode);
+}
+
+//-----------------------------------------------------------------------------
+
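+/**
+ * @brief generates a branch instruction with the link and absolute bits set,
+ *        targeting the self-save function at SELF_SAVE_FUNC_ADD.
+ * @return returns 32 bit number representing the branch instruction.
+ */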
+STATIC uint32_t getBranchLinkRegInstruction(void)
+{
+ uint32_t branchConstInstOpcode = 0;
+ branchConstInstOpcode = (( OPCODE_18 << 26 ) | ( SELF_SAVE_FUNC_ADD ) | 0x03 );
+
+ return SWIZZLE_4_BYTE(branchConstInstOpcode);
+}
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief looks up entry for given SPR in given thread/core section.
+ * @param[in] i_pThreadSectLoc start of given thread section or core section.
+ * @param[in] i_lookUpKey search key for lookup of given SPR entry.
+ * @param[in] i_isThreadReg true if register is of scope thread, false
+ * otherwise.
+ * @param[in|out] io_pSprEntryLoc Input: NULL
+ * Output: location of given entry or end of table.
+ * @return STOP_SAVE_SUCCESS if entry is found, STOP_SAVE_FAIL in case of
+ * an error.
+ */
+STATIC StopReturnCode_t lookUpSprInImage( uint32_t* i_pThreadSectLoc, const uint32_t i_lookUpKey,
+ const bool i_isThreadReg, void** io_pSprEntryLoc )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_FAIL;
+ uint32_t temp = 0;
+ uint32_t* i_threadSectEnd = NULL;
+ uint32_t bctr_inst = SWIZZLE_4_BYTE(BLR_INST);
+ *io_pSprEntryLoc = NULL;
+
+ do
+ {
+ if( !i_pThreadSectLoc )
+ {
+ MY_ERR( "Bad SPR Start Location" );
+ break;
+ }
+
+ temp = i_isThreadReg ? (uint32_t)(SMF_CORE_RESTORE_THREAD_AREA_SIZE) :
+ (uint32_t)(SMF_CORE_RESTORE_CORE_AREA_SIZE);
+
+ i_threadSectEnd = i_pThreadSectLoc + ( temp >> 2 );
+
+ temp = 0;
+
+ while( ( i_pThreadSectLoc <= i_threadSectEnd ) &&
+ ( temp != bctr_inst ) )
+ {
+ temp = *i_pThreadSectLoc;
+
+ if( ( temp == i_lookUpKey ) || ( temp == bctr_inst ) )
+ {
+ *io_pSprEntryLoc = i_pThreadSectLoc;
+ l_rc = STOP_SAVE_SUCCESS;
+ break;
+ }
+
+ i_pThreadSectLoc = i_pThreadSectLoc + SIZE_PER_SPR_RESTORE_INST;
+ }
+ }
+ while(0);
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief updates an SPR STOP image entry.
+ * @param[in] i_pSprEntryLocation location of entry.
+ * @param[in] i_regId register Id associated with SPR.
+ * @param[in] i_regData data to be written to the SPR entry.
+ * @param[in] i_mode INIT_SPR_REGION or UPDATE_SPR_ENTRY.
+ * @return STOP_SAVE_SUCCESS if update works, STOP_SAVE_FAIL otherwise.
+ */
+STATIC StopReturnCode_t updateSprEntryInImage( uint32_t* i_pSprEntryLocation,
+ const CpuReg_t i_regId,
+ const uint64_t i_regData,
+ const enum SprEntryUpdateMode i_mode
+ )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t tempInst = 0;
+ uint64_t tempRegData = 0;
+ bool newEntry = true;
+    uint16_t regRs = 0; //use R0 for SPR restore instruction generation
+ uint16_t regRa = 0;
+
+ do
+ {
+ if( !i_pSprEntryLocation )
+ {
+ MY_ERR("invalid location of SPR image entry" );
+ l_rc = STOP_SAVE_FAIL;
+ break;
+ }
+
+ tempInst = genKeyForSprLookup( i_regId );
+
+ if( *i_pSprEntryLocation == tempInst )
+ {
+ newEntry = false;
+ }
+
+ //Add SPR search instruction i.e. "ori r0, r0, SPRID"
+ *i_pSprEntryLocation = tempInst;
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ if( INIT_SPR_REGION == i_mode )
+ {
+ //adding inst 'b . + 0x1C'
+ *i_pSprEntryLocation = SWIZZLE_4_BYTE(SKIP_SPR_REST_INST);
+ }
+ else
+ {
+ //clear R0 i.e. "xor ra, rs, rb"
+ tempInst = getXorInstruction( regRs, regRs, regRs );
+ *i_pSprEntryLocation = tempInst;
+ }
+
+
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ tempRegData = i_regData >> 48;
+        //get bit b0-b15 of SPR restore value in R0
+ tempInst = getOrisInstruction( regRs, regRa, (uint16_t)tempRegData );
+ *i_pSprEntryLocation = tempInst;
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ tempRegData = ((i_regData >> 32) & 0x0000FFFF );
+ //get bit b16-b31 of SPR restore value in R0
+ tempInst = getOriInstruction( regRs, regRa, (uint16_t)tempRegData );
+ *i_pSprEntryLocation = tempInst;
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ //Rotate R0 to left by 32 bit position and zero lower order 32 bits.
+ //Place the result in R0
+ tempInst = getRldicrInstruction(regRa, regRs, 32, 31);
+ *i_pSprEntryLocation = tempInst;
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ tempRegData = ((i_regData >> 16) & 0x000000FFFF );
+ //get bit b32-b47 of SPR restore value to R0
+ tempInst = getOrisInstruction( regRs, regRa, (uint16_t)tempRegData );
+ *i_pSprEntryLocation = tempInst;
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ tempRegData = (uint16_t)i_regData;
+ //get bit b48-b63 of SPR restore value to R0
+ tempInst = getOriInstruction( regRs, regRa, (uint16_t)i_regData );
+ *i_pSprEntryLocation = tempInst;
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+
+ if( PROC_STOP_SPR_MSR == i_regId )
+ {
+            //MSR cannot be restored completely with the mtmsrd instruction,
+            //as it does not update the ME, LE and HV bits. In order to restore
+            //MSR, the self-restore code moves the contents of R21 to SRR1 and
+            //then executes an rfid, which copies SRR1 to MSR. This preserves
+            //the LE bit, which we are specifically interested in. The
+            //instruction below moves the MSR value (in R0) to R21.
+ tempInst = SWIZZLE_4_BYTE( MR_R0_TO_R21 );
+ }
+ else if ( PROC_STOP_SPR_HRMOR == i_regId )
+ {
+ //Case HRMOR, move contents of R0 to a placeholder GPR (R10)
+ //Thread Launcher expects HRMOR value in R10
+ tempInst = SWIZZLE_4_BYTE( MR_R0_TO_R10 );
+ }
+ else if( PROC_STOP_SPR_URMOR == i_regId )
+ {
+ //Case URMOR, move contents of R0 to a placeholder GPR (R9)
+ //Thread Launcher expects URMOR value in R9
+ tempInst = SWIZZLE_4_BYTE( MR_R0_TO_R9 );
+ }
+ else
+ {
+ // Case other SPRs, move contents of R0 to SPR
+ // For a UV system, even HRMOR is treated like any other SPR.
+ tempInst =
+ getMtsprInstruction( 0, (uint16_t)i_regId );
+ }
+
+ *i_pSprEntryLocation = tempInst;
+
+ if( newEntry )
+ {
+ i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST;
+ //at the end of SPR restore, add instruction BLR to go back to thread
+ //launcher.
+ tempInst = SWIZZLE_4_BYTE(BLR_INST);
+ *i_pSprEntryLocation = tempInst;
+ }
+ }
+ while(0);
+
+ return l_rc;
+}
+
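+// For reference, a fully populated SPR restore entry produced above is a
+// sketch of the following instruction sequence (exact operands depend on the
+// call):
+//      <lookup key>                  ori encoding the SPR id (genKeyForSprLookup)
+//      xor    r0, r0, r0             or 'b . + 0x1C' in INIT_SPR_REGION mode
+//      oris   r0, r0, data b0:b15
+//      ori    r0, r0, data b16:b31
+//      rldicr r0, r0, 32, 31         rotate the upper half into place
+//      oris   r0, r0, data b32:b47
+//      ori    r0, r0, data b48:b63
+//      mtspr  SPR, r0                or a move to R21/R10/R9 for MSR/HRMOR/URMOR
+//      blr                           appended only for a newly created entry
+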
+//-----------------------------------------------------------------------------
+
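+/**
+ * @brief seeds one self-save slot with its default instruction sequence.
+ * @param[in] i_pImage points to the start of the self-save slot.
+ * @param[in] i_sprNum save mask bit position identifying the SPR.
+ * @return always returns STOP_SAVE_SUCCESS.
+ * @note writes the sequence: ori r0,r0,i_sprNum (eye catcher), a default
+ *       'skip this save' instruction, a nop, mtlr r30 and blr. The two
+ *       instructions after the eye catcher are later overwritten by
+ *       updateSelfSaveEntry() when self-save is enabled for the SPR.
+ */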
+STATIC StopReturnCode_t initSelfSaveEntry( void* const i_pImage, uint16_t i_sprNum )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t* i_pSprSave = (uint32_t*)i_pImage;
+
+ //ori r0, r0, 0x00nn
+ *i_pSprSave = getOriInstruction( 0, 0, i_sprNum );
+
+ i_pSprSave++;
+
+ //addi r31, r31, 0x20
+ *i_pSprSave = SWIZZLE_4_BYTE(SKIP_SPR_SELF_SAVE);
+ i_pSprSave++;
+
+ //nop
+    *i_pSprSave = getOriInstruction( 0, 0, 0 );
+ i_pSprSave++;
+
+ //mtlr, r30
+ *i_pSprSave = SWIZZLE_4_BYTE( MTLR_INST );
+ i_pSprSave++;
+
+ //blr
+ *i_pSprSave = SWIZZLE_4_BYTE(BLR_INST);
+ i_pSprSave++;
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
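+/**
+ * @brief maps a save-mask bit position to an index adjustment for
+ *        g_sprRegister_p10 (table index = bit position - adjustment).
+ * @param[in] i_saveMaskPos bit position of the SPR in the save mask vector.
+ * @param[out] i_sprAdjIndex returns the index adjustment.
+ * @return STOP_SAVE_SUCCESS if the position is valid,
+ *         STOP_SAVE_SPR_BIT_POS_RESERVE if the position is reserved.
+ */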
+STATIC StopReturnCode_t getSprRegIndexAdjustment( const uint32_t i_saveMaskPos, uint32_t* i_sprAdjIndex )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+
+ do
+ {
+ if( (( i_saveMaskPos >= SPR_BIT_POS_8 ) && ( i_saveMaskPos <= SPR_BIT_POS_20 )) ||
+ (( i_saveMaskPos >= SPR_BIT_POS_25 ) && ( i_saveMaskPos <= SPR_BIT_POS_27 )) )
+ {
+ l_rc = STOP_SAVE_SPR_BIT_POS_RESERVE;
+ break;
+ }
+
+ if( (i_saveMaskPos > SPR_BIT_POS_20) && (i_saveMaskPos < SPR_BIT_POS_25) )
+ {
+ *i_sprAdjIndex = 12;
+ }
+ else if( i_saveMaskPos > SPR_BIT_POS_27 )
+ {
+ *i_sprAdjIndex = 15;
+ }
+ else
+ {
+ *i_sprAdjIndex = 0;
+ }
+
+ }
+ while(0);
+
+ return l_rc;
+}
+
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief returns core region and core id relative to its quad.
+ * @param[in] i_scomAddress scom address associated with a core
+ * @param[out] o_scomRegion SCOM region in HOMER
+ * @param[out] o_coreRelativeInst core id relative to its quad
+ * @return STOP_SAVE_SUCCESS if function succeeds, error code otherwise
+ */
+STATIC StopReturnCode_t decodeScomAddress( const uint32_t i_scomAddress, uint32_t * o_scomRegion,
+ uint32_t * o_coreRelativeInst )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t l_regionSelect = ( i_scomAddress & CORE_REGION_MASK );
+ uint32_t l_endPoint = ( i_scomAddress & EP_SELECT_MASK );
+ l_endPoint = ( l_endPoint >> 16 );
+ l_regionSelect = l_regionSelect >> 12;
+
+ if( 1 == l_endPoint )
+ {
+ *o_scomRegion = PROC_STOP_SECTION_L3;
+ }
+ else if ( 2 == l_endPoint )
+ {
+ *o_scomRegion = PROC_STOP_SECTION_CORE;
+ }
+
+ switch( l_regionSelect )
+ {
+ case 8:
+ *o_coreRelativeInst = 0;
+ break;
+
+ case 4:
+ *o_coreRelativeInst = 1;
+ break;
+
+ case 2:
+ *o_coreRelativeInst = 2;
+ break;
+
+ case 1:
+ *o_coreRelativeInst = 3;
+ break;
+
+ default:
+ l_rc = STOP_SAVE_SCOM_INVALID_ADDRESS;
+ break;
+ }
+
+ return l_rc;
+}
+
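+// Worked example (0x20028400 is a hypothetical address, used only to
+// illustrate the decode): EP_SELECT_MASK extracts nibble 0x2, selecting
+// PROC_STOP_SECTION_CORE, and CORE_REGION_MASK extracts nibble 0x8,
+// selecting relative core 0 within the quad.
+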
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief validates all the input arguments.
+ * @param[in] i_pImage pointer to start of HOMER of image for proc chip.
+ * @param[in] i_scomAddress SCOM address of register.
+ * @param[in] i_chipletId core or cache chiplet id
+ * @param[in] i_operation operation requested for SCOM entry.
+ * @param[in] i_section image section on which operation is to be performed
+ * @return STOP_SAVE_SUCCESS if arguments found valid, error code otherwise.
+ * @note Function does not validate that the given SCOM address really
+ * belongs to the given section.
+ */
+STATIC StopReturnCode_t validateScomImageInputs( void* const i_pImage,
+ const uint32_t i_scomAddress,
+ const uint8_t i_chipletId,
+ const ScomOperation_t i_operation,
+ const ScomSection_t i_section )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t l_scomRegion = 0;
+ uint32_t l_coreId = 0;
+
+ do
+ {
+ if( !i_pImage )
+ {
+ //Error Invalid image pointer
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ MY_ERR("invalid image location ");
+ break;
+ }
+
+ if( 0 == i_scomAddress )
+ {
+ l_rc = STOP_SAVE_SCOM_INVALID_ADDRESS;
+ MY_ERR("invalid SCOM address");
+ break;
+ }
+
+ if(( CACHE_CHIPLET_ID_MIN > i_chipletId ) ||
+ ( CACHE_CHIPLET_ID_MAX < i_chipletId ))
+ {
+ l_rc = STOP_SAVE_SCOM_INVALID_CHIPLET;
+ MY_ERR("chiplet id not valid");
+ break;
+ }
+
+ if(( PROC_STOP_SCOM_OP_MIN >= i_operation ) ||
+ ( PROC_STOP_SCOM_OP_MAX <= i_operation ))
+ {
+ //invalid SCOM image operation requested
+ l_rc = STOP_SAVE_SCOM_INVALID_OPERATION;
+ MY_ERR("invalid SCOM image operation");
+ break;
+ }
+
+ l_rc = decodeScomAddress( i_scomAddress, &l_scomRegion, &l_coreId );
+
+ if( l_rc )
+ {
+ MY_ERR( "Bad Scom Address 0x%08x", i_chipletId );
+ break;
+ }
+
+ if( PROC_STOP_SECTION_CORE == l_scomRegion )
+ {
+ if( ( i_section != PROC_STOP_SECTION_CORE ) ||
+ ( i_section != PROC_STOP_SECTION_L2 ) )
+ {
+ MY_ERR( "SCOM adress doesn't match with section type passed,"
+ " EP : %d , Section Type %d", l_scomRegion, i_section );
+ l_rc = STOP_SAVE_SCOM_INVALID_SECTION;
+ break;
+ }
+ }
+
+ if( PROC_STOP_SECTION_L3 == l_scomRegion )
+ {
+ if( ( i_section != PROC_STOP_SECTION_L3 ) ||
+ ( i_section != PROC_STOP_SECTION_CACHE ) )
+ {
+ MY_ERR( "SCOM adress doesn't match with section type passed,"
+ " EP : %d , Section Type %d", l_scomRegion, i_section );
+ l_rc = STOP_SAVE_SCOM_INVALID_SECTION;
+ break;
+ }
+ }
+ }
+ while(0);
+
+ if( l_rc )
+ {
+ MY_ERR("SCOMAddress 0x%08x chipletId 0x%08x operation"
+ "0x%08x section 0x%08x", i_scomAddress, i_chipletId,
+ i_operation, i_section );
+ }
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief determines HOMER region for SCOM restore entry request.
+ * @param[in] i_pImage points to base of HOMER image.
+ * @param[in] i_sectn SCOM restore section
+ * @param[in] i_instanceId core instance id
+ * @param[out] o_entryDat meta data pertaining to SCOM restore entry analysis
+ * @return STOP_SAVE_SUCCESS if HWP succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t lookUpScomRestoreRegion( void * i_pImage, const ScomSection_t i_sectn, uint32_t i_instanceId,
+ ScomEntryDat_t * o_entryDat )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ CpmrHeader_t * l_pCpmrHdr = NULL;
+ ScomRestoreHeader_t *l_scomHdr = NULL;
+ uint32_t l_relativeCorePos = 0;
+ uint32_t l_offset = 0;
+ uint32_t l_quadId = 0;
+ uint32_t l_scomLen = 0;
+
+ MY_INF( ">>lookUpScomRestoreRegion" );
+
+ o_entryDat->iv_subRegionBaseOffset = 0;
+ o_entryDat->iv_subRegionLength = 0;
+ l_quadId = ( i_instanceId >> 2 );
+
+ l_relativeCorePos = i_instanceId % MAX_CORES_PER_QUAD;
+ l_pCpmrHdr = ( CpmrHeader_t *) ( (uint8_t *) i_pImage + CPMR_HOMER_OFFSET );
+ l_scomLen = SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxCoreL2ScomEntry) +
+ SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxEqL3ScomEntry);
+ l_scomLen = ( l_scomLen * SCOM_RESTORE_ENTRY_SIZE );
+
+ l_offset = ( l_scomLen * l_quadId * MAX_CORES_PER_QUAD ) + SCOM_RESTORE_HOMER_OFFSET;
+
+ MY_INF( "QUAD_ID 0x%08x BASE OFFSET 0x%08x", l_quadId, l_offset );
+
+ l_scomHdr = ( ScomRestoreHeader_t *) ( (uint8_t *) i_pImage + l_offset );
+
+ if( ( PROC_STOP_SECTION_CORE == i_sectn ) || ( PROC_STOP_SECTION_L2 == i_sectn ) )
+ {
+ MY_INF( "Core Offset 0x%04x", SWIZZLE_2_BYTE(l_scomHdr->iv_coreOffset) );
+ l_offset += SWIZZLE_2_BYTE(l_scomHdr->iv_coreOffset);
+ o_entryDat->iv_subRegionLength = SWIZZLE_2_BYTE(l_scomHdr->iv_coreLength);
+ l_offset += ( SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxCoreL2ScomEntry) * l_relativeCorePos );
+ }
+ else if( ( PROC_STOP_SECTION_L3 == i_sectn ) || ( PROC_STOP_SECTION_CACHE == i_sectn ) )
+ {
+ MY_INF( "Cache Offset 0x%04x", SWIZZLE_2_BYTE(l_scomHdr->iv_l3Offset) );
+ l_offset += SWIZZLE_2_BYTE(l_scomHdr->iv_l3Offset);
+ o_entryDat->iv_subRegionLength = SWIZZLE_2_BYTE(l_scomHdr->iv_l3Length);
+ l_offset += ( SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxEqL3ScomEntry) * l_relativeCorePos );
+ }
+ else
+ {
+ o_entryDat->iv_subRegionBaseOffset = 0;
+ l_rc = STOP_SAVE_SCOM_INVALID_SECTION;
+ }
+
+ if( !l_rc )
+ {
+ o_entryDat->iv_subRegionBaseOffset = l_offset;
+ }
+
+ MY_INF( "SCOM Section Offset 0x%08x", l_offset );
+
+ MY_INF( "<<lookUpScomRestoreRegion" );
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief analyzes SCOM restore region and collects some data.
+ * @param[in] i_pImage points to base of HOMER image
+ * @param[in] i_sectn id associated with SCOM restore sub-region.
+ * @param[in] i_scomAddress fully qualified SCOM address
+ * @param[in|out] o_pScomDat meta data associated with entry analysis
+ * @return STOP_SAVE_SUCCESS if HWP succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t lookUpScomRestoreEntry( void * i_pImage, const ScomSection_t i_sectn,
+ uint32_t i_scomAddress, ScomEntryDat_t * o_pScomDat )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ ScomEntry_t * l_pScom = NULL;
+ CpmrHeader_t * l_pCpmrHdr = NULL;
+ uint8_t * l_pScomByte = NULL;
+ uint32_t l_entryLimit = 0;
+ uint8_t l_entry = 0;
+ uint32_t l_temp = 0;
+
+ MY_INF( ">> lookUpScomRestoreEntry" );
+
+ o_pScomDat->iv_slotFound = 0x00;
+ o_pScomDat->iv_entryOffset = 0x00;
+ o_pScomDat->iv_lastEntryOffset = 0x00;
+ o_pScomDat->iv_entryMatchOffset = 0x00;
+ o_pScomDat->iv_matchFound = 0x00;
+ l_pCpmrHdr = ( CpmrHeader_t * ) ( (uint8_t *) i_pImage + CPMR_HOMER_OFFSET );
+ l_pScomByte = ( uint8_t * )( (uint8_t *) i_pImage + o_pScomDat->iv_subRegionBaseOffset );
+ l_pScom = (ScomEntry_t *)( l_pScomByte );
+
+ switch( i_sectn )
+ {
+ case PROC_STOP_SECTION_CORE:
+ l_entryLimit = SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxCoreL2ScomEntry);
+ break;
+
+ case PROC_STOP_SECTION_L3:
+ l_entryLimit = SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxEqL3ScomEntry);
+ break;
+
+ default:
+ l_rc = STOP_SAVE_SCOM_INVALID_SECTION;
+ break;
+ }
+
+ if( l_rc )
+ {
+ return l_rc;
+ }
+
+ for( l_entry = 0; l_entry < l_entryLimit; l_entry++ )
+ {
+ if( !( l_pScom->iv_scomAddress & SWIZZLE_4_BYTE(SCOM_ENTRY_VALID) ) )
+ {
+ o_pScomDat->iv_slotFound = 0x01;
+ o_pScomDat->iv_entryOffset = l_entry;
+ break;
+ }
+
+ l_pScom++;
+ }
+
+ l_pScom = (ScomEntry_t *)( l_pScomByte );
+
+ for( l_entry = 0; l_entry < l_entryLimit; l_entry++ )
+ {
+ if( l_pScom->iv_scomAddress & SWIZZLE_4_BYTE(LAST_SCOM_ENTRY) )
+ {
+ o_pScomDat->iv_lastEntryOffset = l_entry;
+ MY_INF( "SCOM Restore Entry Limit 0x%08x",
+ o_pScomDat->iv_lastEntryOffset );
+ break;
+ }
+ l_pScom++;
+ }
+
+ l_pScom = (ScomEntry_t *)( l_pScomByte );
+
+ for( l_entry = 0; l_entry < l_entryLimit; l_entry++ )
+ {
+ l_temp = l_pScom->iv_scomAddress & SWIZZLE_4_BYTE(SCOM_ADDR_MASK);
+
+ if( SWIZZLE_4_BYTE((i_scomAddress & SCOM_ADDR_MASK)) == l_temp )
+ {
+ o_pScomDat->iv_entryMatchOffset = l_entry;
+ o_pScomDat->iv_matchFound = 0x01;
+ MY_INF( "Existing Entry Slot No 0x%08x", l_entry );
+ break;
+ }
+ l_pScom++;
+ }
+
+ o_pScomDat->iv_entryLimit = l_entryLimit;
+
+ MY_INF( "<< lookUpScomRestoreEntry" );
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+#define UNUSED(x) (void)(x)
+
+/**
+ * @brief edits a SCOM restore entry associated with the given core.
+ * @param[in] i_pScom points to SCOM restore location
+ * @param[in] i_scomAddr SCOM address of register.
+ * @param[in] i_scomData data associated with SCOM register.
+ * @param[in] i_operation operation to be performed on SCOM entry.
+ * @param[in] i_pScomDat points to meta data associated with entry analysis
+ * @return STOP_SAVE_SUCCESS if existing entry is updated, STOP_SAVE_FAIL
+ * otherwise.
+ */
+STATIC StopReturnCode_t editScomEntry( uint8_t * i_pScom, uint32_t i_scomAddr,
+ uint64_t i_scomData, ScomOperation_t i_operation,
+ ScomEntryDat_t * i_pScomDat )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ ScomEntry_t * l_pScom = (ScomEntry_t *)i_pScom;
+ UNUSED(i_scomAddr);
+
+ MY_INF( ">> editScomEntry " );
+
+ l_pScom = l_pScom + i_pScomDat->iv_entryMatchOffset;
+
+ switch( i_operation )
+ {
+ case PROC_STOP_SCOM_OR:
+ case PROC_STOP_SCOM_OR_APPEND:
+ l_pScom->iv_scomData |= SWIZZLE_8_BYTE(i_scomData);
+ break;
+
+ case PROC_STOP_SCOM_AND:
+ case PROC_STOP_SCOM_AND_APPEND:
+ l_pScom->iv_scomData &= SWIZZLE_8_BYTE(i_scomData);
+ break;
+
+ case PROC_STOP_SCOM_REPLACE:
+ l_pScom->iv_scomData = SWIZZLE_8_BYTE(i_scomData);
+ break;
+
+ default:
+ break;
+ }
+
+ MY_INF( "<< editScomEntry " );
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief update SCOM restore entry list associated with the given core.
+ * @param[in] i_pImage points to base of HOMER image.
+ * @param[in] i_scomAddr address of SCOM register.
+ * @param[in] i_scomData data associated with SCOM register.
+ * @param[in] i_sectn SCOM restore section in HOMER.
+ * @param[in] i_operation operation type requested on restore entry.
+ * @param[in] i_pScomDat points to entry analysis meta data.
+ * @return STOP_SAVE_SUCCESS if new entry is added, STOP_SAVE_FAIL otherwise.
+ */
+STATIC StopReturnCode_t updateScomEntry( void * i_pImage, uint32_t i_scomAddr,
+ uint64_t i_scomData, const ScomSection_t i_sectn,
+ ScomOperation_t i_operation, ScomEntryDat_t * i_pScomDat )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ CpmrHeader_t * l_pCpmrHdr = NULL;
+ ScomEntry_t * l_pScom = NULL;
+ uint32_t l_maxScomEntry = 0;
+ l_pCpmrHdr = ( CpmrHeader_t * ) ( (uint8_t *) i_pImage + CPMR_HOMER_OFFSET );
+ l_pScom = ( ScomEntry_t * )( (uint8_t *) i_pImage + i_pScomDat->iv_subRegionBaseOffset );
+ switch( i_operation )
+ {
+ case PROC_STOP_SCOM_OR_APPEND:
+ case PROC_STOP_SCOM_AND_APPEND:
+ case PROC_STOP_SCOM_APPEND:
+ case PROC_STOP_SCOM_REPLACE:
+
+ l_pScom = l_pScom + i_pScomDat->iv_lastEntryOffset;
+
+ if( i_pScomDat->iv_entryLimit > i_pScomDat->iv_lastEntryOffset )
+ {
+ l_pScom->iv_scomAddress &= ~(SWIZZLE_LAST_SCOM_ENTRY);
+            l_pScom++; // move to the next (free) entry slot
+ l_pScom->iv_scomAddress = i_scomAddr & SCOM_ADDR_MASK;
+ l_pScom->iv_scomAddress |= (SCOM_ENTRY_VALID | LAST_SCOM_ENTRY | SCOM_ENTRY_VER);
+
+ if( PROC_STOP_SECTION_CORE == i_sectn )
+ {
+ l_maxScomEntry = SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxCoreL2ScomEntry);
+ l_pScom->iv_scomAddress |= CORE_SECTION_ID_CODE;
+ }
+ else
+ {
+ l_maxScomEntry = SWIZZLE_4_BYTE(l_pCpmrHdr->iv_maxEqL3ScomEntry);
+ l_pScom->iv_scomAddress |= L3_SECTION_ID_CODE;
+ }
+
+ l_pScom->iv_scomAddress |= ( l_maxScomEntry << MAX_SCOM_ENTRY_POS );
+ l_pScom->iv_scomAddress = SWIZZLE_4_BYTE(l_pScom->iv_scomAddress);
+ l_pScom->iv_scomData = SWIZZLE_8_BYTE(i_scomData);
+
+ MY_INF( "SCOM Data 0x%08x", SWIZZLE_4_BYTE(l_pScom->iv_scomAddress) );
+ }
+ else
+ {
+ MY_ERR( "Current Entry Count 0x%08x More than Max Entry Count 0x%08x",
+ i_pScomDat->iv_lastEntryOffset, i_pScomDat->iv_entryLimit );
+ l_rc = STOP_SAVE_MAX_ENTRY_REACHED;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief searches a self save entry of an SPR in self-save segment.
+ * @param[in] i_sprBitPos bit position associated with SPR in save mask vector.
+ * @param[in] i_pSprSaveStart start location of SPR save segment
+ * @param[in] i_searchLength length of SPR save segment
+ * @param[out] i_pSaveSprLoc returns location of the save entry for the given SPR.
+ * @return STOP_SAVE_SUCCESS if look up succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t lookUpSelfSaveSpr( uint32_t i_sprBitPos, uint32_t* i_pSprSaveStart,
+ uint32_t i_searchLength, uint32_t** i_pSaveSprLoc )
+{
+ int32_t l_saveWordLength = (int32_t)(i_searchLength >> 2);
+ uint32_t l_oriInst = getOriInstruction( 0, 0, i_sprBitPos );
+ StopReturnCode_t l_rc = STOP_SAVE_FAIL;
+
+ while( l_saveWordLength > 0 )
+ {
+ if( l_oriInst == *i_pSprSaveStart )
+ {
+ *i_pSaveSprLoc = i_pSprSaveStart;
+ l_rc = STOP_SAVE_SUCCESS;
+ break;
+ }
+
+ i_pSprSaveStart++;
+ l_saveWordLength--;
+ }
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+/**
+ * @brief updates a self save entry of an SPR in the self-save segment.
+ * @param[in] i_pSaveReg start of editable location of a SPR save entry.
+ * @param[in] i_sprNum Id of the SPR for which entry needs to be edited.
+ * @return STOP_SAVE_SUCCESS if look up succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t updateSelfSaveEntry( uint32_t* i_pSaveReg, uint16_t i_sprNum )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+
+ do
+ {
+ if( !i_pSaveReg )
+ {
+ l_rc = STOP_SAVE_FAIL;
+ MY_ERR( "Failed to update self save area for SPR 0x%04x", i_sprNum );
+ break;
+ }
+
+ if( PROC_STOP_SPR_MSR == i_sprNum )
+ {
+ *i_pSaveReg = getMfmsrInstruction( 1 );
+ }
+ else
+ {
+ *i_pSaveReg = getMfsprInstruction( 1, i_sprNum );
+ }
+
+ i_pSaveReg++;
+
+ *i_pSaveReg = getBranchLinkRegInstruction( );
+ }
+ while(0);
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_init_cpureg( void* const i_pImage, const uint32_t i_corePos )
+{
+
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t* l_pRestoreStart = NULL;
+ void* l_pTempLoc = NULL;
+ Homerlayout_t* l_pHomer = NULL;
+ SmfSprRestoreRegion_t * l_pSprRest = NULL;
+ uint32_t l_threadPos = 0;
+ uint32_t l_lookUpKey = 0;
+ uint32_t l_sprIndex = 0;
+
+ MY_INF( ">> proc_stop_init_cpureg" );
+
+ do
+ {
+ if( !i_pImage )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ if( i_corePos > MAX_CORE_ID_SUPPORTED )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_CORE;
+ break;
+ }
+
+ l_pHomer = ( Homerlayout_t * ) i_pImage;
+
+ for( l_sprIndex = 0; l_sprIndex < MAX_SPR_SUPPORTED_P10; l_sprIndex++ )
+ {
+ //Check if a given SPR needs to be self-saved each time on STOP entry
+
+ l_lookUpKey = genKeyForSprLookup( ( CpuReg_t )g_sprRegister_p10[l_sprIndex].iv_sprId );
+ l_pSprRest =
+ ( SmfSprRestoreRegion_t * ) &l_pHomer->iv_cpmrRegion.iv_selfRestoreRegion.iv_coreSelfRestore[0];
+
+ l_pSprRest += i_corePos;
+
+ if( g_sprRegister_p10[l_sprIndex].iv_isThreadScope )
+ {
+ for( l_threadPos = 0; l_threadPos < MAX_THREADS_PER_CORE; l_threadPos++ )
+ {
+ l_pRestoreStart =
+ (uint32_t*)&l_pSprRest->iv_threadRestoreArea[l_threadPos][0];
+
+
+ l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
+ g_sprRegister_p10[l_sprIndex].iv_isThreadScope,
+ &l_pTempLoc );
+
+ if( l_rc )
+ {
+ MY_ERR( "Thread SPR lookup failed in proc_stop_init_cpureg SPR %d Core %d Thread %d Index %d",
+ g_sprRegister_p10[l_sprIndex].iv_sprId, i_corePos, l_threadPos, l_sprIndex );
+ break;
+ }
+
+ l_rc = updateSprEntryInImage( (uint32_t*) l_pTempLoc,
+ ( CpuReg_t )g_sprRegister_p10[l_sprIndex].iv_sprId,
+ 0x00,
+ INIT_SPR_REGION );
+
+ if( l_rc )
+ {
+ MY_ERR( "Thread SPR region init failed. Core %d SPR Id %d",
+ i_corePos, g_sprRegister_p10[l_sprIndex].iv_sprId );
+ break;
+ }
+
+ }//end for thread
+
+ if( l_rc )
+ {
+ break;
+ }
+
+ }//end if SPR threadscope
+ else
+ {
+ l_pRestoreStart = (uint32_t*)&l_pSprRest->iv_coreRestoreArea[0];
+
+ l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
+ g_sprRegister_p10[l_sprIndex].iv_isThreadScope, &l_pTempLoc );
+
+ if( l_rc )
+ {
+ MY_ERR( "Core SPR lookup failed in proc_stop_init_cpureg" );
+ break;
+ }
+
+ l_rc = updateSprEntryInImage( (uint32_t*) l_pTempLoc,
+ ( CpuReg_t )g_sprRegister_p10[l_sprIndex].iv_sprId,
+ 0x00,
+ INIT_SPR_REGION );
+
+ if( l_rc )
+ {
+ MY_ERR( "Core SPR region init failed. Core %d SPR Id %d SPR Index %d",
+ i_corePos, g_sprRegister_p10[l_sprIndex].iv_sprId, l_sprIndex );
+ break;
+ }
+
+ }// end else
+
+ }// end for l_sprIndex
+
+ }
+ while(0);
+
+ MY_INF( "<< proc_stop_init_cpureg" );
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_save_scom( void* const i_pImage,
+ const uint32_t i_scomAddress,
+ const uint64_t i_scomData,
+ const ScomOperation_t i_operation,
+ const ScomSection_t i_section )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t l_quadId = 0;
+ uint32_t l_coreId = 0;
+ uint32_t l_coreRegion = 0;
+ uint8_t * l_pScom = NULL;
+ ScomEntryDat_t l_entryDat;
+
+ MY_INF( ">> proc_stop_save_scom" );
+
+ do
+ {
+ l_quadId = i_scomAddress >> 24;
+ l_quadId = l_quadId & 0x3F;
+
+ l_rc = validateScomImageInputs( i_pImage, i_scomAddress,
+ l_quadId, i_operation, i_section );
+ if( l_rc )
+ {
+ MY_ERR( "invalid argument: aborting");
+ break;
+ }
+
+ l_rc = decodeScomAddress( i_scomAddress, &l_coreRegion, &l_coreId );
+
+ if( l_rc )
+ {
+ MY_ERR( "Failed To get Core Details For Address 0x%08x", i_scomAddress );
+ break;
+ }
+
+ //Converting Superchiplet Id to instance number
+ l_quadId = l_quadId - MIN_SUPERCHIPLET_ID;
+
+ //getting core position relative to the chip
+ l_coreId += ( l_quadId << 2 );
+
+ MY_INF( "Quad Id 0x%08x COre Id 0x%08x", l_quadId, l_coreId );
+
+ // Let us find the start address of SCOM area
+
+ l_rc = lookUpScomRestoreRegion( i_pImage,
+ i_section,
+ l_coreId,
+ &l_entryDat );
+ if( l_rc )
+ {
+ MY_ERR( "Failed To Find SCOM Section Requested 0x%08x",
+ ( uint32_t) i_section );
+ break;
+ }
+
+ l_pScom = (uint8_t *)( (uint8_t *)i_pImage + l_entryDat.iv_subRegionBaseOffset );
+
+ l_rc = lookUpScomRestoreEntry( i_pImage,
+ i_section,
+ i_scomAddress,
+ &l_entryDat );
+ if( l_rc )
+ {
+ MY_ERR( "Failed To Find SCOM Entry Slot 0x%08x", (uint32_t) l_rc );
+ break;
+ }
+
+ switch( i_operation )
+ {
+ case PROC_STOP_SCOM_APPEND:
+ l_rc = updateScomEntry( i_pImage,
+ i_scomAddress,
+ i_scomData,
+ i_section,
+ i_operation,
+ &l_entryDat );
+ break;
+
+ case PROC_STOP_SCOM_OR:
+ case PROC_STOP_SCOM_AND:
+ //case PROC_STOP_SCOM_NOOP:
+
+ if( l_entryDat.iv_matchFound )
+ {
+ l_rc = editScomEntry( l_pScom,
+ i_scomAddress,
+ i_scomData,
+ i_operation,
+ &l_entryDat );
+ }
+
+ break;
+
+ case PROC_STOP_SCOM_RESET:
+
+ l_rc = lookUpScomRestoreRegion( i_pImage,
+ PROC_STOP_SECTION_CORE,
+ l_coreId,
+ &l_entryDat );
+ if( l_rc )
+ {
+ MY_ERR( "Failed To Reset SCOM Section Requested 0x%08x",
+ ( uint32_t) i_section );
+ break;
+ }
+
+ memset( (uint8_t *)((uint8_t *)i_pImage + l_entryDat.iv_subRegionBaseOffset),
+ 0x00, l_entryDat.iv_subRegionLength );
+
+ l_rc = lookUpScomRestoreRegion( i_pImage,
+ PROC_STOP_SECTION_CACHE,
+ l_coreId,
+ &l_entryDat );
+ if( l_rc )
+ {
+ MY_ERR( "Failed To Reset SCOM Section Requested 0x%08x",
+ ( uint32_t) i_section );
+ break;
+ }
+
+ memset( (uint8_t *)((uint8_t *)i_pImage + l_entryDat.iv_subRegionBaseOffset),
+ 0x00, l_entryDat.iv_subRegionLength );
+
+ break;
+
+ case PROC_STOP_SCOM_OR_APPEND:
+ case PROC_STOP_SCOM_AND_APPEND:
+ case PROC_STOP_SCOM_REPLACE:
+
+ if( l_entryDat.iv_matchFound )
+ {
+ l_rc = editScomEntry( l_pScom,
+ i_scomAddress,
+ i_scomData,
+ i_operation,
+ &l_entryDat );
+ }
+ else
+ {
+ l_rc = updateScomEntry( i_pImage,
+ i_scomAddress,
+ i_scomData,
+ i_section,
+ i_operation,
+ &l_entryDat );
+ }
+
+ break;
+
+ default:
+ l_rc = STOP_SAVE_SCOM_INVALID_OPERATION;
+ break;
+ }
+ }
+ while(0);
+
+ if( l_rc )
+ {
+ MY_ERR("SCOM image operation 0x%08x failed for chiplet 0x%08x addr"
+ "0x%08x", i_operation, l_quadId ,
+ i_scomAddress );
+ }
+
+ MY_INF( "<< proc_stop_save_scom" );
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_save_cpureg_control( void* i_pImage,
+ const uint64_t i_pir,
+ const uint32_t i_saveRegVector )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t l_coreId = 0;
+ uint32_t l_threadId = 0;
+ uint32_t l_sprPos = 0;
+ uint32_t l_sprIndex = 0;
+ uint32_t l_lookupLength = 0;
+ uint32_t l_lookUpKey = 0;
+ uint32_t* l_pSaveStart = NULL;
+ uint32_t* l_pRestoreStart = NULL;
+ uint32_t* l_pSprSave = NULL;
+ void* l_pTempLoc = NULL;
+ uint32_t * l_pTempWord = NULL;
+ Homerlayout_t* l_pHomer = NULL;
+ SmfSprRestoreRegion_t * l_pSpr = NULL;
+ MY_INF(">> proc_stop_save_cpureg_control" );
+
+ do
+ {
+ l_rc = getCoreAndThread_p10( i_pImage, i_pir, &l_coreId, &l_threadId );
+
+ if( l_rc )
+ {
+ MY_ERR( "Error in getting core no 0x%08x and thread no 0x%08x from PIR 0x%016lx",
+ l_coreId, l_threadId, i_pir );
+ break;
+ }
+
+ l_rc = validateArgumentSaveRegMask( i_pImage, l_coreId, l_threadId, i_saveRegVector );
+
+ if( l_rc )
+ {
+ MY_ERR( "Invalid argument rc 0x%08x", (uint32_t) l_rc );
+ break;
+ }
+
+ l_pHomer = ( Homerlayout_t * )i_pImage;
+ l_pSpr = ( SmfSprRestoreRegion_t *) &l_pHomer->iv_cpmrRegion.iv_selfRestoreRegion.iv_coreSelfRestore[0];
+ l_pSpr += l_coreId;
+
+ for( l_sprIndex = 0; l_sprIndex < MAX_SPR_SUPPORTED_P10; l_sprIndex++ )
+ {
+ l_sprPos = g_sprRegister_p10[l_sprIndex].iv_saveMaskPos;
+
+ if( l_sprPos > MAX_SPR_BIT_POS )
+ {
+ continue;
+ }
+
+ //Check if a given SPR needs to be self-saved each time on STOP entry
+
+ if( i_saveRegVector & ( TEST_BIT_PATTERN >> l_sprPos ) )
+ {
+
+ if( g_sprRegister_p10[l_sprIndex].iv_isThreadScope )
+ {
+ l_lookupLength = SMF_SELF_SAVE_THREAD_AREA_SIZE;
+ l_pSaveStart =
+ (uint32_t*)&l_pSpr->iv_threadSaveArea[l_threadId][0];
+ l_pRestoreStart =
+ (uint32_t*)&l_pSpr->iv_threadRestoreArea[l_threadId][0];
+ }
+ else
+ {
+ l_lookupLength = SMF_CORE_SAVE_CORE_AREA_SIZE;
+ l_pSaveStart = (uint32_t*)&l_pSpr->iv_coreSaveArea[0];
+ l_pRestoreStart = (uint32_t*)&l_pSpr->iv_coreRestoreArea[0];
+ }
+
+ // an SPR restore section for given core already exists
+ l_lookUpKey = genKeyForSprLookup( ( CpuReg_t )g_sprRegister_p10[l_sprIndex].iv_sprId );
+
+ l_rc = lookUpSprInImage( (uint32_t*)l_pRestoreStart, l_lookUpKey,
+ g_sprRegister_p10[l_sprIndex].iv_isThreadScope, &l_pTempLoc );
+
+ if( l_rc )
+ {
+                    //SPR is specified in the save mask but no restore entry is
+                    //present in memory. Create a dummy entry; the self-save code
+                    //will update it with the saved value during STOP entry.
+
+ l_rc = proc_stop_save_cpureg( i_pImage,
+ (CpuReg_t)g_sprRegister_p10[l_sprIndex].iv_sprId,
+ 0x00, //creates a dummy entry
+ i_pir );
+ }
+
+ //Find if SPR-Save eye catcher exist in self-save segment of SPR restore region.
+ l_rc = lookUpSelfSaveSpr( l_sprPos, l_pSaveStart, l_lookupLength, &l_pSprSave );
+
+ if( l_rc )
+ {
+ MY_INF( "Failed to find SPR No %02d save entry", l_sprPos );
+ l_rc = STOP_SAVE_SPR_ENTRY_MISSING;
+ break;
+ }
+
+ l_pSprSave++; //point to next instruction location
+
+ //update specific instructions of self save region to enable saving for SPR
+ l_rc = updateSelfSaveEntry( l_pSprSave, g_sprRegister_p10[l_sprIndex].iv_sprId );
+
+ if( l_rc )
+ {
+ MY_ERR( "Failed to update self save instructions for 0x%08x",
+ (uint32_t) g_sprRegister_p10[l_sprIndex].iv_sprId );
+ }
+
+ if( l_pTempLoc )
+ {
+ l_pTempWord = (uint32_t *)l_pTempLoc;
+ l_pTempWord++;
+ *l_pTempWord = getXorInstruction( 0, 0, 0 );
+ }
+
+ }// end if( i_saveRegVector..)
+ }// end for
+ }
+ while(0);
+
+ MY_INF("<< proc_stop_save_cpureg_control" );
+
+ return l_rc;
+
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_save_cpureg( void* const i_pImage,
+ const CpuReg_t i_regId,
+ const uint64_t i_regData,
+ const uint64_t i_pir )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; // procedure return code
+ SmfSprRestoreRegion_t* l_sprRegion = NULL;
+ Homerlayout_t* l_pHomer = NULL;
+
+ MY_INF(">> proc_stop_save_cpureg" );
+
+ do
+ {
+ uint32_t threadId = 0;
+ uint32_t coreId = 0;
+ uint32_t lookUpKey = 0;
+        void* pSprEntryLocation = NULL; // an offset w.r.t. start of image
+ void* pThreadLocation = NULL;
+ bool threadScopeReg = false;
+
+ l_rc = getCoreAndThread_p10( i_pImage, i_pir, &coreId, &threadId );
+
+ if( l_rc )
+ {
+ MY_ERR("Failed to determine Core Id and Thread Id from PIR 0x%016lx",
+ i_pir);
+ break;
+ }
+
+ MY_INF( " PIR 0x%016lx coreId %d threadid %d "
+ " registerId %d", i_pir, coreId,
+ threadId, i_regId );
+
+ // First of all let us validate all input arguments.
+ l_rc = validateSprImageInputs( i_pImage,
+ i_regId,
+ coreId,
+ &threadId,
+ &threadScopeReg );
+ if( l_rc )
+ {
+ // Error: bad argument traces out error code
+ MY_ERR("Bad input argument rc %d", l_rc );
+
+ break;
+ }
+
+
+ l_pHomer = ( Homerlayout_t *) i_pImage;
+ l_sprRegion = ( SmfSprRestoreRegion_t* )&l_pHomer->iv_cpmrRegion.iv_selfRestoreRegion.iv_coreSelfRestore[0];
+ l_sprRegion += coreId;
+
+ if( threadScopeReg )
+ {
+ pThreadLocation = (uint32_t *)&l_sprRegion->iv_threadRestoreArea[threadId][0];
+ }
+ else
+ {
+ pThreadLocation = (uint32_t *)&l_sprRegion->iv_coreRestoreArea[0];
+ }
+
+ if( ( SWIZZLE_4_BYTE(BLR_INST) == *(uint32_t*)pThreadLocation ) ||
+ ( SWIZZLE_4_BYTE(ATTN_OPCODE) == *(uint32_t*) pThreadLocation ) )
+ {
+            // table for given core id doesn't exist. It needs to be
+            // defined.
+ pSprEntryLocation = pThreadLocation;
+ }
+ else
+ {
+ // an SPR restore section for given core already exists
+ lookUpKey = genKeyForSprLookup( i_regId );
+ l_rc = lookUpSprInImage( (uint32_t*)pThreadLocation,
+ lookUpKey,
+ threadScopeReg,
+ &pSprEntryLocation );
+ }
+
+ if( l_rc )
+ {
+ MY_ERR("Invalid or corrupt SPR entry. CoreId 0x%08x threadId "
+ "0x%08x regId 0x%08x lookUpKey 0x%08x "
+ , coreId, threadId, i_regId, lookUpKey );
+ break;
+ }
+
+ l_rc = updateSprEntryInImage( (uint32_t*) pSprEntryLocation,
+ i_regId,
+ i_regData,
+ UPDATE_SPR_ENTRY );
+
+ if( l_rc )
+ {
+ MY_ERR( " Failed to update the SPR entry of PIR 0x%016lx reg"
+ "0x%08x", (uint64_t)i_pir, i_regId );
+ break;
+ }
+
+ }
+ while(0);
+
+ MY_INF("<< proc_stop_save_cpureg" );
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------------------------------
+
+StopReturnCode_t proc_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos )
+{
+
+ SmfSprRestoreRegion_t * l_pSelfSave = NULL;
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint32_t* l_pSaveStart = NULL;
+ Homerlayout_t * l_pHomer = NULL;
+ uint32_t l_threadPos = 0;
+ uint32_t l_sprBitPos = 0;
+ uint32_t l_sprIndexAdj = 0;
+
+ MY_INF(">> proc_stop_init_self_save" );
+
+ do
+ {
+ if( !i_pImage )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ if( i_corePos > MAX_CORE_ID_SUPPORTED )
+ {
+ l_rc = STOP_SAVE_ARG_INVALID_CORE;
+ break;
+ }
+
+ l_pHomer = ( Homerlayout_t* ) i_pImage;
+ l_pSelfSave =
+ ( SmfSprRestoreRegion_t *) &l_pHomer->iv_cpmrRegion.iv_selfRestoreRegion.iv_coreSelfRestore[0];
+
+ l_pSelfSave += i_corePos;
+
+ for( l_threadPos = 0; l_threadPos < MAX_THREADS_PER_CORE; l_threadPos++ )
+ {
+ l_pSaveStart =
+ (uint32_t*)&l_pSelfSave->iv_threadSaveArea[l_threadPos][0];
+
+ //Adding instruction 'mflr r30'
+ *l_pSaveStart = SWIZZLE_4_BYTE(MFLR_R30);
+ l_pSaveStart++;
+
+ for( l_sprBitPos = 0; l_sprBitPos <= MAX_SPR_BIT_POS; l_sprBitPos++ )
+ {
+ l_rc = getSprRegIndexAdjustment( l_sprBitPos, &l_sprIndexAdj );
+
+ if( STOP_SAVE_SPR_BIT_POS_RESERVE == l_rc )
+ {
+ //Failed to find SPR index adjustment
+ continue;
+ }
+
+ if( !g_sprRegister_p10[l_sprBitPos - l_sprIndexAdj].iv_isThreadScope )
+ {
+ continue;
+ }
+
+ //Initialize self save region with SPR save entry for each thread
+ //level SPR
+ l_rc = initSelfSaveEntry( l_pSaveStart,
+ g_sprRegister_p10[l_sprBitPos - l_sprIndexAdj].iv_saveMaskPos );
+
+ if( l_rc )
+ {
+ MY_ERR( "Failed to init thread self-save region for core %d thread %d",
+ i_corePos, l_threadPos );
+ break;
+ }
+
+ l_pSaveStart++;
+ l_pSaveStart++;
+ l_pSaveStart++;
+ }
+
+ }// for thread = 0;
+
+ if( l_rc )
+ {
+ //breakout if saw an error while init of thread SPR region
+ break;
+ }
+
+ l_pSaveStart =
+ (uint32_t*)&l_pSelfSave->iv_coreSaveArea[0];
+
+ *l_pSaveStart = SWIZZLE_4_BYTE(MFLR_R30);
+ l_pSaveStart++;
+
+ for( l_sprBitPos = 0; l_sprBitPos <= MAX_SPR_BIT_POS; l_sprBitPos++ )
+ {
+ l_rc = getSprRegIndexAdjustment( l_sprBitPos, &l_sprIndexAdj );
+
+ if( STOP_SAVE_SPR_BIT_POS_RESERVE == l_rc )
+ {
+ //Failed to find SPR index adjustment
+ continue;
+ }
+
+ if( g_sprRegister_p10[l_sprBitPos - l_sprIndexAdj].iv_isThreadScope )
+ {
+ continue;
+ }
+
+ //Initialize self save region with SPR save entry for each core
+ //level SPR
+ l_rc = initSelfSaveEntry( l_pSaveStart,
+ g_sprRegister_p10[l_sprBitPos - l_sprIndexAdj].iv_saveMaskPos );
+
+ if( l_rc )
+ {
+ MY_ERR( "Failed to init core self-save region for core %d thread %d",
+ i_corePos, l_threadPos );
+ break;
+ }
+
+ l_pSaveStart++;
+ l_pSaveStart++;
+ l_pSaveStart++;
+ }
+ }
+ while(0);
+
+ MY_INF("<< proc_stop_init_self_save" );
+
+ return l_rc;
+}
+
+//-----------------------------------------------------------------------------------------------------
+#ifdef __cplusplus
+} //namespace stopImageSection ends
+} //extern "C"
+#endif
diff --git a/libpore/p10_stop_api.H b/libpore/p10_stop_api.H
new file mode 100644
index 0000000..a70d2b2
--- /dev/null
+++ b/libpore/p10_stop_api.H
@@ -0,0 +1,238 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/utils/stopreg/p10_stop_api.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2015,2021 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __P10_STOP_IMAGE_API_
+#define __P10_STOP_IMAGE_API_
+
+#include <stdint.h>
+
+#ifdef __SKIBOOT__
+ #include <skiboot.h>
+#endif
+
+///
+/// @file p10_stop_api.H
+/// @brief describes STOP API which creates/manipulates STOP image.
+///
+// *HWP HW Owner : Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com>
+// *HWP Team : PM
+// *HWP Level : 2
+// *HWP Consumed by : HB:HYP
+
+#ifdef __cplusplus
+namespace stopImageSection
+{
+#endif
+
+/**
+ * @brief all SPRs and MSR for which register restore is to be supported.
+ * @note STOP API design has built-in support to accommodate 8 registers
+ * each of core and thread scope.
+ */
+typedef enum
+{
+ PROC_STOP_SPR_DAWR = 180, // thread register
+ PROC_STOP_SPR_CIABR = 187, // thread register
+ PROC_STOP_SPR_DAWRX = 188, // thread register
+ PROC_STOP_SPR_HSPRG0 = 304, // thread register
+ PROC_STOP_SPR_HRMOR = 313, // core register
+ PROC_STOP_SPR_LPCR = 318, // thread register
+ PROC_STOP_SPR_HMEER = 337, // core register
+ PROC_STOP_SPR_PTCR = 464, // core register
+ PROC_STOP_SPR_USPRG0 = 496, // thread register
+ PROC_STOP_SPR_USPRG1 = 497, // thread register
+ PROC_STOP_SPR_URMOR = 505, // core register
+ PROC_STOP_SPR_SMFCTRL = 511, // thread register
+ PROC_STOP_SPR_LDBAR = 850, // thread register
+ PROC_STOP_SPR_PSSCR = 855, // thread register
+ PROC_STOP_SPR_PMCR = 884, // core register
+ PROC_STOP_SPR_HID = 1008, // core register
+ PROC_STOP_SPR_MSR = 2000, // thread register
+
+} CpuReg_t;
+
+/**
+ * @brief lists all return codes of the STOP API.
+ */
+typedef enum
+{
+ STOP_SAVE_SUCCESS = 0,
+ STOP_SAVE_ARG_INVALID_IMG = 1,
+ STOP_SAVE_ARG_INVALID_REG = 2,
+ STOP_SAVE_ARG_INVALID_THREAD = 3,
+ STOP_SAVE_ARG_INVALID_MODE = 4,
+ STOP_SAVE_ARG_INVALID_CORE = 5,
+ STOP_SAVE_SPR_ENTRY_NOT_FOUND = 6,
+ STOP_SAVE_SPR_ENTRY_UPDATE_FAILED = 7,
+ STOP_SAVE_SCOM_INVALID_OPERATION = 8,
+ STOP_SAVE_SCOM_INVALID_SECTION = 9,
+ STOP_SAVE_SCOM_INVALID_ADDRESS = 10,
+ STOP_SAVE_SCOM_INVALID_CHIPLET = 11,
+ STOP_SAVE_SCOM_ENTRY_UPDATE_FAILED = 12,
+ STOP_SAVE_INVALID_FUSED_CORE_STATUS = 13,
+ STOP_SAVE_FAIL = 14, // for internal failure within firmware.
+ STOP_SAVE_SPR_ENTRY_MISSING = 15,
+ STOP_SAVE_MAX_ENTRY_REACHED = 16,
+ STOP_SAVE_SPR_BIT_POS_RESERVE = 17,
+} StopReturnCode_t;
+
+/**
+ * @brief summarizes all operations supported on scom entries of STOP image.
+ */
+typedef enum
+{
+ //enum members which are project agnostic
+ PROC_STOP_SCOM_OP_MIN = 0,
+ PROC_STOP_SCOM_APPEND = 1,
+ PROC_STOP_SCOM_REPLACE = 2,
+ PROC_STOP_SCOM_OR = 3,
+ PROC_STOP_SCOM_AND = 4,
+ PROC_STOP_SCOM_NOOP = 5,
+ PROC_STOP_SCOM_RESET = 6,
+ PROC_STOP_SCOM_OR_APPEND = 7,
+ PROC_STOP_SCOM_AND_APPEND = 8,
+ PROC_STOP_SCOM_OP_MAX = 9,
+
+} ScomOperation_t;
+
+/**
+ * @brief All subsections that contain scom entries in a STOP image.
+ */
+typedef enum
+{
+ PROC_STOP_SECTION_CORE = 1,
+ PROC_STOP_SECTION_L2 = 1,
+ PROC_STOP_SECTION_L3 = 2,
+ PROC_STOP_SECTION_CACHE = 2,
+} ScomSection_t;
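+// Note: PROC_STOP_SECTION_CORE/PROC_STOP_SECTION_L2 and PROC_STOP_SECTION_L3/
+// PROC_STOP_SECTION_CACHE are aliases that select the same two SCOM restore
+// sub-regions.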
+
+/**
+ * @brief versions relevant to the STOP API.
+ */
+typedef enum
+{
+ STOP_API_VER = 0x00,
+ STOP_API_VER_CONTROL = 0x02,
+} VersionList_t;
+
+/**
+ * @brief Summarizes bit position allocated to SPRs in save bit mask vector.
+ */
+typedef enum
+{
+ BIT_POS_CIABR = 0,
+ BIT_POS_DAWR = 1,
+ BIT_POS_DAWRX = 2,
+ BIT_POS_HSPRG0 = 3,
+ BIT_POS_LDBAR = 4,
+ BIT_POS_LPCR = 5,
+ BIT_POS_PSSCR = 6,
+ BIT_POS_MSR = 7,
+ BIT_POS_HID = 21,
+ BIT_POS_HMEER = 22,
+ BIT_POS_PMCR = 23,
+ BIT_POS_PTCR = 24,
+ BIT_POS_SMFCTRL = 28,
+ BIT_POS_USPRG0 = 29,
+ BIT_POS_USPRG1 = 30,
+} SprBitPositionList_t;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/**
+ * @brief creates SCOM restore entry for a given scom address in HOMER.
+ * @param i_pImage points to start address of HOMER image.
+ * @param i_scomAddress address associated with SCOM restore entry.
+ * @param i_scomData data associated with SCOM restore entry.
+ * @param i_operation operation type requested for API.
+ * @param i_section section of HOMER in which restore entry needs to be created.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for creating SCOM restore entry in HOMER. It is agnostic to
+ * generation of POWER processor.
+ */
+
+StopReturnCode_t proc_stop_save_scom( void* const i_pImage,
+ const uint32_t i_scomAddress,
+ const uint64_t i_scomData,
+ const ScomOperation_t i_operation,
+ const ScomSection_t i_section );
+
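+/*
+ * Minimal usage sketch (illustrative only; the HOMER pointer and the SCOM
+ * address/data values below are hypothetical placeholders):
+ *
+ *   void *homer = get_homer_base();        // hypothetical helper
+ *   StopReturnCode_t rc;
+ *   rc = proc_stop_save_scom( homer,
+ *                             0x20028400,             // hypothetical core SCOM address
+ *                             0x8000000000000000ULL,  // data to restore
+ *                             PROC_STOP_SCOM_APPEND,
+ *                             PROC_STOP_SECTION_CORE );
+ *   if( rc != STOP_SAVE_SUCCESS )
+ *   {
+ *       // handle the error
+ *   }
+ */
+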
+/**
+ * @brief initializes the core self-restore region of HOMER.
+ * @param[in] i_pImage points to base of HOMER image.
+ * @param[in] i_corePos position of the physical core.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for initializing self restore region in HOMER. It is agnostic to
+ * generation of POWER processor.
+ */
+StopReturnCode_t proc_stop_init_cpureg( void* const i_pImage, const uint32_t i_corePos );
+
+/**
+ * @brief enables self save for a given set of SPRs
+ * @param[in] i_pImage points to start address of HOMER image.
+ * @param[in] i_pir PIR value associated with core and thread.
+ * @param[in] i_saveRegVector bit vector representing the SPRs that need to be self saved.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for enabling self save of SPRs and it is agnostic to
+ * generation of POWER processor.
+ */
+StopReturnCode_t proc_stop_save_cpureg_control( void* i_pImage,
+ const uint64_t i_pir,
+ const uint32_t i_saveRegVector );
+
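+/*
+ * Minimal usage sketch (illustrative only; 'homer' and 'pir' are hypothetical
+ * caller-provided values). The save vector is built with bit 0 as the most
+ * significant bit, matching SprBitPositionList_t:
+ *
+ *   uint32_t vec = ( 0x80000000u >> BIT_POS_LPCR ) |
+ *                  ( 0x80000000u >> BIT_POS_PSSCR );
+ *   StopReturnCode_t rc = proc_stop_save_cpureg_control( homer, pir, vec );
+ */
+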
+/**
+ * @brief creates an SPR restore entry in HOMER
+ * @param[in] i_pImage points to start address of HOMER image.
+ * @param[in] i_regId SPR number to be saved in HOMER
+ * @param[in] i_regData SPR data to be saved in HOMER
+ * @param[in] i_pir PIR value associated with core and thread.
+ * @return STOP_SAVE_SUCCESS if API succeeds, error code otherwise.
+ * @note It is an API for creating an SPR restore entry in HOMER and it is
+ * agnostic to generation of POWER processor.
+ */
+StopReturnCode_t proc_stop_save_cpureg( void* const i_pImage,
+ const CpuReg_t i_regId,
+ const uint64_t i_regData,
+ const uint64_t i_pir );
+
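+/*
+ * Minimal usage sketch (illustrative only; 'homer', 'pir' and 'lpcr_val' are
+ * hypothetical caller-provided values):
+ *
+ *   StopReturnCode_t rc = proc_stop_save_cpureg( homer,
+ *                                                PROC_STOP_SPR_LPCR,
+ *                                                lpcr_val,
+ *                                                pir );
+ */
+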
+/**
+ * @brief initializes self-save region with specific instruction.
+ * @param[in] i_pImage points to start address of HOMER image.
+ * @param[in] i_corePos physical core's relative position within processor chip.
+ * @return STOP_SAVE_SUCCESS if self-save is initialized successfully,
+ * error code otherwise.
+ * @note API is project agnostic and is intended only for use case of HOMER build.
+ * There is no explicit effort to support any other use case.
+ */
+StopReturnCode_t proc_stop_init_self_save( void* const i_pImage, const uint32_t i_corePos );
+
+#ifdef __cplusplus
+} // extern "C"
+}; // namespace stopImageSection ends
+#endif //__cplusplus
+
+#endif //__P10_STOP_IMAGE_API_
diff --git a/libpore/p10_stop_data_struct.H b/libpore/p10_stop_data_struct.H
new file mode 100644
index 0000000..3a16fcd
--- /dev/null
+++ b/libpore/p10_stop_data_struct.H
@@ -0,0 +1,162 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: chips/p10/procedures/utils/stopreg/p10_stop_data_struct.H $ */
+/* */
+/* IBM CONFIDENTIAL */
+/* */
+/* EKB Project */
+/* */
+/* COPYRIGHT 2015,2020 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* The source code for this program is not published or otherwise */
+/* divested of its trade secrets, irrespective of what has been */
+/* deposited with the U.S. Copyright Office. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+///
+/// @file p10_stop_data_struct.H
+/// @brief describes data structures internal to STOP API.
+///
+// *HWP HW Owner : Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com>
+// *HWP Team : PM
+// *HWP Level : 2
+// *HWP Consumed by : HB:HYP
+#ifndef __STOP_DATA_STRUCT_
+#define __STOP_DATA_STRUCT_
+
+#include "p10_hcd_memmap_base.H"
+
+#ifdef __SKIBOOT__
+ #include <skiboot.h>
+#endif
+
+#ifdef __FAPI_2_
+ #include <fapi2.H>
+#endif
+
+#ifdef PPC_HYP
+
+ #define STATIC
+
+#else
+
+ #define STATIC static
+
+#endif
+
+
+#ifdef __DEBUG_
+ #include<stdio.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+namespace stopImageSection
+{
+#endif
+
+/**
+ * @brief Misc constants pertaining to instruction opcodes.
+ */
+enum
+{
+ MAX_SPR_RESTORE_INST = 0x08,
+ SIZE_PER_SPR_RESTORE_INST = ((4 * sizeof(uint8_t)) / sizeof(uint32_t)),
+ MAX_THREAD_LEVEL_SPRS = 11,
+ MAX_CORE_LEVEL_SPRS = 6,
+ MAX_SPR_BIT_POS = 30,
+ SPR_BIT_POS_8 = 8,
+ SPR_BIT_POS_20 = 20,
+ SPR_BIT_POS_25 = 25,
+ SPR_BIT_POS_27 = 27,
+};
+
+/**
+ * @brief various operations supported on SPR restore entry.
+ */
+enum SprEntryUpdateMode
+{
+ INIT_SPR_REGION = 0x01,
+ UPDATE_SPR_ENTRY = 0x02,
+};
+
+/**
+ * @brief models an individual SCOM restore entry.
+ */
+typedef struct
+{
+ uint32_t iv_scomAddress;
+ uint64_t iv_scomData;
+} __attribute__((packed)) ScomEntry_t;
+
+/**
+ * @brief describes details pertaining to SCOM entry
+ */
+typedef struct
+{
+ uint32_t iv_subRegionBaseOffset;
+ uint32_t iv_subRegionLength;
+ uint8_t iv_slotFound;
+ uint8_t iv_lastEntryOffset;
+ uint16_t iv_entryOffset;
+ uint8_t iv_entryMatchOffset;
+ uint8_t iv_matchFound;
+ uint8_t iv_entryLimit;
+ uint8_t iv_reserved;
+} ScomEntryDat_t;
+
+/**
+ * @brief summarizes attributes associated with a SPR register.
+ */
+typedef struct
+{
+ uint32_t iv_sprId;
+ bool iv_isThreadScope;
+ uint32_t iv_saveMaskPos;
+} StopSprReg_t;
+
+/**
+ * @brief Misc constants.
+ */
+enum
+{
+ SIZE_SCOM_ENTRY = sizeof( ScomEntry_t ),
+ SCOM_ENTRY_START = 0xDEADDEAD,
+ BAD_SAVE_MASK = 0x007FF000,
+ MAX_SPR_INDEX = 31,
+ TEST_BIT_PATTERN = 0x80000000,
+ EP_SELECT_MASK = 0x000F0000,
+ CORE_REGION_MASK = 0x0000F000,
+ SCOM_ENTRY_VALID = 0x80000000,
+ LAST_SCOM_ENTRY = 0x40000000,
+ SWIZZLE_LAST_SCOM_ENTRY = 0x00000040,
+ SCOM_ADDR_MASK = 0x0000FFFF,
+ SCOM_ADDR_CHIPLET_MASK = 0x000FFFFF,
+ SCOM_ENTRY_VER = 0x10000000, //Ver 1.0
+ CORE_SECTION_ID_CODE = 0x00000000, //Core Section Id 0
+ L3_SECTION_ID_CODE = 0x03000000, //L3 Section Id 3 b4:b7
+ MAX_SCOM_ENTRY_POS = 0x10,
+ MIN_SUPERCHIPLET_ID = 0x20,
+
+};
+
+#ifdef __DEBUG_
+ #define MY_ERR( _fmt_, _args_...) printf( "\n"); printf( _fmt_, ##_args_)
+ #define MY_INF(_fmt_, _args_...) printf( "\n"); printf( _fmt_, ##_args_)
+#else
+ #define MY_ERR( _fmt_, _args_...)
+ #define MY_INF(_fmt_, _args_...)
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+
+} //namespace stopImageSection ends
+#endif //__cplusplus
+
+#endif
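Note that ScomEntry_t is packed so the in-image layout is exactly a 4-byte SCOM address followed by 8 bytes of data, with no padding. A quick compile-time check of that assumption (illustrative only, not part of this patch; assumes the STOP API headers are on the include path):

    /* Illustrative sketch, not part of this patch: verify the packed SCOM
     * restore entry stays 12 bytes and matches the SIZE_SCOM_ENTRY constant. */
    #include <stdint.h>
    #include "p10_stop_data_struct.H"

    using namespace stopImageSection;

    static_assert( sizeof( ScomEntry_t ) == 12,
                   "packed SCOM entry must be 4-byte address + 8-byte data" );
    static_assert( SIZE_SCOM_ENTRY == sizeof( ScomEntry_t ),
                   "SIZE_SCOM_ENTRY must track the structure size" );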
diff --git a/libpore/p10_stop_util.C b/libpore/p10_stop_util.C
new file mode 100644
index 0000000..ba3ec15
--- /dev/null
+++ b/libpore/p10_stop_util.C
@@ -0,0 +1,190 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: chips/p10/procedures/utils/stopreg/p10_stop_util.C $ */
+/* */
+/* IBM CONFIDENTIAL */
+/* */
+/* EKB Project */
+/* */
+/* COPYRIGHT 2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* The source code for this program is not published or otherwise */
+/* divested of its trade secrets, irrespective of what has been */
+/* deposited with the U.S. Copyright Office. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+///
+/// @file p10_stop_util.C
+/// @brief implements some utility functions for the STOP API.
+///
+// *HWP HW Owner : Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com>
+// *HWP Team : PM
+// *HWP Level : 2
+// *HWP Consumed by : HB:HYP
+#ifdef PPC_HYP
+ #include <HvPlicModule.H>
+#endif
+
+#include "p10_stop_api.H"
+#include "p10_stop_util.H"
+#include "p10_stop_data_struct.H"
+#include "p10_hcd_memmap_base.H"
+#include "p10_hcode_image_defines.H"
+#include "stddef.h"
+
+#ifdef __cplusplus
+using namespace hcodeImageBuild;
+namespace stopImageSection
+{
+#endif
+
+//-----------------------------------------------------------------------
+
+/**
+ * @brief Returns proc chip's fuse mode status.
+ * @param i_pImage points to start of chip's HOMER image.
+ * @param o_fusedMode points to fuse mode information.
+ * @return STOP_SAVE_SUCCESS if the function succeeds, error code otherwise.
+ */
+STATIC StopReturnCode_t isFusedMode( void* const i_pImage, bool* o_fusedMode )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+ uint64_t l_cpmrCheckWord = 0;
+ uint32_t* l_pMagic = NULL;
+ CpmrHeader_t* l_pCpmr = NULL;
+ *o_fusedMode = false;
+
+ do
+ {
+
+ if( !i_pImage )
+ {
+ MY_ERR( "invalid pointer to HOMER image");
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ l_pMagic = (uint32_t*)( (uint8_t*)i_pImage + CPMR_HOMER_OFFSET + 8 );
+ l_cpmrCheckWord = SWIZZLE_4_BYTE( *l_pMagic );
+
+ if( CPMR_REGION_CHECK_WORD != l_cpmrCheckWord )
+ {
+ MY_ERR("corrupt or invalid HOMER image location 0x%016lx",
+ l_cpmrCheckWord );
+ l_rc = STOP_SAVE_ARG_INVALID_IMG;
+ break;
+ }
+
+ l_pCpmr = (CpmrHeader_t*)( (uint8_t*)i_pImage + CPMR_HOMER_OFFSET );
+
+ if( (uint8_t) FUSED_CORE_MODE == l_pCpmr->iv_fusedMode )
+ {
+ *o_fusedMode = true;
+ break;
+ }
+
+ if( (uint8_t) NONFUSED_CORE_MODE == l_pCpmr->iv_fusedMode )
+ {
+ break;
+ }
+
+ MY_ERR("Unexpected value 0x%08x for fused mode. Bad or corrupt "
+ "HOMER location", l_pCpmr->iv_fusedMode );
+ l_rc = STOP_SAVE_INVALID_FUSED_CORE_STATUS ;
+
+ }
+ while(0);
+
+ return l_rc;
+}
+
+//----------------------------------------------------------------------
+
+StopReturnCode_t getCoreAndThread_p10( void* const i_pImage, const uint64_t i_pir,
+ uint32_t* o_pCoreId, uint32_t* o_pThreadId )
+{
+ StopReturnCode_t l_rc = STOP_SAVE_SUCCESS;
+
+ do
+ {
+ // For SPR restore, use the 'Virtual Thread' and 'Physical Core' number.
+ // In Fused Mode:
+ //   bits b28 and b31 of the PIR give the physical core, while b29 and b30
+ //   give the virtual thread id.
+ // In Non-Fused Mode:
+ //   bits b28 and b29 of the PIR give both the logical and physical core
+ //   number, whereas b30 and b31 give the logical and virtual thread id.
+ bool fusedMode = false;
+ uint8_t coreThreadInfo = (uint8_t)i_pir;
+ *o_pCoreId = 0;
+ *o_pThreadId = 0;
+ l_rc = isFusedMode( i_pImage, &fusedMode );
+
+ if( l_rc )
+ {
+ MY_ERR(" Checking Fused mode. Read failed 0x%08x", l_rc );
+ break;
+ }
+
+ if( fusedMode )
+ {
+ if( coreThreadInfo & FUSED_CORE_BIT1 )
+ {
+ *o_pThreadId = 2;
+ }
+
+ if( coreThreadInfo & FUSED_CORE_BIT2 )
+ {
+ *o_pThreadId += 1;
+ }
+
+ if( coreThreadInfo & FUSED_CORE_BIT0 )
+ {
+ *o_pCoreId = 2;
+ }
+
+ if( coreThreadInfo & FUSED_CORE_BIT3 )
+ {
+ *o_pCoreId += 1;
+ }
+ }
+ else
+ {
+ if( coreThreadInfo & FUSED_CORE_BIT0 )
+ {
+ *o_pCoreId = 2;
+ }
+
+ if ( coreThreadInfo & FUSED_CORE_BIT1 )
+ {
+ *o_pCoreId += 1;
+ }
+
+ if( coreThreadInfo & FUSED_CORE_BIT2 )
+ {
+ *o_pThreadId = 2;
+ }
+
+ if( coreThreadInfo & FUSED_CORE_BIT3 )
+ {
+ *o_pThreadId += 1;
+ }
+ }
+
+ MY_INF("Core Type %s", fusedMode ? "Fused" : "Un-Fused" );
+ //quad field is not affected by fuse mode
+ *o_pCoreId += 4 * (( coreThreadInfo & 0x70 ) >> 4 );
+ }
+ while(0);
+
+ return l_rc;
+}
+
+#ifdef __cplusplus
+}//namespace stopImageSection ends
+#endif
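As a worked example of the PIR decode above (illustrative only, not part of this patch; homer_base is assumed to point at a valid HOMER image for a chip running in non-fused mode):

    /* Illustrative sketch, not part of this patch. PIR low byte 0x27 is
     * binary 0010 0111: quad bits b25:b27 = 2, b29 set, b30 and b31 set.
     * In non-fused mode this decodes to core id 4*2 + 1 = 9, thread id 3. */
    #include "p10_stop_api.H"
    #include "p10_stop_util.H"

    using namespace stopImageSection;

    void example_pir_decode( void* homer_base )
    {
        uint32_t core = 0, thread = 0;

        StopReturnCode_t rc = getCoreAndThread_p10( homer_base, 0x27,
                                                    &core, &thread );

        (void) rc;   /* expect core == 9 and thread == 3 on success */
    }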
diff --git a/libpore/p10_stop_util.H b/libpore/p10_stop_util.H
new file mode 100644
index 0000000..7836dbc
--- /dev/null
+++ b/libpore/p10_stop_util.H
@@ -0,0 +1,123 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/import/chips/p10/procedures/hwp/lib/p10_stop_util.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* Contributors Listed Below - COPYRIGHT 2016,2019 */
+/* [+] International Business Machines Corp. */
+/* */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __P10_STOP_UTIL_
+#define __P10_STOP_UTIL_
+
+#include <stdint.h>
+
+#ifdef _AIX
+ #define __BYTE_ORDER __BIG_ENDIAN
+#elif __SKIBOOT__
+ #include <skiboot.h>
+#else
+ #include <endian.h>
+#endif
+
+#ifndef __PPE_PLAT
+ #include "p10_stop_api.H"
+#endif
+
+#ifdef FAPI_2
+ #include <fapi2.H>
+#endif
+
+///
+/// @file p10_stop_util.H
+/// @brief describes some utility functions for the STOP API.
+///
+// *HWP HW Owner : Greg Still <stillgs@us.ibm.com>
+// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com>
+// *HWP Team : PM
+// *HWP Level : 2
+// *HWP Consumed by : HB:HYP
+#ifndef __PPE_PLAT
+#ifdef __cplusplus
+namespace stopImageSection
+{
+#endif
+#endif //__PPE_PLAT
+/**
+ * @brief helper macros to swizzle given input data.
+ * @note swizzles bytes to handle endianness.
+ */
+#if( __BYTE_ORDER == __BIG_ENDIAN )
+
+// NOP if it is a big endian system
+#define SWIZZLE_2_BYTE(WORD) WORD
+#define SWIZZLE_4_BYTE(WORD) WORD
+#define SWIZZLE_8_BYTE(WORD) WORD
+
+#else
+#define SWIZZLE_2_BYTE(WORD) \
+ ( (((WORD) >> 8) & 0x00FF) | (((WORD) << 8) & 0xFF00) )
+
+#define SWIZZLE_4_BYTE(WORD) \
+ ( { uint64_t l_tmp64 = WORD; \
+ (uint32_t)( (((l_tmp64) >> 24) & 0x000000FF) | (((l_tmp64) >> 8) & 0x0000FF00) | \
+ (((l_tmp64) << 8) & 0x00FF0000) | (((l_tmp64) << 24) & 0xFF000000) ) ;\
+ })
+
+#define SWIZZLE_8_BYTE(WORD) \
+ ( (((WORD) >> 56) & 0x00000000000000FF) | \
+ (((WORD) >> 40) & 0x000000000000FF00)| \
+ (((WORD) >> 24) & 0x0000000000FF0000) | \
+ (((WORD) >> 8) & 0x00000000FF000000) | \
+ (((WORD) << 8) & 0x000000FF00000000) | \
+ (((WORD) << 24) & 0x0000FF0000000000) | \
+ (((WORD) << 40) & 0x00FF000000000000) | \
+ (((WORD) << 56) & 0xFF00000000000000) )
+#endif
+
+/**
+ * @brief enumerates bit(s) positions of interest for PIR.
+ */
+enum
+{
+ FUSED_CORE_BIT0 = 0x08,
+ FUSED_CORE_BIT1 = 0x04,
+ FUSED_CORE_BIT2 = 0x02,
+ FUSED_CORE_BIT3 = 0x01,
+ QUAD_BITS = 0x70,
+};
+
+#ifndef __PPE_PLAT
+/**
+ * @brief returns core id and thread id by parsing a given PIR.
+ * @param i_pStopImage points to STOP image associated with a proc chip.
+ * @param i_pir PIR associated with a core's thread.
+ * @param o_coreId points to core id value obtained from PIR.
+ * @param o_threadId points to thread id value obtained from PIR.
+ * @return STOP_SAVE_SUCCESS if the function succeeds, error code otherwise.
+ */
+StopReturnCode_t getCoreAndThread_p10( void* const i_pStopImage,
+ const uint64_t i_pir,
+ uint32_t* o_coreId,
+ uint32_t* o_threadId );
+#ifdef __cplusplus
+} // namespace stopImageSection ends
+
+#endif
+#endif //__PPE_PLAT
+#endif
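Finally, the SWIZZLE macros above are what the API uses to read big-endian HOMER content on a little-endian host (they compile to no-ops on big-endian builds). A minimal usage sketch (illustrative only, not part of this patch; check_word and its parameters are hypothetical):

    /* Illustrative sketch, not part of this patch: convert a 32-bit
     * big-endian word read from a HOMER image to host order and compare
     * it against an expected host-order value. */
    #include <stdint.h>
    #include "p10_stop_util.H"

    static int check_word( const uint32_t* be_word, uint32_t expected )
    {
        uint32_t host_word = SWIZZLE_4_BYTE( *be_word );

        return ( host_word == expected ) ? 0 : -1;
    }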