author | Akshay Adiga <akshay.adiga@linux.vnet.ibm.com> | 2017-09-15 17:50:55 +0530
committer | Stewart Smith <stewart@linux.vnet.ibm.com> | 2017-09-19 04:16:53 -0500
commit | d7631e5685185554b43c2199165f5b3a4076e0c3 (patch)
tree | a38a62821d2e883e6b0cc899d6fc76b360cc8f6c /libpore
parent | eeb4d3226d4eda49ca171d442e49ceccbcaf634e (diff)
download | skiboot-d7631e5685185554b43c2199165f5b3a4076e0c3.zip skiboot-d7631e5685185554b43c2199165f5b3a4076e0c3.tar.gz skiboot-d7631e5685185554b43c2199165f5b3a4076e0c3.tar.bz2
SLW: Add support for p9_stop_api
The p9_stop_api calls are used to set SPR state on a core wakeup from a
deeper low power state. p9_stop_api uses low-level platform firmware and
self-restore microcode to restore the SPRs to the requested values.
Code is taken from:
https://github.com/open-power/hostboot/tree/master/src/import/chips/p9/procedures/utils/stopreg
Signed-off-by: Akshay Adiga <akshay.adiga@linux.vnet.ibm.com>
Reviewed-by: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
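For orientation, a minimal caller-side sketch of the two entry points this patch adds (declared in p9_stop_api.H below). The HOMER image pointer, PIR value, SPR value and SCOM address/data here are placeholders, not values taken from the patch:

```c
#include "p9_stop_api.H"

/* Hypothetical inputs: homer_base would point at the chip's HOMER image,
 * pir is the target thread's PIR, lpcr_val is the value to restore. */
static int restore_example(void *homer_base, uint64_t pir, uint64_t lpcr_val)
{
	StopReturnCode_t rc;

	/* Have the self-restore code reload LPCR when the thread wakes
	 * from a deep stop state. */
	rc = p9_stop_save_cpureg(homer_base, P9_STOP_SPR_LPCR, lpcr_val, pir);
	if (rc != STOP_SAVE_SUCCESS)
		return -1;

	/* Queue a SCOM restore for a quad (EQ) register; address and data
	 * are illustrative only. */
	rc = p9_stop_save_scom(homer_base, 0x10010800 /* example address */,
			       0x8000000000000000ULL /* example data */,
			       P9_STOP_SCOM_APPEND, P9_STOP_SECTION_EQ_SCOM);

	return rc == STOP_SAVE_SUCCESS ? 0 : -1;
}
```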
Diffstat (limited to 'libpore')
-rw-r--r-- | libpore/Makefile.inc | 2
-rw-r--r-- | libpore/p9_cpu_reg_restore_instruction.H | 76
-rw-r--r-- | libpore/p9_hcd_header_defs.H | 152
-rw-r--r-- | libpore/p9_hcd_memmap_base.H | 522
-rw-r--r-- | libpore/p9_stop_api.C | 1028
-rw-r--r-- | libpore/p9_stop_api.H | 163
-rw-r--r-- | libpore/p9_stop_data_struct.H | 149
-rw-r--r-- | libpore/p9_stop_util.C | 186
-rw-r--r-- | libpore/p9_stop_util.H | 145
9 files changed, 2422 insertions, 1 deletions
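The SPR restore buffers that p9_stop_save_cpureg() writes live at fixed offsets inside HOMER, per the HomerSection_t layout in p9_stop_data_struct.H and the constants in p9_hcd_memmap_base.H below (2 MB OCC/SGPE area, 8 KB self-restore interrupt handler, 256 B thread launcher, then 1 KB thread area + 1 KB core area per thread). A small sketch, assuming exactly those constants, of how the byte offset for a given core/thread falls out:

```c
#include <stdint.h>
#include <stdio.h>

/* Constants mirrored from p9_hcd_memmap_base.H in this patch. */
#define TWO_MB				(2u * 1024 * 1024)
#define SELF_RESTORE_INT_SIZE		(8u * 1024)
#define THREAD_LAUNCHER_SIZE		256u
#define CORE_RESTORE_THREAD_AREA_SIZE	1024u
#define CORE_RESTORE_CORE_AREA_SIZE	1024u
#define MAX_THREADS_PER_CORE		4u

/* Byte offset, from the start of HOMER, of the SPR restore area for a
 * given core/thread, following the HomerSection_t struct layout. */
static uint32_t spr_restore_offset(uint32_t core, uint32_t thread)
{
	uint32_t per_thread = CORE_RESTORE_THREAD_AREA_SIZE +
			      CORE_RESTORE_CORE_AREA_SIZE;	/* 2 KB each */

	return TWO_MB + SELF_RESTORE_INT_SIZE + THREAD_LAUNCHER_SIZE +
	       (core * MAX_THREADS_PER_CORE + thread) * per_thread;
}

int main(void)
{
	/* e.g. core 4, thread 1: 0x200000 + 0x2000 + 0x100 + 17 * 0x800 */
	printf("0x%x\n", spr_restore_offset(4, 1));
	return 0;
}
```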
diff --git a/libpore/Makefile.inc b/libpore/Makefile.inc index 2eac595..cc89127 100644 --- a/libpore/Makefile.inc +++ b/libpore/Makefile.inc @@ -1,4 +1,4 @@ -LIBPORE_SRCS = p8_pore_table_gen_api_fixed.C +LIBPORE_SRCS = p8_pore_table_gen_api_fixed.C p9_stop_api.C p9_stop_util.C LIBPORE_SRCS += p8_pore_table_static_data.c sbe_xip_image.c pore_inline_assembler.c LIBPORE_OBJS_1 = $(LIBPORE_SRCS:%.c=%.o) LIBPORE_OBJS = $(LIBPORE_OBJS_1:%.C=%.o) diff --git a/libpore/p9_cpu_reg_restore_instruction.H b/libpore/p9_cpu_reg_restore_instruction.H new file mode 100644 index 0000000..e5689ae --- /dev/null +++ b/libpore/p9_cpu_reg_restore_instruction.H @@ -0,0 +1,76 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/utils/stopreg/p9_cpu_reg_restore_instruction.H $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/// +/// @file p9_cpu_reg_restore_instruction.H +/// @brief enumerates all the opcodes used for SPR restoration. +/// +// *HWP HW Owner : Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com> +// *HWP Team : PM +// *HWP Level : 2 +// *HWP Consumed by : HB:HYP + +#ifndef __REG_RESTORE_INSTRUCTION_H +#define __REG_RESTORE_INSTRUCTION_H + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { + +namespace stopImageSection +{ +#endif + +/** + * @brief enumerates opcodes for few instructions. + */ +enum +{ + ORI_OPCODE = 24, + RFI_OPCODE = 19, + RFI_CONST = 50, + ORIS_OPCODE = 25, + OPCODE_31 = 31, + XOR_CONST = 316, + RLDICR_OPCODE = 30, + RLDICR_CONST = 1, + MTSPR_CONST1 = 467, + MTMSRD_CONST1 = 178, + MR_R0_TO_R10 = 0x7c0a0378, //mr r10, r0 + MR_R0_TO_R21 = 0x7c150378, //mr r21, r0 + BLR_INST = 0x4e800020, + MTSPR_BASE_OPCODE = 0x7c0003a6, + ATTN_OPCODE = 0x00000200, +}; + +#ifdef __cplusplus +} // namespace stopImageSection ends + +} // extern "C" +#endif //__cplusplus + +#endif //__REG_RESTORE_INSTRUCTION_H diff --git a/libpore/p9_hcd_header_defs.H b/libpore/p9_hcd_header_defs.H new file mode 100644 index 0000000..49c814a --- /dev/null +++ b/libpore/p9_hcd_header_defs.H @@ -0,0 +1,152 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/hwp/lib/p9_hcd_header_defs.H $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2016,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. 
*/ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/// +/// @file p9_hcd_header_defs.H +/// @brief defines header constants based on file types +/// +/// This header contains those cpp manifest constants required for processing +/// the linker scripts used to generate OCC code images. As these are used +/// by linker scripts as well as by C++ code, these cannot be solely be put +/// into a namespace. Prefixing these with the region name is the attempt +/// to make these globally unique when this header is included in C++ code. +/// +// *HWP HWP Owner: David Du <daviddu@us.ibm.com> +// *HWP Backup HWP Owner: Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner: Prem Jha <premjha2@in.ibm.com> +// *HWP Team: PM +// *HWP Level: 2 +// *HWP Consumed by: PM +// + +#ifndef __HCD_HEADER_DEFS_H__ +#define __HCD_HEADER_DEFS_H__ + +/// Macros for generating an Hcode header section +/// +/// The CPP macros HCD_HDR_UINTxx generate equivalent code depending on +/// whether they are being called from assembler (where they actually +/// create the header section data) or from C (where they specifiy a +/// C-structure form of the contents of the header section. +/// +/// In assembler each invocation also creates space in the header section + +#ifdef __ASSEMBLER__ + +// *INDENT-OFF* + .macro hcd_header_uint64, symbol:req, value = 0 + .global \symbol +\symbol\(): + .quad (\value) + .endm + + .macro hcd_header_uint32, symbol:req, value = 0 + .global \symbol + \symbol\(): + .long (\value) + .endm + + .macro hcd_header_uint16, symbol:req, value = 0 + .global \symbol +\symbol\(): + .short (\value) + .endm + + .macro hcd_header_uint8, symbol:req, value = 0 + .global \symbol +\symbol\(): + .byte (\value) + .endm + + .macro hcd_header_uint8_vec, symbol:req, number:req, value = 0 + .global \symbol +\symbol\(): + .rept (\number) + .byte (\value) + .endr + .endm + + .macro hcd_header_attn, symbol:req, number = 1 + .global \symbol +\symbol\(): + .rept (\number) + .long 0x00000200 + .endr + .endm + + .macro hcd_header_attn_pad, align:req + .balignl (\align), 0x00000200 + .endm + + .macro hcd_header_pad, align:req + .balignl (\align), 0 + .endm +// *INDENT-ON* + +#define ULL(x) x +#define HCD_CONST(name, expr) .set name, expr; +#define HCD_CONST64(name, expr) .set name, expr; + +#define HCD_HDR_UINT64(symbol, value) hcd_header_uint64 symbol value +#define HCD_HDR_UINT32(symbol, value) hcd_header_uint32 symbol value +#define HCD_HDR_UINT16(symbol, value) hcd_header_uint16 symbol value +#define HCD_HDR_UINT8(symbol, value) hcd_header_uint8 symbol value +#define HCD_HDR_UINT8_VEC(symbol, number, value) hcd_header_uint8_vec symbol number value +#define HCD_HDR_ATTN(symbol, number) hcd_header_attn symbol number +#define HCD_HDR_ATTN_PAD(align) hcd_header_attn_pad align +#define HCD_HDR_PAD(align) hcd_header_pad align + +#else // NOT __ASSEMBLER__ + +#ifdef __LINKERSCRIPT__ + + #define ULL(x) x + #define POUND_DEFINE #define + #define HCD_CONST(name, expr) POUND_DEFINE name expr + #define HCD_CONST64(name, expr) POUND_DEFINE name expr + +#else + + #define ULL(x) x##ull + #define HCD_CONST(name, expr) 
enum { name = expr }; + #define HCD_CONST64(name, expr) enum { name = expr }; + + #define HCD_HDR_UINT64(symbol, value) uint64_t symbol + #define HCD_HDR_UINT32(symbol, value) uint32_t symbol + #define HCD_HDR_UINT16(symbol, value) uint16_t symbol + #define HCD_HDR_UINT8(symbol, value) uint8_t symbol + #define HCD_HDR_UINT8_VEC(symbol, number, value) uint8_t symbol[number] + #define HCD_HDR_ATTN(symbol, number) uint32_t symbol[number] + #define HCD_HDR_ATTN_PAD(align) + #define HCD_HDR_PAD(align) + +#endif // __LINKERSCRIPT__ +#endif // __ASSEMBLER__ + +// Stringification + +#define STR_HELPER(x) #x +#define STR(x) STR_HELPER(x) + +#endif // __HCD_HEADER_DEFS_H__ diff --git a/libpore/p9_hcd_memmap_base.H b/libpore/p9_hcd_memmap_base.H new file mode 100644 index 0000000..6e3b54d --- /dev/null +++ b/libpore/p9_hcd_memmap_base.H @@ -0,0 +1,522 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/hwp/lib/p9_hcd_memmap_base.H $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +/// +/// @file p9_hcd_memmap_base.H +/// @brief defines region constants shared by different memory components. 
+/// + +// *HWP HWP Owner: David Du <daviddu@us.ibm.com> +// *HWP Backup HWP Owner: Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner: Prem S Jha <premjha2@in.ibm.com> +// *HWP Team: PM +// *HWP Level: 2 +// *HWP Consumed by: PM:Hostboot:Phyp + +#ifndef __HCD_MEMMAP_BASE_H__ +#define __HCD_MEMMAP_BASE_H__ + +#include <p9_hcd_header_defs.H> + +// ------------------------------------------------------------------- +// Note: There can be NO semicolons(";") at end of macros in this file +// There can ONLY have HCD_CONST/HCD_CONST64 macros in this file +// ------------------------------------------------------------------- + +/// Image Magic Numbers + +HCD_CONST64(CPMR_MAGIC_NUMBER, ULL(0x43504d525f312e30)) // CPMR_1.0 +HCD_CONST64(CME_MAGIC_NUMBER , ULL(0x434d455f5f312e30)) // CME__1.0 + +HCD_CONST64(QPMR_MAGIC_NUMBER, ULL(0x51504d525f312e30)) // QPMR_1.0 +HCD_CONST64(SGPE_MAGIC_NUMBER, ULL(0x534750455f312e30)) // SGPE_1.0 + +HCD_CONST64(PPMR_MAGIC_NUMBER, ULL(0x50504d525f312e30)) // PPMR_1.0 +HCD_CONST64(PGPE_MAGIC_NUMBER, ULL(0x504750455F312E30)) // PGPE_1.0 + +HCD_CONST(CME_BUILD_VERSION, 0x001) // CME__1.0 +HCD_CONST(SGPE_BUILD_VERSION, 0x001) // SGPE_1.0 +HCD_CONST(PGPE_BUILD_VERSION, 0x001) // PGPE_1.0 + +HCD_CONST64(CPMR_MAGIC_NUMBER_BASE, ULL(0x43504d525f302e30)) // CPMR_0.0 +HCD_CONST(CPMR_REGION_CHECK_WORD, (0x43504d52)) // CPMR +HCD_CONST64(CME_MAGIC_NUMBER_BASE , ULL(0x434d455f5f302e30)) // CME__0.0 +HCD_CONST64(QPMR_MAGIC_NUMBER_BASE, ULL(0x51504d525f302e30)) // QPMR_0.0 +HCD_CONST64(SGPE_MAGIC_NUMBER_BASE, ULL(0x534750455f302e30)) // SGPE_0.0 +HCD_CONST64(PPMR_MAGIC_NUMBER_BASE, ULL(0x50504d525f302e30)) // PPMR_0.0 +HCD_CONST64(PGPE_MAGIC_NUMBER_BASE, ULL(0x504750455F302E30)) // PGPE_0.0 + +/// Size constants + +HCD_CONST(HALF_KB, 512) +HCD_CONST(ONE_KB, 1024) +HCD_CONST(HALF_MB, (1024 * 512)) +HCD_CONST(ONE_MB, (1024 * 1024)) +HCD_CONST(TWO_MB, (2 * 1024 * 1024)) + +/// Memory constants + +HCD_CONST(CME_SRAM_SIZE, (32 * ONE_KB)) +HCD_CONST(OCC_SRAM_SIZE, (768 * ONE_KB)) + +HCD_CONST(HOMER_MEMORY_SIZE, (4 * ONE_MB)) +HCD_CONST(HOMER_OPMR_REGION_NUM, 0) +HCD_CONST(HOMER_QPMR_REGION_NUM, 1) +HCD_CONST(HOMER_CPMR_REGION_NUM, 2) +HCD_CONST(HOMER_PPMR_REGION_NUM, 3) + +/// Chip constants + +HCD_CONST(MAX_THREADS_PER_CORE, 4) +HCD_CONST(MAX_CORES_PER_CHIP, 24) + +HCD_CONST(MAX_CMES_PER_CHIP, 12) +HCD_CONST(MAX_EXES_PER_CHIP, 12) + +HCD_CONST(MAX_QUADS_PER_CHIP, 6) +HCD_CONST(MAX_CACHES_PER_CHIP, 6) + +HCD_CONST(MAX_CORES_PER_CME, 2) +HCD_CONST(MAX_CORES_PER_EX, 2) + +HCD_CONST(MAX_CMES_PER_QUAD, 2) +HCD_CONST(MAX_EXES_PER_QUAD, 2) + +HCD_CONST(CACHE0_CHIPLET_ID, 0x10) +HCD_CONST(CACHE_CHIPLET_ID_MIN, 0x10) +HCD_CONST(CACHE_CHIPLET_ID_MAX, 0x15) + +HCD_CONST(CORE0_CHIPLET_ID, 0x20) +HCD_CONST(CORE_CHIPLET_ID_MIN, 0x20) +HCD_CONST(CORE_CHIPLET_ID_MAX, 0x37) + +HCD_CONST(MAX_QUAD_ID_SUPPORTED, 5) +HCD_CONST(MAX_CORE_ID_SUPPORTED, 23) +HCD_CONST(MAX_THREAD_ID_SUPPORTED, 3) + +/// Image build constants + +HCD_CONST(HARDWARE_IMG_SIZE, ONE_MB) + +HCD_CONST(FUSED_CORE_MODE, 0xBB) +HCD_CONST(NONFUSED_CORE_MODE, 0xAA) + +HCD_CONST(SELF_RESTORE_BLR_INST, 0x4e800020) +HCD_CONST(CORE_RESTORE_PAD_OPCODE, 0x00000200) //ATTN Opcode + +HCD_CONST(SCOM_RESTORE_PAD_OPCODE, 0x00000000) //zero pads +HCD_CONST(SCOM_RESTORE_ENTRY_SIZE, 16) //4B pad,4B address,8B data + +HCD_CONST(CME_BLOCK_READ_LEN, 32) +HCD_CONST(CME_BLK_SIZE_SHIFT, 0x05) + +HCD_CONST(RING_ALIGN_BOUNDARY, 0x08) +HCD_CONST64(DARN_BAR_EN_POS, ULL(0x8000000000000000)) + 
+//--------------------------------------------------------------------------------------- + +/// OPMR + +HCD_CONST(OCC_HOST_AREA_SIZE, ONE_MB) +HCD_CONST(OPMR_OCC_IMAGE_SIZE, HALF_MB) +HCD_CONST(OPMR_HOST_AREA_SIZE, HALF_MB) + +//--------------------------------------------------------------------------------------- + +/// QPMR Header + +HCD_CONST(QPMR_HOMER_OFFSET, (HOMER_QPMR_REGION_NUM* ONE_MB)) +HCD_CONST(QPMR_HEADER_SIZE, 512) + +HCD_CONST(QPMR_MAGIC_NUMBER_BYTE, 0x00) +HCD_CONST(QPMR_BOOT_COPIER_OFFSET_BYTE, 0x08) +HCD_CONST(QPMR_BOOT_LOADER_OFFSET_BYTE, 0x10) +HCD_CONST(QPMR_BOOT_LOADER_LENGTH_BYTE, 0x14) +HCD_CONST(QPMR_BUILD_DATE_BYTE, 0x18) +HCD_CONST(QPMR_BUILD_VER_BYTE, 0x1C) +HCD_CONST(QPMR_SGPE_HCODE_OFFSET_BYTE, 0x28) +HCD_CONST(QPMR_SGPE_HCODE_LENGTH_BYTE, 0x2C) +HCD_CONST(QPMR_QUAD_COMMON_RINGS_OFFSET_BYTE, 0x30) +HCD_CONST(QPMR_QUAD_COMMON_RINGS_LENGTH_BYTE, 0x34) +HCD_CONST(QPMR_QUAD_OVERRIDE_RINGS_OFFSET_BYTE, 0x38) +HCD_CONST(QPMR_QUAD_OVERRIDE_RINGS_LENGTH_BYTE, 0x3C) +HCD_CONST(QPMR_QUAD_SPECIFIC_RINGS_OFFSET_BYTE, 0x40) +HCD_CONST(QPMR_QUAD_SPECIFIC_RINGS_LENGTH_BYTE, 0x44) +HCD_CONST(QPMR_QUAD_SCOM_RESTORE_OFFSET_BYTE, 0x48) +HCD_CONST(QPMR_QUAD_SCOM_RESTORE_LENGTH_BYTE, 0x4C) +HCD_CONST(QPMR_AUX_DATA_OFFSET_BYTE, 0x50) +HCD_CONST(QPMR_AUX_DATA_LENGTH_BYTE, 0x54) +HCD_CONST(QPMR_STOP_FFDC_OFFSET_BYTE, 0x58) +HCD_CONST(QPMR_STOP_FFDC_LENGTH_BYTE, 0x5C) +HCD_CONST(QPMR_SGPE_BOOT_PROG_CODE, 0x60) +HCD_CONST(QPMR_SGPE_IMAGE_SIZE, 0x64) + +/// SGPE Boot + +HCD_CONST(SGPE_BOOT_COPIER_QPMR_OFFSET, QPMR_HEADER_SIZE) +HCD_CONST(SGPE_BOOT_COPIER_SIZE, ONE_KB) + +HCD_CONST(SGPE_BOOT_LOADER_QPMR_OFFSET, + (SGPE_BOOT_COPIER_QPMR_OFFSET + SGPE_BOOT_COPIER_SIZE)) +HCD_CONST(SGPE_BOOT_LOADER_SIZE, ONE_KB) +HCD_CONST(SGPE_BOOT_LOADER_RESET_ADDR_VAL, 0x40) + +HCD_CONST(SGPE_INSTRUMENTATION_SIZE, (2 * ONE_KB)) + +/// SGPE Image + +HCD_CONST(SGPE_IMAGE_QPMR_OFFSET, + (SGPE_BOOT_LOADER_QPMR_OFFSET + SGPE_BOOT_LOADER_SIZE)) +HCD_CONST(SGPE_IMAGE_SIZE, (80 * ONE_KB)) //RTC158543 +HCD_CONST(SGPE_INT_VECTOR_SIZE, 384) +HCD_CONST(SGPE_HCODE_RESET_ADDR_VAL, 0x40) + +/// SGPE Header + +HCD_CONST(SGPE_HEADER_QPMR_OFFSET, + (SGPE_IMAGE_QPMR_OFFSET + SGPE_INT_VECTOR_SIZE)) +HCD_CONST(SGPE_HEADER_IMAGE_OFFSET, SGPE_INT_VECTOR_SIZE) +HCD_CONST(SGPE_HEADER_SIZE, 128) + +HCD_CONST(SGPE_MAGIC_NUMBER_BYTE, 0x00) +HCD_CONST(SGPE_SYSTEM_RESET_ADDR_BYTE, 0x08) +HCD_CONST(SGPE_IVPR_ADDR_BYTE, 0x10) +HCD_CONST(SGPE_BUILD_DATE_BYTE, 0x18) +HCD_CONST(SGPE_BUILD_VER_BYTE, 0x1C) +HCD_CONST(SGPE_STOP_FLAGS_BYTE, 0x20) +HCD_CONST(SGPE_LOCATION_ID_BYTE, 0x24) +HCD_CONST(SGPE_QUAD_COMMON_RING_SRAM_OFF_BYTE, 0x28) +HCD_CONST(SGPE_QUAD_OVERRIDE_RING_SRAM_OFF_BYTE, 0x2C) +HCD_CONST(SGPE_QUAD_SPECIFIC_RING_SRAM_OFF_BYTE, 0x30) +HCD_CONST(SGPE_QUAD_SCOM_RESTORE_SRAM_OFF_BYTE, 0x34) +HCD_CONST(SGPE_QUAD_SCOM_RESTORE_MEM_OFF_BYTE, 0x38) +HCD_CONST(SGPE_QUAD_SCOM_RESTORE_LENGTH_BYTE, 0x3C) +HCD_CONST(SGPE_AUX_DATA_OFFSET_BYTE, 0x40) +HCD_CONST(SGPE_AUX_DATA_LENGTH_BYTE, 0x44) +HCD_CONST(SGPE_AUX_CTRL_BYTE, 0x48) +HCD_CONST(SGPE_CHTM_MEM_CFG_BYTE, 0x50) + +HCD_CONST(SGPE_RESET_ADDR_IMAGE_OFFSET, (SGPE_HEADER_IMAGE_OFFSET + SGPE_SYSTEM_RESET_ADDR_BYTE)) +HCD_CONST(SGPE_BUILD_DATE_IMAGE_OFFSET, (SGPE_HEADER_IMAGE_OFFSET + SGPE_BUILD_DATE_BYTE)) +HCD_CONST(SGPE_BUILD_VER_IMAGE_OFFSET, (SGPE_HEADER_IMAGE_OFFSET + SGPE_BUILD_VER_BYTE)) + +HCD_CONST(SGPE_STOP_4_TO_2_BIT_POS, 0x80000000) +HCD_CONST(SGPE_STOP_5_TO_4_BIT_POS, 0x40000000) +HCD_CONST(SGPE_STOP_8_TO_5_BIT_POS, 0x20000000) +HCD_CONST(SGPE_STOP_11_TO_8_BIT_POS, 
0x10000000) +HCD_CONST(SGPE_ENABLE_CME_TRACE_ARRAY_BIT_POS, 0x08000000) +HCD_CONST(SGPE_VDM_ENABLE_BIT_POS, 0x04000000) +HCD_CONST(SGPE_ENABLE_CHTM_TRACE_CME_BIT_POS, 0x02000000) +HCD_CONST(SGPE_PROC_FAB_PUMP_MODE_BIT_POS, 0x00004000) +HCD_CONST(SGPE_CACHE_SKEWADJ_DISABLE_BIT_POS, 0x00002000) +HCD_CONST(SGPE_CACHE_DCADJ_DISABLE_BIT_POS, 0x00001000) + +///24x7 +HCD_CONST(QPMR_AUX_OFFSET, (512 * ONE_KB)) +HCD_CONST(QPMR_AUX_LENGTH, (64 * ONE_KB)) +/// SGPE Hcode + +HCD_CONST(SGPE_HCODE_IMAGE_OFFSET, (SGPE_INT_VECTOR_SIZE + SGPE_HEADER_SIZE)) +HCD_CONST(SGPE_HCODE_SIZE, ((45 * ONE_KB) + HALF_KB)) //RTC158543 +HCD_CONST(SGPE_DEBUG_PTRS_OFFSET, 0x200) +HCD_CONST(SGPE_DEBUG_PTRS_SIZE, 0x24) +HCD_CONST(SGPE_DBG_PTR_AREA_SIZE, 64) + +/// Quad Scan + +// 400B * 9 rings * 3 types (base, RL, CC) +HCD_CONST(QUAD_COMMON_RING_SIZE, (13 * ONE_KB)) +// 300B * 9 rings +HCD_CONST(QUAD_OVERRIDE_RING_SIZE, (3 * ONE_KB)) +// 1KB/ring * 5 rings/cache +HCD_CONST(QUAD_SPECIFIC_RING_SIZE_PER_QUAD, ((3 * ONE_KB) + HALF_KB)) +HCD_CONST(QUAD_SPECIFIC_RING_SIZE_TOTAL, (19 * ONE_KB)) //checkme? + +/// Quad Scom + +HCD_CONST(QUAD_SCOM_RESTORE_QPMR_OFFSET, (128 * ONE_KB)) +HCD_CONST(QUAD_SCOM_RESTORE_HOMER_OFFSET, + (QUAD_SCOM_RESTORE_QPMR_OFFSET + QPMR_HOMER_OFFSET)) + +HCD_CONST(MAX_L2_SCOM_ENTRIES, 16) +HCD_CONST(MAX_L3_SCOM_ENTRIES, 16) +HCD_CONST(MAX_EQ_SCOM_ENTRIES, 15) +HCD_CONST(QUAD_SCOM_RESTORE_REGS_PER_QUAD, + (MAX_EQ_SCOM_ENTRIES + MAX_L2_SCOM_ENTRIES + MAX_L3_SCOM_ENTRIES + 1)) + +HCD_CONST(QUAD_SCOM_RESTORE_SIZE_PER_QUAD, + (SCOM_RESTORE_ENTRY_SIZE* QUAD_SCOM_RESTORE_REGS_PER_QUAD)) + +HCD_CONST(QUAD_SCOM_RESTORE_SIZE_TOTAL, (6 * ONE_KB)) //rounded to 6KB + +//--------------------------------------------------------------------------------------- + +/// CPMR Header + +HCD_CONST(CPMR_HOMER_OFFSET, (HOMER_CPMR_REGION_NUM* ONE_MB)) +HCD_CONST(CPMR_HEADER_SIZE, 256) + +HCD_CONST(CPMR_ATTN_WORD0_BYTE, 0x00) +HCD_CONST(CPMR_ATTN_WORD1_BYTE, 0x04) +HCD_CONST(CPMR_MAGIC_NUMBER_BYTE, 0x08) +HCD_CONST(CPMR_BUILD_DATE_BYTE, 0x10) +HCD_CONST(CPMR_BUILD_VER_BYTE, 0x14) +HCD_CONST(CPMR_CME_HCODE_OFFSET_BYTE, 0x20) +HCD_CONST(CPMR_CME_HCODE_LENGTH_BYTE, 0x24) +HCD_CONST(CPMR_CORE_COMMON_RING_OFFSET_BYTE, 0x28) +HCD_CONST(CPMR_CORE_COMMON_RING_LENGTH_BYTE, 0x2C) +HCD_CONST(CPMR_CME_LOCAL_PSTATE_OFFSET_BYTE, 0x30) +HCD_CONST(CPMR_CME_LOCAL_PSTATE_LENGTH_BYTE, 0x34) +HCD_CONST(CPMR_CORE_SPECIFIC_RING_OFFSET_BYTE, 0x38) +HCD_CONST(CPMR_CORE_SPECIFIC_RING_LENGTH_BYTE, 0x3C) +HCD_CONST(CPMR_CORE_SCOM_RESTORE_OFFSET_BYTE, 0x40) +HCD_CONST(CPMR_CORE_SCOM_RESTORE_LENGTH_BYTE, 0x44) +HCD_CONST(CPMR_SELF_RESTORE_OFFSET_BYTE, 0x48) +HCD_CONST(CPMR_SELF_RESTORE_LENGTH_BYTE, 0x4C) + +/// Self Restore + +HCD_CONST(SELF_RESTORE_CPMR_OFFSET, CPMR_HEADER_SIZE) +HCD_CONST(SELF_RESTORE_INT_SIZE, (8 * ONE_KB)) +HCD_CONST(THREAD_LAUNCHER_SIZE, 256) +HCD_CONST(SELF_RESTORE_CODE_SIZE, + (SELF_RESTORE_INT_SIZE + THREAD_LAUNCHER_SIZE)) + +HCD_CONST(CORE_RESTORE_THREAD_AREA_SIZE, (ONE_KB)) +HCD_CONST(CORE_RESTORE_CORE_AREA_SIZE, (ONE_KB)) +HCD_CONST(CORE_RESTORE_SIZE_PER_THREAD, + (CORE_RESTORE_THREAD_AREA_SIZE + CORE_RESTORE_CORE_AREA_SIZE)) +HCD_CONST(SELF_RESTORE_CORE_REGS_SIZE, + (CORE_RESTORE_SIZE_PER_THREAD* + MAX_THREADS_PER_CORE* MAX_CORES_PER_CHIP)) + +HCD_CONST(SELF_RESTORE_SIZE_TOTAL, + (SELF_RESTORE_CODE_SIZE + SELF_RESTORE_CORE_REGS_SIZE)) + + +/// Core Scom + +HCD_CONST(CORE_SCOM_RESTORE_CPMR_OFFSET, (256 * ONE_KB)) +HCD_CONST(CORE_SCOM_RESTORE_HOMER_OFFSET, + (CORE_SCOM_RESTORE_CPMR_OFFSET + CPMR_HOMER_OFFSET)) + 
+HCD_CONST(MAX_CORE_SCOM_ENTRIES, 15) +HCD_CONST(CORE_SCOM_RESTORE_REGS_PER_CORE, (MAX_CORE_SCOM_ENTRIES + 1)) + +HCD_CONST(CORE_SCOM_RESTORE_SIZE_PER_CORE, + (SCOM_RESTORE_ENTRY_SIZE* CORE_SCOM_RESTORE_REGS_PER_CORE)) // 16*16=256 +HCD_CONST(CORE_SCOM_RESTORE_SIZE_PER_CME, + (CORE_SCOM_RESTORE_SIZE_PER_CORE* MAX_CORES_PER_CME)) // 256*2=512 + +HCD_CONST(CORE_SCOM_RESTORE_SIZE_TOTAL, + (CORE_SCOM_RESTORE_SIZE_PER_CME* MAX_CMES_PER_CHIP)) // 512*12=6K + +/// CME Image + +HCD_CONST(CME_IMAGE_CPMR_OFFSET, + (CORE_SCOM_RESTORE_CPMR_OFFSET + CORE_SCOM_RESTORE_SIZE_TOTAL)) +//HCD_CONST(CME_IMAGE_SIZE, 0) +HCD_CONST(CME_INT_VECTOR_SIZE, 384) + +/// CME Header + +HCD_CONST(CME_HEADER_CPMR_OFFSET, + (CME_IMAGE_CPMR_OFFSET + CME_INT_VECTOR_SIZE)) +HCD_CONST(CME_HEADER_IMAGE_OFFSET, CME_INT_VECTOR_SIZE) +HCD_CONST(CME_HEADER_SIZE, 128) + +HCD_CONST(CME_MAGIC_NUMBER_BYTE, 0x00) +HCD_CONST(CME_HCODE_OFFSET_BYTE, 0x08) +HCD_CONST(CME_HCODE_LENGTH_BYTE, 0x0C) +HCD_CONST(CME_CORE_COMMON_RING_OFFSET_BYTE, 0x10) +HCD_CONST(CME_CORE_OVERRIDE_RING_OFFSET_BYTE, 0x14) +HCD_CONST(CME_CORE_COMMON_RING_LENGTH_BYTE, 0x18) +HCD_CONST(CME_LOCAL_PSTATE_OFFSET_BYTE, 0x1C) +HCD_CONST(CME_LOCAL_PSTATE_LENGTH_BYTE, 0x20) +HCD_CONST(CME_CORE_SPECIFIC_RING_OFFSET_BYTE, 0x24) +HCD_CONST(CME_CORE_SPECIFIC_RING_LENGTH_BYTE, 0x28) +HCD_CONST(CME_CORE_SCOM_RESTORE_OFFSET_BYTE, 0x2C) +HCD_CONST(CME_CORE_SCOM_RESTORE_LENGTH_BYTE, 0x30) +HCD_CONST(CME_STOP_FLAGS_BYTE, 0x34) +HCD_CONST(CME_LOCATION_ID_BYTE, 0x38) +HCD_CONST(CME_QM_FLAGS_BYTE, 0x3A) +HCD_CONST(CME_HOMER_ADDRESS_BYTE, 0x40) + +HCD_CONST(CME_HCODE_OFF_IMAGE_OFFSET, (CME_HEADER_IMAGE_OFFSET + CME_HCODE_OFFSET_BYTE)) +HCD_CONST(CME_HCODE_LEN_IMAGE_OFFSET, (CME_HEADER_IMAGE_OFFSET + CME_HCODE_LENGTH_BYTE)) + +HCD_CONST(CME_STOP_3_TO_2_BIT_POS, 0x80000000) +HCD_CONST(CME_STOP_4_TO_2_BIT_POS, 0x40000000) +HCD_CONST(CME_STOP_5_TO_4_BIT_POS, 0x20000000) +HCD_CONST(CME_STOP_8_TO_5_BIT_POS, 0x10000000) +HCD_CONST(CME_STOP_11_TO_8_BIT_POS, 0x08000000) +HCD_CONST(CME_VDM_ENABLE_BIT_POS, 0x04000000) +HCD_CONST(CME_STOP_MAPPING_DISABLE_BIT_POS, 0x00000004) +HCD_CONST(CME_QUEUED_SCAN_DISABLE_BIT_POS, 0x00000002) +HCD_CONST(CME_SKIP_CORE_POWEROFF_BIT_POS, 0x00000001) +HCD_CONST(CME_QM_FLAG_RESCLK_ENABLE, 0x8000) +HCD_CONST(CME_QM_FLAG_SYS_IVRM_ENABLE, 0x4000) +HCD_CONST(CME_QM_FLAG_SYS_VDM_ENABLE, 0x2000) +HCD_CONST(CME_QM_FLAG_SYS_WOF_ENABLE, 0x1000) +HCD_CONST(CME_QM_FLAG_SYS_DYN_FMIN_ENABLE, 0x0800) +HCD_CONST(CME_QM_FLAG_SYS_DYN_FMAX_ENABLE, 0x0400) +HCD_CONST(CME_QM_FLAG_SYS_JUMP_PROTECT, 0x0200) + +/// CME Hcode + +HCD_CONST(CME_HCODE_IMAGE_OFFSET, (CME_INT_VECTOR_SIZE + CME_HEADER_SIZE)) +//HCD_CONST(CME_HCODE_SIZE, 0) +HCD_CONST(CME_DEBUG_PTRS_OFFSET, 0x200) +HCD_CONST(CME_DEBUG_PTRS_SIZE, 0x24) +HCD_CONST(CME_INSTRUMENTATION_SIZE, HALF_KB) +HCD_CONST(CME_SRAM_HCODE_OFFSET, 0) + +/// Core Scan + +HCD_CONST(CORE_COMMON_RING_SIZE, (2 * ONE_KB)) +HCD_CONST(CORE_OVERRIDE_RING_SIZE, (1 * ONE_KB)) +HCD_CONST(CORE_SPECIFIC_RING_SIZE_PER_CORE, (1 * ONE_KB)) +HCD_CONST(CORE_SPECIFIC_RING_SIZE_TOTAL, (32 * ONE_KB)) // rounded to 32K + +/// Quad P-State + +HCD_CONST(CME_QUAD_PSTATE_SIZE, HALF_KB) + +// CME Hcode + Core Scan + Pstate + +HCD_CONST(CME_REGION_SIZE, (64 * ONE_KB)) + +// Debug + +HCD_CONST(CPMR_TRACE_REGION_OFFSET, (512 * ONE_KB)) +HCD_CONST(CME_TRACE_REGION_SIZE, (16 * ONE_KB)) +HCD_CONST(CPMR_TRACE_REGION_SIZE, (CME_TRACE_REGION_SIZE* MAX_CMES_PER_CHIP)) // 192K +HCD_CONST(CPMR_DEBUG_REGION_OFFSET, CPMR_TRACE_REGION_OFFSET + CPMR_TRACE_REGION_SIZE) 
+HCD_CONST(CPMR_DEBUG_REGION_SIZE, (64 * ONE_KB)) // 192K + 64K = 256K + + + +//--------------------------------------------------------------------------------------- + +/// PPMR Header + +HCD_CONST(PPMR_HOMER_OFFSET, (HOMER_PPMR_REGION_NUM* ONE_MB)) +HCD_CONST(PPMR_HEADER_SIZE, 512) + +HCD_CONST(PPMR_MAGIC_NUMBER_BYTE, 0x00) +HCD_CONST(PPMR_BOOT_COPIER_OFFSET_BYTE, 0x08) +HCD_CONST(PPMR_BOOT_LOADER_OFFSET_BYTE, 0x10) +HCD_CONST(PPMR_BOOT_LOADER_LENGTH_BYTE, 0x14) +HCD_CONST(PPMR_BUILD_DATE_BYTE, 0x18) +HCD_CONST(PPMR_BUILD_VER_BYTE, 0x1C) +HCD_CONST(PPMR_PGPE_HCODE_OFFSET_BYTE, 0x28) +HCD_CONST(PPMR_PGPE_HCODE_LENGTH_BYTE, 0x2C) +HCD_CONST(PPMR_GLOBAL_PSTATE_OFFSET_BYTE, 0x30) +HCD_CONST(PPMR_GLOBAL_PSTATE_LENGTH_BYTE, 0x34) +HCD_CONST(PPMR_LOCAL_PSTATE_OFFSET_BYTE, 0x38) +HCD_CONST(PPMR_LOCAL_PSTATE_LENGTH_BYTE, 0x3C) +HCD_CONST(PPMR_OCC_PSTATE_OFFSET_BYTE, 0x40) +HCD_CONST(PPMR_OCC_PSTATE_LENGTH_BYTE, 0x44) +HCD_CONST(PPMR_PSTATE_TABLE_OFFSET_BYTE, 0x48) +HCD_CONST(PPMR_PSTATE_TABLE_LENGTH_BYTE, 0x4C) +HCD_CONST(PPMR_PGPE_SRAM_IMAGE_SIZE_BYTE, 0x50) +HCD_CONST(PPMR_PGPE_BOOT_PROG_CODE_BYTE, 0x54) + +/// PGPE Boot + +HCD_CONST(PGPE_BOOT_COPIER_PPMR_OFFSET, PPMR_HEADER_SIZE) +HCD_CONST(PGPE_BOOT_COPIER_SIZE, ONE_KB) + +HCD_CONST(PGPE_BOOT_LOADER_PPMR_OFFSET, + (PGPE_BOOT_COPIER_PPMR_OFFSET + PGPE_BOOT_COPIER_SIZE)) +HCD_CONST(PGPE_BOOT_LOADER_SIZE, ONE_KB) +HCD_CONST(PGPE_BOOT_LOADER_RESET_ADDR_VAL, 0x40) + +HCD_CONST(PGPE_INSTRUMENTATION_SIZE, (2 * ONE_KB)) + +/// PGPE Image + +HCD_CONST(PGPE_AUX_TASK_SIZE, (2 * ONE_KB)) +HCD_CONST(PGPE_IMAGE_PPMR_OFFSET, + (PGPE_BOOT_LOADER_PPMR_OFFSET + PGPE_BOOT_LOADER_SIZE)) +HCD_CONST(PGPE_IMAGE_SIZE, (48 * ONE_KB)) //RTC158543 +HCD_CONST(PGPE_INT_VECTOR_SIZE, 384) +HCD_CONST(PGPE_HCODE_RESET_ADDR_VAL, 0x40) + +/// PGPE Header + +HCD_CONST(PGPE_HEADER_IMAGE_OFFSET, PGPE_INT_VECTOR_SIZE) +HCD_CONST(PGPE_HEADER_PPMR_OFFSET, + (PGPE_IMAGE_PPMR_OFFSET + PGPE_INT_VECTOR_SIZE)) +HCD_CONST(PGPE_HEADER_SIZE, 128) + +HCD_CONST(PGPE_MAGIC_NUMBER_BYTE, 0x00) +HCD_CONST(PGPE_SYSTEM_RESET_ADDR_BYTE, 0x08) +HCD_CONST(PGPE_SHARED_SRAM_ADDR_BYTE, 0x0C) +HCD_CONST(PGPE_IVPR_ADDR_BYTE, 0x10) +HCD_CONST(PGPE_SHARED_SRAM_LENGTH_BYTE, 0x14) +HCD_CONST(PGPE_BUILD_DATE_BYTE, 0x18) +HCD_CONST(PGPE_BUILD_VER_BYTE, 0x1C) +HCD_CONST(PGPE_PGPE_FLAGS_BYTE, 0x20) +HCD_CONST(PGPE_GLOBAL_PSTATE_SRAM_ADDR_BYTE, 0x28) +HCD_CONST(PGPE_GLOBAL_PSTATE_MEM_OFFSET_BYTE, 0x30) +HCD_CONST(PGPE_GLOBAL_PSTATE_PPB_SIZE_BYTE, 0x34) +HCD_CONST(PGPE_GEN_PSTATE_TABLE_MEM_OFFSET_BYTE, 0x38) +HCD_CONST(PGPE_GEN_PSTATE_TABLE_SIZE_BYTE, 0x3C) +HCD_CONST(PGPE_OCC_PSTATE_TABLE_MEM_OFFSET_BYTE, 0x40) +HCD_CONST(PGPE_OCC_PSTATE_TABLE_SIZE_BYTE, 0x44) +HCD_CONST(PGPE_BEACON_ADDR_BYTE, 0x48) +HCD_CONST(PGPE_ACTUAL_QUAD_STATUS_ADDR_BYTE, 0x4C) +HCD_CONST(PGPE_WOF_TABLE_ADDR_BYTE, 0x50) +HCD_CONST(PGPE_WOF_TABLE_LENGTH_BYTE, 0x54) + +HCD_CONST(PGPE_RESET_ADDR_IMAGE_OFFSET, (PGPE_HEADER_IMAGE_OFFSET + PGPE_SYSTEM_RESET_ADDR_BYTE)) +HCD_CONST(PGPE_BUILD_DATE_IMAGE_OFFSET, (PGPE_HEADER_IMAGE_OFFSET + PGPE_BUILD_DATE_BYTE)) +HCD_CONST(PGPE_BUILD_VER_IMAGE_OFFSET, (PGPE_HEADER_IMAGE_OFFSET + PGPE_BUILD_VER_BYTE)) + +/// PGPE Hcode + +//HCD_CONST(PGPE_HCODE_SIZE, (32 * ONE_KB)) //RTC158543 +HCD_CONST(PGPE_DBG_PTR_AREA_SIZE, 64) +HCD_CONST(PGPE_GLOBAL_PSTATE_PARAM_BLOCK_SIZE, (4 * ONE_KB)) + +/// Pstate Parameter Block + Pstate Table + +HCD_CONST(OCC_PSTATE_PARAM_BLOCK_PPMR_OFFSET, (128 * ONE_KB)) +HCD_CONST(OCC_PSTATE_PARAM_BLOCK_SIZE, (8 * ONE_KB)) +HCD_CONST(OCC_PSTATE_PARAM_BLOCK_REGION_SIZE, (16 * ONE_KB)) + 
+HCD_CONST(PGPE_PSTATE_OUTPUT_TABLES_PPMR_OFFSET, (144 * ONE_KB)) +HCD_CONST(PGPE_PSTATE_OUTPUT_TABLES_SIZE, (8 * ONE_KB)) +HCD_CONST(PGPE_PSTATE_OUTPUT_TABLES_REGION_SIZE, (16 * ONE_KB)) + +HCD_CONST(OCC_WOF_TABLES_PPMR_OFFSET, (768 * ONE_KB)) +HCD_CONST(OCC_WOF_TABLES_SIZE, (256 * ONE_KB)) + +HCD_CONST(WOF_TABLE_RESERVE, + OCC_WOF_TABLES_PPMR_OFFSET - (PGPE_PSTATE_OUTPUT_TABLES_PPMR_OFFSET + PGPE_PSTATE_OUTPUT_TABLES_REGION_SIZE)) +HCD_CONST(PGPE_IMAGE_RESERVE_SIZE, + (OCC_PSTATE_PARAM_BLOCK_PPMR_OFFSET - PGPE_IMAGE_PPMR_OFFSET - PGPE_IMAGE_SIZE - PGPE_AUX_TASK_SIZE)) + +#endif /* __HCD_MEMMAP_BASE_H__ */ diff --git a/libpore/p9_stop_api.C b/libpore/p9_stop_api.C new file mode 100644 index 0000000..26a14bb --- /dev/null +++ b/libpore/p9_stop_api.C @@ -0,0 +1,1028 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/utils/stopreg/p9_stop_api.C $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/// +/// @file p9_stop_api.C +/// @brief implements STOP API which create/manipulate STOP image. +/// +// *HWP HW Owner : Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com> +// *HWP Team : PM +// *HWP Level : 2 +// *HWP Consumed by : HB:HYP + +#include "p9_stop_api.H" +#include "p9_cpu_reg_restore_instruction.H" +#include "p9_stop_data_struct.H" +#include <string.h> +#include "p9_stop_util.H" +#include <stdio.h> + +#ifdef __FAPI_2_ + #include <fapi2.H> +#endif + +#ifdef __cplusplus +extern "C" { + +namespace stopImageSection +{ +#endif +// a true in the table below means register is of scope thread +// whereas a false meanse register is of scope core. + +const StopSprReg_t g_sprRegister[] = +{ + { P9_STOP_SPR_HSPRG0, true }, + { P9_STOP_SPR_HRMOR, false }, + { P9_STOP_SPR_LPCR, true }, + { P9_STOP_SPR_HMEER, false }, + { P9_STOP_SPR_LDBAR, true }, + { P9_STOP_SPR_PSSCR, true }, + { P9_STOP_SPR_PMCR, false }, + { P9_STOP_SPR_HID, false }, + { P9_STOP_SPR_MSR, true }, + { P9_STOP_SPR_DAWR, true }, +}; + +const uint32_t MAX_SPR_SUPPORTED = + sizeof ( g_sprRegister ) / sizeof( StopSprReg_t ); + +//----------------------------------------------------------------------------- + +/** + * @brief validates input arguments provided by STOP API caller. + * @param[in] i_pImage pointer to beginning of chip's HOMER image. + * @param[in] i_regId SPR register id + * @param[in] i_coreId core id + * @param[in|out] i_pThreadId points to thread id + * @param[in|out] i_pThreadLevelReg points to scope information of SPR + * @return STOP_SAVE_SUCCESS if arguments found valid, error code otherwise. + * @note for register of scope core, function shall force io_threadId to + * zero. 
+ */ +static StopReturnCode_t validateSprImageInputs( void* const i_pImage, + const CpuReg_t i_regId, + const uint32_t i_coreId, + uint32_t* i_pThreadId, + bool* i_pThreadLevelReg ) +{ + uint32_t index = 0; + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + bool sprSupported = false; + *i_pThreadLevelReg = false; + + do + { + if( NULL == i_pImage ) + { + // Error: HOMER image start location is not valid + // Cannot proceed further. So, let us exit. + l_rc = STOP_SAVE_ARG_INVALID_IMG; + MY_ERR( "invalid image location " ); + + break; + } + + // STOP API manages STOP image based on physical core Id. PIR value + // is interpreted to calculate the physical core number and virtual + // thread number. + if( MAX_CORE_ID_SUPPORTED < i_coreId ) + { + // Error: invalid core number. given core number exceeds maximum + // cores supported by chip. + + // Physical core number is calculated based on following formula: + // core id = 4 * quad id (0..5) + core no within quad ( 0..3) + l_rc = STOP_SAVE_ARG_INVALID_CORE; + MY_ERR( "invalid core id " ); + break; + } + + if( MAX_THREAD_ID_SUPPORTED < *i_pThreadId ) + { + //Error: invalid core thread. Given core thread exceeds maximum + //threads supported in a core. + + // 64 bit PIR value is interpreted to calculate virtual thread + // Id. In fuse mode, b61 and b62 gives virtual thread id whereas in + // non fuse mode, b62 and b63 is read to determine the same. + + l_rc = STOP_SAVE_ARG_INVALID_THREAD; + MY_ERR( "invalid thread " ); + break; + } + + for( index = 0; index < MAX_SPR_SUPPORTED; ++index ) + { + if( i_regId == (CpuReg_t )g_sprRegister[index].sprId ) + { + // given register is in the list of register supported + sprSupported = true; + *i_pThreadLevelReg = g_sprRegister[index].isThreadScope; + *i_pThreadId = *i_pThreadLevelReg ? *i_pThreadId : 0; + break; + } + } + + if( !sprSupported ) + { + // Following SPRs are supported + // trace out all registers supported + MY_ERR("Register not supported" ); + // error code to caller. + l_rc = STOP_SAVE_ARG_INVALID_REG; + break; + } + + } + while(0); + + if( l_rc ) + { + MY_ERR( "image 0x%08x, regId %08d, coreId %d, " + "threadId %d return code 0x%08x", i_pImage, i_regId, + i_coreId, *i_pThreadId, l_rc ); + } + + return l_rc; +} + +//----------------------------------------------------------------------------- + +/** + * @brief generates ori instruction code. + * @param[in] i_Rs Source register number + * @param[in] i_Ra destination register number + * @param[in] i_data 16 bit immediate data + * @return returns 32 bit number representing ori instruction. + */ +static uint32_t getOriInstruction( const uint16_t i_Rs, const uint16_t i_Ra, + const uint16_t i_data ) +{ + uint32_t oriInstOpcode = 0; + oriInstOpcode = 0; + oriInstOpcode = ORI_OPCODE << 26; + oriInstOpcode |= i_Rs << 21; + oriInstOpcode |= i_Ra << 16; + oriInstOpcode |= i_data; + + return SWIZZLE_4_BYTE(oriInstOpcode); +} + +//----------------------------------------------------------------------------- + +/** + * @brief generates 32 bit key used for SPR lookup in core section. + */ +static uint32_t genKeyForSprLookup( const CpuReg_t i_regId ) +{ + return getOriInstruction( 0, 0, (uint16_t) i_regId ); +} + +//----------------------------------------------------------------------------- + +/** + * @brief generates xor instruction code. 
+ * @param[in] i_Rs source register number for xor operation + * @param[in] i_Ra destination register number for xor operation result + * @param[in] i_Rb source register number for xor operation + * @return returns 32 bit number representing xor immediate instruction. + */ +static uint32_t getXorInstruction( const uint16_t i_Ra, const uint16_t i_Rs, + const uint16_t i_Rb ) +{ + uint32_t xorRegInstOpcode; + xorRegInstOpcode = XOR_CONST << 1; + xorRegInstOpcode |= OPCODE_31 << 26; + xorRegInstOpcode |= i_Rs << 21; + xorRegInstOpcode |= i_Ra << 16; + xorRegInstOpcode |= i_Rb << 11; + + return SWIZZLE_4_BYTE(xorRegInstOpcode); +} + +//----------------------------------------------------------------------------- + +/** + * @brief generates oris instruction code. + * @param[in] i_Rs source register number + * @param[in] i_Ra destination register number + * @param[in] i_data 16 bit immediate data + * @return returns 32 bit number representing oris immediate instruction. + */ +static uint32_t getOrisInstruction( const uint16_t i_Rs, const uint16_t i_Ra, + const uint16_t i_data ) +{ + uint32_t orisInstOpcode; + orisInstOpcode = 0; + orisInstOpcode = ORIS_OPCODE << 26; + orisInstOpcode |= ( i_Rs & 0x001F ) << 21 | ( i_Ra & 0x001F ) << 16; + orisInstOpcode |= i_data; + + return SWIZZLE_4_BYTE(orisInstOpcode); +} + +//----------------------------------------------------------------------------- + +/** + * @brief generates instruction for mtspr + * @param[in] i_Rs source register number + * @param[in] i_Spr represents spr where data is to be moved. + * @return returns 32 bit number representing mtspr instruction. + */ +static uint32_t getMtsprInstruction( const uint16_t i_Rs, const uint16_t i_Spr ) +{ + uint32_t mtsprInstOpcode = 0; + uint32_t temp = (( i_Spr & 0x03FF ) << 11); + mtsprInstOpcode = (uint8_t)i_Rs << 21; + mtsprInstOpcode = ( temp & 0x0000F800 ) << 5; + mtsprInstOpcode |= ( temp & 0x001F0000 ) >> 5; + mtsprInstOpcode |= MTSPR_BASE_OPCODE; + + return SWIZZLE_4_BYTE(mtsprInstOpcode); +} + +//----------------------------------------------------------------------------- + +/** + * @brief generates rldicr instruction. + * @param[in] i_Rs source register number + * @param[in] i_Ra destination register number + * @param[in] i_sh bit position by which contents of i_Rs are to be shifted + * @param[in] i_me bit position up to which mask should be 1. + * @return returns 32 bit number representing rldicr instruction. + */ +static uint32_t getRldicrInstruction( const uint16_t i_Ra, const uint16_t i_Rs, + const uint16_t i_sh, uint16_t i_me ) +{ + uint32_t rldicrInstOpcode = 0; + rldicrInstOpcode = 0; + rldicrInstOpcode = ((RLDICR_OPCODE << 26 ) | ( i_Rs << 21 ) | ( i_Ra << 16 )); + rldicrInstOpcode |= ( ( i_sh & 0x001F ) << 11 ) | (RLDICR_CONST << 2 ); + rldicrInstOpcode |= (( i_sh & 0x0020 ) >> 4); + rldicrInstOpcode |= (i_me & 0x001F ) << 6; + rldicrInstOpcode |= (i_me & 0x0020 ); + return SWIZZLE_4_BYTE(rldicrInstOpcode); +} + +//----------------------------------------------------------------------------- + +/** + * @brief looks up entry for given SPR in given thread/core section. + * @param[in] i_pThreadSectLoc start of given thread section or core section. + * @param[in] i_lookUpKey search key for lookup of given SPR entry. + * @param[in] i_isCoreReg true if register is of scope core, false + * otherwise. + * @param[in|out] io_pSprEntryLoc Input: NULL + * Output: location of given entry or end of table. + * @return STOP_SAVE_SUCCESS if entry is found, STOP_SAVE_FAIL in case of + * an error. 
+ */ +static StopReturnCode_t lookUpSprInImage( uint32_t* i_pThreadSectLoc, + const uint32_t i_lookUpKey, + const bool i_isCoreReg, + void** io_pSprEntryLoc ) +{ + StopReturnCode_t l_rc = STOP_SAVE_FAIL; + uint32_t temp = i_isCoreReg ? (uint32_t)(CORE_RESTORE_CORE_AREA_SIZE) : + (uint32_t)(CORE_RESTORE_THREAD_AREA_SIZE); + uint32_t* i_threadSectEnd = i_pThreadSectLoc + temp; + uint32_t bctr_inst = SWIZZLE_4_BYTE(BLR_INST); + *io_pSprEntryLoc = NULL; + + do + { + if( !i_pThreadSectLoc ) + { + break; + } + + temp = 0; + + while( ( i_pThreadSectLoc <= i_threadSectEnd ) && + ( temp != bctr_inst ) ) + { + temp = *i_pThreadSectLoc; + + if( ( temp == i_lookUpKey ) || ( temp == bctr_inst ) ) + { + *io_pSprEntryLoc = i_pThreadSectLoc; + l_rc = STOP_SAVE_SUCCESS; + break; + } + + i_pThreadSectLoc = i_pThreadSectLoc + SIZE_PER_SPR_RESTORE_INST; + } + + } + while(0); + + return l_rc; +} + +//----------------------------------------------------------------------------- + +/** + * @brief updates an SPR STOP image entry. + * @param[in] i_pSprEntryLocation location of entry. + * @param[in] i_regId register Id associated with SPR. + * @param[in] i_regData data needs to be written to SPR entry. + * @return STOP_SAVE_SUCCESS if update works, STOP_SAVE_FAIL otherwise. + */ +static StopReturnCode_t updateSprEntryInImage( uint32_t* i_pSprEntryLocation, + const CpuReg_t i_regId, + const uint64_t i_regData ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + uint32_t tempInst = 0; + uint64_t tempRegData = 0; + bool newEntry = true; + uint16_t regRs = 0; //to use R0 for SPR restore insruction generation + uint16_t regRa = 0; + + do + { + if( !i_pSprEntryLocation ) + { + MY_ERR("invalid location of SPR image entry" ); + l_rc = STOP_SAVE_FAIL; + break; + } + + tempInst = genKeyForSprLookup( i_regId ); + + if( *i_pSprEntryLocation == tempInst ) + { + newEntry = false; + } + + //Add SPR search instruction i.e. "ori r0, r0, SPRID" + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + //clear R0 i.e. "xor ra, rs, rb" + tempInst = getXorInstruction( regRs, regRs, regRs ); + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + tempRegData = i_regData >> 48; + //get lower order 16 bits of SPR restore value in R0 + tempInst = getOrisInstruction( regRs, regRa, (uint16_t)tempRegData ); + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + tempRegData = ((i_regData >> 32) & 0x0000FFFF ); + //get bit b16-b31 of SPR restore value in R0 + tempInst = getOriInstruction( regRs, regRa, (uint16_t)tempRegData ); + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + //Rotate R0 to left by 32 bit position and zero lower order 32 bits. 
+ //Place the result in R0 + tempInst = getRldicrInstruction(regRa, regRs, 32, 31); + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + tempRegData = ((i_regData >> 16) & 0x000000FFFF ); + //get bit b32-b47 of SPR restore value to R0 + tempInst = getOrisInstruction( regRs, regRa, (uint16_t)tempRegData ); + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + tempRegData = (uint16_t)i_regData; + //get bit b48-b63 of SPR restore value to R0 + tempInst = getOriInstruction( regRs, regRa, (uint16_t)i_regData ); + *i_pSprEntryLocation = tempInst; + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + + if( P9_STOP_SPR_MSR == i_regId ) + { + //MSR cannot be restored completely with mtmsrd instruction. + //as it does not update ME, LE and HV bits. In self restore code + //inorder to restore MSR, contents of R21 is moved to SRR1. It also + //executes an RFID which causes contents of SRR1 to be copied to + //MSR. This allows copy of LE bit which are specifically interested + //in. Instruction below moves contents of MSR Value (in R0 ) to R21. + tempInst = SWIZZLE_4_BYTE( MR_R0_TO_R21 ); + } + else if (P9_STOP_SPR_HRMOR == i_regId ) + { + //Case HRMOR, move contents of R0 to a placeholder GPR (R10) + //Thread Launcher expects HRMOR value in R10 + tempInst = SWIZZLE_4_BYTE( MR_R0_TO_R10 ); + } + else + { + // Case other SPRs, move contents of R0 to SPR + tempInst = + getMtsprInstruction( 0, (uint16_t)i_regId ); + } + + *i_pSprEntryLocation = tempInst; + + if( newEntry ) + { + i_pSprEntryLocation += SIZE_PER_SPR_RESTORE_INST; + //at the end of SPR restore, add instruction BLR to go back to thread + //launcher. + tempInst = SWIZZLE_4_BYTE(BLR_INST); + *i_pSprEntryLocation = tempInst; + } + } + while(0); + + return l_rc; +} + +//----------------------------------------------------------------------------- + +StopReturnCode_t p9_stop_save_cpureg( void* const i_pImage, + const CpuReg_t i_regId, + const uint64_t i_regData, + const uint64_t i_pir ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; // procedure return code + HomerSection_t* chipHomer = NULL; + + do + { + uint32_t threadId = 0; + uint32_t coreId = 0; + uint32_t lookUpKey = 0; + void* pSprEntryLocation = NULL; // an offset w.r.t. to start of image + void* pThreadLocation = NULL; + bool threadScopeReg = false; + + l_rc = getCoreAndThread( i_pImage, i_pir, &coreId, &threadId ); + + if( l_rc ) + { + MY_ERR("Failed to determine Core Id and Thread Id from PIR 0x%016llx", + i_pir); + break; + } + + MY_INF( " PIR 0x%016llx coreId %d threadid %d " + " registerId %d", i_pir, coreId, + threadId, i_regId ); + + // First of all let us validate all input arguments. + l_rc = validateSprImageInputs( i_pImage, + i_regId, + coreId, + &threadId, + &threadScopeReg ); + + if( l_rc ) + { + // Error: bad argument traces out error code + MY_ERR("Bad input argument rc %d", l_rc ); + + break; + } + + chipHomer = ( HomerSection_t*)i_pImage; + + if( threadScopeReg ) + { + pThreadLocation = + &(chipHomer->coreThreadRestore[coreId][threadId].threadArea[0]); + } + else + { + pThreadLocation = + &(chipHomer->coreThreadRestore[coreId][threadId].coreArea[0]); + } + + if( ( SWIZZLE_4_BYTE(BLR_INST) == *(uint32_t*)pThreadLocation ) || + ( SWIZZLE_4_BYTE(ATTN_OPCODE) == *(uint32_t*) pThreadLocation ) ) + { + // table for given core id doesn't exit. It needs to be + // defined. 
+ pSprEntryLocation = pThreadLocation; + } + else + { + // an SPR restore section for given core already exists + lookUpKey = genKeyForSprLookup( i_regId ); + l_rc = lookUpSprInImage( (uint32_t*)pThreadLocation, + lookUpKey, + threadScopeReg, + &pSprEntryLocation ); + } + + if( l_rc ) + { + MY_ERR("Invalid or corrupt SPR entry. CoreId 0x%08x threadId ", + "0x%08x regId 0x%08x lookUpKey 0x%08x pThreadLocation 0x%08x" + , coreId, threadId, i_regId, lookUpKey, pThreadLocation ); + break; + } + + l_rc = updateSprEntryInImage( (uint32_t*) pSprEntryLocation, + i_regId, + i_regData ); + + if( l_rc ) + { + MY_ERR( " Failed to update the SPR entry of PIR 0x%08x reg" + "0x%08x", i_pir, i_regId ); + break; + } + + } + while(0); + + return l_rc; +} + +//----------------------------------------------------------------------------- + +/** + * @brief validates all the input arguments. + * @param[in] i_pImage pointer to start of HOMER of image for proc chip. + * @param[in] i_scomAddress SCOM address of register. + * @param[in] i_chipletId core or cache chiplet id + * @param[in] i_operation operation requested for SCOM entry. + * @param[in] i_section image section on which operation is to be performed + * @return STOP_SAVE_SUCCESS if arguments found valid, error code otherwise. + * @note Function does not validate that the given SCOM address really + * belongs to the given section. + */ +static StopReturnCode_t validateScomImageInputs( void* const i_pImage, + const uint32_t i_scomAddress, + const uint8_t i_chipletId, + const ScomOperation_t i_operation, + const ScomSection_t i_section ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + + do + { + if( !i_pImage ) + { + //Error Invalid image pointer + l_rc = STOP_SAVE_ARG_INVALID_IMG; + MY_ERR("invalid image location "); + break; + } + + if( 0 == i_scomAddress ) + { + l_rc = STOP_SAVE_SCOM_INVALID_ADDRESS; + MY_ERR("invalid SCOM address"); + break; + } + + if(( CACHE_CHIPLET_ID_MIN > i_chipletId ) || + ( CORE_CHIPLET_ID_MAX < i_chipletId )) + { + l_rc = STOP_SAVE_SCOM_INVALID_CHIPLET; + MY_ERR("chiplet id not in range"); + break; + } + + if(( CORE_CHIPLET_ID_MIN > i_chipletId ) && + ( CACHE_CHIPLET_ID_MAX < i_chipletId )) + { + l_rc = STOP_SAVE_SCOM_INVALID_CHIPLET; + MY_ERR("chiplet id not valid"); + break; + } + + if(( P9_STOP_SCOM_OP_MIN >= i_operation ) || + ( P9_STOP_SCOM_OP_MAX <= i_operation )) + { + //invalid SCOM image operation requested + l_rc = STOP_SAVE_SCOM_INVALID_OPERATION; + MY_ERR("invalid SCOM image operation"); + break; + } + + if(( P9_STOP_SECTION_MIN >= i_section ) || + ( P9_STOP_SECTION_MAX <= i_section )) + { + // invalid cache sub section specified + l_rc = STOP_SAVE_SCOM_INVALID_SECTION; + MY_ERR("invalid section"); + break; + } + + if(( i_operation == P9_STOP_SCOM_RESET ) && + ( i_chipletId < CORE_CHIPLET_ID_MIN )) + { + // replace requested with a cache chiplet Id + l_rc = STOP_SAVE_SCOM_INVALID_OPERATION; + MY_ERR( "reset not supported for cache. chiplet Id 0x%08x", + i_chipletId ); + break; + } + + } + while(0); + + if( l_rc ) + { + MY_ERR("image 0x%08x SCOMAddress 0x%08x chipletId 0x%08x operation" + "0x%08x section 0x%08x", i_pImage, i_scomAddress, i_chipletId, + i_operation, i_section ); + } + + return l_rc; +} + +//----------------------------------------------------------------------------- + +/** + * @brief edit SCOM entry associated with the given core. + * @param[in] i_scomAddr SCOM address of register. + * @param[in] i_scomData data associated with SCOM register. 
+ * @param[in] i_pEntryLocation points to a SCOM entry in HOMER image. + * @param[in] i_operation operation to be performed on SCOM entry. + * @return STOP_SAVE_SUCCESS if existing entry is updated, STOP_SAVE_FAIL + * otherwise. + */ +static StopReturnCode_t editScomEntry( uint32_t i_scomAddr, uint64_t i_scomData, + ScomEntry_t* i_pEntryLocation, + uint32_t i_operation ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + + do + { + if( !i_pEntryLocation ) + { + //Error: location of SCOM entry is not known + //therefore no point moving forward + MY_ERR("SCOM entry location not valid"); + l_rc = STOP_SAVE_FAIL; + break; + } + + switch( i_operation ) + { + case P9_STOP_SCOM_OR: + i_pEntryLocation->scomEntryData |= i_scomData; + break; + + case P9_STOP_SCOM_AND: + i_pEntryLocation->scomEntryData &= i_scomData; + break; + + case P9_STOP_SCOM_NOOP: + { + uint32_t nopInst = getOriInstruction( 0, 0, 0 ); + i_pEntryLocation->scomEntryHeader = SWIZZLE_4_BYTE(SCOM_ENTRY_START); + i_pEntryLocation->scomEntryData = nopInst; + i_pEntryLocation->scomEntryAddress = nopInst; + } + break; + + case P9_STOP_SCOM_APPEND: + i_pEntryLocation->scomEntryHeader = SWIZZLE_4_BYTE(SCOM_ENTRY_START); + i_pEntryLocation->scomEntryData = i_scomData; + i_pEntryLocation->scomEntryAddress = i_scomAddr; + break; + } + + } + while(0); + + return l_rc; +} + +//----------------------------------------------------------------------------- + +/** + * @brief update SCOM entry associated with the given core. + * @param[in] i_scomAddr SCOM address of register. + * @param[in] i_scomData data associated with SCOM register. + * @param[in] i_scomEntry points to a SCOM entry in cache section of HOMER image. + * @return STOP_SAVE_SUCCESS if new entry is added, STOP_SAVE_FAIL otherwise. + * @note adds an entry at a given location. It can be used to add entry in + * place of NOP, at the end of table or as first entry of the cache + * sub-section(L2, L3 or EQ ). 
+ */ +static StopReturnCode_t updateScomEntry( uint32_t i_scomAddr, uint64_t i_scomData, + ScomEntry_t* i_scomEntry ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + + do + { + if( !i_scomEntry ) + { + MY_ERR( "cache entry cannot be located"); + l_rc = STOP_SAVE_SCOM_ENTRY_UPDATE_FAILED; + break; + } + + i_scomEntry->scomEntryHeader = SWIZZLE_4_BYTE(SCOM_ENTRY_START); // done for now + i_scomEntry->scomEntryAddress = i_scomAddr; + i_scomEntry->scomEntryData = i_scomData; + + } + while(0); + + return l_rc; +} + +//----------------------------------------------------------------------------- + +StopReturnCode_t p9_stop_save_scom( void* const i_pImage, + const uint32_t i_scomAddress, + const uint64_t i_scomData, + const ScomOperation_t i_operation, + const ScomSection_t i_section ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + StopCacheSection_t* pStopCacheScomStart = NULL; + ScomEntry_t* pScomEntry = NULL; + uint32_t entryLimit = 0; + uint8_t chipletId = 0; + + uint32_t nopInst; + ScomEntry_t* pEntryLocation = NULL; + ScomEntry_t* pNopLocation = NULL; + ScomEntry_t* pTableEndLocationtable = NULL; + uint32_t swizzleAddr; + uint64_t swizzleData; + uint32_t swizzleAttn; + uint32_t swizzleEntry; + uint32_t index = 0; + uint32_t swizzleBlr = SWIZZLE_4_BYTE(BLR_INST); + + do + { + chipletId = i_scomAddress >> 24; + chipletId = chipletId & 0x3F; + + l_rc = validateScomImageInputs( i_pImage, + i_scomAddress, + chipletId, + i_operation, + i_section ); + + if( l_rc ) + { + MY_ERR( "invalid argument: aborting"); + break; + } + + if( chipletId >= CORE_CHIPLET_ID_MIN ) + { + // chiplet is core. So, let us find the start address of SCOM area + // pertaining to a core in STOP image. + pScomEntry = CORE_ID_SCOM_START(i_pImage, + chipletId ) + entryLimit = MAX_CORE_SCOM_ENTRIES; + } + else + { + // chiplet is a cache. let us find start address of cache section + // associated with given chiplet. A cache section associated with + // given chiplet is split in to L2, L3 and EQ area. + pStopCacheScomStart = CACHE_SECTN_START(i_pImage, + chipletId); + } + + if(( !pStopCacheScomStart ) && ( !pScomEntry) ) + { + //Error invalid pointer to SCOM entry in cache or core section + //of STOP image. + MY_ERR("invalid start location for chiplet %d", + chipletId ); + break; + } + + switch( i_section ) + { + case P9_STOP_SECTION_EQ_SCOM: + pScomEntry = pStopCacheScomStart->nonCacheArea; + entryLimit = MAX_EQ_SCOM_ENTRIES; + break; + + case P9_STOP_SECTION_L2: + pScomEntry = pStopCacheScomStart->l2CacheArea; + entryLimit = MAX_L2_SCOM_ENTRIES; + break; + + case P9_STOP_SECTION_L3: + pScomEntry = pStopCacheScomStart->l3CacheArea; + entryLimit = MAX_L3_SCOM_ENTRIES; + break; + + case P9_STOP_SECTION_CORE_SCOM: + //macro CORE_ID_SCOM_START already gives start of scom + //entry for given core. entry limit too is assigned thereafter. + //Handling for core and cache segment is different for scom + //entries. It is because scom entries are organized differently + //in core and cache segment. 
+ break; + + default: + l_rc = STOP_SAVE_SCOM_INVALID_SECTION; + break; + } + + if(( !pScomEntry ) || ( l_rc ) ) + { + // Error Invalid pointer to cache entry + MY_ERR("invalid subsection %d or internal firmware failure", + i_section ); + l_rc = STOP_SAVE_FAIL; + break; + } + + nopInst = getOriInstruction( 0, 0, 0 ); + pEntryLocation = NULL; + pNopLocation = NULL; + pTableEndLocationtable = NULL; + swizzleAddr = SWIZZLE_4_BYTE(i_scomAddress); + swizzleData = SWIZZLE_8_BYTE(i_scomData); + swizzleAttn = SWIZZLE_4_BYTE(ATTN_OPCODE); + swizzleEntry = SWIZZLE_4_BYTE(SCOM_ENTRY_START); + + for( index = 0; index < entryLimit; ++index ) + { + uint32_t entrySwzAddress = pScomEntry[index].scomEntryAddress; + + if( ( swizzleAddr == entrySwzAddress ) && ( !pEntryLocation ) ) + + { + pEntryLocation = &pScomEntry[index]; + } + + if( (( nopInst == entrySwzAddress ) || + ( swizzleAttn == entrySwzAddress ) || + ( swizzleBlr == entrySwzAddress )) && ( !pNopLocation ) ) + { + pNopLocation = &pScomEntry[index]; + } + + if( swizzleEntry == pScomEntry[index].scomEntryHeader ) + { + continue; + } + + pTableEndLocationtable = &pScomEntry[index]; + break; + } + + if( ( !pEntryLocation ) && ( !pTableEndLocationtable ) ) + { + MY_ERR(" exhausted all location available for section" + "0x%08x scom address 0x%08x", + i_section, i_scomAddress ); + l_rc = STOP_SAVE_SCOM_ENTRY_UPDATE_FAILED; + break; + } + + switch( i_operation ) + { + case P9_STOP_SCOM_APPEND: + { + ScomEntry_t* pScomAppend = NULL; + + if( pNopLocation ) + { + pScomAppend = pNopLocation; + } + else + { + pScomAppend = pTableEndLocationtable; + } + + l_rc = updateScomEntry ( swizzleAddr, + swizzleData, pScomAppend ); + } + break; + + case P9_STOP_SCOM_REPLACE: + { + ScomEntry_t* scomReplace = NULL; + + if( pEntryLocation ) + { + scomReplace = pEntryLocation; + } + else + { + scomReplace = pTableEndLocationtable; + } + + l_rc = updateScomEntry( swizzleAddr, + swizzleData, scomReplace ); + } + break; + + case P9_STOP_SCOM_OR: + case P9_STOP_SCOM_AND: + case P9_STOP_SCOM_NOOP: + + if( pEntryLocation ) + { + l_rc = editScomEntry( swizzleAddr, + swizzleData, + pEntryLocation, + i_operation ); + } + else + { + //Invalid operation requested. 
+ MY_ERR( "entry not found edit chiplet Id 0x%08x " + "swizzle addr 0x%08x ", + chipletId, swizzleAddr ); + + l_rc = STOP_SAVE_SCOM_INVALID_OPERATION; + } + + break; + + case P9_STOP_SCOM_RESET: + + if( P9_STOP_SECTION_CORE_SCOM == i_section ) + { + memset( pScomEntry, 0x00, CORE_SCOM_RESTORE_SIZE_PER_CORE ); + } + + break; + + case P9_STOP_SCOM_OR_APPEND: + case P9_STOP_SCOM_AND_APPEND: + { + uint32_t tempOperation = P9_STOP_SCOM_APPEND; + ScomEntry_t* editAppend = NULL; + + if( NULL == pEntryLocation ) + { + editAppend = pTableEndLocationtable; + } + else + { + editAppend = pEntryLocation; + + if( P9_STOP_SCOM_OR_APPEND == i_operation ) + { + tempOperation = P9_STOP_SCOM_OR; + } + else + { + tempOperation = P9_STOP_SCOM_AND; + } + } + + l_rc = editScomEntry( swizzleAddr, + swizzleData, + editAppend, + tempOperation ); + } + break; + + default: + l_rc = STOP_SAVE_SCOM_INVALID_OPERATION; + break; + } + + } + while(0); + + if( l_rc ) + { + MY_ERR("SCOM image operation 0x%08x failed for chiplet 0x%08x addr" + "0x%08x", i_operation, chipletId , + i_scomAddress ); + } + + return l_rc; +} + + +#ifdef __cplusplus +} //namespace stopImageSection ends + +} //extern "C" +#endif diff --git a/libpore/p9_stop_api.H b/libpore/p9_stop_api.H new file mode 100644 index 0000000..79abd00 --- /dev/null +++ b/libpore/p9_stop_api.H @@ -0,0 +1,163 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/utils/stopreg/p9_stop_api.H $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __P9_STOP_IMAGE_API_ +#define __P9_STOP_IMAGE_API_ + +#include <stdint.h> + +#ifdef __SKIBOOT__ + #include <skiboot.h> +#endif + +/// +/// @file p9_stop_api.H +/// @brief describes STOP API which create/manipulate STOP image. +/// +// *HWP HW Owner : Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com> +// *HWP Team : PM +// *HWP Level : 2 +// *HWP Consumed by : HB:HYP + +#ifdef __cplusplus +namespace stopImageSection +{ +#endif + +/** + * @brief all SPRs and MSR for which register restore is to be supported. + * @note STOP API design has built in support to accomodate 8 register of + * scope core and thread each. + */ +typedef enum +{ + P9_STOP_SPR_DAWR = 180, // thread register + P9_STOP_SPR_HSPRG0 = 304, // thread register + P9_STOP_SPR_HRMOR = 313, // core register + P9_STOP_SPR_LPCR = 318, // thread register + P9_STOP_SPR_HMEER = 337, // core register + P9_STOP_SPR_LDBAR = 850, // thread register + P9_STOP_SPR_PSSCR = 855, // thread register + P9_STOP_SPR_PMCR = 884, // core register + P9_STOP_SPR_HID = 1008, // core register + P9_STOP_SPR_MSR = 2000, // thread register +} CpuReg_t; + +/** + * @brief lists all the bad error codes. 
+ */ +typedef enum +{ + STOP_SAVE_SUCCESS = 0, + STOP_SAVE_ARG_INVALID_IMG = 1, + STOP_SAVE_ARG_INVALID_REG = 2, + STOP_SAVE_ARG_INVALID_THREAD = 3, + STOP_SAVE_ARG_INVALID_MODE = 4, + STOP_SAVE_ARG_INVALID_CORE = 5, + STOP_SAVE_SPR_ENTRY_NOT_FOUND = 6, + STOP_SAVE_SPR_ENTRY_UPDATE_FAILED = 7, + STOP_SAVE_SCOM_INVALID_OPERATION = 8, + STOP_SAVE_SCOM_INVALID_SECTION = 9, + STOP_SAVE_SCOM_INVALID_ADDRESS = 10, + STOP_SAVE_SCOM_INVALID_CHIPLET = 11, + STOP_SAVE_SCOM_ENTRY_UPDATE_FAILED = 12, + STOP_SAVE_INVALID_FUSED_CORE_STATUS = 13, + STOP_SAVE_FAIL = 14, // for internal failure within firmware. +} StopReturnCode_t; + +/** + * @brief summarizes all operations supported on scom entries of STOP image. + */ +typedef enum +{ + P9_STOP_SCOM_OP_MIN = 0, + P9_STOP_SCOM_APPEND = 1, + P9_STOP_SCOM_REPLACE = 2, + P9_STOP_SCOM_OR = 3, + P9_STOP_SCOM_AND = 4, + P9_STOP_SCOM_NOOP = 5, + P9_STOP_SCOM_RESET = 6, + P9_STOP_SCOM_OR_APPEND = 7, + P9_STOP_SCOM_AND_APPEND = 8, + P9_STOP_SCOM_OP_MAX = 9 +} ScomOperation_t; + +/** + * @brief All subsections that contain scom entries in a STOP image. + */ +typedef enum +{ + P9_STOP_SECTION_MIN = 0, + P9_STOP_SECTION_CORE_SCOM = 1, + P9_STOP_SECTION_EQ_SCOM = 2, + P9_STOP_SECTION_L2 = 3, + P9_STOP_SECTION_L3 = 4, + P9_STOP_SECTION_MAX = 5 +} ScomSection_t; + +#ifdef __cplusplus +extern "C" { +#endif +/** + * @brief Updates STOP image entry associated with CPU register. + * @param[in] i_pImage start address of homer image associated with processor. + * @param[in] i_regId id of SPR for which STOP image needs to be updated. + * @param[in] i_regData data to be restored in SPR register. + * @param[in] i_pir value of processor identification register (PIR) + * @return STOP_SAVE_SUCCESS SUCCESS if image is updated successfully, error + * code otherwise. + */ + +StopReturnCode_t p9_stop_save_cpureg( void* const i_pImage, + const CpuReg_t i_regId, + const uint64_t i_regData, + const uint64_t i_pir ); + +/** + * @brief Updates scom image entry associated with given core or cache in + * STOP section of homer image. + * @param[in] i_pImage start address of homer image of P9 chip. + * @param[in] i_scomAddress fully qualified address of SCOM register. + * @param[in] i_scomData data associated with SCOM register. + * @param[in] i_operation operation to be done on SCOM image entry. + * @param[in] i_section area to which given SCOM entry belongs. + * @return STOP_SAVE_SUCCESS if image is updated successfully, error code + * otherwise. + * @note API is intended to update SCOM image entry associated with given + * core or given part of a cache section. API doesn't validate if + * a given SCOM address really belongs to given section. + */ +StopReturnCode_t p9_stop_save_scom( void* const i_pImage, + const uint32_t i_scomAddress, + const uint64_t i_scomData, + const ScomOperation_t i_operation, + const ScomSection_t i_section ); + +#ifdef __cplusplus +} // extern "C" +}; // namespace stopImageSection ends +#endif //__cplusplus + +#endif //__P9_STOP_IMAGE_API_ diff --git a/libpore/p9_stop_data_struct.H b/libpore/p9_stop_data_struct.H new file mode 100644 index 0000000..2201021 --- /dev/null +++ b/libpore/p9_stop_data_struct.H @@ -0,0 +1,149 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/utils/stopreg/p9_stop_data_struct.H $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* [+] International Business Machines Corp. 
*/ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/// +/// @file p9_stop_data_struct.H +/// @brief describes data structures internal to STOP API. +/// +// *HWP HW Owner : Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com> +// *HWP Team : PM +// *HWP Level : 2 +// *HWP Consumed by : HB:HYP +#ifndef __STOP_DATA_STRUCT_ +#define __STOP_DATA_STRUCT_ + +#ifndef _AIX + #include <endian.h> +#endif + +#include "p9_hcd_memmap_base.H" + +#ifdef __SKIBOOT__ + #include <skiboot.h> +#endif + +#ifdef __FAPI_2_ + #include <fapi2.H> +#endif + + +#ifdef __cplusplus +extern "C" { +namespace stopImageSection +{ +#endif + +enum +{ + MAX_SPR_RESTORE_INST = 0x08, + SIZE_PER_SPR_RESTORE_INST = ((4 * sizeof(uint8_t)) / sizeof(uint32_t)), +}; + +typedef struct +{ + uint32_t scomEntryHeader; + uint32_t scomEntryAddress; + uint64_t scomEntryData; +} ScomEntry_t; + +/** + * @brief models a CPU register restoration area in STOP section of homer image. + */ +typedef struct +{ + uint8_t threadArea[CORE_RESTORE_THREAD_AREA_SIZE]; + uint8_t coreArea[CORE_RESTORE_CORE_AREA_SIZE]; +} SprRestoreArea_t; + +/** + * @brief models homer image of a chip. + * @note sections not relevant for CPU register restoration have been + * abstracted using field 'reserve'. + */ +typedef struct +{ + uint8_t occ_host_sgpe_area[ TWO_MB ]; // CPU restore area starts at an offset of 2MB from chip HOMER + uint8_t interrruptHandler[SELF_RESTORE_INT_SIZE]; + uint8_t threadLauncher[THREAD_LAUNCHER_SIZE]; + SprRestoreArea_t coreThreadRestore[MAX_CORES_PER_CHIP][MAX_THREADS_PER_CORE]; + uint8_t reserve[(ONE_KB * ONE_KB) - SELF_RESTORE_SIZE_TOTAL]; +} HomerSection_t; + +/** + * @brief models cache subsection in STOP section of a given homer image. + * @note given the start of cache subsection associated with a given core, + * the structure below represents what a cache subsection would look + * like. Based on known start address, quick traversing can be done + * within the cache subsection. + */ +typedef struct +{ + ScomEntry_t nonCacheArea[MAX_EQ_SCOM_ENTRIES]; + ScomEntry_t l2CacheArea[MAX_L2_SCOM_ENTRIES]; + ScomEntry_t l3CacheArea[MAX_L3_SCOM_ENTRIES]; +} StopCacheSection_t; + +/** + * @brief summarizes attributes associated with a SPR register. + */ +typedef struct +{ + uint32_t sprId; + bool isThreadScope; +} StopSprReg_t; + +enum +{ + SIZE_SCOM_ENTRY = sizeof( ScomEntry_t ), + SCOM_ENTRY_START = 0xDEADDEAD, +}; + +#ifdef __FAPI_2_ + #define MY_ERR( _fmt_, _args_...) FAPI_ERR(_fmt_, ##_args_) + #define MY_INF(_fmt_, _args_...) FAPI_INF(_fmt_, ##_args_) +#else + #define MY_ERR( _fmt_, _args_...) + #define MY_INF(_fmt_, _args_...) 
+#endif + +#define CORE_ID_SCOM_START(io_image,\ + i_chipletId) \ +((ScomEntry_t*)(((uint8_t*)(io_image)) + CORE_SCOM_RESTORE_HOMER_OFFSET +\ + ((i_chipletId - CORE_CHIPLET_ID_MIN) * \ + CORE_SCOM_RESTORE_SIZE_PER_CORE))); + +#define CACHE_SECTN_START(io_image,\ + i_chipletId) \ +((StopCacheSection_t *)(((uint8_t *)(io_image)) + QUAD_SCOM_RESTORE_HOMER_OFFSET +\ + ((i_chipletId - CACHE_CHIPLET_ID_MIN) * \ + QUAD_SCOM_RESTORE_SIZE_PER_QUAD))); +#ifdef __cplusplus +} // extern "C" + +} //namespace stopImageSection ends +#endif //__cplusplus + +#endif diff --git a/libpore/p9_stop_util.C b/libpore/p9_stop_util.C new file mode 100644 index 0000000..6fb8d67 --- /dev/null +++ b/libpore/p9_stop_util.C @@ -0,0 +1,186 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/utils/stopreg/p9_stop_util.C $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2015,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ + +/// +/// @file p9_stop_util.C +/// @brief implements some utilty functions for STOP API. +/// +// *HWP HW Owner : Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com> +// *HWP Team : PM +// *HWP Level : 2 +// *HWP Consumed by : HB:HYP + +#include "p9_stop_api.H" +#include "p9_stop_util.H" +#include "p9_stop_data_struct.H" + +#ifdef __cplusplus +namespace stopImageSection +{ +#endif + +/** + * @brief Returns proc chip's fuse mode status. + * @param i_pImage points to start of chip's HOMER image. + * @param o_fusedMode points to fuse mode information. + * @return STOP_SAVE_SUCCESS if functions succeeds, error code otherwise. + */ +static StopReturnCode_t isFusedMode( void* const i_pImage, bool* o_fusedMode ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + *o_fusedMode = false; + + do + { + HomerSection_t* pHomerDesc = ( HomerSection_t* ) i_pImage; + HomerImgDesc_t* pHomer = (HomerImgDesc_t*)( pHomerDesc->interrruptHandler ); + + if( !i_pImage ) + { + MY_ERR( "invalid pointer to HOMER image"); + l_rc = STOP_SAVE_ARG_INVALID_IMG; + break; + } + + + uint64_t cpmrCheckWord = SWIZZLE_8_BYTE(pHomer->cpmrMagicWord); + cpmrCheckWord = cpmrCheckWord >> 32; + + if( CPMR_REGION_CHECK_WORD != cpmrCheckWord ) + { + MY_ERR("corrupt or invalid HOMER image location 0x%016llx", + SWIZZLE_8_BYTE(pHomer->cpmrMagicWord) ); + l_rc = STOP_SAVE_ARG_INVALID_IMG; + break; + } + + if( (uint8_t) FUSED_CORE_MODE == pHomer->fusedModeStatus ) + { + *o_fusedMode = true; + break; + } + + if( (uint8_t) NONFUSED_CORE_MODE == pHomer->fusedModeStatus ) + { + break; + } + + MY_ERR("Unexpected value 0x%08x for fused mode. 
Bad or corrupt " + "HOMER location", pHomer->fuseModeStatus ); + l_rc = STOP_SAVE_INVALID_FUSED_CORE_STATUS ; + + } + while(0); + + return l_rc; +} + +//---------------------------------------------------------------------- + +StopReturnCode_t getCoreAndThread( void* const i_pImage, const uint64_t i_pir, + uint32_t* o_pCoreId, uint32_t* o_pThreadId ) +{ + StopReturnCode_t l_rc = STOP_SAVE_SUCCESS; + + do + { + // for SPR restore using 'Virtual Thread' and 'Physical Core' number + // In Fused Mode: + // bit b28 and b31 of PIR give physical core and b29 and b30 gives + // virtual thread id. + // In Non Fused Mode + // bit 28 and b29 of PIR give both logical and physical core number + // whereas b30 and b31 gives logical and virtual thread id. + bool fusedMode = false; + uint8_t coreThreadInfo = (uint8_t)i_pir; + *o_pCoreId = 0; + *o_pThreadId = 0; + l_rc = isFusedMode( i_pImage, &fusedMode ); + + if( l_rc ) + { + MY_ERR(" Checking Fused mode. Read failed 0x%08x", l_rc ); + break; + } + + if( fusedMode ) + { + if( coreThreadInfo & FUSED_CORE_BIT1 ) + { + *o_pThreadId = 2; + } + + if( coreThreadInfo & FUSED_CORE_BIT2 ) + { + *o_pThreadId += 1; + } + + if( coreThreadInfo & FUSED_CORE_BIT0 ) + { + *o_pCoreId = 2; + } + + if( coreThreadInfo & FUSED_CORE_BIT3 ) + { + *o_pCoreId += 1; + } + } + else + { + if( coreThreadInfo & FUSED_CORE_BIT0 ) + { + *o_pCoreId = 2; + } + + if ( coreThreadInfo & FUSED_CORE_BIT1 ) + { + *o_pCoreId += 1; + } + + if( coreThreadInfo & FUSED_CORE_BIT2 ) + { + *o_pThreadId = 2; + } + + if( coreThreadInfo & FUSED_CORE_BIT3 ) + { + *o_pThreadId += 1; + } + } + + MY_INF("Core Type %s", fusedMode ? "Fused" : "Un-Fused" ); + //quad field is not affected by fuse mode + *o_pCoreId += 4 * (( coreThreadInfo & 0x70 ) >> 4 ); + } + while(0); + + return l_rc; +} + +#ifdef __cplusplus +}//namespace stopImageSection ends +#endif + diff --git a/libpore/p9_stop_util.H b/libpore/p9_stop_util.H new file mode 100644 index 0000000..3266fde --- /dev/null +++ b/libpore/p9_stop_util.H @@ -0,0 +1,145 @@ +/* IBM_PROLOG_BEGIN_TAG */ +/* This is an automatically generated prolog. */ +/* */ +/* $Source: src/import/chips/p9/procedures/hwp/lib/p9_stop_util.H $ */ +/* */ +/* OpenPOWER HostBoot Project */ +/* */ +/* Contributors Listed Below - COPYRIGHT 2016,2017 */ +/* [+] International Business Machines Corp. */ +/* */ +/* */ +/* Licensed under the Apache License, Version 2.0 (the "License"); */ +/* you may not use this file except in compliance with the License. */ +/* You may obtain a copy of the License at */ +/* */ +/* http://www.apache.org/licenses/LICENSE-2.0 */ +/* */ +/* Unless required by applicable law or agreed to in writing, software */ +/* distributed under the License is distributed on an "AS IS" BASIS, */ +/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ +/* implied. See the License for the specific language governing */ +/* permissions and limitations under the License. */ +/* */ +/* IBM_PROLOG_END_TAG */ +#ifndef __P9_STOP_UTIL_ +#define __P9_STOP_UTIL_ + +#ifdef _AIX + #define __BYTE_ORDER __BIG_ENDIAN +#elif __SKIBOOT__ + #include <skiboot.h> +#else + #include <endian.h> +#endif + +#ifndef __PPE_PLAT + #include "p9_stop_api.H" +#endif + +#ifdef FAPI_2 + #include <fapi2.H> +#endif + +/// +/// @file p9_stop_util.H +/// @brief describes some utilty functions for STOP API. 
+/// +// *HWP HW Owner : Greg Still <stillgs@us.ibm.com> +// *HWP FW Owner : Prem Shanker Jha <premjha2@in.ibm.com> +// *HWP Team : PM +// *HWP Level : 2 +// *HWP Consumed by : HB:HYP +#ifndef __PPE_PLAT +#ifdef __cplusplus +namespace stopImageSection +{ +#endif +#endif //__PPE_PLAT +/** + * @brief helper function to swizzle given input data + * @note swizles bytes to handle endianess issue. + */ +#if( __BYTE_ORDER == __BIG_ENDIAN ) + +// NOP if it is a big endian system +#define SWIZZLE_2_BYTE(WORD) WORD +#define SWIZZLE_4_BYTE(WORD) WORD +#define SWIZZLE_8_BYTE(WORD) WORD + +#else +#define SWIZZLE_2_BYTE(WORD) \ + ( (((WORD) >> 8) & 0x00FF) | (((WORD) << 8) & 0xFF00) ) + +#define SWIZZLE_4_BYTE(WORD) \ + ( (((WORD) >> 24) & 0x000000FF) | (((WORD) >> 8) & 0x0000FF00) | \ + (((WORD) << 8) & 0x00FF0000) | (((WORD) << 24) & 0xFF000000) ) + +#define SWIZZLE_8_BYTE(WORD) \ + ( (((WORD) >> 56) & 0x00000000000000FF) | \ + (((WORD) >> 40) & 0x000000000000FF00)| \ + (((WORD) >> 24) & 0x0000000000FF0000) | \ + (((WORD) >> 8) & 0x00000000FF000000) | \ + (((WORD) << 8) & 0x000000FF00000000) | \ + (((WORD) << 24) & 0x0000FF0000000000) | \ + (((WORD) << 40) & 0x00FF000000000000) | \ + (((WORD) << 56) & 0xFF00000000000000) ) +#endif + +/** + * @brief describes details of CPMR header in HOMER. + */ +typedef struct +{ + uint64_t attnOpcodes; + uint64_t cpmrMagicWord; + uint32_t buildDate; + uint32_t version; + uint8_t reserve1[7]; + uint8_t fusedModeStatus; + uint32_t cmeImgOffset; + uint32_t cmeImgLength; + uint32_t cmeCommonRingOffset; + uint32_t cmeCommonRingLength; + uint32_t cmePstateOffset; + uint32_t cmePstateLength; + uint32_t coreSpecRingOffset; + uint32_t coreSpecRingLen; + uint32_t coreScomOffset; + uint32_t coreScomLength; + uint32_t reserve2[184]; +} HomerImgDesc_t; + +/** + * @brief enumerates bit(s) positions of interest for PIR. + */ +enum +{ + FUSED_CORE_BIT0 = 0x08, + FUSED_CORE_BIT1 = 0x04, + FUSED_CORE_BIT2 = 0x02, + FUSED_CORE_BIT3 = 0x01, + QUAD_BITS = 0x70 +}; + +#ifndef __PPE_PLAT +/** + * @brief returns core id and thread id by parsing a given PIR. + * @param i_pStopImage points to STOP image associated with a proc chip. + * @param i_pir PIR associated with a core's thread. + * @param o_coreId points to core id value obtained from PIR. + * @param o_threadId points to thread id value obtained from PIR. + * @return SUCCESS if function suceeds, error code otherwise. + */ +StopReturnCode_t getCoreAndThread( void* const i_pStopImage, + const uint64_t i_pir, + uint32_t* o_coreId, + uint32_t* o_threadId ); +#ifdef __cplusplus +} // namespace stopImageSection ends + +#endif +#endif //__PPE_PLAT +#endif + + |
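Note (not part of the commit): the two entry points declared in p9_stop_api.H above are what a caller such as skiboot's SLW code would use to program the self-restore region of a chip's HOMER image. The sketch below is only an illustration of the declared API; the HOMER base pointer, the LPCR value, the SCOM address and the SCOM data are placeholder assumptions, not values taken from this patch.

    #include <stdint.h>
    #include "p9_stop_api.H"

    /* Illustrative only: homer_base, the LPCR value, the SCOM address and
     * the SCOM data below are placeholders for this sketch. */
    static StopReturnCode_t setup_self_restore(void *homer_base, uint64_t pir)
    {
        StopReturnCode_t rc;

        /* Ask the self-restore code to reload LPCR with 0 on wakeup for
         * the thread identified by this PIR. */
        rc = p9_stop_save_cpureg(homer_base, P9_STOP_SPR_LPCR, 0, pir);
        if (rc != STOP_SAVE_SUCCESS)
            return rc;

        /* Append a core-scoped SCOM restore entry (placeholder address/data). */
        rc = p9_stop_save_scom(homer_base, 0x20010A40,
                               0x1234000000000000ULL,
                               P9_STOP_SCOM_APPEND,
                               P9_STOP_SECTION_CORE_SCOM);
        return rc;
    }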
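For reference, the table edited by p9_stop_save_scom is an array of ScomEntry_t records (see p9_stop_data_struct.H above). An APPEND walks the array for a free slot, where a slot counts as free if its address word holds an ori 0,0,0 no-op, the ATTN opcode or a blr; a REPLACE reuses the entry whose (swizzled) address already matches. The fragment below is a simplified restatement of that search, not the patch's code: it ignores byte swizzling, the 0xDEADDEAD live-entry header and the end-of-table handling, and the entry limit is an assumed parameter. The opcode constants are the encodings named in p9_cpu_reg_restore_instruction.H.

    #include <stdint.h>
    #include <stddef.h>

    /* Local copy of the record layout used by the SCOM restore tables. */
    typedef struct {
        uint32_t header;   /* marks a live entry in the real table */
        uint32_t address;  /* SCOM address, or a NOP/ATTN/BLR opcode */
        uint64_t data;     /* SCOM data */
    } scom_entry_sketch_t;

    /* Return the entry whose address word matches 'want' (REPLACE reuses it),
     * else the first free NOP/ATTN/BLR slot (APPEND fills it), else NULL.
     * 'limit' is an illustrative bound, not a constant from this patch. */
    static scom_entry_sketch_t *find_slot(scom_entry_sketch_t *tbl,
                                          size_t limit, uint32_t want)
    {
        const uint32_t nop  = 0x60000000;  /* ori 0,0,0 */
        const uint32_t attn = 0x00000200;  /* attn */
        const uint32_t blr  = 0x4e800020;  /* blr */
        scom_entry_sketch_t *free_slot = NULL;
        size_t i;

        for (i = 0; i < limit; i++) {
            if (tbl[i].address == want)
                return &tbl[i];            /* existing entry found */
            if (!free_slot && (tbl[i].address == nop ||
                               tbl[i].address == attn ||
                               tbl[i].address == blr))
                free_slot = &tbl[i];       /* candidate slot for APPEND */
        }
        return free_slot;
    }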
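getCoreAndThread() in p9_stop_util.C decodes a PIR into a chip-relative core id and a thread id, with the bit assignment depending on fused-core mode: bits 0x70 of the low byte select the quad in either mode, while the remaining four bits split differently between core and thread. The snippet below works one non-fused example; the PIR value 0x35 is chosen purely for illustration.

    #include <assert.h>
    #include <stdint.h>

    /* Worked example of the non-fused decode: bits 0x70 give the quad,
     * bits 0x0C the core within the quad, bits 0x03 the thread.
     * (In fused mode the same byte would decode to core 13, thread 2.) */
    int main(void)
    {
        uint8_t  pir_low = 0x35;                           /* illustrative */
        uint32_t quad   = (pir_low & 0x70) >> 4;           /* 3 */
        uint32_t core   = 4 * quad + ((pir_low & 0x0c) >> 2); /* 12 + 1 = 13 */
        uint32_t thread = pir_low & 0x03;                  /* 1 */

        assert(quad == 3 && core == 13 && thread == 1);
        return 0;
    }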