From aa29b21d62d298fadcfbc8e36d8d248fbb24b52d Mon Sep 17 00:00:00 2001
From: Chia-Wei Wang
Date: Tue, 3 Aug 2021 10:50:09 +0800
Subject: arm: Fix option dependency with Kconfig language

Use the Kconfig 'depends on' statement instead of #if macros to
express option dependencies.

Signed-off-by: Chia-Wei Wang
---
 arch/arm/Kconfig | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c815ad4..31ae295 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,9 +9,9 @@ config ARM64
 	select PHYS_64BIT
 	select SYS_CACHE_SHIFT_6
 
-if ARM64
 config POSITION_INDEPENDENT
 	bool "Generate position-independent pre-relocation code"
+	depends on ARM64
 	help
 	  U-Boot expects to be linked to a specific hard-coded address, and
 	  to be loaded to and run from that address. This option lifts that
@@ -22,6 +22,7 @@ config POSITION_INDEPENDENT
 
 config INIT_SP_RELATIVE
 	bool "Specify the early stack pointer relative to the .bss section"
+	depends on ARM64
 	default n if ARCH_QEMU
 	default y if POSITION_INDEPENDENT
 	help
@@ -37,6 +38,7 @@ config INIT_SP_RELATIVE
 
 config SYS_INIT_SP_BSS_OFFSET
 	int "Early stack offset from the .bss base address"
+	depends on ARM64
 	depends on INIT_SP_RELATIVE
 	default 524288
 	help
@@ -46,6 +48,7 @@ config SYS_INIT_SP_BSS_OFFSET
 	  do not overlap any appended DTB.
 
 config LINUX_KERNEL_IMAGE_HEADER
+	depends on ARM64
 	bool
 	help
 	  Place a Linux kernel image header at the start of the U-Boot binary.
@@ -54,14 +57,12 @@ config LINUX_KERNEL_IMAGE_HEADER
 	  image header reports the amount of memory (BSS and similar) that
 	  U-Boot needs to use, but which isn't part of the binary.
 
-if LINUX_KERNEL_IMAGE_HEADER
 config LNX_KRNL_IMG_TEXT_OFFSET_BASE
+	depends on LINUX_KERNEL_IMAGE_HEADER
 	hex
 	help
 	  The value subtracted from CONFIG_SYS_TEXT_BASE to calculate the
 	  TEXT_OFFSET value written to the Linux kernel image header.
-endif
-endif
 
 config GICV2
 	bool
--
cgit v1.1
From cd82f199852d88218e1f17f5ec07cdd9112a89c4 Mon Sep 17 00:00:00 2001
From: Chia-Wei Wang
Date: Tue, 3 Aug 2021 10:50:10 +0800
Subject: armv7: Add Position Independent Execution support

A U-Boot image could be loaded and executed at a different location
than it was linked at. For example, Aspeed takes a stable release
version of the U-Boot image as the golden one for recovery purposes.
When the primary storage such as flash is corrupted, the golden image
is loaded to an arbitrary SRAM/DRAM address on demand through
Ethernet/UART/etc. and run for rescue.

To deal with this condition, PIE is needed as there is only one
signed, golden image, which may nevertheless be executed at different
locations.

This patch adds PIE support for the ARMv7 platform.

Signed-off-by: Chia-Wei Wang
---
 arch/arm/Kconfig           |  2 +-
 arch/arm/cpu/armv7/start.S | 43 +++++++++++++++++++++++++++++++++++++++++++
 arch/arm/lib/crt0.S        | 11 +++++++++++
 arch/arm/lib/relocate.S    | 35 +++++++++++++++++++++++++----------
 4 files changed, 80 insertions(+), 11 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 31ae295..50efb5e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -11,7 +11,7 @@ config ARM64
 
 config POSITION_INDEPENDENT
 	bool "Generate position-independent pre-relocation code"
-	depends on ARM64
+	depends on ARM64 || CPU_V7A
 	help
 	  U-Boot expects to be linked to a specific hard-coded address, and
 	  to be loaded to and run from that address. This option lifts that
diff --git a/arch/arm/cpu/armv7/start.S b/arch/arm/cpu/armv7/start.S
index 87329d2..698e15b 100644
--- a/arch/arm/cpu/armv7/start.S
+++ b/arch/arm/cpu/armv7/start.S
@@ -39,6 +39,42 @@ reset:
 	/* Allow the board to save important registers */
 	b	save_boot_params
 save_boot_params_ret:
+#ifdef CONFIG_POSITION_INDEPENDENT
+	/*
+	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to
+	 * and executed at a different address than it was linked at.
+	 */
+pie_fixup:
+	adr	r0, reset		/* r0 <- Runtime value of reset label */
+	ldr	r1, =reset		/* r1 <- Linked value of reset label */
+	subs	r4, r0, r1		/* r4 <- Runtime-vs-link offset */
+	beq	pie_fixup_done
+
+	adr	r0, pie_fixup
+	ldr	r1, _rel_dyn_start_ofs
+	add	r2, r0, r1		/* r2 <- Runtime &__rel_dyn_start */
+	ldr	r1, _rel_dyn_end_ofs
+	add	r3, r0, r1		/* r3 <- Runtime &__rel_dyn_end */
+
+pie_fix_loop:
+	ldr	r0, [r2]		/* r0 <- Link location */
+	ldr	r1, [r2, #4]		/* r1 <- fixup */
+	cmp	r1, #23			/* relative fixup? */
+	bne	pie_skip_reloc
+
+	/* relative fix: increase location by offset */
+	add	r0, r4
+	ldr	r1, [r0]
+	add	r1, r4
+	str	r1, [r0]
+	str	r0, [r2]
+	add	r2, #8
+pie_skip_reloc:
+	cmp	r2, r3
+	blo	pie_fix_loop
+pie_fixup_done:
+#endif
+
 #ifdef CONFIG_ARMV7_LPAE
 /*
  * check for Hypervisor support
@@ -340,3 +376,10 @@ ENTRY(cpu_init_crit)
 	b	lowlevel_init		@ go setup pll,mux,memory
 ENDPROC(cpu_init_crit)
 #endif
+
+#ifdef CONFIG_POSITION_INDEPENDENT
+_rel_dyn_start_ofs:
+	.word __rel_dyn_start - pie_fixup
+_rel_dyn_end_ofs:
+	.word __rel_dyn_end - pie_fixup
+#endif
diff --git a/arch/arm/lib/crt0.S b/arch/arm/lib/crt0.S
index 46b6be2..956d258 100644
--- a/arch/arm/lib/crt0.S
+++ b/arch/arm/lib/crt0.S
@@ -130,6 +130,14 @@ ENTRY(_main)
 	ldr	r9, [r9, #GD_NEW_GD]		/* r9 <- gd->new_gd */
 
 	adr	lr, here
+#if defined(CONFIG_POSITION_INDEPENDENT)
+	adr	r0, _main
+	ldr	r1, _start_ofs
+	add	r0, r1
+	ldr	r1, =CONFIG_SYS_TEXT_BASE
+	sub	r1, r0
+	add	lr, r1
+#endif
 	ldr	r0, [r9, #GD_RELOC_OFF]		/* r0 = gd->reloc_off */
 	add	lr, lr, r0
 #if defined(CONFIG_CPU_V7M)
@@ -180,3 +188,6 @@ here:
 #endif
 
 ENDPROC(_main)
+
+_start_ofs:
+	.word _start - _main
diff --git a/arch/arm/lib/relocate.S b/arch/arm/lib/relocate.S
index e5f7267..14b7f61 100644
--- a/arch/arm/lib/relocate.S
+++ b/arch/arm/lib/relocate.S
@@ -78,22 +78,28 @@ ENDPROC(relocate_vectors)
  */
 
 ENTRY(relocate_code)
-	ldr	r1, =__image_copy_start	/* r1 <- SRC &__image_copy_start */
-	subs	r4, r0, r1		/* r4 <- relocation offset */
-	beq	relocate_done		/* skip relocation */
-	ldr	r2, =__image_copy_end	/* r2 <- SRC &__image_copy_end */
-
+	adr	r3, relocate_code
+	ldr	r1, _image_copy_start_ofs
+	add	r1, r3			/* r1 <- Run &__image_copy_start */
+	subs	r4, r0, r1		/* r4 <- Run to copy offset */
+	beq	relocate_done		/* skip relocation */
+	ldr	r1, _image_copy_start_ofs
+	add	r1, r3			/* r1 <- Run &__image_copy_start */
+	ldr	r2, _image_copy_end_ofs
+	add	r2, r3			/* r2 <- Run &__image_copy_end */
 copy_loop:
-	ldmia	r1!, {r10-r11}		/* copy from source address [r1] */
-	stmia	r0!, {r10-r11}		/* copy to   target address [r0] */
-	cmp	r1, r2			/* until source end address [r2] */
+	ldmia	r1!, {r10-r11}		/* copy from source address [r1] */
+	stmia	r0!, {r10-r11}		/* copy to   target address [r0] */
+	cmp	r1, r2			/* until source end address [r2] */
 	blo	copy_loop
 
 	/*
 	 * fix .rel.dyn relocations
 	 */
-	ldr	r2, =__rel_dyn_start	/* r2 <- SRC &__rel_dyn_start */
-	ldr	r3, =__rel_dyn_end	/* r3 <- SRC &__rel_dyn_end */
+	ldr	r1, _rel_dyn_start_ofs
+	add	r2, r1, r3		/* r2 <- Run &__rel_dyn_start */
+	ldr	r1, _rel_dyn_end_ofs
+	add	r3, r1, r3		/* r3 <- Run &__rel_dyn_end */
 fixloop:
 	ldmia	r2!, {r0-r1}		/* (r0,r1) <- (SRC location,fixup) */
 	and	r1, r1, #0xff
@@ -129,3 +135,12 @@ relocate_done:
 #endif
 
 ENDPROC(relocate_code)
+
+_image_copy_start_ofs:
+	.word __image_copy_start - relocate_code
+_image_copy_end_ofs:
+	.word __image_copy_end - relocate_code
+_rel_dyn_start_ofs:
+	.word __rel_dyn_start - relocate_code
+_rel_dyn_end_ofs:
+	.word __rel_dyn_end - relocate_code
--
cgit v1.1
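A note on the fixup loop in start.S above: each .rel.dyn entry is a pair of 32-bit words, a link-time location and relocation info whose low byte encodes the type; type 23 is R_ARM_RELATIVE. The following C sketch models what the assembly does (illustrative only; the struct and function names are hypothetical, and U-Boot does this in assembly because no C environment exists at this point in the boot):

    #include <stdint.h>

    #define R_ARM_RELATIVE 23

    struct rel_entry {
    	uint32_t offset;	/* link-time address of the word to patch */
    	uint32_t info;		/* low byte holds the relocation type */
    };

    /* delta is the runtime-vs-link offset computed from the reset label */
    static void fix_rel_dyn(struct rel_entry *rel, struct rel_entry *end,
    			    uint32_t delta)
    {
    	for (; rel < end; rel++) {
    		if ((rel->info & 0xff) != R_ARM_RELATIVE)
    			continue;
    		uint32_t *loc = (uint32_t *)(rel->offset + delta);
    		*loc += delta;		/* patch the pointer stored there */
    		rel->offset += delta;	/* write back the runtime location,
    					   like str r0, [r2] above */
    	}
    }

Writing the patched location back into the table mirrors the str r0, [r2] in the assembly, presumably so that the later relocation pass in relocate.S operates on runtime addresses.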
From 1eb006249e2fe84e889a89154d71a9cb0093eed8 Mon Sep 17 00:00:00 2001
From: Stephan Gerhold
Date: Tue, 3 Aug 2021 12:12:37 +0200
Subject: arm: mach-snapdragon: misc: Initialize eMMC if necessary

At the moment U-Boot produces an empty MAC address (02:00:00:00:00:00)
if the eMMC is not used by anything in U-Boot (e.g. with
CONFIG_ENV_IS_NOWHERE=y instead of having the environment on eMMC).
This happens because then nothing actually initializes the eMMC and
reads the "cid" that is later accessed.

To fix this, call mmc_init() to ensure the eMMC is initialized.
There is no functional difference if the eMMC is already initialized,
since then mmc_init() will just return without doing anything.

Reviewed-by: Ramon Fried
Signed-off-by: Stephan Gerhold
---
 arch/arm/mach-snapdragon/misc.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/mach-snapdragon/misc.c b/arch/arm/mach-snapdragon/misc.c
index 985625a..fbd5f4d 100644
--- a/arch/arm/mach-snapdragon/misc.c
+++ b/arch/arm/mach-snapdragon/misc.c
@@ -33,6 +33,9 @@ u32 msm_board_serial(void)
 	if (!mmc_dev)
 		return 0;
 
+	if (mmc_init(mmc_dev))
+		return 0;
+
 	return UNSTUFF_BITS(mmc_dev->cid, 16, 32);
 }
--
cgit v1.1

From 15dd9412027e06f8ce0046e7d8de84489a7f34cb Mon Sep 17 00:00:00 2001
From: Stephan Gerhold
Date: Tue, 3 Aug 2021 12:12:38 +0200
Subject: arm: mach-snapdragon: misc: Simplify msm_generate_mac_addr()

The logic in msm_generate_mac_addr() was originally taken from the LK
bootloader, where the serial number is a string and must be parsed
first. However, in U-Boot msm_board_serial() returns a u32, and
msm_generate_mac_addr() has quite complicated code that will first
print it as a hex string and then immediately parse it again.

What this function actually does at the end is put the serial number,
encoded as big endian (the order used for the hex string), into the
u8 *mac. Use put_unaligned_be32() to do that with bit shifts instead
of going through the string format. This should be slightly more
efficient and cleaner but does not result in any functional
difference.

Cc: Ramon Fried
Signed-off-by: Stephan Gerhold
Reviewed-by: Ramon Fried
---
 arch/arm/mach-snapdragon/misc.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/mach-snapdragon/misc.c b/arch/arm/mach-snapdragon/misc.c
index fbd5f4d..7d452f4 100644
--- a/arch/arm/mach-snapdragon/misc.c
+++ b/arch/arm/mach-snapdragon/misc.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <asm/unaligned.h>
 
 /* UNSTUFF_BITS macro taken from Linux Kernel: drivers/mmc/core/sd.c */
 #define UNSTUFF_BITS(resp, start, size) \
@@ -41,16 +42,14 @@ u32 msm_board_serial(void)
 
 void msm_generate_mac_addr(u8 *mac)
 {
-	int i;
-	char sn[9];
-
-	snprintf(sn, 9, "%08x", msm_board_serial());
-
-	/* fill in the mac with serialno, use locally adminstrated pool */
+	/* use locally administered pool */
 	mac[0] = 0x02;
-	mac[1] = 00;
-	for (i = 3; i >= 0; i--) {
-		mac[i + 2] = hextoul(&sn[2 * i], NULL);
-		sn[2 * i] = 0;
-	}
+	mac[1] = 0x00;
+
+	/*
+	 * Put the 32-bit serial number in the last 32 bits of the MAC address.
+	 * Use big endian order so it is consistent with the serial number
+	 * written as a hexadecimal string, e.g. 0x1234abcd -> 02:00:12:34:ab:cd
+	 */
+	put_unaligned_be32(msm_board_serial(), &mac[2]);
 }
--
cgit v1.1
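The effect of the new encoding is easy to check in isolation. A minimal, self-contained sketch (illustrative only; plain byte shifts stand in for U-Boot's put_unaligned_be32(), which additionally handles unaligned stores portably):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of msm_generate_mac_addr(): 0x1234abcd -> 02:00:12:34:ab:cd */
    static void generate_mac(uint8_t mac[6], uint32_t serial)
    {
    	mac[0] = 0x02;		/* locally administered address */
    	mac[1] = 0x00;
    	mac[2] = serial >> 24;	/* big-endian byte order, matching the
    				   old hex-string parsing */
    	mac[3] = serial >> 16;
    	mac[4] = serial >> 8;
    	mac[5] = serial;
    }

    int main(void)
    {
    	uint8_t mac[6];

    	generate_mac(mac, 0x1234abcd);
    	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
    	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    	return 0;	/* prints 02:00:12:34:ab:cd */
    }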
From 53b40e8d54fcdb834e10e6538084517524b8401b Mon Sep 17 00:00:00 2001
From: Peter Hoyes
Date: Thu, 19 Aug 2021 16:53:09 +0100
Subject: armv8: Disable pointer authentication traps for EL1

The use of ARMv8.3 pointer authentication (PAuth) is governed by
fields in HCR_EL2, which trigger a 'trap to EL2' if not enabled. The
reset value of these fields is 'architecturally unknown', so we must
ensure that the fields are enabled (to disable the traps) if we are
entering the kernel at EL1. The APK field disables PAuth key register
traps and the API field disables PAuth instruction traps.

Add code to disable the traps in armv8_switch_to_el1_m. Prior to
doing so, it checks fields in the ID_AA64ISAR1_EL1 register to ensure
pointer authentication is supported by the hardware.

The runtime checks require a second temporary register, so add this
to the EL1 transition macro signature and update the 2 call sites.

Signed-off-by: Peter Hoyes
---
 arch/arm/cpu/armv8/fsl-layerscape/spintable.S |  2 +-
 arch/arm/cpu/armv8/transition.S               |  2 +-
 arch/arm/include/asm/macro.h                  | 11 +++++++++--
 arch/arm/include/asm/system.h                 | 15 +++++++++++++++
 4 files changed, 26 insertions(+), 4 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/cpu/armv8/fsl-layerscape/spintable.S b/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
index 363ded0..d6bd188 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
+++ b/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
@@ -93,7 +93,7 @@ __secondary_boot_func:
 4:
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
 	switch_el x7, _dead_loop, 0f, _dead_loop
-0:	armv8_switch_to_el1_m x4, x6, x7
+0:	armv8_switch_to_el1_m x4, x6, x7, x9
 #else
 	switch_el x7, 0f, _dead_loop, _dead_loop
 0:	armv8_switch_to_el2_m x4, x6, x7
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
index a31af4f..9dbdff3 100644
--- a/arch/arm/cpu/armv8/transition.S
+++ b/arch/arm/cpu/armv8/transition.S
@@ -40,7 +40,7 @@ ENTRY(armv8_switch_to_el1)
 	 * now, jump to the address saved in x4.
 	 */
 	br	x4
-1:	armv8_switch_to_el1_m x4, x5, x6
+1:	armv8_switch_to_el1_m x4, x5, x6, x7
 ENDPROC(armv8_switch_to_el1)
 .popsection
diff --git a/arch/arm/include/asm/macro.h b/arch/arm/include/asm/macro.h
index 485310d..e1eefc2 100644
--- a/arch/arm/include/asm/macro.h
+++ b/arch/arm/include/asm/macro.h
@@ -256,7 +256,7 @@ lr	.req	x30
  * For loading 64-bit OS, x0 is physical address to the FDT blob.
  * They will be passed to the guest.
  */
-.macro armv8_switch_to_el1_m, ep, flag, tmp
+.macro armv8_switch_to_el1_m, ep, flag, tmp, tmp2
 	/* Initialize Generic Timers */
 	mrs	\tmp, cnthctl_el2
 	/* Enable EL1 access to timers */
@@ -306,7 +306,14 @@ lr	.req	x30
 	b.eq	1f
 
 	/* Initialize HCR_EL2 */
-	ldr	\tmp, =(HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS)
+	/* Only disable PAuth traps if PAuth is supported */
+	mrs	\tmp, id_aa64isar1_el1
+	ldr	\tmp2, =(ID_AA64ISAR1_EL1_GPI | ID_AA64ISAR1_EL1_GPA | \
+		      ID_AA64ISAR1_EL1_API | ID_AA64ISAR1_EL1_APA)
+	tst	\tmp, \tmp2
+	mov	\tmp2, #(HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS)
+	orr	\tmp, \tmp2, #(HCR_EL2_APK | HCR_EL2_API)
+	csel	\tmp, \tmp2, \tmp, eq
 	msr	hcr_el2, \tmp
 
 	/* Return to the EL1_SP1 mode from EL2 */
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 8b3a54e..77aa189 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -75,11 +75,26 @@
 /*
  * HCR_EL2 bits definitions
  */
+#define HCR_EL2_API		(1 << 41) /* Trap pointer authentication
+					     instructions */
+#define HCR_EL2_APK		(1 << 40) /* Trap pointer authentication
+					     key access */
 #define HCR_EL2_RW_AARCH64	(1 << 31) /* EL1 is AArch64 */
 #define HCR_EL2_RW_AARCH32	(0 << 31) /* Lower levels are AArch32 */
 #define HCR_EL2_HCD_DIS		(1 << 29) /* Hypervisor Call disabled */
 
 /*
+ * ID_AA64ISAR1_EL1 bits definitions
+ */
+#define ID_AA64ISAR1_EL1_GPI	(0xF << 28) /* Implementation-defined generic
+					       code auth algorithm */
+#define ID_AA64ISAR1_EL1_GPA	(0xF << 24) /* QARMA generic code auth
+					       algorithm */
+#define ID_AA64ISAR1_EL1_API	(0xF << 8)  /* Implementation-defined address
+					       auth algorithm */
+#define ID_AA64ISAR1_EL1_APA	(0xF << 4)  /* QARMA address auth algorithm */
+
+/*
  * ID_AA64PFR0_EL1 bits definitions
  */
 #define ID_AA64PFR0_EL1_EL3	(0xF << 12) /* EL3 implemented */
--
cgit v1.1
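The tst/mov/orr/csel sequence in the macro computes an HCR_EL2 value without branching. In C, the logic is roughly the following (a sketch only; the helper name is hypothetical, and the shifts use UL so the 40/41-bit positions stay well-defined in C):

    #include <stdint.h>

    #define ID_AA64ISAR1_EL1_GPI	(0xFUL << 28)
    #define ID_AA64ISAR1_EL1_GPA	(0xFUL << 24)
    #define ID_AA64ISAR1_EL1_API	(0xFUL << 8)
    #define ID_AA64ISAR1_EL1_APA	(0xFUL << 4)

    #define HCR_EL2_API		(1UL << 41)
    #define HCR_EL2_APK		(1UL << 40)
    #define HCR_EL2_RW_AARCH64	(1UL << 31)
    #define HCR_EL2_HCD_DIS		(1UL << 29)

    /* Choose the HCR_EL2 value for entering EL1, given ID_AA64ISAR1_EL1 */
    static uint64_t hcr_for_el1_entry(uint64_t id_aa64isar1)
    {
    	const uint64_t pauth_fields = ID_AA64ISAR1_EL1_GPI |
    				      ID_AA64ISAR1_EL1_GPA |
    				      ID_AA64ISAR1_EL1_API |
    				      ID_AA64ISAR1_EL1_APA;
    	uint64_t hcr = HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS;

    	/*
    	 * Any non-zero field means some form of PAuth is implemented,
    	 * so the EL2 traps for it can safely be disabled.
    	 */
    	if (id_aa64isar1 & pauth_fields)
    		hcr |= HCR_EL2_APK | HCR_EL2_API;

    	return hcr;
    }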
From 37a757e227ccfc7d9eef82ab38f8500a832ea01b Mon Sep 17 00:00:00 2001
From: Peter Hoyes
Date: Thu, 19 Aug 2021 16:53:10 +0100
Subject: armv8: Ensure EL1&0 VMSA is enabled

On Armv8-R, the EL1&0 memory system architecture is configurable as a
VMSA or PMSA, and resets to an "architecturally unknown" value.

Add code to armv8_switch_to_el1_m which detects whether the MSA at
EL1&0 is configurable, using the id_aa64mmfr0_el1 register MSA fields.
If it is, we must ensure the VMSA is enabled so that a rich OS can
boot.

The MSA and MSA_FRAC fields are described in the Armv8-R architecture
profile supplement (section G1.3.7):
https://developer.arm.com/documentation/ddi0600/latest/

Signed-off-by: Peter Hoyes
---
 arch/arm/include/asm/macro.h  | 17 +++++++++++++++++
 arch/arm/include/asm/system.h | 24 ++++++++++++++++++++++++
 2 files changed, 41 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/macro.h b/arch/arm/include/asm/macro.h
index e1eefc2..ecd8221 100644
--- a/arch/arm/include/asm/macro.h
+++ b/arch/arm/include/asm/macro.h
@@ -316,6 +316,23 @@ lr	.req	x30
 	csel	\tmp, \tmp2, \tmp, eq
 	msr	hcr_el2, \tmp
 
+	/*
+	 * Detect whether the system has a configurable memory system
+	 * architecture at EL1&0
+	 */
+	mrs	\tmp, id_aa64mmfr0_el1
+	lsr	\tmp, \tmp, #48
+	and	\tmp, \tmp, #((ID_AA64MMFR0_EL1_MSA_MASK | \
+			ID_AA64MMFR0_EL1_MSA_FRAC_MASK) >> 48)
+	cmp	\tmp, #((ID_AA64MMFR0_EL1_MSA_USE_FRAC | \
+			ID_AA64MMFR0_EL1_MSA_FRAC_VMSA) >> 48)
+	bne	2f
+
+	/* Ensure the EL1&0 VMSA is enabled */
+	mov	\tmp, #(VTCR_EL2_MSA)
+	msr	vtcr_el2, \tmp
+2:
+
 	/* Return to the EL1_SP1 mode from EL2 */
 	ldr	\tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\
 		SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 77aa189..e4c11e8 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -84,6 +84,30 @@
 #define HCR_EL2_HCD_DIS		(1 << 29) /* Hypervisor Call disabled */
 
 /*
+ * VTCR_EL2 bits definitions
+ */
+#define VTCR_EL2_MSA		(1 << 31) /* EL1&0 memory architecture */
+
+/*
+ * ID_AA64MMFR0_EL1 bits definitions
+ */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_MASK	(0xFUL << 52) /* Memory system
+							 architecture frac */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_VMSA	(0x2UL << 52) /* EL1&0 supports
+							 VMSA */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_PMSA	(0x1UL << 52) /* EL1&0 only
+							 supports PMSA */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA (0x0UL << 52) /* No PMSA
+							   support */
+#define ID_AA64MMFR0_EL1_MSA_MASK	(0xFUL << 48) /* Memory system
+							 architecture */
+#define ID_AA64MMFR0_EL1_MSA_USE_FRAC	(0xFUL << 48) /* Use MSA_FRAC */
+#define ID_AA64MMFR0_EL1_MSA_VMSA	(0x0UL << 48) /* Memory system
+							 architecture
+							 is VMSA */
+
+/*
  * ID_AA64ISAR1_EL1 bits definitions
  */
 #define ID_AA64ISAR1_EL1_GPI	(0xF << 28) /* Implementation-defined generic
--
cgit v1.1
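The lsr/and/cmp sequence above compares both 4-bit fields at once: after the shift, the mask keeps bits [7:0], and the compared constant is 0x2F (MSA == 0xF, MSA_frac == 0x2). Extracted field by field, the check is equivalent to this C sketch (illustrative; field positions follow the ID_AA64MMFR0_EL1 definitions added above, and the function name is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True when EL1&0 has a configurable MSA whose fractional field
     * reports VMSA support (MSA == 0xF selects MSA_frac; MSA_frac == 0x2
     * means the EL1&0 VMSA is supported).
     */
    static bool el10_msa_configurable_vmsa(uint64_t id_aa64mmfr0)
    {
    	unsigned int msa      = (id_aa64mmfr0 >> 48) & 0xF;
    	unsigned int msa_frac = (id_aa64mmfr0 >> 52) & 0xF;

    	return msa == 0xF && msa_frac == 0x2;
    }

Only in that case does the macro write VTCR_EL2.MSA to select the VMSA; for all other field combinations the bne 2f skips the write.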
From 2f5b7b74903f747581aa4d63f492da7cc77377bf Mon Sep 17 00:00:00 2001
From: Peter Hoyes
Date: Thu, 19 Aug 2021 16:53:11 +0100
Subject: armv8: Add ARMv8 MPU configuration logic

Armv8r64 is the first Armv8 platform that only has a PMSA at the
current exception level. The architecture supplement for Armv8r64
describes new fields in ID_AA64MMFR0_EL1 which can be used to detect
whether a VMSA or PMSA is present. These fields are RES0 on Armv8a.

Add logic to read these fields and, to protect the memory used by
U-Boot, initialize the MPU instead of the MMU during init, then clear
the MPU regions before transitioning to the next stage.

Provide a default (blank) MPU memory map, which can be overridden by
board configurations.

Signed-off-by: Peter Hoyes
---
 arch/arm/cpu/armv8/cache_v8.c    | 96 ++++++++++++++++++++++++++++++++++++++--
 arch/arm/include/asm/armv8/mpu.h | 61 +++++++++++++++++++++++++
 2 files changed, 154 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm/include/asm/armv8/mpu.h

(limited to 'arch/arm')

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 3de18c7..4662567 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <asm/armv8/mpu.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -365,6 +366,86 @@ __weak u64 get_page_table_size(void)
 	return size;
 }
 
+static void mpu_clear_regions(void)
+{
+	int i;
+
+	for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+		setup_el2_mpu_region(i, 0, 0);
+	}
+}
+
+static struct mpu_region default_mpu_mem_map[] = {{0,}};
+__weak struct mpu_region *mpu_mem_map = default_mpu_mem_map;
+
+static void mpu_setup(void)
+{
+	int i;
+
+	if (current_el() != 2) {
+		panic("MPU configuration is only supported at EL2");
+	}
+
+	set_sctlr(get_sctlr() & ~(CR_M | CR_WXN));
+
+	asm volatile("msr MAIR_EL2, %0" : : "r" (MEMORY_ATTRIBUTES));
+
+	for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+		setup_el2_mpu_region(i,
+				     PRBAR_ADDRESS(mpu_mem_map[i].start)
+				     | PRBAR_OUTER_SH | PRBAR_AP_RW_ANY,
+				     PRLAR_ADDRESS(mpu_mem_map[i].end)
+				     | mpu_mem_map[i].attrs | PRLAR_EN_BIT
+				     );
+	}
+
+	set_sctlr(get_sctlr() | CR_M);
+}
+
+static bool el_has_mmu(void)
+{
+	uint64_t id_aa64mmfr0;
+	asm volatile("mrs %0, id_aa64mmfr0_el1"
+			: "=r" (id_aa64mmfr0) : : "cc");
+	uint64_t msa = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_MASK;
+	uint64_t msa_frac = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_FRAC_MASK;
+
+	switch (msa) {
+	case ID_AA64MMFR0_EL1_MSA_VMSA:
+		/*
+		 * VMSA supported in all translation regimes.
+		 * No support for PMSA.
+		 */
+		return true;
+	case ID_AA64MMFR0_EL1_MSA_USE_FRAC:
+		/* See MSA_frac for the supported MSAs. */
+		switch (msa_frac) {
+		case ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA:
+			/*
+			 * PMSA not supported in any translation
+			 * regime.
+			 */
+			return true;
+		case ID_AA64MMFR0_EL1_MSA_FRAC_VMSA:
+			/*
+			 * PMSA supported in all translation
+			 * regimes. No support for VMSA.
+			 */
+		case ID_AA64MMFR0_EL1_MSA_FRAC_PMSA:
+			/*
+			 * PMSA supported in all translation
+			 * regimes.
+			 */
+			return false;
+		default:
+			panic("Unsupported id_aa64mmfr0_el1 " \
+			      "MSA_frac value");
+		}
+	default:
+		panic("Unsupported id_aa64mmfr0_el1 MSA value");
+	}
+}
+
 void setup_pgtables(void)
 {
 	int i;
@@ -479,8 +560,13 @@ void dcache_enable(void)
 	/* The data cache is not active unless the mmu is enabled */
 	if (!(get_sctlr() & CR_M)) {
 		invalidate_dcache_all();
-		__asm_invalidate_tlb_all();
-		mmu_setup();
+
+		if (el_has_mmu()) {
+			__asm_invalidate_tlb_all();
+			mmu_setup();
+		} else {
+			mpu_setup();
+		}
 	}
 
 	set_sctlr(get_sctlr() | CR_C);
@@ -499,7 +585,11 @@ void dcache_disable(void)
 	set_sctlr(sctlr & ~(CR_C|CR_M));
 
 	flush_dcache_all();
-	__asm_invalidate_tlb_all();
+
+	if (el_has_mmu())
+		__asm_invalidate_tlb_all();
+	else
+		mpu_clear_regions();
 }
 
 int dcache_status(void)
diff --git a/arch/arm/include/asm/armv8/mpu.h b/arch/arm/include/asm/armv8/mpu.h
new file mode 100644
index 0000000..c6c8828
--- /dev/null
+++ b/arch/arm/include/asm/armv8/mpu.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0+
+ *
+ * (C) Copyright 2021 Arm Limited
+ */
+
+#ifndef _ASM_ARMV8_MPU_H_
+#define _ASM_ARMV8_MPU_H_
+
+#include
+#include
+#include
+
+#define PRSELR_EL2	S3_4_c6_c2_1
+#define PRBAR_EL2	S3_4_c6_c8_0
+#define PRLAR_EL2	S3_4_c6_c8_1
+#define MPUIR_EL2	S3_4_c0_c0_4
+
+#define PRBAR_ADDRESS(addr)	((addr) & ~(0x3fULL))
+
+/* Access permissions */
+#define PRBAR_AP(val)	(((val) & 0x3) << 2)
+#define PRBAR_AP_RW_HYP	PRBAR_AP(0x0)
+#define PRBAR_AP_RW_ANY	PRBAR_AP(0x1)
+#define PRBAR_AP_RO_HYP	PRBAR_AP(0x2)
+#define PRBAR_AP_RO_ANY	PRBAR_AP(0x3)
+
+/* Shareability */
+#define PRBAR_SH(val)	(((val) & 0x3) << 4)
+#define PRBAR_NON_SH	PRBAR_SH(0x0)
+#define PRBAR_OUTER_SH	PRBAR_SH(0x2)
+#define PRBAR_INNER_SH	PRBAR_SH(0x3)
+
+/* Memory attribute (MAIR idx) */
+#define PRLAR_ATTRIDX(val)	(((val) & 0x7) << 1)
+#define PRLAR_EN_BIT		(0x1)
+#define PRLAR_ADDRESS(addr)	((addr) & ~(0x3fULL))
+
+#ifndef __ASSEMBLY__
+
+static inline void setup_el2_mpu_region(uint8_t region, uint64_t base, uint64_t limit)
+{
+	asm volatile("msr " __stringify(PRSELR_EL2) ", %0" : : "r" (region));
+	isb();
+	asm volatile("msr " __stringify(PRBAR_EL2) ", %0" : : "r" (base));
+	asm volatile("msr " __stringify(PRLAR_EL2) ", %0" : : "r" (limit));
+	dsb();
+	isb();
+}
+
+#endif
+
+struct mpu_region {
+	u64 start;
+	u64 end;
+	u64 attrs;
+};
+
+extern struct mpu_region *mpu_mem_map;
+
+#endif /* _ASM_ARMV8_MPU_H_ */
--
cgit v1.1
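For board code, the contract established by this patch is: define a non-weak mpu_mem_map pointing at an array of mpu_region entries terminated by an all-zero sentinel (mpu_setup() and mpu_clear_regions() stop at the first entry whose end and attrs are both zero). A hypothetical board-side override might look like this (illustrative only; the region addresses and the MAIR attribute index are assumptions, not values taken from the patch):

    #include <asm/armv8/mpu.h>

    /* One normal-memory region covering the board's DRAM window */
    static struct mpu_region board_mpu_mem_map[] = {
    	{
    		.start = 0x00000000UL,
    		.end   = 0x7fffffffUL,		/* inclusive limit */
    		.attrs = PRLAR_ATTRIDX(0),	/* assumed MAIR index 0 */
    	},
    	{ 0, 0, 0 },				/* sentinel */
    };

    /* Overrides the __weak pointer defined in cache_v8.c */
    struct mpu_region *mpu_mem_map = board_mpu_mem_map;

mpu_setup() then programs one PRBAR/PRLAR pair per entry, applying PRBAR_OUTER_SH | PRBAR_AP_RW_ANY to every region.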
From b53bbca63bf42fe6d5174122e29e371691fe6dad Mon Sep 17 00:00:00 2001
From: Peter Hoyes
Date: Thu, 19 Aug 2021 16:53:12 +0100
Subject: vexpress64: Add BASER_FVP vexpress board variant

The BASER_FVP board variant is implemented on top of the BASE_FVP
board config (which, in turn, is based on the Juno Versatile Express
board config). They all share a similar memory map - for BASER_FVP the
map is inverted from the BASE_FVP
(https://developer.arm.com/documentation/100964/1114/Base-Platform/Base---memory/BaseR-Platform-memory-map)

 * Create new TARGET_VEXPRESS64_BASER_FVP target, which uses the same
   board config as BASE_FVP and JUNO
 * Adapt the vexpress_aemv8a.h header file to support BASER_FVP (and
   rename it to vexpress_aemv8.h)
 * Enable the config to switch to EL1 for the BASER_FVP
 * Create vexpress_aemv8r defconfig
 * Provide an MPU memory map for the BASER_FVP

For now, only single core boot is supported.

Signed-off-by: Peter Hoyes
[trini: Add MAINTAINERS, move BOOTCOMMAND to defconfig]
Signed-off-by: Tom Rini
---
 arch/arm/Kconfig | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 50efb5e..47f0945 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1185,6 +1185,13 @@ config TARGET_VEXPRESS64_BASE_FVP
 	select PL01X_SERIAL
 	select SEMIHOSTING
 
+config TARGET_VEXPRESS64_BASER_FVP
+	bool "Support Versatile Express ARMv8r64 FVP BASE model"
+	select ARM64
+	select DM
+	select DM_SERIAL
+	select PL01X_SERIAL
+
 config TARGET_VEXPRESS64_JUNO
 	bool "Support Versatile Express Juno Development Platform"
 	select ARM64
--
cgit v1.1
From 30e5a449e8c7739965757879bb17efbd3a8f0ee2 Mon Sep 17 00:00:00 2001
From: Peter Hoyes
Date: Thu, 19 Aug 2021 16:53:13 +0100
Subject: arm: Use armv8_switch_to_el1 env to switch to EL1

Use the environment variable armv8_switch_to_el1 to determine whether
to switch to EL1 at runtime. This is an alternative to the
CONFIG_ARMV8_SWITCH_TO_EL1 compile-time option. The environment
variable will be ineffective if the ARMV8_MULTIENTRY config is used.

This is required by the Armv8r64 architecture, which must be able to
boot at S-EL1 for Linux but may need to boot at other ELs for other
systems.

Signed-off-by: Peter Hoyes
---
 arch/arm/lib/bootm.c | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
index f60ee3a..ea9bfe7 100644
--- a/arch/arm/lib/bootm.c
+++ b/arch/arm/lib/bootm.c
@@ -317,7 +317,6 @@ __weak void update_os_arch_secondary_cores(uint8_t os_arch)
 {
 }
 
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
 static void switch_to_el1(void)
 {
 	if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
@@ -332,7 +331,6 @@ static void switch_to_el1(void)
 			    ES_TO_AARCH64);
 }
 #endif
-#endif
 
 /* Subcommand: GO */
 static void boot_jump_linux(bootm_headers_t *images, int flag)
@@ -359,21 +357,33 @@ static void boot_jump_linux(bootm_headers_t *images, int flag)
 
 	update_os_arch_secondary_cores(images->os.arch);
 
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-	armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
-			    (u64)switch_to_el1, ES_TO_AARCH64);
+#ifdef CONFIG_ARMV8_MULTIENTRY
+	int armv8_switch_to_el1 = -1;
 #else
-	if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
-	    (images->os.arch == IH_ARCH_ARM))
-		armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number,
-				    (u64)images->ft_addr, 0,
-				    (u64)images->ep,
-				    ES_TO_AARCH32);
-	else
-		armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
-				    images->ep,
-				    ES_TO_AARCH64);
+	int armv8_switch_to_el1 = env_get_yesno("armv8_switch_to_el1");
 #endif
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+	if (armv8_switch_to_el1 == -1) {
+		armv8_switch_to_el1 = 1;
+	}
+#endif
+	if (armv8_switch_to_el1 == 1) {
+		armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
+				    (u64)switch_to_el1, ES_TO_AARCH64);
+	} else {
+		if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
+		    (images->os.arch == IH_ARCH_ARM))
+			armv8_switch_to_el2(0,
+					    (u64)gd->bd->bi_arch_number,
+					    (u64)images->ft_addr, 0,
+					    (u64)images->ep,
+					    ES_TO_AARCH32);
+		else
+			armv8_switch_to_el2((u64)images->ft_addr,
+					    0, 0, 0,
+					    images->ep,
+					    ES_TO_AARCH64);
+	}
 }
 #else
 	unsigned long machid = gd->bd->bi_arch_number;
--
cgit v1.1
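Taken together, the preprocessor blocks above reduce to the following decision, shown here as a C sketch (a simplified model, not the patch's code verbatim; env_get_yesno() returns 1 for a "y…" value, 0 for "n…", and -1 when the variable is unset). On a build without ARMV8_MULTIENTRY, running setenv armv8_switch_to_el1 y at the U-Boot prompt selects the EL1 path:

    /* Simplified model of the EL selection in boot_jump_linux() */
    static bool should_switch_to_el1(void)
    {
    #ifdef CONFIG_ARMV8_MULTIENTRY
    	int ret = -1;	/* env override is ineffective for multi-entry */
    #else
    	int ret = env_get_yesno("armv8_switch_to_el1");
    #endif
    #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
    	if (ret == -1)
    		ret = 1;	/* compile-time default when unset */
    #endif
    	return ret == 1;
    }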
From f43312c974eaeb7301cb2638aa4ab05ed7ca4c44 Mon Sep 17 00:00:00 2001
From: Samuel Holland
Date: Sat, 21 Aug 2021 19:54:02 -0500
Subject: ARM: Prevent the compiler from using NEON registers

For ARMv8-A, NEON is standard, so the compiler can use it even when no
special target flags are provided. For example, it can use stores from
NEON registers to zero-initialize large structures. GCC 11 decides to
do this inside the DRAM init code for the Allwinner H6.

However, GCC 11 has a bug where it generates misaligned NEON register
stores even with -mstrict-align. Since the MMU is not enabled this
early in SPL, the misaligned store causes an exception and breaks
booting.

Work around this issue by restricting the compiler to using GPRs only,
not vector registers. This prevents any future surprises relating to
NEON use as well.

Signed-off-by: Samuel Holland
Acked-by: Andre Przywara
---
 arch/arm/config.mk | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm')

diff --git a/arch/arm/config.mk b/arch/arm/config.mk
index b684d8b..b107b1a 100644
--- a/arch/arm/config.mk
+++ b/arch/arm/config.mk
@@ -25,6 +25,7 @@ endif
 
 PLATFORM_RELFLAGS += -fno-common -ffixed-r9
 PLATFORM_RELFLAGS += $(call cc-option, -msoft-float) \
+		     $(call cc-option,-mgeneral-regs-only) \
		     $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,))
 
 # LLVM support
--
cgit v1.1
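To see the class of code generation being disabled, consider a large zero-initialization (an illustrative, hypothetical example; the exact instructions emitted depend on the compiler version and optimization level):

    /*
     * With NEON available, GCC at -O2 may zero this struct with stores
     * from q registers; with -mgeneral-regs-only it must use only
     * general-purpose registers instead.
     */
    struct dram_config {
    	unsigned int rows;
    	unsigned int cols;
    	unsigned int pad[30];	/* large enough to tempt vectorization */
    };

    void init_config(struct dram_config *cfg)
    {
    	/* the compound literal zero-fills the rest of the struct */
    	*cfg = (struct dram_config){ .rows = 17, .cols = 10 };
    }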
From 7befc8490a008c332633e693f2adfab38d8d50f2 Mon Sep 17 00:00:00 2001
From: Oleksandr Suvorov
Date: Tue, 24 Aug 2021 00:55:39 +0300
Subject: psci: fix double declaration

The prototype of psci_features() is declared twice. Remove the extra
declaration.

Fixes: e21e3ffdd1 ("psci: Fix warnings when compiling with W=1")
Reported-by: Michael Scott
Signed-off-by: Oleksandr Suvorov
---
 arch/arm/include/asm/system.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index e4c11e8..1ec6237 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -590,7 +590,6 @@ s32 psci_affinity_info(u32 function_id, u32 target_affinity,
 u32 psci_migrate_info_type(void);
 void psci_system_off(void);
 void psci_system_reset(void);
-s32 psci_features(u32 function_id, u32 psci_fid);
 #endif
 
 #endif /* __ASSEMBLY__ */
--
cgit v1.1