author    Tom Rini <trini@konsulko.com>    2021-09-02 18:39:28 -0400
committer Tom Rini <trini@konsulko.com>    2021-09-02 18:39:28 -0400
commit    b35be5ed42c8453ac95432b6fbc0d42b1e91c758 (patch)
tree      c3f4646963db0c7010ca32b024b6947f4f144d9f /arch/arm
parent    4bb7de1b3c09ada52ec42249221f745a6cbd3360 (diff)
parent    6628813f9d400c49da4926f01833063a30151cdb (diff)
Merge branch '2021-09-02-assorted-platform-and-bugfixes' into next
- Add position independent execution support for ARMv7
- Snapdragon, synquacer, vexpress64 fixes / improvements
- Prevent NEON register use on ARMv8
- Other assorted fixes
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                               16
-rw-r--r--  arch/arm/config.mk                              1
-rw-r--r--  arch/arm/cpu/armv7/start.S                     43
-rw-r--r--  arch/arm/cpu/armv8/cache_v8.c                  96
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/spintable.S   2
-rw-r--r--  arch/arm/cpu/armv8/transition.S                 2
-rw-r--r--  arch/arm/include/asm/armv8/mpu.h               61
-rw-r--r--  arch/arm/include/asm/macro.h                   28
-rw-r--r--  arch/arm/include/asm/system.h                  40
-rw-r--r--  arch/arm/lib/bootm.c                           40
-rw-r--r--  arch/arm/lib/crt0.S                            11
-rw-r--r--  arch/arm/lib/relocate.S                        35
-rw-r--r--  arch/arm/mach-snapdragon/misc.c                24
13 files changed, 351 insertions, 48 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c815ad4..47f0945 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,9 +9,9 @@ config ARM64
select PHYS_64BIT
select SYS_CACHE_SHIFT_6
-if ARM64
config POSITION_INDEPENDENT
bool "Generate position-independent pre-relocation code"
+ depends on ARM64 || CPU_V7A
help
U-Boot expects to be linked to a specific hard-coded address, and to
be loaded to and run from that address. This option lifts that
@@ -22,6 +22,7 @@ config POSITION_INDEPENDENT
config INIT_SP_RELATIVE
bool "Specify the early stack pointer relative to the .bss section"
+ depends on ARM64
default n if ARCH_QEMU
default y if POSITION_INDEPENDENT
help
@@ -37,6 +38,7 @@ config INIT_SP_RELATIVE
config SYS_INIT_SP_BSS_OFFSET
int "Early stack offset from the .bss base address"
+ depends on ARM64
depends on INIT_SP_RELATIVE
default 524288
help
@@ -46,6 +48,7 @@ config SYS_INIT_SP_BSS_OFFSET
do not overlap any appended DTB.
config LINUX_KERNEL_IMAGE_HEADER
+ depends on ARM64
bool
help
Place a Linux kernel image header at the start of the U-Boot binary.
@@ -54,14 +57,12 @@ config LINUX_KERNEL_IMAGE_HEADER
image header reports the amount of memory (BSS and similar) that
U-Boot needs to use, but which isn't part of the binary.
-if LINUX_KERNEL_IMAGE_HEADER
config LNX_KRNL_IMG_TEXT_OFFSET_BASE
+ depends on LINUX_KERNEL_IMAGE_HEADER
hex
help
The value subtracted from CONFIG_SYS_TEXT_BASE to calculate the
TEXT_OFFSET value written to the Linux kernel image header.
-endif
-endif
config GICV2
bool
@@ -1184,6 +1185,13 @@ config TARGET_VEXPRESS64_BASE_FVP
select PL01X_SERIAL
select SEMIHOSTING
+config TARGET_VEXPRESS64_BASER_FVP
+ bool "Support Versatile Express ARMv8r64 FVP BASE model"
+ select ARM64
+ select DM
+ select DM_SERIAL
+ select PL01X_SERIAL
+
config TARGET_VEXPRESS64_JUNO
bool "Support Versatile Express Juno Development Platform"
select ARM64
diff --git a/arch/arm/config.mk b/arch/arm/config.mk
index b684d8b..b107b1a 100644
--- a/arch/arm/config.mk
+++ b/arch/arm/config.mk
@@ -25,6 +25,7 @@ endif
PLATFORM_RELFLAGS += -fno-common -ffixed-r9
PLATFORM_RELFLAGS += $(call cc-option, -msoft-float) \
+ $(call cc-option,-mgeneral-regs-only) \
$(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,))
# LLVM support
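
The new -mgeneral-regs-only flag is what implements the "prevent NEON register use on ARMv8" item from the merge summary. A hedged illustration of the failure mode it prevents (the struct and function names here are invented, not part of the patch):

/*
 * Without -mgeneral-regs-only, the compiler may turn a copy like this
 * into SIMD load/store pairs. Early in boot the FP/SIMD trap controls
 * (CPACR_EL1/CPTR_EL2) are not yet set up, so touching NEON registers
 * faults. The cc-option wrapper keeps the flag harmless on toolchains
 * that do not know it.
 */
struct boot_block {
	unsigned long w[16];
};

void copy_boot_block(struct boot_block *dst, const struct boot_block *src)
{
	*dst = *src;	/* a likely candidate for q-register ldp/stp */
}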
diff --git a/arch/arm/cpu/armv7/start.S b/arch/arm/cpu/armv7/start.S
index 87329d2..698e15b 100644
--- a/arch/arm/cpu/armv7/start.S
+++ b/arch/arm/cpu/armv7/start.S
@@ -39,6 +39,42 @@ reset:
/* Allow the board to save important registers */
b save_boot_params
save_boot_params_ret:
+#ifdef CONFIG_POSITION_INDEPENDENT
+ /*
+ * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
+ * executed at a different address than it was linked at.
+ */
+pie_fixup:
+ adr r0, reset /* r0 <- Runtime value of reset label */
+ ldr r1, =reset /* r1 <- Linked value of reset label */
+ subs r4, r0, r1 /* r4 <- Runtime-vs-link offset */
+ beq pie_fixup_done
+
+ adr r0, pie_fixup
+ ldr r1, _rel_dyn_start_ofs
+ add r2, r0, r1 /* r2 <- Runtime &__rel_dyn_start */
+ ldr r1, _rel_dyn_end_ofs
+ add r3, r0, r1 /* r3 <- Runtime &__rel_dyn_end */
+
+pie_fix_loop:
+ ldr r0, [r2] /* r0 <- Link location */
+ ldr r1, [r2, #4] /* r1 <- fixup */
+ cmp r1, #23 /* R_ARM_RELATIVE fixup? */
+ bne pie_skip_reloc
+
+ /* relative fix: increase location by offset */
+ add r0, r4
+ ldr r1, [r0]
+ add r1, r4
+ str r1, [r0]
+ str r0, [r2]
+pie_skip_reloc:
+ add r2, #8 /* advance even for non-relative entries, else we loop forever */
+ cmp r2, r3
+ blo pie_fix_loop
+pie_fixup_done:
+#endif
+
#ifdef CONFIG_ARMV7_LPAE
/*
* check for Hypervisor support
@@ -340,3 +376,10 @@ ENTRY(cpu_init_crit)
b lowlevel_init @ go setup pll,mux,memory
ENDPROC(cpu_init_crit)
#endif
+
+#ifdef CONFIG_POSITION_INDEPENDENT
+_rel_dyn_start_ofs:
+ .word __rel_dyn_start - pie_fixup
+_rel_dyn_end_ofs:
+ .word __rel_dyn_end - pie_fixup
+#endif
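
For reference, a C paraphrase of the pie_fixup loop above; the entry layout is standard ELF32 REL and the function name is illustrative only:

#include <stdint.h>

#define R_ARM_RELATIVE 23	/* the type tested by "cmp r1, #23" */

struct elf32_rel {		/* one .rel.dyn entry */
	uint32_t r_offset;	/* link-time address of the word to patch */
	uint32_t r_info;	/* relocation type and symbol index */
};

/*
 * "offset" is the runtime minus link-time address of U-Boot (r4 above).
 * Each R_ARM_RELATIVE entry names a word holding a link-time pointer;
 * both the word's location and its contents are shifted by the offset,
 * and the entry is rewritten in place, as "str r0, [r2]" does.
 */
static void pie_fixup_c(struct elf32_rel *rel, struct elf32_rel *end,
			uint32_t offset)
{
	for (; rel < end; rel++) {
		if (rel->r_info != R_ARM_RELATIVE)
			continue;
		uint32_t *loc = (uint32_t *)(rel->r_offset + offset);
		*loc += offset;
		rel->r_offset += offset;
	}
}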
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 3de18c7..4662567 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -15,6 +15,7 @@
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
+#include <asm/armv8/mpu.h>
DECLARE_GLOBAL_DATA_PTR;
@@ -365,6 +366,86 @@ __weak u64 get_page_table_size(void)
return size;
}
+static struct mpu_region default_mpu_mem_map[] = {{0,}};
+__weak struct mpu_region *mpu_mem_map = default_mpu_mem_map;
+
+static void mpu_clear_regions(void)
+{
+ int i;
+
+ for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+ setup_el2_mpu_region(i, 0, 0);
+ }
+}
+
+static void mpu_setup(void)
+{
+ int i;
+
+ if (current_el() != 2) {
+ panic("MPU configuration is only supported at EL2");
+ }
+
+ set_sctlr(get_sctlr() & ~(CR_M | CR_WXN));
+
+ asm volatile("msr MAIR_EL2, %0" : : "r" MEMORY_ATTRIBUTES);
+
+ for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+ setup_el2_mpu_region(i,
+ PRBAR_ADDRESS(mpu_mem_map[i].start)
+ | PRBAR_OUTER_SH | PRBAR_AP_RW_ANY,
+ PRLAR_ADDRESS(mpu_mem_map[i].end)
+ | mpu_mem_map[i].attrs | PRLAR_EN_BIT
+ );
+ }
+
+ set_sctlr(get_sctlr() | CR_M);
+}
+
+static bool el_has_mmu(void)
+{
+ uint64_t id_aa64mmfr0;
+ asm volatile("mrs %0, id_aa64mmfr0_el1"
+ : "=r" (id_aa64mmfr0) : : "cc");
+ uint64_t msa = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_MASK;
+ uint64_t msa_frac = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_FRAC_MASK;
+
+ switch (msa) {
+ case ID_AA64MMFR0_EL1_MSA_VMSA:
+ /*
+ * VMSA supported in all translation regimes.
+ * No support for PMSA.
+ */
+ return true;
+ case ID_AA64MMFR0_EL1_MSA_USE_FRAC:
+ /* See MSA_frac for the supported MSAs. */
+ switch (msa_frac) {
+ case ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA:
+ /*
+ * PMSA not supported in any translation
+ * regime.
+ */
+ return true;
+ case ID_AA64MMFR0_EL1_MSA_FRAC_VMSA:
+ /*
+ * PMSA supported in all translation
+ * regimes. No support for VMSA.
+ */
+ case ID_AA64MMFR0_EL1_MSA_FRAC_PMSA:
+ /*
+ * PMSA supported in all translation
+ * regimes.
+ */
+ return false;
+ default:
+ panic("Unsupported id_aa64mmfr0_el1 " \
+ "MSA_frac value");
+ }
+ default:
+ panic("Unsupported id_aa64mmfr0_el1 MSA value");
+ }
+}
+
void setup_pgtables(void)
{
int i;
@@ -479,8 +560,13 @@ void dcache_enable(void)
/* The data cache is not active unless the mmu is enabled */
if (!(get_sctlr() & CR_M)) {
invalidate_dcache_all();
- __asm_invalidate_tlb_all();
- mmu_setup();
+
+ if (el_has_mmu()) {
+ __asm_invalidate_tlb_all();
+ mmu_setup();
+ } else {
+ mpu_setup();
+ }
}
set_sctlr(get_sctlr() | CR_C);
@@ -499,7 +585,11 @@ void dcache_disable(void)
set_sctlr(sctlr & ~(CR_C|CR_M));
flush_dcache_all();
- __asm_invalidate_tlb_all();
+
+ if (el_has_mmu())
+ __asm_invalidate_tlb_all();
+ else
+ mpu_clear_regions();
}
int dcache_status(void)
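
Because mpu_mem_map is a weak pointer, a board can supply its own region table; the setup loop stops at the first entry with both end and attrs zero. A hypothetical board-side override (the address windows are invented for illustration):

#include <asm/armv8/mpu.h>

/* Strong definition overrides the __weak default in cache_v8.c. */
static struct mpu_region board_mpu_mem_map[] = {
	{
		.start = 0x00000000ULL,
		.end   = 0x7fffffffULL,		/* invented DRAM window */
		.attrs = PRLAR_ATTRIDX(MT_NORMAL),
	},
	{
		.start = 0x80000000ULL,
		.end   = 0xffffffffULL,		/* invented device window */
		.attrs = PRLAR_ATTRIDX(MT_DEVICE_NGNRNE),
	},
	{ 0 }	/* terminator: end == 0 and attrs == 0 stops the loop */
};

struct mpu_region *mpu_mem_map = board_mpu_mem_map;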
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/spintable.S b/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
index 363ded0..d6bd188 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
+++ b/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
@@ -93,7 +93,7 @@ __secondary_boot_func:
4:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
switch_el x7, _dead_loop, 0f, _dead_loop
-0: armv8_switch_to_el1_m x4, x6, x7
+0: armv8_switch_to_el1_m x4, x6, x7, x9
#else
switch_el x7, 0f, _dead_loop, _dead_loop
0: armv8_switch_to_el2_m x4, x6, x7
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
index a31af4f..9dbdff3 100644
--- a/arch/arm/cpu/armv8/transition.S
+++ b/arch/arm/cpu/armv8/transition.S
@@ -40,7 +40,7 @@ ENTRY(armv8_switch_to_el1)
* now, jump to the address saved in x4.
*/
br x4
-1: armv8_switch_to_el1_m x4, x5, x6
+1: armv8_switch_to_el1_m x4, x5, x6, x7
ENDPROC(armv8_switch_to_el1)
.popsection
diff --git a/arch/arm/include/asm/armv8/mpu.h b/arch/arm/include/asm/armv8/mpu.h
new file mode 100644
index 0000000..c6c8828
--- /dev/null
+++ b/arch/arm/include/asm/armv8/mpu.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2021 Arm Limited
+ */
+
+#ifndef _ASM_ARMV8_MPU_H_
+#define _ASM_ARMV8_MPU_H_
+
+#include <asm/armv8/mmu.h>
+#include <asm/barriers.h>
+#include <linux/stringify.h>
+
+#define PRSELR_EL2 S3_4_c6_c2_1
+#define PRBAR_EL2 S3_4_c6_c8_0
+#define PRLAR_EL2 S3_4_c6_c8_1
+#define MPUIR_EL2 S3_4_c0_c0_4
+
+#define PRBAR_ADDRESS(addr) ((addr) & ~(0x3fULL))
+
+/* Access permissions */
+#define PRBAR_AP(val) (((val) & 0x3) << 2)
+#define PRBAR_AP_RW_HYP PRBAR_AP(0x0)
+#define PRBAR_AP_RW_ANY PRBAR_AP(0x1)
+#define PRBAR_AP_RO_HYP PRBAR_AP(0x2)
+#define PRBAR_AP_RO_ANY PRBAR_AP(0x3)
+
+/* Shareability */
+#define PRBAR_SH(val) (((val) & 0x3) << 4)
+#define PRBAR_NON_SH PRBAR_SH(0x0)
+#define PRBAR_OUTER_SH PRBAR_SH(0x2)
+#define PRBAR_INNER_SH PRBAR_SH(0x3)
+
+/* Memory attribute (MAIR idx) */
+#define PRLAR_ATTRIDX(val) (((val) & 0x7) << 1)
+#define PRLAR_EN_BIT (0x1)
+#define PRLAR_ADDRESS(addr) ((addr) & ~(0x3fULL))
+
+#ifndef __ASSEMBLY__
+
+static inline void setup_el2_mpu_region(uint8_t region, uint64_t base, uint64_t limit)
+{
+ asm volatile("msr " __stringify(PRSELR_EL2) ", %0" : : "r" (region));
+ isb();
+ asm volatile("msr " __stringify(PRBAR_EL2) ", %0" : : "r" (base));
+ asm volatile("msr " __stringify(PRLAR_EL2) ", %0" : : "r" (limit));
+ dsb();
+ isb();
+}
+
+struct mpu_region {
+ u64 start;
+ u64 end;
+ u64 attrs;
+};
+
+extern struct mpu_region *mpu_mem_map;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_ARMV8_MPU_H_ */
diff --git a/arch/arm/include/asm/macro.h b/arch/arm/include/asm/macro.h
index 485310d..ecd8221 100644
--- a/arch/arm/include/asm/macro.h
+++ b/arch/arm/include/asm/macro.h
@@ -256,7 +256,7 @@ lr .req x30
* For loading 64-bit OS, x0 is physical address to the FDT blob.
* They will be passed to the guest.
*/
-.macro armv8_switch_to_el1_m, ep, flag, tmp
+.macro armv8_switch_to_el1_m, ep, flag, tmp, tmp2
/* Initialize Generic Timers */
mrs \tmp, cnthctl_el2
/* Enable EL1 access to timers */
@@ -306,9 +306,33 @@ lr .req x30
b.eq 1f
/* Initialize HCR_EL2 */
- ldr \tmp, =(HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS)
+ /* Only disable PAuth traps if PAuth is supported */
+ mrs \tmp, id_aa64isar1_el1
+ ldr \tmp2, =(ID_AA64ISAR1_EL1_GPI | ID_AA64ISAR1_EL1_GPA | \
+ ID_AA64ISAR1_EL1_API | ID_AA64ISAR1_EL1_APA)
+ tst \tmp, \tmp2
+ mov \tmp2, #(HCR_EL2_RW_AARCH64 | HCR_EL2_HCD_DIS)
+ orr \tmp, \tmp2, #(HCR_EL2_APK | HCR_EL2_API)
+ csel \tmp, \tmp2, \tmp, eq
msr hcr_el2, \tmp
+ /*
+ * Detect whether the system has a configurable memory system
+ * architecture at EL1&0
+ */
+ mrs \tmp, id_aa64mmfr0_el1
+ lsr \tmp, \tmp, #48
+ and \tmp, \tmp, #((ID_AA64MMFR0_EL1_MSA_MASK | \
+ ID_AA64MMFR0_EL1_MSA_FRAC_MASK) >> 48)
+ cmp \tmp, #((ID_AA64MMFR0_EL1_MSA_USE_FRAC | \
+ ID_AA64MMFR0_EL1_MSA_FRAC_VMSA) >> 48)
+ bne 2f
+
+ /* Ensure the EL1&0 VMSA is enabled */
+ mov \tmp, #(VTCR_EL2_MSA)
+ msr vtcr_el2, \tmp
+2:
+
/* Return to the EL1_SP1 mode from EL2 */
ldr \tmp, =(SPSR_EL_DEBUG_MASK | SPSR_EL_SERR_MASK |\
SPSR_EL_IRQ_MASK | SPSR_EL_FIQ_MASK |\
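
The PAuth block reads ID_AA64ISAR1_EL1 and clears the HCR_EL2.API/APK traps only when at least one of the four algorithm fields (GPI, GPA, API, APA) is non-zero. A C paraphrase, using local 64-bit constants because the header's (1 << 41)-style values are written for the assembler, where expressions are 64-bit, and would overflow a 32-bit int in C:

#include <stdint.h>

/* Illustrative 64-bit copies of the header constants. */
#define HCR_API		(1ULL << 41)
#define HCR_APK		(1ULL << 40)
#define HCR_RW_AARCH64	(1ULL << 31)
#define HCR_HCD_DIS	(1ULL << 29)
#define PAUTH_FIELDS	((0xFULL << 28) | (0xFULL << 24) | \
			 (0xFULL << 8) | (0xFULL << 4))

static uint64_t compute_hcr_el2(uint64_t id_aa64isar1)
{
	uint64_t hcr = HCR_RW_AARCH64 | HCR_HCD_DIS;

	/* the tst/csel pair: disable the traps only if PAuth exists */
	if (id_aa64isar1 & PAUTH_FIELDS)
		hcr |= HCR_APK | HCR_API;

	return hcr;
}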
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 8b3a54e..1ec6237 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -75,11 +75,50 @@
/*
* HCR_EL2 bits definitions
*/
+#define HCR_EL2_API (1 << 41) /* Trap pointer authentication
+ instructions */
+#define HCR_EL2_APK (1 << 40) /* Trap pointer authentication
+ key access */
#define HCR_EL2_RW_AARCH64 (1 << 31) /* EL1 is AArch64 */
#define HCR_EL2_RW_AARCH32 (0 << 31) /* Lower levels are AArch32 */
#define HCR_EL2_HCD_DIS (1 << 29) /* Hypervisor Call disabled */
/*
+ * VTCR_EL2 bits definitions
+ */
+#define VTCR_EL2_MSA (1 << 31) /* EL1&0 memory architecture */
+
+/*
+ * ID_AA64MMFR0_EL1 bits definitions
+ */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_MASK (0xFUL << 52) /* Memory system
+ architecture
+ frac */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_VMSA (0x2UL << 52) /* EL1&0 supports
+ VMSA */
+#define ID_AA64MMFR0_EL1_MSA_FRAC_PMSA (0x1UL << 52) /* EL1&0 only
+ supports PMSA*/
+#define ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA (0x0UL << 52) /* No PMSA
+ support */
+#define ID_AA64MMFR0_EL1_MSA_MASK (0xFUL << 48) /* Memory system
+ architecture */
+#define ID_AA64MMFR0_EL1_MSA_USE_FRAC (0xFUL << 48) /* Use MSA_FRAC */
+#define ID_AA64MMFR0_EL1_MSA_VMSA (0x0UL << 48) /* Memory system
+ architecture
+ is VMSA */
+
+/*
+ * ID_AA64ISAR1_EL1 bits definitions
+ */
+#define ID_AA64ISAR1_EL1_GPI (0xF << 28) /* Implementation-defined generic
+ code auth algorithm */
+#define ID_AA64ISAR1_EL1_GPA (0xF << 24) /* QARMA generic code auth
+ algorithm */
+#define ID_AA64ISAR1_EL1_API (0xF << 8) /* Implementation-defined address
+ auth algorithm */
+#define ID_AA64ISAR1_EL1_APA (0xF << 4) /* QARMA address auth algorithm */
+
+/*
* ID_AA64PFR0_EL1 bits definitions
*/
#define ID_AA64PFR0_EL1_EL3 (0xF << 12) /* EL3 implemented */
@@ -551,7 +590,6 @@ s32 psci_affinity_info(u32 function_id, u32 target_affinity,
u32 psci_migrate_info_type(void);
void psci_system_off(void);
void psci_system_reset(void);
-s32 psci_features(u32 function_id, u32 psci_fid);
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
index f60ee3a..ea9bfe7 100644
--- a/arch/arm/lib/bootm.c
+++ b/arch/arm/lib/bootm.c
@@ -317,7 +317,6 @@ __weak void update_os_arch_secondary_cores(uint8_t os_arch)
{
}
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
static void switch_to_el1(void)
{
if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
@@ -332,7 +331,6 @@ static void switch_to_el1(void)
ES_TO_AARCH64);
}
#endif
-#endif
/* Subcommand: GO */
static void boot_jump_linux(bootm_headers_t *images, int flag)
@@ -359,21 +357,33 @@ static void boot_jump_linux(bootm_headers_t *images, int flag)
update_os_arch_secondary_cores(images->os.arch);
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
- armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
- (u64)switch_to_el1, ES_TO_AARCH64);
+#ifdef CONFIG_ARMV8_MULTIENTRY
+ int armv8_switch_to_el1 = -1;
#else
- if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
- (images->os.arch == IH_ARCH_ARM))
- armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number,
- (u64)images->ft_addr, 0,
- (u64)images->ep,
- ES_TO_AARCH32);
- else
- armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
- images->ep,
- ES_TO_AARCH64);
+ int armv8_switch_to_el1 = env_get_yesno("armv8_switch_to_el1");
#endif
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+ if (armv8_switch_to_el1 == -1) {
+ armv8_switch_to_el1 = 1;
+ }
+#endif
+ if (armv8_switch_to_el1 == 1) {
+ armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
+ (u64)switch_to_el1, ES_TO_AARCH64);
+ } else {
+ if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
+ (images->os.arch == IH_ARCH_ARM))
+ armv8_switch_to_el2(0,
+ (u64)gd->bd->bi_arch_number,
+ (u64)images->ft_addr, 0,
+ (u64)images->ep,
+ ES_TO_AARCH32);
+ else
+ armv8_switch_to_el2((u64)images->ft_addr,
+ 0, 0, 0,
+ images->ep,
+ ES_TO_AARCH64);
+ }
}
#else
unsigned long machid = gd->bd->bi_arch_number;
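
This makes the EL1 switch runtime-selectable: env_get_yesno() returns 1 or 0 for a yes/no value of armv8_switch_to_el1 and -1 when the variable is unset, so CONFIG_ARMV8_SWITCH_TO_EL1 only supplies the default for the unset case, and multi-entry builds ignore the variable entirely. The decision, condensed into a sketch (the helper name is invented):

#include <env.h>
#include <linux/kconfig.h>

static int want_el1_switch(void)
{
	if (IS_ENABLED(CONFIG_ARMV8_MULTIENTRY))
		return IS_ENABLED(CONFIG_ARMV8_SWITCH_TO_EL1);

	switch (env_get_yesno("armv8_switch_to_el1")) {
	case 1:		/* "y": boot the kernel at EL1 */
		return 1;
	case 0:		/* "n": stay at EL2 */
		return 0;
	default:	/* unset: fall back to the Kconfig default */
		return IS_ENABLED(CONFIG_ARMV8_SWITCH_TO_EL1);
	}
}

So, for example, "setenv armv8_switch_to_el1 n" at the U-Boot prompt forces an EL2 boot even on a build with CONFIG_ARMV8_SWITCH_TO_EL1 enabled.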
diff --git a/arch/arm/lib/crt0.S b/arch/arm/lib/crt0.S
index 46b6be2..956d258 100644
--- a/arch/arm/lib/crt0.S
+++ b/arch/arm/lib/crt0.S
@@ -130,6 +130,14 @@ ENTRY(_main)
ldr r9, [r9, #GD_NEW_GD] /* r9 <- gd->new_gd */
adr lr, here
+#if defined(CONFIG_POSITION_INDEPENDENT)
+ adr r0, _main
+ ldr r1, _start_ofs
+ add r0, r1
+ ldr r1, =CONFIG_SYS_TEXT_BASE
+ sub r1, r0
+ add lr, r1
+#endif
ldr r0, [r9, #GD_RELOC_OFF] /* r0 = gd->reloc_off */
add lr, lr, r0
#if defined(CONFIG_CPU_V7M)
@@ -180,3 +188,6 @@ here:
#endif
ENDPROC(_main)
+
+_start_ofs:
+ .word _start - _main
diff --git a/arch/arm/lib/relocate.S b/arch/arm/lib/relocate.S
index e5f7267..14b7f61 100644
--- a/arch/arm/lib/relocate.S
+++ b/arch/arm/lib/relocate.S
@@ -78,22 +78,28 @@ ENDPROC(relocate_vectors)
*/
ENTRY(relocate_code)
- ldr r1, =__image_copy_start /* r1 <- SRC &__image_copy_start */
- subs r4, r0, r1 /* r4 <- relocation offset */
- beq relocate_done /* skip relocation */
- ldr r2, =__image_copy_end /* r2 <- SRC &__image_copy_end */
-
+ adr r3, relocate_code
+ ldr r1, _image_copy_start_ofs
+ add r1, r3 /* r1 <- Run &__image_copy_start */
+ subs r4, r0, r1 /* r4 <- Run to copy offset */
+ beq relocate_done /* skip relocation */
+ ldr r1, _image_copy_start_ofs
+ add r1, r3 /* r1 <- Run &__image_copy_start */
+ ldr r2, _image_copy_end_ofs
+ add r2, r3 /* r2 <- Run &__image_copy_end */
copy_loop:
- ldmia r1!, {r10-r11} /* copy from source address [r1] */
- stmia r0!, {r10-r11} /* copy to target address [r0] */
- cmp r1, r2 /* until source end address [r2] */
+ ldmia r1!, {r10-r11} /* copy from source address [r1] */
+ stmia r0!, {r10-r11} /* copy to target address [r0] */
+ cmp r1, r2 /* until source end address [r2] */
blo copy_loop
/*
* fix .rel.dyn relocations
*/
- ldr r2, =__rel_dyn_start /* r2 <- SRC &__rel_dyn_start */
- ldr r3, =__rel_dyn_end /* r3 <- SRC &__rel_dyn_end */
+ ldr r1, _rel_dyn_start_ofs
+ add r2, r1, r3 /* r2 <- Run &__rel_dyn_start */
+ ldr r1, _rel_dyn_end_ofs
+ add r3, r1, r3 /* r3 <- Run &__rel_dyn_end */
fixloop:
ldmia r2!, {r0-r1} /* (r0,r1) <- (SRC location,fixup) */
and r1, r1, #0xff
@@ -129,3 +135,12 @@ relocate_done:
#endif
ENDPROC(relocate_code)
+
+_image_copy_start_ofs:
+ .word __image_copy_start - relocate_code
+_image_copy_end_ofs:
+ .word __image_copy_end - relocate_code
+_rel_dyn_start_ofs:
+ .word __rel_dyn_start - relocate_code
+_rel_dyn_end_ofs:
+ .word __rel_dyn_end - relocate_code
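
All of the _ofs words use the same trick: ".word symbol - anchor" is resolved at link time to a constant, so the binary contains no absolute address needing a relocation; at runtime, adr supplies the anchor's actual address and the constant is added back. In C terms (names illustrative):

/* delta plays the role of ".word __rel_dyn_start - relocate_code". */
static inline void *runtime_addr(void *anchor_runtime, long delta)
{
	return (char *)anchor_runtime + delta;
}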
diff --git a/arch/arm/mach-snapdragon/misc.c b/arch/arm/mach-snapdragon/misc.c
index 985625a..7d452f4 100644
--- a/arch/arm/mach-snapdragon/misc.c
+++ b/arch/arm/mach-snapdragon/misc.c
@@ -9,6 +9,7 @@
#include <common.h>
#include <mmc.h>
#include <asm/arch/misc.h>
+#include <asm/unaligned.h>
/* UNSTUFF_BITS macro taken from Linux Kernel: drivers/mmc/core/sd.c */
#define UNSTUFF_BITS(resp, start, size) \
@@ -33,21 +34,22 @@ u32 msm_board_serial(void)
if (!mmc_dev)
return 0;
+ if (mmc_init(mmc_dev))
+ return 0;
+
return UNSTUFF_BITS(mmc_dev->cid, 16, 32);
}
void msm_generate_mac_addr(u8 *mac)
{
- int i;
- char sn[9];
-
- snprintf(sn, 9, "%08x", msm_board_serial());
-
- /* fill in the mac with serialno, use locally adminstrated pool */
+ /* use locally administered pool */
mac[0] = 0x02;
- mac[1] = 00;
- for (i = 3; i >= 0; i--) {
- mac[i + 2] = hextoul(&sn[2 * i], NULL);
- sn[2 * i] = 0;
- }
+ mac[1] = 0x00;
+
+ /*
+ * Put the 32-bit serial number in the last 32 bits of the MAC address.
+ * Use big-endian order so it is consistent with the serial number
+ * written as a hexadecimal string, e.g. 0x1234abcd -> 02:00:12:34:ab:cd
+ */
+ put_unaligned_be32(msm_board_serial(), &mac[2]);
}
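
A stand-alone check of the new MAC layout, with put_unaligned_be32() open-coded so the sketch builds outside U-Boot:

#include <stdint.h>
#include <stdio.h>

/* Mirrors msm_generate_mac_addr() above, byte by byte. */
static void generate_mac(uint8_t mac[6], uint32_t serial)
{
	mac[0] = 0x02;		/* locally administered, unicast */
	mac[1] = 0x00;
	mac[2] = serial >> 24;	/* big-endian, as put_unaligned_be32() stores it */
	mac[3] = serial >> 16;
	mac[4] = serial >> 8;
	mac[5] = serial;
}

int main(void)
{
	uint8_t mac[6];

	generate_mac(mac, 0x1234abcd);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 02:00:12:34:ab:cd, matching the comment in the patch */
	return 0;
}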