author    Tom Rini <trini@konsulko.com>    2017-03-16 16:43:32 -0400
committer Tom Rini <trini@konsulko.com>    2017-03-16 16:43:32 -0400
commit    ce38ebb6f7f0e2111b7d457651ae0a76bc5a2636 (patch)
tree      c2c424ead06d5f265d6e4c954550454b9cf8c650 /arch
parent    2808576491ae36b6ea96743005058f370d936beb (diff)
parent    9b6639fa85bddd90df4c371f25a89c791a6ee6ef (diff)
Merge git://git.denx.de/u-boot-fsl-qoriq
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/cpu/armv8/cache_v8.c                           |  72
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/Kconfig               |  26
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/cpu.c                 | 331
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/soc.c                 |   2
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/spl.c                 |   3
-rw-r--r--  arch/arm/include/asm/arch-fsl-layerscape/config.h       |   4
-rw-r--r--  arch/arm/include/asm/arch-fsl-layerscape/cpu.h          |  12
-rw-r--r--  arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h  |  69
-rw-r--r--  arch/arm/include/asm/arch-fsl-layerscape/mmu.h          |   2
-rw-r--r--  arch/arm/include/asm/armv8/mmu.h                        |   5
-rw-r--r--  arch/arm/include/asm/fsl_secure_boot.h                  |   4
-rw-r--r--  arch/arm/include/asm/global_data.h                      |   7
-rw-r--r--  arch/arm/include/asm/system.h                           |   1
13 files changed, 482 insertions(+), 56 deletions(-)
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 6c5630c..bd1c3e0 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -501,7 +501,8 @@ static bool is_aligned(u64 addr, u64 size, u64 align)
return !(addr & (align - 1)) && !(size & (align - 1));
}
-static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
+/* Use flag to indicate if attrs has more than d-cache attributes */
+static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
int levelshift = level2shift(level);
u64 levelsize = 1ULL << levelshift;
@@ -509,8 +510,13 @@ static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
/* Can we can just modify the current level block PTE? */
if (is_aligned(start, size, levelsize)) {
- *pte &= ~PMD_ATTRINDX_MASK;
- *pte |= attrs;
+ if (flag) {
+ *pte &= ~PMD_ATTRMASK;
+ *pte |= attrs & PMD_ATTRMASK;
+ } else {
+ *pte &= ~PMD_ATTRINDX_MASK;
+ *pte |= attrs & PMD_ATTRINDX_MASK;
+ }
debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);
return levelsize;
@@ -560,7 +566,8 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
u64 r;
for (level = 1; level < 4; level++) {
- r = set_one_region(start, size, attrs, level);
+ /* Set d-cache attributes only */
+ r = set_one_region(start, size, attrs, false, level);
if (r) {
/* PTE successfully replaced */
size -= r;
@@ -581,6 +588,63 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
flush_dcache_range(real_start, real_start + real_size);
}
+/*
+ * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
+ * The process is break-before-make. The target region will be marked as
+ * invalid during the process of changing.
+ */
+void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
+{
+ int level;
+ u64 r, size, start;
+
+ start = addr;
+ size = siz;
+ /*
+ * Loop through the address range until we find a page granule that fits
+ * our alignment constraints, then set it to "invalid".
+ */
+ while (size > 0) {
+ for (level = 1; level < 4; level++) {
+ /* Set PTE to fault */
+ r = set_one_region(start, size, PTE_TYPE_FAULT, true,
+ level);
+ if (r) {
+ /* PTE successfully invalidated */
+ size -= r;
+ start += r;
+ break;
+ }
+ }
+ }
+
+ flush_dcache_range(gd->arch.tlb_addr,
+ gd->arch.tlb_addr + gd->arch.tlb_size);
+ __asm_invalidate_tlb_all();
+
+ /*
+ * Loop through the address range until we find a page granule that fits
+ * our alignment constraints, then set it to the new cache attributes
+ */
+ start = addr;
+ size = siz;
+ while (size > 0) {
+ for (level = 1; level < 4; level++) {
+ /* Set PTE to new attributes */
+ r = set_one_region(start, size, attrs, true, level);
+ if (r) {
+ /* PTE successfully updated */
+ size -= r;
+ start += r;
+ break;
+ }
+ }
+ }
+ flush_dcache_range(gd->arch.tlb_addr,
+ gd->arch.tlb_addr + gd->arch.tlb_size);
+ __asm_invalidate_tlb_all();
+}
+
#else /* CONFIG_SYS_DCACHE_OFF */
/*
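For reference, a minimal sketch of how the new mmu_change_region_attr() helper is meant to be called to flip a DDR region from the early device mapping to normal, cacheable memory. The base and size below are placeholders, not values from this patch; the same pattern appears in update_early_mmu_table() in the cpu.c change further down:

	#include <asm/armv8/mmu.h>	/* PTE_BLOCK_*, PTE_TYPE_VALID, MT_NORMAL */
	#include <asm/system.h>		/* mmu_change_region_attr() prototype */

	/* Illustrative only: break-before-make happens inside the helper */
	static void example_make_region_normal(phys_addr_t base, size_t size)
	{
		mmu_change_region_attr(base, size,
				       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				       PTE_BLOCK_OUTER_SHARE |
				       PTE_BLOCK_NS |
				       PTE_TYPE_VALID);
	}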
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/Kconfig b/arch/arm/cpu/armv8/fsl-layerscape/Kconfig
index b5609ff..a99b1c6 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/Kconfig
+++ b/arch/arm/cpu/armv8/fsl-layerscape/Kconfig
@@ -89,6 +89,14 @@ config FSL_LSCH3
select SYS_FSL_SRDS_1
select SYS_HAS_SERDES
+config FSL_MC_ENET
+ bool "Management Complex network"
+ depends on ARCH_LS2080A
+ default y
+ select RESV_RAM
+ help
+ Enable Management Complex (MC) network
+
menu "Layerscape architecture"
depends on FSL_LSCH2 || FSL_LSCH3
@@ -277,6 +285,16 @@ config SYS_FSL_SDHC_CLK_DIV
clock, in another word SDHC_clk = Platform_clk / this_divider.
endmenu
+config RESV_RAM
+ bool
+ help
+ Reserve memory from the top, tracked by gd->arch.resv_ram. This
+ reserved RAM can be used by a special driver that resides in memory
+ after U-Boot exits. It is up to the implementation to allocate and
+ allow access to this reserved memory. For example, the reserved RAM
+ can be at the high end of physical memory. The reserved RAM may be
+ excluded from the memory bank(s) passed to the OS, or marked as reserved.
+
config SYS_FSL_ERRATUM_A008336
bool
@@ -297,3 +315,11 @@ config SYS_FSL_ERRATUM_A009660
config SYS_FSL_ERRATUM_A009929
bool
+
+config SYS_MC_RSV_MEM_ALIGN
+ hex "Management Complex reserved memory alignment"
+ depends on RESV_RAM
+ default 0x20000000
+ help
+ Reserved memory needs to be aligned for MC to use. Default value
+ is 512MB.
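The SYS_MC_RSV_MEM_ALIGN default of 0x20000000 feeds the reservation arithmetic added to board_reserve_ram_top() in the cpu.c change below. A worked example with hypothetical sizes:

	/*
	 * Hypothetical numbers, alignment illustration only:
	 *   ram_size = 0x2_0000_0000              (8 GiB of DDR)
	 *   ram_top  = ram_size - 0x1800_0000     (assume a 384 MiB MC block)
	 *            = 0x1_E800_0000
	 *   ram_top &= ~(0x2000_0000 - 1)         (align down to 512 MiB)
	 *            = 0x1_E000_0000
	 *   reserved = ram_size - ram_top = 0x2000_0000, i.e. 512 MiB from the top
	 */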
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
index 335f225..7e66ee0 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/cpu.c
@@ -101,12 +101,50 @@ static inline void final_mmu_setup(void)
{
u64 tlb_addr_save = gd->arch.tlb_addr;
unsigned int el = current_el();
-#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
int index;
-#endif
mem_map = final_map;
+ /* Update mapping for DDR to actual size */
+ for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
+ /*
+ * Find the entry for DDR mapping and update the address and
+ * size. Zero-sized mapping will be skipped when creating MMU
+ * table.
+ */
+ switch (final_map[index].virt) {
+ case CONFIG_SYS_FSL_DRAM_BASE1:
+ final_map[index].virt = gd->bd->bi_dram[0].start;
+ final_map[index].phys = gd->bd->bi_dram[0].start;
+ final_map[index].size = gd->bd->bi_dram[0].size;
+ break;
+#ifdef CONFIG_SYS_FSL_DRAM_BASE2
+ case CONFIG_SYS_FSL_DRAM_BASE2:
+#if (CONFIG_NR_DRAM_BANKS >= 2)
+ final_map[index].virt = gd->bd->bi_dram[1].start;
+ final_map[index].phys = gd->bd->bi_dram[1].start;
+ final_map[index].size = gd->bd->bi_dram[1].size;
+#else
+ final_map[index].size = 0;
+#endif
+ break;
+#endif
+#ifdef CONFIG_SYS_FSL_DRAM_BASE3
+ case CONFIG_SYS_FSL_DRAM_BASE3:
+#if (CONFIG_NR_DRAM_BANKS >= 3)
+ final_map[index].virt = gd->bd->bi_dram[2].start;
+ final_map[index].phys = gd->bd->bi_dram[2].start;
+ final_map[index].size = gd->bd->bi_dram[2].size;
+#else
+ final_map[index].size = 0;
+#endif
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
if (el == 3) {
@@ -143,21 +181,14 @@ static inline void final_mmu_setup(void)
setup_pgtables();
gd->arch.tlb_addr = tlb_addr_save;
- /* flush new MMU table */
- flush_dcache_range(gd->arch.tlb_addr,
- gd->arch.tlb_addr + gd->arch.tlb_size);
+ /* Disable cache and MMU */
+ dcache_disable(); /* TLBs are invalidated */
+ invalidate_icache_all();
/* point TTBR to the new table */
set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
MEMORY_ATTRIBUTES);
- /*
- * EL3 MMU is already enabled, just need to invalidate TLB to load the
- * new table. The new table is compatible with the current table, if
- * MMU somehow walks through the new table before invalidation TLB,
- * it still works. So we don't need to turn off MMU here.
- * When EL2 MMU table is created by calling this function, MMU needs
- * to be enabled.
- */
+
set_sctlr(get_sctlr() | CR_M);
}
@@ -524,15 +555,277 @@ phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
phys_size_t ram_top = ram_size;
-#ifdef CONFIG_SYS_MEM_TOP_HIDE
-#error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
-#endif
-
-/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
+ /* The start address of MC reserved memory needs to be aligned. */
ram_top -= mc_get_dram_block_size();
ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif
- return ram_top;
+ return ram_size - ram_top;
+}
+
+phys_size_t get_effective_memsize(void)
+{
+ phys_size_t ea_size, rem = 0;
+
+ /*
+ * For ARMv8 SoCs, DDR memory is split into two or three regions. The
+ * first region is 2GB space at 0x8000_0000. If the memory extends to
+ * the second region (or the third region if applicable), the secure
+ * memory and Management Complex (MC) memory should be put into the
+ * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
+ * is set to the size of first region so U-Boot doesn't relocate itself
+ * into higher address. Should DDR be configured to skip the first
+ * region, this function needs to be adjusted.
+ */
+ if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
+ ea_size = CONFIG_MAX_MEM_MAPPED;
+ rem = gd->ram_size - ea_size;
+ } else {
+ ea_size = gd->ram_size;
+ }
+
+#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
+ /* Check if we have enough space for secure memory */
+ if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) {
+ rem -= CONFIG_SYS_MEM_RESERVE_SECURE;
+ } else {
+ if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) {
+ ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
+ rem = 0; /* Presume MC requires more memory */
+ } else {
+ printf("Error: Not enough space for secure memory.\n");
+ }
+ }
+#endif
+ /* Check if we have enough memory for MC */
+ if (rem < board_reserve_ram_top(rem)) {
+ /* Not enough memory in high region to reserve */
+ if (ea_size > board_reserve_ram_top(rem))
+ ea_size -= board_reserve_ram_top(rem);
+ else
+ printf("Error: Not enough space for reserved memory.\n");
+ }
+
+ return ea_size;
+}
+
+void dram_init_banksize(void)
+{
+#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
+ phys_size_t dp_ddr_size;
+#endif
+
+ /*
+ * gd->ram_size has the total size of DDR memory, less reserved secure
+ * memory. The DDR extends from the low region to the high region(s),
+ * presuming no hole is created by the DDR configuration. gd->arch.secure_ram tracks
+ * the location of secure memory. gd->arch.resv_ram tracks the location
+ * of reserved memory for Management Complex (MC).
+ */
+ gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
+ if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
+ gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
+ gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
+ gd->bd->bi_dram[1].size = gd->ram_size -
+ CONFIG_SYS_DDR_BLOCK1_SIZE;
+#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
+ if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
+ gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
+ gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
+ CONFIG_SYS_DDR_BLOCK2_SIZE;
+ gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
+ }
+#endif
+ } else {
+ gd->bd->bi_dram[0].size = gd->ram_size;
+ }
+#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
+#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
+ if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
+ gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE;
+ gd->arch.secure_ram = gd->bd->bi_dram[2].start +
+ gd->bd->bi_dram[2].size;
+ gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
+ gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
+ } else
+#endif
+ {
+ if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
+ gd->bd->bi_dram[1].size -=
+ CONFIG_SYS_MEM_RESERVE_SECURE;
+ gd->arch.secure_ram = gd->bd->bi_dram[1].start +
+ gd->bd->bi_dram[1].size;
+ gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
+ gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
+ } else if (gd->bd->bi_dram[0].size >
+ CONFIG_SYS_MEM_RESERVE_SECURE) {
+ gd->bd->bi_dram[0].size -=
+ CONFIG_SYS_MEM_RESERVE_SECURE;
+ gd->arch.secure_ram = gd->bd->bi_dram[0].start +
+ gd->bd->bi_dram[0].size;
+ gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
+ gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
+ }
+ }
+#endif /* CONFIG_SYS_MEM_RESERVE_SECURE */
+
+#ifdef CONFIG_FSL_MC_ENET
+ /* Assign memory for MC */
+#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
+ if (gd->bd->bi_dram[2].size >=
+ board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
+ gd->arch.resv_ram = gd->bd->bi_dram[2].start +
+ gd->bd->bi_dram[2].size -
+ board_reserve_ram_top(gd->bd->bi_dram[2].size);
+ } else
+#endif
+ {
+ if (gd->bd->bi_dram[1].size >=
+ board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
+ gd->arch.resv_ram = gd->bd->bi_dram[1].start +
+ gd->bd->bi_dram[1].size -
+ board_reserve_ram_top(gd->bd->bi_dram[1].size);
+ } else if (gd->bd->bi_dram[0].size >
+ board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
+ gd->arch.resv_ram = gd->bd->bi_dram[0].start +
+ gd->bd->bi_dram[0].size -
+ board_reserve_ram_top(gd->bd->bi_dram[0].size);
+ }
+ }
+#endif /* CONFIG_FSL_MC_ENET */
+
+#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
+#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
+#error "This SoC shouldn't have DP DDR"
+#endif
+ if (soc_has_dp_ddr()) {
+ /* initialize DP-DDR here */
+ puts("DP-DDR: ");
+ /*
+ * The DDR controller uses 0 as the base address for binding.
+ * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
+ */
+ dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
+ CONFIG_DP_DDR_CTRL,
+ CONFIG_DP_DDR_NUM_CTRLS,
+ CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
+ NULL, NULL, NULL);
+ if (dp_ddr_size) {
+ gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
+ gd->bd->bi_dram[2].size = dp_ddr_size;
+ } else {
+ puts("Not detected");
+ }
+ }
+#endif
+}
+
+#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
+void efi_add_known_memory(void)
+{
+ int i;
+ phys_addr_t ram_start, start;
+ phys_size_t ram_size;
+ u64 pages;
+
+ /* Add RAM */
+ for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
+#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
+#error "This SoC shouldn't have DP DDR"
+#endif
+ if (i == 2)
+ continue; /* skip DP-DDR */
+#endif
+ ram_start = gd->bd->bi_dram[i].start;
+ ram_size = gd->bd->bi_dram[i].size;
+#ifdef CONFIG_RESV_RAM
+ if (gd->arch.resv_ram >= ram_start &&
+ gd->arch.resv_ram < ram_start + ram_size)
+ ram_size = gd->arch.resv_ram - ram_start;
+#endif
+ start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
+ pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
+
+ efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
+ false);
+ }
+}
+#endif
+
+/*
+ * Before the DDR size is known, the early MMU table has DDR mapped as device memory
+ * to avoid speculative access. To relocate U-Boot to DDR, "normal memory"
+ * needs to be set for these mappings.
+ * If a special case configures DDR with holes in the mapping, the holes need
+ * to be marked as invalid. This is not implemented in this function.
+ */
+void update_early_mmu_table(void)
+{
+ if (!gd->arch.tlb_addr)
+ return;
+
+ if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
+ mmu_change_region_attr(
+ CONFIG_SYS_SDRAM_BASE,
+ gd->ram_size,
+ PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_OUTER_SHARE |
+ PTE_BLOCK_NS |
+ PTE_TYPE_VALID);
+ } else {
+ mmu_change_region_attr(
+ CONFIG_SYS_SDRAM_BASE,
+ CONFIG_SYS_DDR_BLOCK1_SIZE,
+ PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_OUTER_SHARE |
+ PTE_BLOCK_NS |
+ PTE_TYPE_VALID);
+#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
+#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
+#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
+#endif
+ if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
+ CONFIG_SYS_DDR_BLOCK2_SIZE) {
+ mmu_change_region_attr(
+ CONFIG_SYS_DDR_BLOCK2_BASE,
+ CONFIG_SYS_DDR_BLOCK2_SIZE,
+ PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_OUTER_SHARE |
+ PTE_BLOCK_NS |
+ PTE_TYPE_VALID);
+ mmu_change_region_attr(
+ CONFIG_SYS_DDR_BLOCK3_BASE,
+ gd->ram_size -
+ CONFIG_SYS_DDR_BLOCK1_SIZE -
+ CONFIG_SYS_DDR_BLOCK2_SIZE,
+ PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_OUTER_SHARE |
+ PTE_BLOCK_NS |
+ PTE_TYPE_VALID);
+ } else
+#endif
+ {
+ mmu_change_region_attr(
+ CONFIG_SYS_DDR_BLOCK2_BASE,
+ gd->ram_size -
+ CONFIG_SYS_DDR_BLOCK1_SIZE,
+ PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_OUTER_SHARE |
+ PTE_BLOCK_NS |
+ PTE_TYPE_VALID);
+ }
+ }
+}
+
+__weak int dram_init(void)
+{
+ gd->ram_size = initdram(0);
+#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
+ /* This will break-before-make MMU for DDR */
+ update_early_mmu_table();
+#endif
+
+ return 0;
}
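A worked example of the new get_effective_memsize() logic, with hypothetical sizes. CONFIG_MAX_MEM_MAPPED is 2GB per the config.h change below; the secure-memory size is board specific and only assumed here:

	/*
	 * Assume gd->ram_size = 6 GiB, CONFIG_MAX_MEM_MAPPED = 2 GiB and
	 * CONFIG_SYS_MEM_RESERVE_SECURE = 64 MiB.
	 *
	 *   ea_size = 2 GiB, rem = 4 GiB       (DDR extends past the first block)
	 *   rem > 64 MiB                       -> secure RAM fits in the high region
	 *   rem >= board_reserve_ram_top(rem)  -> the MC block also fits up there
	 *   return value: 2 GiB, so U-Boot keeps relocating within DDR block 1
	 */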
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
index 9489f85..b54a937 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
@@ -233,10 +233,8 @@ int sata_init(void)
{
struct ccsr_ahci __iomem *ccsr_ahci = (void *)CONFIG_SYS_SATA;
-#ifdef CONFIG_ARCH_LS1046A
/* Disable SATA ECC */
out_le32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + 0x520, 0x80000000);
-#endif
out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/spl.c b/arch/arm/cpu/armv8/fsl-layerscape/spl.c
index 1dabdbb..73a8680 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/spl.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/spl.c
@@ -45,9 +45,6 @@ void board_init_f(ulong dummy)
{
/* Clear global data */
memset((void *)gd, 0, sizeof(gd_t));
-#ifdef CONFIG_LS2080A
- arch_cpu_init();
-#endif
board_early_init_f();
timer_init();
#ifdef CONFIG_LS2080A
diff --git a/arch/arm/include/asm/arch-fsl-layerscape/config.h b/arch/arm/include/asm/arch-fsl-layerscape/config.h
index 586ce17..b5b08aa 100644
--- a/arch/arm/include/asm/arch-fsl-layerscape/config.h
+++ b/arch/arm/include/asm/arch-fsl-layerscape/config.h
@@ -33,8 +33,8 @@
#define CONFIG_SYS_FSL_OCRAM_SIZE 0x00020000 /* Real size 128K */
/* DDR */
-#define CONFIG_SYS_LS2_DDR_BLOCK1_SIZE ((phys_size_t)2 << 30)
-#define CONFIG_MAX_MEM_MAPPED CONFIG_SYS_LS2_DDR_BLOCK1_SIZE
+#define CONFIG_SYS_DDR_BLOCK1_SIZE ((phys_size_t)2 << 30)
+#define CONFIG_MAX_MEM_MAPPED CONFIG_SYS_DDR_BLOCK1_SIZE
#define CONFIG_SYS_FSL_CCSR_GUR_LE
#define CONFIG_SYS_FSL_CCSR_SCFG_LE
diff --git a/arch/arm/include/asm/arch-fsl-layerscape/cpu.h b/arch/arm/include/asm/arch-fsl-layerscape/cpu.h
index 4ea4aeaf..bcf3e38 100644
--- a/arch/arm/include/asm/arch-fsl-layerscape/cpu.h
+++ b/arch/arm/include/asm/arch-fsl-layerscape/cpu.h
@@ -115,7 +115,11 @@ static struct mm_region early_map[] = {
},
{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
CONFIG_SYS_FSL_DRAM_SIZE1,
+#if defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD)
PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+#else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
+ PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
+#endif
PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
},
/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
@@ -130,7 +134,7 @@ static struct mm_region early_map[] = {
},
{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
CONFIG_SYS_FSL_DRAM_SIZE2,
- PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
},
#elif defined(CONFIG_FSL_LSCH2)
@@ -158,12 +162,16 @@ static struct mm_region early_map[] = {
},
{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
CONFIG_SYS_FSL_DRAM_SIZE1,
+#if defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD)
PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+#else /* Start with nGnRnE and PXN and UXN to prevent speculative access */
+ PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
+#endif
PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
},
{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
CONFIG_SYS_FSL_DRAM_SIZE2,
- PTE_BLOCK_MEMTYPE(MT_NORMAL) |
+ PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
},
#endif
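Net effect of the early_map change: until dram_init() runs, DRAM is mapped with device attributes and both execute-never bits set, and update_early_mmu_table() (cpu.c above) later rewrites the same range as normal memory. The two attribute encodings, using only macros from asm/armv8/mmu.h:

	/* Early boot: keep speculative accesses out of unprobed DDR */
	u64 early_attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			  PTE_BLOCK_PXN | PTE_BLOCK_UXN |
			  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS;

	/* After the DDR size is known: normal, executable memory */
	u64 final_attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS |
			  PTE_TYPE_VALID;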
diff --git a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
index 43ae686..08ea8fb 100644
--- a/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
+++ b/arch/arm/include/asm/arch-fsl-layerscape/immap_lsch3.h
@@ -177,21 +177,23 @@ struct ccsr_gur {
u8 res_008[0x20-0x8];
u32 gpporcr1; /* General-purpose POR configuration */
u32 gpporcr2; /* General-purpose POR configuration 2 */
-#define FSL_CHASSIS3_DCFG_FUSESR_VID_SHIFT 25
+ u32 gpporcr3;
+ u32 gpporcr4;
+ u8 res_030[0x60-0x30];
+#define FSL_CHASSIS3_DCFG_FUSESR_VID_SHIFT 2
#define FSL_CHASSIS3_DCFG_FUSESR_VID_MASK 0x1F
-#define FSL_CHASSIS3_DCFG_FUSESR_ALTVID_SHIFT 20
+#define FSL_CHASSIS3_DCFG_FUSESR_ALTVID_SHIFT 7
#define FSL_CHASSIS3_DCFG_FUSESR_ALTVID_MASK 0x1F
u32 dcfg_fusesr; /* Fuse status register */
- u32 gpporcr3;
- u32 gpporcr4;
- u8 res_034[0x70-0x34];
- u32 devdisr; /* Device disable control */
+ u8 res_064[0x70-0x64];
+ u32 devdisr; /* Device disable control 1 */
u32 devdisr2; /* Device disable control 2 */
u32 devdisr3; /* Device disable control 3 */
u32 devdisr4; /* Device disable control 4 */
u32 devdisr5; /* Device disable control 5 */
u32 devdisr6; /* Device disable control 6 */
- u32 devdisr7; /* Device disable control 7 */
+ u8 res_088[0x94-0x88];
+ u32 coredisr; /* Device disable control 7 */
#define FSL_CHASSIS3_DEVDISR2_DPMAC1 0x00000001
#define FSL_CHASSIS3_DEVDISR2_DPMAC2 0x00000002
#define FSL_CHASSIS3_DEVDISR2_DPMAC3 0x00000004
@@ -216,15 +218,11 @@ struct ccsr_gur {
#define FSL_CHASSIS3_DEVDISR2_DPMAC22 0x00200000
#define FSL_CHASSIS3_DEVDISR2_DPMAC23 0x00400000
#define FSL_CHASSIS3_DEVDISR2_DPMAC24 0x00800000
- u8 res_08c[0x90-0x8c];
- u32 coredisru; /* uppper portion for support of 64 cores */
- u32 coredisrl; /* lower portion for support of 64 cores */
u8 res_098[0xa0-0x98];
u32 pvr; /* Processor version */
u32 svr; /* System version */
- u32 mvr; /* Manufacturing version */
- u8 res_0ac[0x100-0xac];
- u32 rcwsr[32]; /* Reset control word status */
+ u8 res_0a8[0x100-0xa8];
+ u32 rcwsr[30]; /* Reset control word status */
#define FSL_CHASSIS3_RCWSR0_SYS_PLL_RAT_SHIFT 2
#define FSL_CHASSIS3_RCWSR0_SYS_PLL_RAT_MASK 0x1f
@@ -239,24 +237,53 @@ struct ccsr_gur {
#define RCW_SB_EN_REG_INDEX 9
#define RCW_SB_EN_MASK 0x00000400
- u8 res_180[0x200-0x180];
- u32 scratchrw[32]; /* Scratch Read/Write */
- u8 res_280[0x300-0x280];
+ u8 res_178[0x200-0x178];
+ u32 scratchrw[16]; /* Scratch Read/Write */
+ u8 res_240[0x300-0x240];
u32 scratchw1r[4]; /* Scratch Read (Write once) */
u8 res_310[0x400-0x310];
u32 bootlocptrl; /* Boot location pointer low-order addr */
u32 bootlocptrh; /* Boot location pointer high-order addr */
- u8 res_408[0x500-0x408];
- u8 res_500[0x740-0x500]; /* add more registers when needed */
+ u8 res_408[0x520-0x408];
+ u32 usb1_amqr;
+ u32 usb2_amqr;
+ u8 res_528[0x530-0x528]; /* add more registers when needed */
+ u32 sdmm1_amqr;
+ u8 res_534[0x550-0x534]; /* add more registers when needed */
+ u32 sata1_amqr;
+ u32 sata2_amqr;
+ u8 res_558[0x570-0x558]; /* add more registers when needed */
+ u32 misc1_amqr;
+ u8 res_574[0x590-0x574]; /* add more registers when needed */
+ u32 spare1_amqr;
+ u32 spare2_amqr;
+ u8 res_598[0x620-0x598]; /* add more registers when needed */
+ u32 gencr[7]; /* General Control Registers */
+ u8 res_63c[0x640-0x63c]; /* add more registers when needed */
+ u32 cgensr1; /* Core General Status Register */
+ u8 res_644[0x660-0x644]; /* add more registers when needed */
+ u32 cgencr1; /* Core General Control Register */
+ u8 res_664[0x740-0x664]; /* add more registers when needed */
u32 tp_ityp[64]; /* Topology Initiator Type Register */
struct {
u32 upper;
u32 lower;
- } tp_cluster[3]; /* Core Cluster n Topology Register */
- u8 res_858[0x1000-0x858];
+ } tp_cluster[4]; /* Core cluster n Topology Register */
+ u8 res_864[0x920-0x864]; /* add more registers when needed */
+ u32 ioqoscr[8]; /* I/O Quality of Service Registers */
+ u32 uccr;
+ u8 res_944[0x960-0x944]; /* add more registers when needed */
+ u32 ftmcr;
+ u8 res_964[0x990-0x964]; /* add more registers when needed */
+ u32 coredisablesr;
+ u8 res_994[0xa00-0x994]; /* add more registers when needed */
+ u32 sdbgcr; /* Secure Debug Configuration Register */
+ u8 res_a04[0xbf8-0xa04]; /* add more registers when needed */
+ u32 ipbrr1;
+ u32 ipbrr2;
+ u8 res_858[0x1000-0xc00];
};
-
struct ccsr_clk_cluster_group {
struct {
u8 res_00[0x10];
diff --git a/arch/arm/include/asm/arch-fsl-layerscape/mmu.h b/arch/arm/include/asm/arch-fsl-layerscape/mmu.h
index d54eacd..d232bec 100644
--- a/arch/arm/include/asm/arch-fsl-layerscape/mmu.h
+++ b/arch/arm/include/asm/arch-fsl-layerscape/mmu.h
@@ -6,5 +6,5 @@
#ifndef _ASM_ARMV8_FSL_LAYERSCAPE_MMU_H_
#define _ASM_ARMV8_FSL_LAYERSCAPE_MMU_H_
-#include <asm/arch-armv8/mmu.h>
+void update_early_mmu_table(void);
#endif /* _ASM_ARMV8_FSL_LAYERSCAPE_MMU_H_ */
diff --git a/arch/arm/include/asm/armv8/mmu.h b/arch/arm/include/asm/armv8/mmu.h
index e9b4cdb..a349903 100644
--- a/arch/arm/include/asm/armv8/mmu.h
+++ b/arch/arm/include/asm/armv8/mmu.h
@@ -53,6 +53,7 @@
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_TABLE (3 << 0)
#define PTE_TYPE_BLOCK (1 << 0)
+#define PTE_TYPE_VALID (1 << 0)
#define PTE_TABLE_PXN (1UL << 59)
#define PTE_TABLE_XN (1UL << 60)
@@ -77,6 +78,10 @@
*/
#define PMD_ATTRINDX(t) ((t) << 2)
#define PMD_ATTRINDX_MASK (7 << 2)
+#define PMD_ATTRMASK (PTE_BLOCK_PXN | \
+ PTE_BLOCK_UXN | \
+ PMD_ATTRINDX_MASK | \
+ PTE_TYPE_VALID)
/*
* TCR flags.
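PMD_ATTRMASK collects every field that set_one_region() may now rewrite when its new flag is true; with the flag false only the MAIR index changes, as in the cache_v8.c hunk at the top of this diff. The covered bits, per the definitions in this header:

	/*
	 * PMD_ATTRMASK spans (illustrative summary):
	 *   PTE_BLOCK_PXN     - privileged execute-never   (bit 53)
	 *   PTE_BLOCK_UXN     - unprivileged execute-never  (bit 54)
	 *   PMD_ATTRINDX_MASK - MAIR attribute index        (bits 4:2)
	 *   PTE_TYPE_VALID    - descriptor valid            (bit 0)
	 */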
diff --git a/arch/arm/include/asm/fsl_secure_boot.h b/arch/arm/include/asm/fsl_secure_boot.h
index ccb513f..fd627c0 100644
--- a/arch/arm/include/asm/fsl_secure_boot.h
+++ b/arch/arm/include/asm/fsl_secure_boot.h
@@ -86,8 +86,8 @@
/* For SD boot address and size are assigned in terms of sector
* offset and no. of sectors respectively.
*/
-#define CONFIG_BS_HDR_ADDR_DEVICE 0x00000800
-#define CONFIG_BS_ADDR_DEVICE 0x00000840
+#define CONFIG_BS_HDR_ADDR_DEVICE 0x00000900
+#define CONFIG_BS_ADDR_DEVICE 0x00000940
#define CONFIG_BS_HDR_SIZE 0x00000010
#define CONFIG_BS_SIZE 0x00000008
#else
diff --git a/arch/arm/include/asm/global_data.h b/arch/arm/include/asm/global_data.h
index aee87cd..dfcbcce 100644
--- a/arch/arm/include/asm/global_data.h
+++ b/arch/arm/include/asm/global_data.h
@@ -59,6 +59,13 @@ struct arch_global_data {
phys_addr_t secure_ram;
unsigned long tlb_allocated;
#endif
+#ifdef CONFIG_RESV_RAM
+ /*
+ * Reserved RAM for a memory-resident driver, e.g. the Management
+ * Complex (MC) driver, which continues to run after U-Boot exits.
+ */
+ phys_addr_t resv_ram;
+#endif
#ifdef CONFIG_ARCH_OMAP2
u32 omap_boot_device;
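gd->arch.resv_ram only records where the carved-out region starts; callers must keep the OS out of it. This merge handles the EFI memory map in efi_add_known_memory() above; for a flattened device tree a board could add a /memreserve/ entry along these lines (a sketch under that assumption, using the standard libfdt fdt_add_mem_rsv() call):

	#include <common.h>
	#include <libfdt.h>

	DECLARE_GLOBAL_DATA_PTR;

	/* Sketch: carve the reserved RAM out of the FDT handed to the OS */
	static int example_reserve_resv_ram(void *blob, phys_size_t size)
	{
		if (!gd->arch.resv_ram)
			return 0;

		return fdt_add_mem_rsv(blob, gd->arch.resv_ram, size);
	}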
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 766e929..9c3261c 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -226,6 +226,7 @@ void protect_secure_region(void);
void smp_kick_all_cpus(void);
void flush_l3_cache(void);
+void mmu_change_region_attr(phys_addr_t start, size_t size, u64 attrs);
/*
* Issue a secure monitor call in accordance with ARM "SMC Calling convention",