diff options
43 files changed, 4467 insertions, 89 deletions
diff --git a/arch/arm/dts/Makefile b/arch/arm/dts/Makefile index c42715e..82a0790 100644 --- a/arch/arm/dts/Makefile +++ b/arch/arm/dts/Makefile @@ -400,6 +400,7 @@ dtb-$(CONFIG_ARCH_SOCFPGA) += \ socfpga_cyclone5_socrates.dtb \ socfpga_cyclone5_sr1500.dtb \ socfpga_cyclone5_vining_fpga.dtb \ + socfpga_n5x_socdk.dtb \ socfpga_stratix10_socdk.dtb dtb-$(CONFIG_TARGET_DRA7XX_EVM) += dra72-evm.dtb dra7-evm.dtb \ diff --git a/arch/arm/dts/socfpga_n5x-u-boot.dtsi b/arch/arm/dts/socfpga_n5x-u-boot.dtsi new file mode 100644 index 0000000..d377ae5 --- /dev/null +++ b/arch/arm/dts/socfpga_n5x-u-boot.dtsi @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * U-Boot additions + * + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + */ + +#include "socfpga_soc64_fit-u-boot.dtsi" +#include <dt-bindings/clock/n5x-clock.h> + +/{ + memory { + #address-cells = <2>; + #size-cells = <2>; + u-boot,dm-pre-reloc; + }; + + soc { + u-boot,dm-pre-reloc; + + ccu: cache-controller@f7000000 { + compatible = "arteris,ncore-ccu"; + reg = <0xf7000000 0x100900>; + u-boot,dm-pre-reloc; + }; + + clocks { + dram_eosc_clk: dram-eosc-clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + }; + }; + + memclkmgr: mem-clock-controller@f8040000 { + compatible = "intel,n5x-mem-clkmgr"; + reg = <0xf8040000 0x1000>; + #clock-cells = <0>; + clocks = <&dram_eosc_clk>, <&f2s_free_clk>; + }; + }; +}; + +&clkmgr { + compatible = "intel,n5x-clkmgr"; + u-boot,dm-pre-reloc; +}; + +&gmac0 { + clocks = <&clkmgr N5X_EMAC0_CLK>; +}; + +&gmac1 { + altr,sysmgr-syscon = <&sysmgr 0x48 0>; + clocks = <&clkmgr N5X_EMAC1_CLK>; +}; + +&gmac2 { + altr,sysmgr-syscon = <&sysmgr 0x4c 0>; + clocks = <&clkmgr N5X_EMAC2_CLK>; +}; + +&i2c0 { + clocks = <&clkmgr N5X_L4_SP_CLK>; + reset-names = "i2c"; +}; + +&i2c1 { + clocks = <&clkmgr N5X_L4_SP_CLK>; + reset-names = "i2c"; +}; + +&i2c2 { + clocks = <&clkmgr N5X_L4_SP_CLK>; + reset-names = "i2c"; +}; + +&i2c3 { + clocks = <&clkmgr N5X_L4_SP_CLK>; + reset-names = 
"i2c"; +}; + +&i2c4 { + clocks = <&clkmgr N5X_L4_SP_CLK>; + reset-names = "i2c"; +}; + +&memclkmgr { + u-boot,dm-pre-reloc; +}; + +&mmc { + clocks = <&clkmgr N5X_L4_MP_CLK>, + <&clkmgr N5X_SDMMC_CLK>; + resets = <&rst SDMMC_RESET>, <&rst SDMMC_OCP_RESET>; +}; + +&pdma { + clocks = <&clkmgr N5X_L4_MAIN_CLK>; +}; + +&porta { + bank-name = "porta"; +}; + +&portb { + bank-name = "portb"; +}; + +&qspi { + u-boot,dm-pre-reloc; +}; + +&rst { + compatible = "altr,rst-mgr"; + altr,modrst-offset = <0x20>; + u-boot,dm-pre-reloc; +}; + +&sdr { + compatible = "intel,sdr-ctl-n5x"; + resets = <&rst DDRSCH_RESET>; + clocks = <&memclkmgr>; + clock-names = "mem_clk"; + u-boot,dm-pre-reloc; +}; + +&spi0 { + clocks = <&clkmgr N5X_L4_MAIN_CLK>; +}; + +&spi1 { + clocks = <&clkmgr N5X_L4_MAIN_CLK>; +}; + +&sysmgr { + compatible = "altr,sys-mgr", "syscon"; + u-boot,dm-pre-reloc; +}; + +&timer0 { + clocks = <&clkmgr N5X_L4_SP_CLK>; +}; + +&timer1 { + clocks = <&clkmgr N5X_L4_SP_CLK>; +}; + +&timer2 { + clocks = <&clkmgr N5X_L4_SP_CLK>; +}; + +&timer3 { + clocks = <&clkmgr N5X_L4_SP_CLK>; +}; + +&uart0 { + clocks = <&clkmgr N5X_L4_SP_CLK>; + u-boot,dm-pre-reloc; +}; + +&uart1 { + clocks = <&clkmgr N5X_L4_SP_CLK>; +}; + +&usb0 { + clocks = <&clkmgr N5X_USB_CLK>; + disable-over-current; + u-boot,dm-pre-reloc; +}; + +&usb1 { + clocks = <&clkmgr N5X_USB_CLK>; + u-boot,dm-pre-reloc; +}; + +&watchdog0 { + clocks = <&clkmgr N5X_L4_SYS_FREE_CLK>; + u-boot,dm-pre-reloc; +}; + +&watchdog1 { + clocks = <&clkmgr N5X_L4_SYS_FREE_CLK>; +}; + +&watchdog2 { + clocks = <&clkmgr N5X_L4_SYS_FREE_CLK>; +}; + +&watchdog3 { + clocks = <&clkmgr N5X_L4_SYS_FREE_CLK>; +}; diff --git a/arch/arm/dts/socfpga_n5x_socdk-u-boot.dtsi b/arch/arm/dts/socfpga_n5x_socdk-u-boot.dtsi new file mode 100644 index 0000000..502da36 --- /dev/null +++ b/arch/arm/dts/socfpga_n5x_socdk-u-boot.dtsi @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * U-Boot additions + * + * Copyright (C) 2020-2021 Intel Corporation 
<www.intel.com> + */ + +#include "socfpga_n5x-u-boot.dtsi" + +/{ + aliases { + spi0 = &qspi; + i2c0 = &i2c1; + }; + + memory { + /* + * Memory type: DDR4 (non-interleaving mode) + * 16GB + * <0 0x00000000 0 0x80000000>, + * <4 0x80000000 3 0x80000000>; + * + * 8GB + * <0 0x00000000 0 0x80000000>, + * <2 0x80000000 1 0x80000000>; + * + * 4GB + * <0 0x00000000 0 0x80000000>, + * <1 0x80000000 0 0x80000000>; + * + * Memory type: LPDDR4 (non-interleaving mode) + * Total memory size 3GB, usable = 2.5GB, 0.5GB trade off for secure + * region. + */ + reg = <0 0x00000000 0 0x60000000>, + <0x10 0x00100000 0 0x40000000>; + }; +}; + +&flash0 { + compatible = "jedec,spi-nor"; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + u-boot,dm-pre-reloc; +}; + +&i2c1 { + status = "okay"; +}; + +&mmc { + drvsel = <3>; + smplsel = <0>; + u-boot,dm-pre-reloc; +}; + +&qspi { + status = "okay"; +}; + +&watchdog0 { + u-boot,dm-pre-reloc; +}; diff --git a/arch/arm/dts/socfpga_n5x_socdk.dts b/arch/arm/dts/socfpga_n5x_socdk.dts new file mode 100644 index 0000000..915b8f6 --- /dev/null +++ b/arch/arm/dts/socfpga_n5x_socdk.dts @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021, Intel Corporation + */ +#include "socfpga_agilex.dtsi" + +/ { + model = "eASIC N5X SoCDK"; + + aliases { + serial0 = &uart0; + ethernet0 = &gmac0; + ethernet1 = &gmac1; + ethernet2 = &gmac2; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory { + device_type = "memory"; + /* We expect the bootloader to fill in the reg */ + reg = <0 0 0 0>; + }; + + soc { + clocks { + osc1 { + clock-frequency = <25000000>; + }; + }; + }; +}; + +&gmac0 { + status = "okay"; + phy-mode = "rgmii"; + phy-handle = <&phy0>; + max-frame-size = <9000>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@0 { + reg = <4>; + + txd0-skew-ps = <0>; /* -420ps */ + txd1-skew-ps = <0>; /* -420ps */ + txd2-skew-ps = <0>; /* -420ps */ + 
txd3-skew-ps = <0>; /* -420ps */ + rxd0-skew-ps = <420>; /* 0ps */ + rxd1-skew-ps = <420>; /* 0ps */ + rxd2-skew-ps = <420>; /* 0ps */ + rxd3-skew-ps = <420>; /* 0ps */ + txen-skew-ps = <0>; /* -420ps */ + txc-skew-ps = <900>; /* 0ps */ + rxdv-skew-ps = <420>; /* 0ps */ + rxc-skew-ps = <1680>; /* 780ps */ + }; + }; +}; + +&gpio1 { + status = "okay"; +}; + +&mmc { + status = "okay"; + cap-sd-highspeed; + broken-cd; + bus-width = <4>; +}; + +&qspi { + status = "okay"; + flash0: flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "mt25qu02g"; + reg = <0>; + spi-max-frequency = <100000000>; + + m25p,fast-read; + cdns,page-size = <256>; + cdns,block-size = <16>; + cdns,read-delay = <3>; + cdns,tshsl-ns = <50>; + cdns,tsd2d-ns = <50>; + cdns,tchsh-ns = <4>; + cdns,tslch-ns = <4>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + qspi_boot: partition@0 { + label = "Boot and fpga data"; + reg = <0x0 0x034B0000>; + }; + + qspi_rootfs: partition@34B0000 { + label = "Root Filesystem - JFFS2"; + reg = <0x034B0000 0x0EB50000>; + }; + }; + }; +}; + +&uart0 { + status = "okay"; +}; + +&usb0 { + status = "okay"; +}; + +&usb1 { + status = "okay"; +}; diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index 0c35406..f4791c1 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -8,7 +8,7 @@ config NR_DRAM_BANKS config SOCFPGA_SECURE_VAB_AUTH bool "Enable boot image authentication with Secure Device Manager" - depends on TARGET_SOCFPGA_AGILEX + depends on TARGET_SOCFPGA_AGILEX || TARGET_SOCFPGA_N5X select FIT_IMAGE_POST_PROCESS select SHA384 select SHA512_ALGO @@ -91,6 +91,22 @@ config TARGET_SOCFPGA_GEN5 imply SPL_SYS_MALLOC_SIMPLE imply SPL_USE_TINY_PRINTF +config TARGET_SOCFPGA_N5X + bool + select ARMV8_MULTIENTRY + select ARMV8_SET_SMPEN + select BINMAN if SPL_ATF + select CLK + select FPGA_INTEL_SDM_MAILBOX + select NCORE_CACHE + select SPL_ALTERA_SDRAM + 
select SPL_CLK if SPL + select TARGET_SOCFPGA_SOC64 + +config TARGET_SOCFPGA_N5X_SOCDK + bool "Intel eASIC SoCDK (N5X)" + select TARGET_SOCFPGA_N5X + config TARGET_SOCFPGA_SOC64 bool @@ -185,6 +201,7 @@ config SYS_BOARD default "de10-nano" if TARGET_SOCFPGA_TERASIC_DE10_NANO default "is1" if TARGET_SOCFPGA_IS1 default "mcvevk" if TARGET_SOCFPGA_ARIES_MCVEVK + default "n5x-socdk" if TARGET_SOCFPGA_N5X_SOCDK default "secu1" if TARGET_SOCFPGA_ARRIA5_SECU1 default "sockit" if TARGET_SOCFPGA_TERASIC_SOCKIT default "socrates" if TARGET_SOCFPGA_EBV_SOCRATES @@ -194,6 +211,7 @@ config SYS_BOARD config SYS_VENDOR default "intel" if TARGET_SOCFPGA_AGILEX_SOCDK + default "intel" if TARGET_SOCFPGA_N5X_SOCDK default "altera" if TARGET_SOCFPGA_ARRIA5_SOCDK default "altera" if TARGET_SOCFPGA_ARRIA10_SOCDK default "altera" if TARGET_SOCFPGA_CYCLONE5_SOCDK @@ -223,6 +241,7 @@ config SYS_CONFIG_NAME default "socfpga_de10_nano" if TARGET_SOCFPGA_TERASIC_DE10_NANO default "socfpga_is1" if TARGET_SOCFPGA_IS1 default "socfpga_mcvevk" if TARGET_SOCFPGA_ARIES_MCVEVK + default "socfpga_n5x_socdk" if TARGET_SOCFPGA_N5X_SOCDK default "socfpga_sockit" if TARGET_SOCFPGA_TERASIC_SOCKIT default "socfpga_socrates" if TARGET_SOCFPGA_EBV_SOCRATES default "socfpga_sr1500" if TARGET_SOCFPGA_SR1500 diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile index 5779c55..ec38b64 100644 --- a/arch/arm/mach-socfpga/Makefile +++ b/arch/arm/mach-socfpga/Makefile @@ -4,7 +4,7 @@ # Wolfgang Denk, DENX Software Engineering, wd@denx.de. 
# # Copyright (C) 2012-2017 Altera Corporation <www.altera.com> -# Copyright (C) 2017-2020 Intel Corporation <www.intel.com> +# Copyright (C) 2017-2021 Intel Corporation <www.intel.com> obj-y += board.o obj-y += clock_manager.o @@ -32,7 +32,7 @@ ifdef CONFIG_TARGET_SOCFPGA_STRATIX10 obj-y += clock_manager_s10.o obj-y += lowlevel_init_soc64.o obj-y += mailbox_s10.o -obj-y += misc_s10.o +obj-y += misc_soc64.o obj-y += mmu-arm64_s10.o obj-y += reset_manager_s10.o obj-y += system_manager_soc64.o @@ -45,7 +45,22 @@ ifdef CONFIG_TARGET_SOCFPGA_AGILEX obj-y += clock_manager_agilex.o obj-y += lowlevel_init_soc64.o obj-y += mailbox_s10.o -obj-y += misc_s10.o +obj-y += misc_soc64.o +obj-y += mmu-arm64_s10.o +obj-y += reset_manager_s10.o +obj-$(CONFIG_SOCFPGA_SECURE_VAB_AUTH) += secure_vab.o +obj-y += system_manager_soc64.o +obj-y += timer_s10.o +obj-$(CONFIG_SOCFPGA_SECURE_VAB_AUTH) += vab.o +obj-y += wrap_handoff_soc64.o +obj-y += wrap_pll_config_soc64.o +endif + +ifdef CONFIG_TARGET_SOCFPGA_N5X +obj-y += clock_manager_n5x.o +obj-y += lowlevel_init_soc64.o +obj-y += mailbox_s10.o +obj-y += misc_soc64.o obj-y += mmu-arm64_s10.o obj-y += reset_manager_s10.o obj-$(CONFIG_SOCFPGA_SECURE_VAB_AUTH) += secure_vab.o @@ -64,18 +79,21 @@ obj-y += wrap_iocsr_config.o obj-y += wrap_pinmux_config.o obj-y += wrap_sdram_config.o endif +ifdef CONFIG_TARGET_SOCFPGA_SOC64 +obj-y += firewall.o +obj-y += spl_soc64.o +endif ifdef CONFIG_TARGET_SOCFPGA_ARRIA10 obj-y += spl_a10.o endif ifdef CONFIG_TARGET_SOCFPGA_STRATIX10 -obj-y += firewall.o obj-y += spl_s10.o -obj-y += spl_soc64.o endif ifdef CONFIG_TARGET_SOCFPGA_AGILEX -obj-y += firewall.o obj-y += spl_agilex.o -obj-y += spl_soc64.o +endif +ifdef CONFIG_TARGET_SOCFPGA_N5X +obj-y += spl_n5x.o endif else obj-$(CONFIG_SPL_ATF) += secure_reg_helper.o diff --git a/arch/arm/mach-socfpga/board.c b/arch/arm/mach-socfpga/board.c index 36eecdc..7267163 100644 --- a/arch/arm/mach-socfpga/board.c +++ b/arch/arm/mach-socfpga/board.c @@ -116,17 +116,18 @@ 
void board_fit_image_post_process(const void *fit, int node, void **p_image, #if !IS_ENABLED(CONFIG_SPL_BUILD) && IS_ENABLED(CONFIG_FIT) void board_prep_linux(bootm_headers_t *images) { - if (IS_ENABLED(CONFIG_SOCFPGA_SECURE_VAB_AUTH) && - !IS_ENABLED(CONFIG_SOCFPGA_SECURE_VAB_AUTH_ALLOW_NON_FIT_IMAGE)) { - /* - * Ensure the OS is always booted from FIT and with - * VAB signed certificate - */ - if (!images->fit_uname_cfg) { + if (!images->fit_uname_cfg) { + if (IS_ENABLED(CONFIG_SOCFPGA_SECURE_VAB_AUTH) && + !IS_ENABLED(CONFIG_SOCFPGA_SECURE_VAB_AUTH_ALLOW_NON_FIT_IMAGE)) { + /* + * Ensure the OS is always booted from FIT and with + * VAB signed certificate + */ printf("Please use FIT with VAB signed images!\n"); hang(); } - + } else { + /* Update fdt_addr in environment variable */ env_set_hex("fdt_addr", (ulong)images->ft_addr); debug("images->ft_addr = 0x%08lx\n", (ulong)images->ft_addr); } diff --git a/arch/arm/mach-socfpga/clock_manager_n5x.c b/arch/arm/mach-socfpga/clock_manager_n5x.c new file mode 100644 index 0000000..4f09853 --- /dev/null +++ b/arch/arm/mach-socfpga/clock_manager_n5x.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> +#include <asm/arch/clock_manager.h> +#include <asm/arch/system_manager.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <clk.h> +#include <dm.h> +#include <dt-bindings/clock/n5x-clock.h> +#include <malloc.h> + +DECLARE_GLOBAL_DATA_PTR; + +static ulong cm_get_rate_dm(u32 id) +{ + struct udevice *dev; + struct clk clk; + ulong rate; + int ret; + + ret = uclass_get_device_by_driver(UCLASS_CLK, + DM_DRIVER_GET(socfpga_n5x_clk), + &dev); + if (ret) + return 0; + + clk.id = id; + ret = clk_request(dev, &clk); + if (ret < 0) + return 0; + + rate = clk_get_rate(&clk); + + clk_free(&clk); + + if ((rate == (unsigned long)-ENXIO) || + (rate == (unsigned long)-EIO)) { + debug("%s id %u: clk_get_rate err: %ld\n", 
__func__, id, rate); + return 0; + } + + return rate; +} + +static u32 cm_get_rate_dm_khz(u32 id) +{ + return cm_get_rate_dm(id) / 1000; +} + +unsigned long cm_get_mpu_clk_hz(void) +{ + return cm_get_rate_dm(N5X_MPU_CLK); +} + +unsigned int cm_get_l4_sys_free_clk_hz(void) +{ + return cm_get_rate_dm(N5X_L4_SYS_FREE_CLK); +} + +void cm_print_clock_quick_summary(void) +{ + printf("MPU %10d kHz\n", + cm_get_rate_dm_khz(N5X_MPU_CLK)); + printf("L4 Main %8d kHz\n", + cm_get_rate_dm_khz(N5X_L4_MAIN_CLK)); + printf("L4 sys free %8d kHz\n", + cm_get_rate_dm_khz(N5X_L4_SYS_FREE_CLK)); + printf("L4 MP %8d kHz\n", + cm_get_rate_dm_khz(N5X_L4_MP_CLK)); + printf("L4 SP %8d kHz\n", + cm_get_rate_dm_khz(N5X_L4_SP_CLK)); + printf("SDMMC %8d kHz\n", + cm_get_rate_dm_khz(N5X_SDMMC_CLK)); +} diff --git a/arch/arm/mach-socfpga/include/mach/base_addr_s10.h b/arch/arm/mach-socfpga/include/mach/base_addr_soc64.h index d3eca65..3f899fc 100644 --- a/arch/arm/mach-socfpga/include/mach/base_addr_s10.h +++ b/arch/arm/mach-socfpga/include/mach/base_addr_soc64.h @@ -1,16 +1,17 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2016-2017 Intel Corporation <www.intel.com> + * Copyright (C) 2016-2021 Intel Corporation <www.intel.com> */ -#ifndef _SOCFPGA_S10_BASE_HARDWARE_H_ -#define _SOCFPGA_S10_BASE_HARDWARE_H_ +#ifndef _SOCFPGA_SOC64_BASE_HARDWARE_H_ +#define _SOCFPGA_SOC64_BASE_HARDWARE_H_ #define SOCFPGA_CCU_ADDRESS 0xf7000000 #define SOCFPGA_SDR_SCHEDULER_ADDRESS 0xf8000400 #define SOCFPGA_HMC_MMR_IO48_ADDRESS 0xf8010000 #define SOCFPGA_SDR_ADDRESS 0xf8011000 -#ifdef CONFIG_TARGET_SOCFPGA_AGILEX +#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_AGILEX) || \ + IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) #define SOCFPGA_FW_MPU_DDR_SCR_ADDRESS 0xf8020200 #else #define SOCFPGA_FW_MPU_DDR_SCR_ADDRESS 0xf8020100 @@ -44,4 +45,4 @@ #define GICD_BASE 0xfffc1000 #define GICC_BASE 0xfffc2000 -#endif /* _SOCFPGA_S10_BASE_HARDWARE_H_ */ +#endif /* _SOCFPGA_SOC64_BASE_HARDWARE_H_ */ diff --git 
a/arch/arm/mach-socfpga/include/mach/clock_manager.h b/arch/arm/mach-socfpga/include/mach/clock_manager.h index 2f9b471..a8cb07a 100644 --- a/arch/arm/mach-socfpga/include/mach/clock_manager.h +++ b/arch/arm/mach-socfpga/include/mach/clock_manager.h @@ -12,6 +12,7 @@ phys_addr_t socfpga_get_clkmgr_addr(void); void cm_wait_for_lock(u32 mask); int cm_wait_for_fsm(void); void cm_print_clock_quick_summary(void); +unsigned long cm_get_mpu_clk_hz(void); unsigned int cm_get_qspi_controller_clk_hz(void); #if defined(CONFIG_TARGET_SOCFPGA_SOC64) @@ -27,6 +28,8 @@ int cm_set_qspi_controller_clk_hz(u32 clk_hz); #include <asm/arch/clock_manager_s10.h> #elif defined(CONFIG_TARGET_SOCFPGA_AGILEX) #include <asm/arch/clock_manager_agilex.h> +#elif IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) +#include <asm/arch/clock_manager_n5x.h> #endif #endif /* _CLOCK_MANAGER_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/clock_manager_agilex.h b/arch/arm/mach-socfpga/include/mach/clock_manager_agilex.h index 386e82a..4feae3d 100644 --- a/arch/arm/mach-socfpga/include/mach/clock_manager_agilex.h +++ b/arch/arm/mach-socfpga/include/mach/clock_manager_agilex.h @@ -6,8 +6,6 @@ #ifndef _CLOCK_MANAGER_AGILEX_ #define _CLOCK_MANAGER_AGILEX_ -unsigned long cm_get_mpu_clk_hz(void); - #include <asm/arch/clock_manager_soc64.h> #include "../../../../../drivers/clk/altera/clk-agilex.h" diff --git a/arch/arm/mach-socfpga/include/mach/clock_manager_arria10.h b/arch/arm/mach-socfpga/include/mach/clock_manager_arria10.h index 798d374..553ebe6 100644 --- a/arch/arm/mach-socfpga/include/mach/clock_manager_arria10.h +++ b/arch/arm/mach-socfpga/include/mach/clock_manager_arria10.h @@ -68,7 +68,6 @@ int cm_basic_init(const void *blob); #include <linux/bitops.h> unsigned int cm_get_l4_sp_clk_hz(void); -unsigned long cm_get_mpu_clk_hz(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/mach-socfpga/include/mach/clock_manager_gen5.h b/arch/arm/mach-socfpga/include/mach/clock_manager_gen5.h index 4cc1268..d53095a 
100644 --- a/arch/arm/mach-socfpga/include/mach/clock_manager_gen5.h +++ b/arch/arm/mach-socfpga/include/mach/clock_manager_gen5.h @@ -96,7 +96,6 @@ struct cm_config { #define CLKMGR_PERPLL_EN CLKMGR_GEN5_PERPLL_EN /* Clock speed accessors */ -unsigned long cm_get_mpu_clk_hz(void); unsigned long cm_get_sdram_clk_hz(void); unsigned int cm_get_l4_sp_clk_hz(void); unsigned int cm_get_mmc_controller_clk_hz(void); diff --git a/arch/arm/mach-socfpga/include/mach/clock_manager_n5x.h b/arch/arm/mach-socfpga/include/mach/clock_manager_n5x.h new file mode 100644 index 0000000..54615ae --- /dev/null +++ b/arch/arm/mach-socfpga/include/mach/clock_manager_n5x.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + */ + +#ifndef _CLOCK_MANAGER_N5X_ +#define _CLOCK_MANAGER_N5X_ + +#include <asm/arch/clock_manager_soc64.h> +#include "../../../../../drivers/clk/altera/clk-n5x.h" + +#endif /* _CLOCK_MANAGER_N5X_ */ diff --git a/arch/arm/mach-socfpga/include/mach/clock_manager_s10.h b/arch/arm/mach-socfpga/include/mach/clock_manager_s10.h index 98c3bf1..7f10296 100644 --- a/arch/arm/mach-socfpga/include/mach/clock_manager_s10.h +++ b/arch/arm/mach-socfpga/include/mach/clock_manager_s10.h @@ -11,7 +11,6 @@ #include <linux/bitops.h> /* Clock speed accessors */ -unsigned long cm_get_mpu_clk_hz(void); unsigned long cm_get_sdram_clk_hz(void); unsigned int cm_get_l4_sp_clk_hz(void); unsigned int cm_get_mmc_controller_clk_hz(void); diff --git a/arch/arm/mach-socfpga/include/mach/firewall.h b/arch/arm/mach-socfpga/include/mach/firewall.h index adab65b..5cb7f23 100644 --- a/arch/arm/mach-socfpga/include/mach/firewall.h +++ b/arch/arm/mach-socfpga/include/mach/firewall.h @@ -115,10 +115,16 @@ struct socfpga_firwall_l4_sys { /* Firewall MPU DDR SCR registers */ #define FW_MPU_DDR_SCR_EN 0x00 #define FW_MPU_DDR_SCR_EN_SET 0x04 +#define FW_MPU_DDR_SCR_MPUREGION0ADDR_BASE 0x10 +#define FW_MPU_DDR_SCR_MPUREGION0ADDR_BASEEXT 0x14 
#define FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT 0x18 #define FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0x1c + +#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASE 0x90 +#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASEEXT 0x94 #define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0x98 #define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0x9c +#define FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT_FIELD 0xff #define MPUREGION0_ENABLE BIT(0) #define NONMPUREGION0_ENABLE BIT(8) diff --git a/arch/arm/mach-socfpga/include/mach/handoff_soc64.h b/arch/arm/mach-socfpga/include/mach/handoff_soc64.h index 3750216..902fc6b 100644 --- a/arch/arm/mach-socfpga/include/mach/handoff_soc64.h +++ b/arch/arm/mach-socfpga/include/mach/handoff_soc64.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright (C) 2016-2020 Intel Corporation <www.intel.com> + * Copyright (C) 2016-2021 Intel Corporation <www.intel.com> * */ @@ -23,8 +23,36 @@ #define SOC64_HANDOFF_OFFSET_DATA 0x10 #define SOC64_HANDOFF_SIZE 4096 +#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_STRATIX10) || \ + IS_ENABLED(CONFIG_TARGET_SOCFPGA_AGILEX) #define SOC64_HANDOFF_BASE 0xFFE3F000 #define SOC64_HANDOFF_MISC (SOC64_HANDOFF_BASE + 0x610) +#elif IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) +#define SOC64_HANDOFF_BASE 0xFFE5F000 +#define SOC64_HANDOFF_MISC (SOC64_HANDOFF_BASE + 0x630) + +/* DDR handoff */ +#define SOC64_HANDOFF_DDR_BASE 0xFFE5C000 +#define SOC64_HANDOFF_DDR_MAGIC 0x48524444 +#define SOC64_HANDOFF_DDR_UMCTL2_MAGIC 0x4C54434D +#define SOC64_HANDOFF_DDR_UMCTL2_DDR4_TYPE 0x34524444 +#define SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_0_TYPE 0x3044504C +#define SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_1_TYPE 0x3144504C +#define SOC64_HANDOFF_DDR_MEMRESET_BASE (SOC64_HANDOFF_DDR_BASE + 0xC) +#define SOC64_HANDOFF_DDR_UMCTL2_SECTION (SOC64_HANDOFF_DDR_BASE + 0x10) +#define SOC64_HANDOFF_DDR_PHY_MAGIC 0x43594850 +#define SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC 0x45594850 +#define SOC64_HANDOFF_DDR_PHY_BASE_OFFSET 0x8 +#define SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET 0x8 
+#define SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET 0xC +#define SOC64_HANDOFF_DDR_TRAIN_IMEM_1D_SECTION 0xFFE50000 +#define SOC64_HANDOFF_DDR_TRAIN_DMEM_1D_SECTION 0xFFE58000 +#define SOC64_HANDOFF_DDR_TRAIN_IMEM_2D_SECTION 0xFFE44000 +#define SOC64_HANDOFF_DDR_TRAIN_DMEM_2D_SECTION 0xFFE4C000 +#define SOC64_HANDOFF_DDR_TRAIN_IMEM_LENGTH SZ_32K +#define SOC64_HANDOFF_DDR_TRAIN_DMEM_LENGTH SZ_16K +#endif + #define SOC64_HANDOFF_MUX (SOC64_HANDOFF_BASE + 0x10) #define SOC64_HANDOFF_IOCTL (SOC64_HANDOFF_BASE + 0x1A0) #define SOC64_HANDOFF_FPGA (SOC64_HANDOFF_BASE + 0x330) @@ -52,11 +80,11 @@ #include <asm/types.h> enum endianness { LITTLE_ENDIAN = 0, - BIG_ENDIAN + BIG_ENDIAN, + UNKNOWN_ENDIANNESS }; -int socfpga_get_handoff_size(void *handoff_address, enum endianness endian); -int socfpga_handoff_read(void *handoff_address, void *table, u32 table_len, - enum endianness big_endian); +int socfpga_get_handoff_size(void *handoff_address); +int socfpga_handoff_read(void *handoff_address, void *table, u32 table_len); #endif #endif /* _HANDOFF_SOC64_H_ */ diff --git a/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h b/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h index fc4e178..a800966 100644 --- a/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h +++ b/arch/arm/mach-socfpga/include/mach/system_manager_soc64.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Intel Corporation <www.intel.com> + * Copyright (C) 2019-2021 Intel Corporation <www.intel.com> */ #ifndef _SYSTEM_MANAGER_SOC64_H_ @@ -28,8 +28,12 @@ void populate_sysmgr_pinmux(void); #define SYSMGR_SOC64_FPGAINTF_EN2 0x6c #define SYSMGR_SOC64_FPGAINTF_EN3 0x70 #define SYSMGR_SOC64_DMA_L3MASTER 0x74 +#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) +#define SYSMGR_SOC64_DDR_MODE 0xb8 +#else #define SYSMGR_SOC64_HMC_CLK 0xb4 #define SYSMGR_SOC64_IO_PA_CTRL 0xb8 +#endif #define SYSMGR_SOC64_NOC_TIMEOUT 0xc0 #define SYSMGR_SOC64_NOC_IDLEREQ_SET 0xc4 #define 
SYSMGR_SOC64_NOC_IDLEREQ_CLR 0xc8 @@ -143,4 +147,8 @@ void populate_sysmgr_pinmux(void); #define SYSMGR_WDDBG_PAUSE_ALL_CPU 0x0F0F0F0F +#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) +#define SYSMGR_SOC64_DDR_MODE_MSK BIT(0) +#endif + #endif /* _SYSTEM_MANAGER_SOC64_H_ */ diff --git a/arch/arm/mach-socfpga/misc.c b/arch/arm/mach-socfpga/misc.c index f8d3d48..9c19157 100644 --- a/arch/arm/mach-socfpga/misc.c +++ b/arch/arm/mach-socfpga/misc.c @@ -254,6 +254,9 @@ void socfpga_get_managers_addr(void) #ifdef CONFIG_TARGET_SOCFPGA_AGILEX ret = socfpga_get_base_addr("intel,agilex-clkmgr", &socfpga_clkmgr_base); +#elif IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) + ret = socfpga_get_base_addr("intel,n5x-clkmgr", + &socfpga_clkmgr_base); #else ret = socfpga_get_base_addr("altr,clk-mgr", &socfpga_clkmgr_base); #endif diff --git a/arch/arm/mach-socfpga/misc_s10.c b/arch/arm/mach-socfpga/misc_soc64.c index 50c7f19..7b973a7 100644 --- a/arch/arm/mach-socfpga/misc_s10.c +++ b/arch/arm/mach-socfpga/misc_soc64.c @@ -6,16 +6,16 @@ #include <altera.h> #include <common.h> -#include <env.h> -#include <errno.h> -#include <init.h> -#include <log.h> -#include <asm/global_data.h> -#include <asm/io.h> #include <asm/arch/mailbox_s10.h> #include <asm/arch/misc.h> #include <asm/arch/reset_manager.h> #include <asm/arch/system_manager.h> +#include <asm/io.h> +#include <asm/global_data.h> +#include <env.h> +#include <errno.h> +#include <init.h> +#include <log.h> DECLARE_GLOBAL_DATA_PTR; diff --git a/arch/arm/mach-socfpga/spl_n5x.c b/arch/arm/mach-socfpga/spl_n5x.c new file mode 100644 index 0000000..d056871 --- /dev/null +++ b/arch/arm/mach-socfpga/spl_n5x.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> +#include <asm/arch/clock_manager.h> +#include <asm/arch/firewall.h> +#include <asm/arch/mailbox_s10.h> +#include <asm/arch/misc.h> +#include <asm/arch/reset_manager.h> +#include 
<asm/arch/system_manager.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm/u-boot.h> +#include <asm/utils.h> +#include <dm/uclass.h> +#include <hang.h> +#include <image.h> +#include <init.h> +#include <spl.h> +#include <watchdog.h> + +DECLARE_GLOBAL_DATA_PTR; + +void board_init_f(ulong dummy) +{ + int ret; + struct udevice *dev; + + ret = spl_early_init(); + if (ret) + hang(); + + socfpga_get_managers_addr(); + + /* Ensure watchdog is paused when debugging is happening */ + writel(SYSMGR_WDDBG_PAUSE_ALL_CPU, + socfpga_get_sysmgr_addr() + SYSMGR_SOC64_WDDBG); + +#ifdef CONFIG_HW_WATCHDOG + /* Enable watchdog before initializing the HW */ + socfpga_per_reset(SOCFPGA_RESET(L4WD0), 1); + socfpga_per_reset(SOCFPGA_RESET(L4WD0), 0); + hw_watchdog_init(); +#endif + + /* ensure all processors are not released prior Linux boot */ + writeq(0, CPU_RELEASE_ADDR); + + timer_init(); + + sysmgr_pinmux_init(); + + preloader_console_init(); + + ret = uclass_get_device(UCLASS_CLK, 0, &dev); + if (ret) { + printf("Clock init failed: %d\n", ret); + hang(); + } + + ret = uclass_get_device(UCLASS_CLK, 1, &dev); + if (ret) { + printf("Memory clock init failed: %d\n", ret); + hang(); + } + + print_reset_info(); + cm_print_clock_quick_summary(); + + firewall_setup(); + + ret = uclass_get_device(UCLASS_CACHE, 0, &dev); + if (ret) { + printf("CCU init failed: %d\n", ret); + hang(); + } + +#if CONFIG_IS_ENABLED(ALTERA_SDRAM) + ret = uclass_get_device(UCLASS_RAM, 0, &dev); + if (ret) { + printf("DRAM init failed: %d\n", ret); + hang(); + } +#endif + + mbox_init(); + +#ifdef CONFIG_CADENCE_QSPI + mbox_qspi_open(); +#endif +} diff --git a/arch/arm/mach-socfpga/system_manager_soc64.c b/arch/arm/mach-socfpga/system_manager_soc64.c index 3b5e774..958bb51 100644 --- a/arch/arm/mach-socfpga/system_manager_soc64.c +++ b/arch/arm/mach-socfpga/system_manager_soc64.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2016-2018 Intel Corporation <www.intel.com> + * 
Copyright (C) 2016-2021 Intel Corporation <www.intel.com> * */ @@ -66,10 +66,10 @@ void populate_sysmgr_fpgaintf_module(void) void populate_sysmgr_pinmux(void) { u32 len, i; - u32 len_mux = socfpga_get_handoff_size((void *)SOC64_HANDOFF_MUX, BIG_ENDIAN); - u32 len_ioctl = socfpga_get_handoff_size((void *)SOC64_HANDOFF_IOCTL, BIG_ENDIAN); - u32 len_fpga = socfpga_get_handoff_size((void *)SOC64_HANDOFF_FPGA, BIG_ENDIAN); - u32 len_delay = socfpga_get_handoff_size((void *)SOC64_HANDOFF_DELAY, BIG_ENDIAN); + u32 len_mux = socfpga_get_handoff_size((void *)SOC64_HANDOFF_MUX); + u32 len_ioctl = socfpga_get_handoff_size((void *)SOC64_HANDOFF_IOCTL); + u32 len_fpga = socfpga_get_handoff_size((void *)SOC64_HANDOFF_FPGA); + u32 len_delay = socfpga_get_handoff_size((void *)SOC64_HANDOFF_DELAY); len = (len_mux > len_ioctl) ? len_mux : len_ioctl; len = (len > len_fpga) ? len : len_fpga; @@ -79,7 +79,7 @@ void populate_sysmgr_pinmux(void) /* setup the pin sel */ len = (len_mux < SOC64_HANDOFF_MUX_LEN) ? len_mux : SOC64_HANDOFF_MUX_LEN; - socfpga_handoff_read((void *)SOC64_HANDOFF_MUX, handoff_table, len, BIG_ENDIAN); + socfpga_handoff_read((void *)SOC64_HANDOFF_MUX, handoff_table, len); for (i = 0; i < len; i = i + 2) { writel(handoff_table[i + 1], handoff_table[i] + @@ -89,7 +89,7 @@ void populate_sysmgr_pinmux(void) /* setup the pin ctrl */ len = (len_ioctl < SOC64_HANDOFF_IOCTL_LEN) ? len_ioctl : SOC64_HANDOFF_IOCTL_LEN; - socfpga_handoff_read((void *)SOC64_HANDOFF_IOCTL, handoff_table, len, BIG_ENDIAN); + socfpga_handoff_read((void *)SOC64_HANDOFF_IOCTL, handoff_table, len); for (i = 0; i < len; i = i + 2) { writel(handoff_table[i + 1], handoff_table[i] + @@ -99,7 +99,7 @@ void populate_sysmgr_pinmux(void) /* setup the fpga use */ len = (len_fpga < SOC64_HANDOFF_FPGA_LEN) ? 
len_fpga : SOC64_HANDOFF_FPGA_LEN; - socfpga_handoff_read((void *)SOC64_HANDOFF_FPGA, handoff_table, len, BIG_ENDIAN); + socfpga_handoff_read((void *)SOC64_HANDOFF_FPGA, handoff_table, len); for (i = 0; i < len; i = i + 2) { writel(handoff_table[i + 1], handoff_table[i] + @@ -109,7 +109,7 @@ void populate_sysmgr_pinmux(void) /* setup the IO delay */ len = (len_delay < SOC64_HANDOFF_DELAY_LEN) ? len_delay : SOC64_HANDOFF_DELAY_LEN; - socfpga_handoff_read((void *)SOC64_HANDOFF_DELAY, handoff_table, len, BIG_ENDIAN); + socfpga_handoff_read((void *)SOC64_HANDOFF_DELAY, handoff_table, len); for (i = 0; i < len; i = i + 2) { writel(handoff_table[i + 1], handoff_table[i] + diff --git a/arch/arm/mach-socfpga/wrap_handoff_soc64.c b/arch/arm/mach-socfpga/wrap_handoff_soc64.c index a7ad7a1..e7cb5ea 100644 --- a/arch/arm/mach-socfpga/wrap_handoff_soc64.c +++ b/arch/arm/mach-socfpga/wrap_handoff_soc64.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2020 Intel Corporation <www.intel.com> + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> * */ @@ -10,12 +10,64 @@ #include <errno.h> #include "log.h" -int socfpga_get_handoff_size(void *handoff_address, enum endianness endian) +static enum endianness check_endianness(u32 handoff) +{ + switch (handoff) { + case SOC64_HANDOFF_MAGIC_BOOT: + case SOC64_HANDOFF_MAGIC_MUX: + case SOC64_HANDOFF_MAGIC_IOCTL: + case SOC64_HANDOFF_MAGIC_FPGA: + case SOC64_HANDOFF_MAGIC_DELAY: + case SOC64_HANDOFF_MAGIC_CLOCK: + case SOC64_HANDOFF_MAGIC_MISC: + return BIG_ENDIAN; +#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) + case SOC64_HANDOFF_DDR_UMCTL2_MAGIC: + debug("%s: umctl2 handoff data\n", __func__); + return LITTLE_ENDIAN; + case SOC64_HANDOFF_DDR_PHY_MAGIC: + debug("%s: PHY handoff data\n", __func__); + return LITTLE_ENDIAN; + case SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC: + debug("%s: PHY engine handoff data\n", __func__); + return LITTLE_ENDIAN; +#endif + default: + debug("%s: Unknown endianness!!\n", __func__); 
+ return UNKNOWN_ENDIANNESS; + } +} + +static int getting_endianness(void *handoff_address, enum endianness *endian_t) +{ + /* Checking handoff data is little endian ? */ + *endian_t = check_endianness(readl(handoff_address)); + + if (*endian_t == UNKNOWN_ENDIANNESS) { + /* Trying to check handoff data is big endian? */ + *endian_t = check_endianness(swab32(readl(handoff_address))); + if (*endian_t == UNKNOWN_ENDIANNESS) { + debug("%s: Cannot find HANDOFF MAGIC ", __func__); + debug("at addr 0x%p\n", (u32 *)handoff_address); + return -EPERM; + } + } + + return 0; +} + +int socfpga_get_handoff_size(void *handoff_address) { u32 size; + int ret; + enum endianness endian_t; + + ret = getting_endianness(handoff_address, &endian_t); + if (ret) + return ret; size = readl(handoff_address + SOC64_HANDOFF_OFFSET_LENGTH); - if (endian == BIG_ENDIAN) + if (endian_t == BIG_ENDIAN) size = swab32(size); size = (size - SOC64_HANDOFF_OFFSET_DATA) / sizeof(u32); @@ -26,41 +78,53 @@ int socfpga_get_handoff_size(void *handoff_address, enum endianness endian) return size; } -int socfpga_handoff_read(void *handoff_address, void *table, u32 table_len, - enum endianness big_endian) +int socfpga_handoff_read(void *handoff_address, void *table, u32 table_len) { - u32 temp, i; + u32 temp; u32 *table_x32 = table; + u32 i = 0; + int ret; + enum endianness endian_t; - debug("%s: handoff addr = 0x%p ", __func__, (u32 *)handoff_address); - - if (big_endian) { - if (swab32(readl(SOC64_HANDOFF_BASE)) == SOC64_HANDOFF_MAGIC_BOOT) { - debug("Handoff table address = 0x%p ", table_x32); - debug("table length = 0x%x\n", table_len); - debug("%s: handoff data =\n{\n", __func__); - - for (i = 0; i < table_len; i++) { - temp = readl(handoff_address + - SOC64_HANDOFF_OFFSET_DATA + - (i * sizeof(u32))); - *table_x32 = swab32(temp); - - if (!(i % 2)) - debug(" No.%d Addr 0x%08x: ", i, - *table_x32); - else - debug(" 0x%08x\n", *table_x32); - - table_x32++; - } - debug("\n}\n"); - } else { - debug("%s: Cannot 
find SOC64_HANDOFF_MAGIC_BOOT ", __func__); - debug("at addr 0x%p\n", (u32 *)handoff_address); - return -EPERM; - } + ret = getting_endianness(handoff_address, &endian_t); + if (ret) + return ret; + + temp = readl(handoff_address + SOC64_HANDOFF_OFFSET_DATA + + (i * sizeof(u32))); + + if (endian_t == BIG_ENDIAN) { + debug("%s: Handoff addr = 0x%p ", __func__, (u32 *)handoff_address); + debug("Handoff table address = 0x%p ", table_x32); + debug("table length = 0x%x\n", table_len); + debug("%s: handoff data =\n{\n", __func__); + *table_x32 = swab32(temp); + } else if (endian_t == LITTLE_ENDIAN) { + debug(" {\n"); + *table_x32 = temp; + } + + debug(" No.%d Addr 0x%08x: ", i, *table_x32); + + for (i = 1; i < table_len; i++) { + table_x32++; + + temp = readl(handoff_address + + SOC64_HANDOFF_OFFSET_DATA + + (i * sizeof(u32))); + + if (endian_t == BIG_ENDIAN) + *table_x32 = swab32(temp); + else if (endian_t == LITTLE_ENDIAN) + *table_x32 = temp; + + if (!(i % 2)) + debug(" No.%d Addr 0x%08x: ", i, + *table_x32); + else + debug(" 0x%08x\n", *table_x32); } + debug("\n}\n"); return 0; } diff --git a/board/intel/n5x-socdk/MAINTAINERS b/board/intel/n5x-socdk/MAINTAINERS new file mode 100644 index 0000000..ca063a9 --- /dev/null +++ b/board/intel/n5x-socdk/MAINTAINERS @@ -0,0 +1,9 @@ +SOCFPGA BOARD +M: Chee Tien Fong <tien.fong.chee@intel.com> +M: Lim Siew Chin <elly.siew.chin.lim@intel.com> +S: Maintained +F: board/intel/n5x-socdk/ +F: include/configs/socfpga_n5x_socdk.h +F: configs/socfpga_n5x_defconfig +F: configs/socfpga_n5x_atf_defconfig +F: configs/socfpga_n5x_vab_defconfig diff --git a/board/intel/n5x-socdk/Makefile b/board/intel/n5x-socdk/Makefile new file mode 100644 index 0000000..accfdcd --- /dev/null +++ b/board/intel/n5x-socdk/Makefile @@ -0,0 +1,7 @@ +# +# Copyright (C) 2020-2021 Intel Corporation <www.intel.com> +# +# SPDX-License-Identifier: GPL-2.0 +# + +obj-y := socfpga.o diff --git a/board/intel/n5x-socdk/socfpga.c b/board/intel/n5x-socdk/socfpga.c new file 
mode 100644 index 0000000..985ba19 --- /dev/null +++ b/board/intel/n5x-socdk/socfpga.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> diff --git a/configs/socfpga_agilex_atf_defconfig b/configs/socfpga_agilex_atf_defconfig index 4eed95b..414f49b 100644 --- a/configs/socfpga_agilex_atf_defconfig +++ b/configs/socfpga_agilex_atf_defconfig @@ -21,7 +21,7 @@ CONFIG_BOOTDELAY=5 CONFIG_USE_BOOTARGS=y CONFIG_BOOTARGS="earlycon" CONFIG_USE_BOOTCOMMAND=y -CONFIG_BOOTCOMMAND="run fatscript; run mmcfitload; run linux_qspi_enable; run mmcfitboot" +CONFIG_BOOTCOMMAND="run fatscript; run mmcfitload; run mmcfitboot" CONFIG_SPL_CRC32=y CONFIG_SPL_CACHE=y CONFIG_SPL_SPI_LOAD=y diff --git a/configs/socfpga_n5x_atf_defconfig b/configs/socfpga_n5x_atf_defconfig new file mode 100644 index 0000000..a584537 --- /dev/null +++ b/configs/socfpga_n5x_atf_defconfig @@ -0,0 +1,74 @@ +CONFIG_ARM=y +CONFIG_SPL_LDSCRIPT="arch/arm/mach-socfpga/u-boot-spl-soc64.lds" +CONFIG_ARCH_SOCFPGA=y +CONFIG_SYS_TEXT_BASE=0x200000 +CONFIG_SYS_MALLOC_F_LEN=0x2000 +CONFIG_NR_DRAM_BANKS=2 +CONFIG_ENV_SIZE=0x1000 +CONFIG_ENV_OFFSET=0x200 +CONFIG_SYS_SPI_U_BOOT_OFFS=0x02000000 +CONFIG_DM_GPIO=y +CONFIG_DEFAULT_DEVICE_TREE="socfpga_n5x_socdk" +CONFIG_SPL_TEXT_BASE=0xFFE00000 +CONFIG_TARGET_SOCFPGA_N5X_SOCDK=y +CONFIG_IDENT_STRING="socfpga_n5x" +CONFIG_SPL_FS_FAT=y +CONFIG_FIT=y +CONFIG_SPL_FIT_SIGNATURE=y +CONFIG_SPL_LOAD_FIT=y +CONFIG_SPL_LOAD_FIT_ADDRESS=0x02000000 +# CONFIG_USE_SPL_FIT_GENERATOR is not set +CONFIG_BOOTDELAY=5 +CONFIG_USE_BOOTARGS=y +CONFIG_BOOTARGS="earlycon panic=-1 earlyprintk=ttyS0,115200" +CONFIG_USE_BOOTCOMMAND=y +CONFIG_BOOTCOMMAND="run fatscript; run mmcfitload; run mmcfitboot" +CONFIG_SPL_CRC32=y +CONFIG_SPL_CACHE=y +CONFIG_SPL_SPI_LOAD=y +CONFIG_SPL_ATF=y +CONFIG_SPL_ATF_NO_PLATFORM_PARAM=y +CONFIG_HUSH_PARSER=y +CONFIG_SYS_PROMPT="SOCFPGA_N5X # " +CONFIG_CMD_MEMTEST=y 
+CONFIG_CMD_GPIO=y +CONFIG_CMD_I2C=y +CONFIG_CMD_MMC=y +CONFIG_CMD_SPI=y +CONFIG_CMD_USB=y +CONFIG_CMD_DHCP=y +CONFIG_CMD_MII=y +CONFIG_CMD_PING=y +CONFIG_CMD_CACHE=y +CONFIG_CMD_EXT4=y +CONFIG_CMD_FAT=y +CONFIG_CMD_FS_GENERIC=y +CONFIG_CMD_WDT=y +CONFIG_ENV_IS_IN_MMC=y +CONFIG_NET_RANDOM_ETHADDR=y +CONFIG_SPL_DM_SEQ_ALIAS=y +CONFIG_SPL_ALTERA_SDRAM=y +CONFIG_DWAPB_GPIO=y +CONFIG_DM_I2C=y +CONFIG_SYS_I2C_DW=y +CONFIG_MMC_DW=y +CONFIG_MTD=y +CONFIG_SF_DEFAULT_MODE=0x2003 +CONFIG_SPI_FLASH_SPANSION=y +CONFIG_SPI_FLASH_STMICRO=y +CONFIG_PHY_MICREL=y +CONFIG_PHY_MICREL_KSZ90X1=y +CONFIG_DM_ETH=y +CONFIG_ETH_DESIGNWARE=y +CONFIG_MII=y +CONFIG_DM_RESET=y +CONFIG_SPI=y +CONFIG_CADENCE_QSPI=y +CONFIG_DESIGNWARE_SPI=y +CONFIG_USB=y +CONFIG_USB_DWC2=y +CONFIG_USB_STORAGE=y +CONFIG_DESIGNWARE_WATCHDOG=y +CONFIG_WDT=y +# CONFIG_SPL_USE_TINY_PRINTF is not set +CONFIG_PANIC_HANG=y diff --git a/configs/socfpga_n5x_defconfig b/configs/socfpga_n5x_defconfig new file mode 100644 index 0000000..00d2a8c --- /dev/null +++ b/configs/socfpga_n5x_defconfig @@ -0,0 +1,65 @@ +CONFIG_ARM=y +CONFIG_ARCH_SOCFPGA=y +CONFIG_SYS_TEXT_BASE=0x1000 +CONFIG_SYS_MALLOC_F_LEN=0x2000 +CONFIG_NR_DRAM_BANKS=2 +CONFIG_ENV_SIZE=0x1000 +CONFIG_ENV_OFFSET=0x200 +CONFIG_SYS_SPI_U_BOOT_OFFS=0x3c00000 +CONFIG_DM_GPIO=y +CONFIG_DEFAULT_DEVICE_TREE="socfpga_n5x_socdk" +CONFIG_SPL_TEXT_BASE=0xFFE00000 +CONFIG_TARGET_SOCFPGA_N5X_SOCDK=y +CONFIG_IDENT_STRING="socfpga_n5x" +CONFIG_SPL_FS_FAT=y +# CONFIG_PSCI_RESET is not set +CONFIG_BOOTDELAY=5 +CONFIG_USE_BOOTARGS=y +CONFIG_BOOTARGS="earlycon panic=-1 earlyprintk=ttyS0,115200" +CONFIG_USE_BOOTCOMMAND=y +CONFIG_BOOTCOMMAND="run fatscript; run mmcload; run linux_qspi_enable; run mmcboot" +CONFIG_SPL_CACHE=y +CONFIG_SPL_SPI_LOAD=y +CONFIG_HUSH_PARSER=y +CONFIG_SYS_PROMPT="SOCFPGA_N5X # " +CONFIG_CMD_MEMTEST=y +CONFIG_CMD_GPIO=y +CONFIG_CMD_I2C=y +CONFIG_CMD_MMC=y +CONFIG_CMD_SPI=y +CONFIG_CMD_USB=y +CONFIG_CMD_DHCP=y +CONFIG_CMD_MII=y +CONFIG_CMD_PING=y 
+CONFIG_CMD_CACHE=y +CONFIG_CMD_EXT4=y +CONFIG_CMD_FAT=y +CONFIG_CMD_FS_GENERIC=y +CONFIG_CMD_WDT=y +CONFIG_ENV_IS_IN_MMC=y +CONFIG_NET_RANDOM_ETHADDR=y +CONFIG_SPL_DM_SEQ_ALIAS=y +CONFIG_SPL_ALTERA_SDRAM=y +CONFIG_DWAPB_GPIO=y +CONFIG_DM_I2C=y +CONFIG_SYS_I2C_DW=y +CONFIG_MMC_DW=y +CONFIG_SF_DEFAULT_MODE=0x2003 +CONFIG_SPI_FLASH_SPANSION=y +CONFIG_SPI_FLASH_STMICRO=y +CONFIG_PHY_MICREL=y +CONFIG_PHY_MICREL_KSZ90X1=y +CONFIG_DM_ETH=y +CONFIG_ETH_DESIGNWARE=y +CONFIG_MII=y +CONFIG_DM_RESET=y +CONFIG_SPI=y +CONFIG_CADENCE_QSPI=y +CONFIG_DESIGNWARE_SPI=y +CONFIG_USB=y +CONFIG_USB_DWC2=y +CONFIG_USB_STORAGE=y +CONFIG_DESIGNWARE_WATCHDOG=y +CONFIG_WDT=y +# CONFIG_SPL_USE_TINY_PRINTF is not set +CONFIG_PANIC_HANG=y diff --git a/configs/socfpga_n5x_vab_defconfig b/configs/socfpga_n5x_vab_defconfig new file mode 100644 index 0000000..18021e0 --- /dev/null +++ b/configs/socfpga_n5x_vab_defconfig @@ -0,0 +1,75 @@ +CONFIG_ARM=y +CONFIG_SPL_LDSCRIPT="arch/arm/mach-socfpga/u-boot-spl-soc64.lds" +CONFIG_ARCH_SOCFPGA=y +CONFIG_SYS_TEXT_BASE=0x200000 +CONFIG_SYS_MALLOC_F_LEN=0x2000 +CONFIG_NR_DRAM_BANKS=2 +CONFIG_ENV_SIZE=0x1000 +CONFIG_ENV_OFFSET=0x200 +CONFIG_SYS_SPI_U_BOOT_OFFS=0x02000000 +CONFIG_DM_GPIO=y +CONFIG_DEFAULT_DEVICE_TREE="socfpga_n5x_socdk" +CONFIG_SPL_TEXT_BASE=0xFFE00000 +CONFIG_SOCFPGA_SECURE_VAB_AUTH=y +CONFIG_TARGET_SOCFPGA_N5X_SOCDK=y +CONFIG_IDENT_STRING="socfpga_n5x" +CONFIG_SPL_FS_FAT=y +CONFIG_FIT=y +CONFIG_SPL_FIT_SIGNATURE=y +CONFIG_SPL_LOAD_FIT=y +CONFIG_SPL_LOAD_FIT_ADDRESS=0x02000000 +# CONFIG_USE_SPL_FIT_GENERATOR is not set +CONFIG_BOOTDELAY=5 +CONFIG_USE_BOOTARGS=y +CONFIG_BOOTARGS="earlycon panic=-1 earlyprintk=ttyS0,115200" +CONFIG_USE_BOOTCOMMAND=y +CONFIG_BOOTCOMMAND="run fatscript; run mmcfitload; run mmcfitboot" +CONFIG_SPL_CRC32=y +CONFIG_SPL_CACHE=y +CONFIG_SPL_SPI_LOAD=y +CONFIG_SPL_ATF=y +CONFIG_SPL_ATF_NO_PLATFORM_PARAM=y +CONFIG_HUSH_PARSER=y +CONFIG_SYS_PROMPT="SOCFPGA_N5X # " +CONFIG_CMD_MEMTEST=y +CONFIG_CMD_GPIO=y +CONFIG_CMD_I2C=y 
+CONFIG_CMD_MMC=y +CONFIG_CMD_SPI=y +CONFIG_CMD_USB=y +CONFIG_CMD_DHCP=y +CONFIG_CMD_MII=y +CONFIG_CMD_PING=y +CONFIG_CMD_CACHE=y +CONFIG_CMD_EXT4=y +CONFIG_CMD_FAT=y +CONFIG_CMD_FS_GENERIC=y +CONFIG_CMD_WDT=y +CONFIG_ENV_IS_IN_MMC=y +CONFIG_NET_RANDOM_ETHADDR=y +CONFIG_SPL_DM_SEQ_ALIAS=y +CONFIG_SPL_ALTERA_SDRAM=y +CONFIG_DWAPB_GPIO=y +CONFIG_DM_I2C=y +CONFIG_SYS_I2C_DW=y +CONFIG_MMC_DW=y +CONFIG_MTD=y +CONFIG_SF_DEFAULT_MODE=0x2003 +CONFIG_SPI_FLASH_SPANSION=y +CONFIG_SPI_FLASH_STMICRO=y +CONFIG_PHY_MICREL=y +CONFIG_PHY_MICREL_KSZ90X1=y +CONFIG_DM_ETH=y +CONFIG_ETH_DESIGNWARE=y +CONFIG_MII=y +CONFIG_DM_RESET=y +CONFIG_SPI=y +CONFIG_CADENCE_QSPI=y +CONFIG_DESIGNWARE_SPI=y +CONFIG_USB=y +CONFIG_USB_DWC2=y +CONFIG_USB_STORAGE=y +CONFIG_DESIGNWARE_WATCHDOG=y +CONFIG_WDT=y +# CONFIG_SPL_USE_TINY_PRINTF is not set +CONFIG_PANIC_HANG=y diff --git a/configs/socfpga_stratix10_atf_defconfig b/configs/socfpga_stratix10_atf_defconfig index d99d90f..43c583b 100644 --- a/configs/socfpga_stratix10_atf_defconfig +++ b/configs/socfpga_stratix10_atf_defconfig @@ -21,7 +21,7 @@ CONFIG_BOOTDELAY=5 CONFIG_USE_BOOTARGS=y CONFIG_BOOTARGS="earlycon" CONFIG_USE_BOOTCOMMAND=y -CONFIG_BOOTCOMMAND="run fatscript; run mmcfitload; run linux_qspi_enable; run mmcfitboot" +CONFIG_BOOTCOMMAND="run fatscript; run mmcfitload; run mmcfitboot" CONFIG_SPL_CRC32=y CONFIG_SPL_SPI_LOAD=y CONFIG_SYS_SPI_U_BOOT_OFFS=0x02000000 diff --git a/drivers/clk/altera/Makefile b/drivers/clk/altera/Makefile index 96215ad..33db092 100644 --- a/drivers/clk/altera/Makefile +++ b/drivers/clk/altera/Makefile @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0+ # -# Copyright (C) 2018 Marek Vasut <marex@denx.de> +# Copyright (C) 2018-2021 Marek Vasut <marex@denx.de> # obj-$(CONFIG_TARGET_SOCFPGA_AGILEX) += clk-agilex.o obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += clk-arria10.o +obj-$(CONFIG_TARGET_SOCFPGA_N5X) += clk-n5x.o +obj-$(CONFIG_TARGET_SOCFPGA_N5X) += clk-mem-n5x.o diff --git a/drivers/clk/altera/clk-mem-n5x.c 
b/drivers/clk/altera/clk-mem-n5x.c new file mode 100644 index 0000000..ca44998 --- /dev/null +++ b/drivers/clk/altera/clk-mem-n5x.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + */ + +#include <common.h> +#include <asm/arch/clock_manager.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include "clk-mem-n5x.h" +#include <clk-uclass.h> +#include <dm.h> +#include <dm/lists.h> +#include <dm/util.h> +#include <dt-bindings/clock/n5x-clock.h> + +DECLARE_GLOBAL_DATA_PTR; + +struct socfpga_mem_clk_plat { + void __iomem *regs; +}; + +void clk_mem_wait_for_lock(struct socfpga_mem_clk_plat *plat, u32 mask) +{ + u32 inter_val; + u32 retry = 0; + + do { + inter_val = CM_REG_READL(plat, MEMCLKMGR_STAT) & mask; + + /* Wait for stable lock */ + if (inter_val == mask) + retry++; + else + retry = 0; + + if (retry >= 10) + return; + } while (1); +} + +/* + * function to write the bypass register which requires a poll of the + * busy bit + */ +void clk_mem_write_bypass_mempll(struct socfpga_mem_clk_plat *plat, u32 val) +{ + CM_REG_WRITEL(plat, val, MEMCLKMGR_MEMPLL_BYPASS); +} + +/* + * Setup clocks while making no assumptions about previous state of the clocks. 
+ */ +static void clk_mem_basic_init(struct udevice *dev, + const struct cm_config * const cfg) +{ + struct socfpga_mem_clk_plat *plat = dev_get_plat(dev); + + if (!cfg) + return; + + /* Put PLLs in bypass */ + clk_mem_write_bypass_mempll(plat, MEMCLKMGR_BYPASS_MEMPLL_ALL); + + /* Put PLLs in Reset */ + CM_REG_SETBITS(plat, MEMCLKMGR_MEMPLL_PLLCTRL, + MEMCLKMGR_PLLCTRL_BYPASS_MASK); + + /* setup mem PLL */ + CM_REG_WRITEL(plat, cfg->mem_memdiv, MEMCLKMGR_MEMPLL_MEMDIV); + CM_REG_WRITEL(plat, cfg->mem_pllglob, MEMCLKMGR_MEMPLL_PLLGLOB); + CM_REG_WRITEL(plat, cfg->mem_plldiv, MEMCLKMGR_MEMPLL_PLLDIV); + CM_REG_WRITEL(plat, cfg->mem_plloutdiv, MEMCLKMGR_MEMPLL_PLLOUTDIV); + + /* Take PLL out of reset and power up */ + CM_REG_CLRBITS(plat, MEMCLKMGR_MEMPLL_PLLCTRL, + MEMCLKMGR_PLLCTRL_BYPASS_MASK); +} + +static int socfpga_mem_clk_enable(struct clk *clk) +{ + const struct cm_config *cm_default_cfg = cm_get_default_config(); + struct socfpga_mem_clk_plat *plat = dev_get_plat(clk->dev); + + clk_mem_basic_init(clk->dev, cm_default_cfg); + + clk_mem_wait_for_lock(plat, MEMCLKMGR_STAT_ALLPLL_LOCKED_MASK); + + CM_REG_WRITEL(plat, CM_REG_READL(plat, MEMCLKMGR_MEMPLL_PLLGLOB) | + MEMCLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK, + MEMCLKMGR_MEMPLL_PLLGLOB); + + /* Take all PLLs out of bypass */ + clk_mem_write_bypass_mempll(plat, 0); + + /* Clear the loss of lock bits (write 1 to clear) */ + CM_REG_CLRBITS(plat, MEMCLKMGR_INTRCLR, + MEMCLKMGR_INTER_MEMPLLLOST_MASK); + + /* Take all ping pong counters out of reset */ + CM_REG_CLRBITS(plat, MEMCLKMGR_MEMPLL_EXTCNTRST, + MEMCLKMGR_EXTCNTRST_ALLCNTRST); + + return 0; +} + +static int socfpga_mem_clk_of_to_plat(struct udevice *dev) +{ + struct socfpga_mem_clk_plat *plat = dev_get_plat(dev); + fdt_addr_t addr; + + addr = devfdt_get_addr(dev); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + plat->regs = (void __iomem *)addr; + + return 0; +} + +static struct clk_ops socfpga_mem_clk_ops = { + .enable = socfpga_mem_clk_enable +}; + 
+static const struct udevice_id socfpga_mem_clk_match[] = { + { .compatible = "intel,n5x-mem-clkmgr" }, + {} +}; + +U_BOOT_DRIVER(socfpga_n5x_mem_clk) = { + .name = "mem-clk-n5x", + .id = UCLASS_CLK, + .of_match = socfpga_mem_clk_match, + .ops = &socfpga_mem_clk_ops, + .of_to_plat = socfpga_mem_clk_of_to_plat, + .plat_auto = sizeof(struct socfpga_mem_clk_plat), +}; diff --git a/drivers/clk/altera/clk-mem-n5x.h b/drivers/clk/altera/clk-mem-n5x.h new file mode 100644 index 0000000..d000ae2 --- /dev/null +++ b/drivers/clk/altera/clk-mem-n5x.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + */ + +#ifndef _CLK_MEM_N5X_ +#define _CLK_MEM_N5X_ + +#ifndef __ASSEMBLY__ +#include <linux/bitops.h> +#endif + +/* Clock Manager registers */ +#define MEMCLKMGR_STAT 4 +#define MEMCLKMGR_INTRGEN 8 +#define MEMCLKMGR_INTRMSK 0x0c +#define MEMCLKMGR_INTRCLR 0x10 +#define MEMCLKMGR_INTRSTS 0x14 +#define MEMCLKMGR_INTRSTK 0x18 +#define MEMCLKMGR_INTRRAW 0x1c + +/* Memory Clock Manager PPL group registers */ +#define MEMCLKMGR_MEMPLL_EN 0x20 +#define MEMCLKMGR_MEMPLL_ENS 0x24 +#define MEMCLKMGR_MEMPLL_ENR 0x28 +#define MEMCLKMGR_MEMPLL_BYPASS 0x2c +#define MEMCLKMGR_MEMPLL_BYPASSS 0x30 +#define MEMCLKMGR_MEMPLL_BYPASSR 0x34 +#define MEMCLKMGR_MEMPLL_MEMDIV 0x38 +#define MEMCLKMGR_MEMPLL_PLLGLOB 0x3c +#define MEMCLKMGR_MEMPLL_PLLCTRL 0x40 +#define MEMCLKMGR_MEMPLL_PLLDIV 0x44 +#define MEMCLKMGR_MEMPLL_PLLOUTDIV 0x48 +#define MEMCLKMGR_MEMPLL_EXTCNTRST 0x4c + +#define MEMCLKMGR_CTRL_BOOTMODE BIT(0) + +#define MEMCLKMGR_STAT_MEMPLL_LOCKED BIT(8) + +#define MEMCLKMGR_STAT_ALLPLL_LOCKED_MASK \ + (MEMCLKMGR_STAT_MEMPLL_LOCKED) + +#define MEMCLKMGR_INTER_MEMPLLLOCKED_MASK BIT(0) +#define MEMCLKMGR_INTER_MEMPLLLOST_MASK BIT(2) + +#define MEMCLKMGR_BYPASS_MEMPLL_ALL 0x1 + +#define MEMCLKMGR_MEMDIV_MPFEDIV_OFFSET 0 +#define MEMCLKMGR_MEMDIV_APBDIV_OFFSET 4 +#define MEMCLKMGR_MEMDIV_DFICTRLDIV_OFFSET 8 +#define 
MEMCLKMGR_MEMDIV_DFIDIV_OFFSET 12 +#define MEMCLKMGR_MEMDIV_DFICTRLDIV_MASK BIT(0) +#define MEMCLKMGR_MEMDIV_DIVIDER_MASK GENMASK(1, 0) + +#define MEMCLKMGR_PLLGLOB_PSRC_MASK GENMASK(17, 16) +#define MEMCLKMGR_PLLGLOB_PSRC_OFFSET 16 +#define MEMCLKMGR_PLLGLOB_LOSTLOCK_BYPASS_EN_MASK BIT(28) +#define MEMCLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK BIT(29) + +#define MEMCLKMGR_PSRC_EOSC1 0 +#define MEMCLKMGR_PSRC_INTOSC 1 +#define MEMCLKMGR_PSRC_F2S 2 + +#define MEMCLKMGR_PLLCTRL_BYPASS_MASK BIT(0) +#define MEMCLKMGR_PLLCTRL_RST_N_MASK BIT(1) + +#define MEMCLKMGR_PLLDIV_DIVR_MASK GENMASK(5, 0) +#define MEMCLKMGR_PLLDIV_DIVF_MASK GENMASK(16, 8) +#define MEMCLKMGR_PLLDIV_DIVQ_MASK GENMASK(26, 24) +#define MEMCLKMGR_PLLDIV_RANGE_MASK GENMASK(30, 28) + +#define MEMCLKMGR_PLLDIV_DIVR_OFFSET 0 +#define MEMCLKMGR_PLLDIV_DIVF_OFFSET 8 +#define MEMCLKMGR_PLLDIV_DIVQ_QDIV_OFFSET 24 +#define MEMCLKMGR_PLLDIV_RANGE_OFFSET 28 + +#define MEMCLKMGR_PLLOUTDIV_C0CNT_MASK GENMASK(4, 0) +#define MEMCLKMGR_PLLOUTDIV_C0CNT_OFFSET 0 + +#define MEMCLKMGR_EXTCNTRST_C0CNTRST BIT(7) +#define MEMCLKMGR_EXTCNTRST_ALLCNTRST \ + (MEMCLKMGR_EXTCNTRST_C0CNTRST) + +#endif /* _CLK_MEM_N5X_ */ diff --git a/drivers/clk/altera/clk-n5x.c b/drivers/clk/altera/clk-n5x.c new file mode 100644 index 0000000..bdcbbaa --- /dev/null +++ b/drivers/clk/altera/clk-n5x.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + */ + +#include <common.h> +#include <asm/arch/clock_manager.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <clk-uclass.h> +#include <dm.h> +#include <dm/lists.h> +#include <dm/util.h> +#include <dt-bindings/clock/n5x-clock.h> + +DECLARE_GLOBAL_DATA_PTR; + +struct socfpga_clk_plat { + void __iomem *regs; +}; + +/* + * function to write the bypass register which requires a poll of the + * busy bit + */ +static void clk_write_bypass_mainpll(struct socfpga_clk_plat *plat, u32 val) +{ + CM_REG_WRITEL(plat, val, 
CLKMGR_MAINPLL_BYPASS); + cm_wait_for_fsm(); +} + +static void clk_write_bypass_perpll(struct socfpga_clk_plat *plat, u32 val) +{ + CM_REG_WRITEL(plat, val, CLKMGR_PERPLL_BYPASS); + cm_wait_for_fsm(); +} + +/* function to write the ctrl register which requires a poll of the busy bit */ +static void clk_write_ctrl(struct socfpga_clk_plat *plat, u32 val) +{ + CM_REG_WRITEL(plat, val, CLKMGR_CTRL); + cm_wait_for_fsm(); +} + +/* + * Setup clocks while making no assumptions about previous state of the clocks. + */ +static void clk_basic_init(struct udevice *dev, + const struct cm_config * const cfg) +{ + struct socfpga_clk_plat *plat = dev_get_plat(dev); + + if (!cfg) + return; + +#if IS_ENABLED(CONFIG_SPL_BUILD) + /* Always force clock manager into boot mode before any configuration */ + clk_write_ctrl(plat, + CM_REG_READL(plat, CLKMGR_CTRL) | CLKMGR_CTRL_BOOTMODE); +#else + /* Skip clock configuration in SSBL if it's not in boot mode */ + if (!(CM_REG_READL(plat, CLKMGR_CTRL) & CLKMGR_CTRL_BOOTMODE)) + return; +#endif + + /* Put both PLLs in bypass */ + clk_write_bypass_mainpll(plat, CLKMGR_BYPASS_MAINPLL_ALL); + clk_write_bypass_perpll(plat, CLKMGR_BYPASS_PERPLL_ALL); + + /* Put both PLLs in Reset */ + CM_REG_SETBITS(plat, CLKMGR_MAINPLL_PLLCTRL, + CLKMGR_PLLCTRL_BYPASS_MASK); + CM_REG_SETBITS(plat, CLKMGR_PERPLL_PLLCTRL, + CLKMGR_PLLCTRL_BYPASS_MASK); + + /* setup main PLL */ + CM_REG_WRITEL(plat, cfg->main_pll_pllglob, CLKMGR_MAINPLL_PLLGLOB); + CM_REG_WRITEL(plat, cfg->main_pll_plldiv, CLKMGR_MAINPLL_PLLDIV); + CM_REG_WRITEL(plat, cfg->main_pll_plloutdiv, CLKMGR_MAINPLL_PLLOUTDIV); + CM_REG_WRITEL(plat, cfg->main_pll_mpuclk, CLKMGR_MAINPLL_MPUCLK); + CM_REG_WRITEL(plat, cfg->main_pll_nocclk, CLKMGR_MAINPLL_NOCCLK); + CM_REG_WRITEL(plat, cfg->main_pll_nocdiv, CLKMGR_MAINPLL_NOCDIV); + + /* setup peripheral */ + CM_REG_WRITEL(plat, cfg->per_pll_pllglob, CLKMGR_PERPLL_PLLGLOB); + CM_REG_WRITEL(plat, cfg->per_pll_plldiv, CLKMGR_PERPLL_PLLDIV); + CM_REG_WRITEL(plat, 
cfg->per_pll_plloutdiv, CLKMGR_PERPLL_PLLOUTDIV); + CM_REG_WRITEL(plat, cfg->per_pll_emacctl, CLKMGR_PERPLL_EMACCTL); + CM_REG_WRITEL(plat, cfg->per_pll_gpiodiv, CLKMGR_PERPLL_GPIODIV); + + /* Take both PLL out of reset and power up */ + CM_REG_CLRBITS(plat, CLKMGR_MAINPLL_PLLCTRL, + CLKMGR_PLLCTRL_BYPASS_MASK); + CM_REG_CLRBITS(plat, CLKMGR_PERPLL_PLLCTRL, + CLKMGR_PLLCTRL_BYPASS_MASK); + + cm_wait_for_lock(CLKMGR_STAT_ALLPLL_LOCKED_MASK); + + CM_REG_WRITEL(plat, cfg->alt_emacactr, CLKMGR_ALTR_EMACACTR); + CM_REG_WRITEL(plat, cfg->alt_emacbctr, CLKMGR_ALTR_EMACBCTR); + CM_REG_WRITEL(plat, cfg->alt_emacptpctr, CLKMGR_ALTR_EMACPTPCTR); + CM_REG_WRITEL(plat, cfg->alt_gpiodbctr, CLKMGR_ALTR_GPIODBCTR); + CM_REG_WRITEL(plat, cfg->alt_sdmmcctr, CLKMGR_ALTR_SDMMCCTR); + CM_REG_WRITEL(plat, cfg->alt_s2fuser0ctr, CLKMGR_ALTR_S2FUSER0CTR); + CM_REG_WRITEL(plat, cfg->alt_s2fuser1ctr, CLKMGR_ALTR_S2FUSER1CTR); + CM_REG_WRITEL(plat, cfg->alt_psirefctr, CLKMGR_ALTR_PSIREFCTR); + + /* Configure ping pong counters in altera group */ + CM_REG_WRITEL(plat, CLKMGR_LOSTLOCK_SET_MASK, CLKMGR_MAINPLL_LOSTLOCK); + CM_REG_WRITEL(plat, CLKMGR_LOSTLOCK_SET_MASK, CLKMGR_PERPLL_LOSTLOCK); + + CM_REG_WRITEL(plat, CM_REG_READL(plat, CLKMGR_MAINPLL_PLLGLOB) | + CLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK, + CLKMGR_MAINPLL_PLLGLOB); + CM_REG_WRITEL(plat, CM_REG_READL(plat, CLKMGR_PERPLL_PLLGLOB) | + CLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK, + CLKMGR_PERPLL_PLLGLOB); + + /* Take all PLLs out of bypass */ + clk_write_bypass_mainpll(plat, 0); + clk_write_bypass_perpll(plat, 0); + + /* Clear the loss of lock bits */ + CM_REG_CLRBITS(plat, CLKMGR_INTRCLR, + CLKMGR_INTER_PERPLLLOST_MASK | + CLKMGR_INTER_MAINPLLLOST_MASK); + + /* Take all ping pong counters out of reset */ + CM_REG_CLRBITS(plat, CLKMGR_ALTR_EXTCNTRST, + CLKMGR_ALT_EXTCNTRST_ALLCNTRST_MASK); + + /* Out of boot mode */ + clk_write_ctrl(plat, + CM_REG_READL(plat, CLKMGR_CTRL) & ~CLKMGR_CTRL_BOOTMODE); +} + +static u32 
clk_get_5_1_clk_src(struct socfpga_clk_plat *plat, u32 reg) +{ + u32 clksrc = CM_REG_READL(plat, reg); + + return (clksrc & CLKMGR_CLKSRC_MASK) >> CLKMGR_CLKSRC_OFFSET; +} + +static u64 clk_get_pll_output_hz(struct socfpga_clk_plat *plat, + u32 pllglob_reg, u32 plldiv_reg) +{ + u64 clock = 0; + u32 clklsrc, divf, divr, divq, power = 1; + + /* Get input clock frequency */ + clklsrc = (CM_REG_READL(plat, pllglob_reg) & + CLKMGR_PLLGLOB_VCO_PSRC_MASK) >> + CLKMGR_PLLGLOB_VCO_PSRC_OFFSET; + + switch (clklsrc) { + case CLKMGR_VCO_PSRC_EOSC1: + clock = cm_get_osc_clk_hz(); + break; + case CLKMGR_VCO_PSRC_INTOSC: + clock = cm_get_intosc_clk_hz(); + break; + case CLKMGR_VCO_PSRC_F2S: + clock = cm_get_fpga_clk_hz(); + break; + } + + /* Calculate pll out clock frequency */ + divf = (CM_REG_READL(plat, plldiv_reg) & + CLKMGR_PLLDIV_FDIV_MASK) >> + CLKMGR_PLLDIV_FDIV_OFFSET; + + divr = (CM_REG_READL(plat, plldiv_reg) & + CLKMGR_PLLDIV_REFCLKDIV_MASK) >> + CLKMGR_PLLDIV_REFCLKDIV_OFFSET; + + divq = (CM_REG_READL(plat, plldiv_reg) & + CLKMGR_PLLDIV_OUTDIV_QDIV_MASK) >> + CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET; + + while (divq) { + power *= 2; + divq--; + } + + return (clock * 2 * (divf + 1)) / ((divr + 1) * power); +} + +static u64 clk_get_clksrc_hz(struct socfpga_clk_plat *plat, u32 clksrc_reg, + u32 main_div, u32 per_div) +{ + u64 clock = 0; + u32 clklsrc = clk_get_5_1_clk_src(plat, clksrc_reg); + + switch (clklsrc) { + case CLKMGR_CLKSRC_MAIN: + clock = clk_get_pll_output_hz(plat, + CLKMGR_MAINPLL_PLLGLOB, + CLKMGR_MAINPLL_PLLDIV); + clock /= 1 + main_div; + break; + + case CLKMGR_CLKSRC_PER: + clock = clk_get_pll_output_hz(plat, + CLKMGR_PERPLL_PLLGLOB, + CLKMGR_PERPLL_PLLDIV); + clock /= 1 + per_div; + break; + + case CLKMGR_CLKSRC_OSC1: + clock = cm_get_osc_clk_hz(); + break; + + case CLKMGR_CLKSRC_INTOSC: + clock = cm_get_intosc_clk_hz(); + break; + + case CLKMGR_CLKSRC_FPGA: + clock = cm_get_fpga_clk_hz(); + break; + default: + return 0; + } + + return clock; +} + +static u64 
clk_get_mpu_clk_hz(struct socfpga_clk_plat *plat) +{ + u32 mainpll_c0cnt = (CM_REG_READL(plat, CLKMGR_MAINPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C0CNT_MASK) >> + CLKMGR_PLLOUTDIV_C0CNT_OFFSET; + + u32 perpll_c0cnt = (CM_REG_READL(plat, CLKMGR_PERPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C0CNT_MASK) >> + CLKMGR_PLLOUTDIV_C0CNT_OFFSET; + + u64 clock = clk_get_clksrc_hz(plat, CLKMGR_MAINPLL_MPUCLK, + mainpll_c0cnt, perpll_c0cnt); + + clock /= 1 + (CM_REG_READL(plat, CLKMGR_MAINPLL_MPUCLK) & + CLKMGR_CLKCNT_MSK); + + return clock; +} + +static u32 clk_get_l3_main_clk_hz(struct socfpga_clk_plat *plat) +{ + u32 mainpll_c1cnt = (CM_REG_READL(plat, CLKMGR_MAINPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C1CNT_MASK) >> + CLKMGR_PLLOUTDIV_C1CNT_OFFSET; + + u32 perpll_c1cnt = (CM_REG_READL(plat, CLKMGR_PERPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C1CNT_MASK) >> + CLKMGR_PLLOUTDIV_C1CNT_OFFSET; + + return clk_get_clksrc_hz(plat, CLKMGR_MAINPLL_NOCCLK, + mainpll_c1cnt, perpll_c1cnt); +} + +static u32 clk_get_l4_main_clk_hz(struct socfpga_clk_plat *plat) +{ + u64 clock = clk_get_l3_main_clk_hz(plat); + + clock /= BIT((CM_REG_READL(plat, CLKMGR_MAINPLL_NOCDIV) >> + CLKMGR_NOCDIV_L4MAIN_OFFSET) & + CLKMGR_NOCDIV_DIVIDER_MASK); + + return clock; +} + +static u32 clk_get_sdmmc_clk_hz(struct socfpga_clk_plat *plat) +{ + u32 mainpll_c3cnt = (CM_REG_READL(plat, CLKMGR_MAINPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C3CNT_MASK) >> + CLKMGR_PLLOUTDIV_C3CNT_OFFSET; + + u32 perpll_c3cnt = (CM_REG_READL(plat, CLKMGR_PERPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C3CNT_MASK) >> + CLKMGR_PLLOUTDIV_C3CNT_OFFSET; + + u64 clock = clk_get_clksrc_hz(plat, CLKMGR_ALTR_SDMMCCTR, + mainpll_c3cnt, perpll_c3cnt); + + clock /= 1 + (CM_REG_READL(plat, CLKMGR_ALTR_SDMMCCTR) & + CLKMGR_CLKCNT_MSK); + + return clock / 4; +} + +static u32 clk_get_l4_sp_clk_hz(struct socfpga_clk_plat *plat) +{ + u64 clock = clk_get_l3_main_clk_hz(plat); + + clock /= BIT((CM_REG_READL(plat, CLKMGR_MAINPLL_NOCDIV) >> + CLKMGR_NOCDIV_L4SPCLK_OFFSET) & + 
CLKMGR_NOCDIV_DIVIDER_MASK); + + return clock; +} + +static u32 clk_get_l4_mp_clk_hz(struct socfpga_clk_plat *plat) +{ + u64 clock = clk_get_l3_main_clk_hz(plat); + + clock /= BIT((CM_REG_READL(plat, CLKMGR_MAINPLL_NOCDIV) >> + CLKMGR_NOCDIV_L4MPCLK_OFFSET) & + CLKMGR_NOCDIV_DIVIDER_MASK); + + return clock; +} + +static u32 clk_get_l4_sys_free_clk_hz(struct socfpga_clk_plat *plat) +{ + if (CM_REG_READL(plat, CLKMGR_STAT) & CLKMGR_STAT_BOOTMODE) + return clk_get_l3_main_clk_hz(plat) / 2; + + return clk_get_l3_main_clk_hz(plat) / 4; +} + +static u32 clk_get_emac_clk_hz(struct socfpga_clk_plat *plat, u32 emac_id) +{ + bool emacsel_a; + u32 ctl; + u32 ctr_reg; + u32 clock; + u32 div; + u32 reg; + + /* Get EMAC clock source */ + ctl = CM_REG_READL(plat, CLKMGR_PERPLL_EMACCTL); + if (emac_id == N5X_EMAC0_CLK) + ctl = (ctl >> CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_OFFSET) & + CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_MASK; + else if (emac_id == N5X_EMAC1_CLK) + ctl = (ctl >> CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_OFFSET) & + CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_MASK; + else if (emac_id == N5X_EMAC2_CLK) + ctl = (ctl >> CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_OFFSET) & + CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_MASK; + else + return 0; + + if (ctl) { + /* EMAC B source */ + emacsel_a = false; + ctr_reg = CLKMGR_ALTR_EMACBCTR; + } else { + /* EMAC A source */ + emacsel_a = true; + ctr_reg = CLKMGR_ALTR_EMACACTR; + } + + reg = CM_REG_READL(plat, ctr_reg); + clock = (reg & CLKMGR_ALT_EMACCTR_SRC_MASK) + >> CLKMGR_ALT_EMACCTR_SRC_OFFSET; + div = (reg & CLKMGR_ALT_EMACCTR_CNT_MASK) + >> CLKMGR_ALT_EMACCTR_CNT_OFFSET; + + switch (clock) { + case CLKMGR_CLKSRC_MAIN: + clock = clk_get_pll_output_hz(plat, + CLKMGR_MAINPLL_PLLGLOB, + CLKMGR_MAINPLL_PLLDIV); + + if (emacsel_a) { + clock /= 1 + ((CM_REG_READL(plat, + CLKMGR_MAINPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C2CNT_MASK) >> + CLKMGR_PLLOUTDIV_C2CNT_OFFSET); + } else { + clock /= 1 + ((CM_REG_READL(plat, + CLKMGR_MAINPLL_PLLOUTDIV) & + 
CLKMGR_PLLOUTDIV_C3CNT_MASK) >> + CLKMGR_PLLOUTDIV_C3CNT_OFFSET); + } + break; + + case CLKMGR_CLKSRC_PER: + clock = clk_get_pll_output_hz(plat, + CLKMGR_PERPLL_PLLGLOB, + CLKMGR_PERPLL_PLLDIV); + if (emacsel_a) { + clock /= 1 + ((CM_REG_READL(plat, + CLKMGR_PERPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C2CNT_MASK) >> + CLKMGR_PLLOUTDIV_C2CNT_OFFSET); + } else { + clock /= 1 + ((CM_REG_READL(plat, + CLKMGR_PERPLL_PLLOUTDIV) & + CLKMGR_PLLOUTDIV_C3CNT_MASK) >> + CLKMGR_PLLOUTDIV_C3CNT_OFFSET); + } + break; + + case CLKMGR_CLKSRC_OSC1: + clock = cm_get_osc_clk_hz(); + break; + + case CLKMGR_CLKSRC_INTOSC: + clock = cm_get_intosc_clk_hz(); + break; + + case CLKMGR_CLKSRC_FPGA: + clock = cm_get_fpga_clk_hz(); + break; + } + + clock /= 1 + div; + + return clock; +} + +static ulong socfpga_clk_get_rate(struct clk *clk) +{ + struct socfpga_clk_plat *plat = dev_get_plat(clk->dev); + + switch (clk->id) { + case N5X_MPU_CLK: + return clk_get_mpu_clk_hz(plat); + case N5X_L4_MAIN_CLK: + return clk_get_l4_main_clk_hz(plat); + case N5X_L4_SYS_FREE_CLK: + return clk_get_l4_sys_free_clk_hz(plat); + case N5X_L4_MP_CLK: + return clk_get_l4_mp_clk_hz(plat); + case N5X_L4_SP_CLK: + return clk_get_l4_sp_clk_hz(plat); + case N5X_SDMMC_CLK: + return clk_get_sdmmc_clk_hz(plat); + case N5X_EMAC0_CLK: + case N5X_EMAC1_CLK: + case N5X_EMAC2_CLK: + return clk_get_emac_clk_hz(plat, clk->id); + case N5X_USB_CLK: + case N5X_NAND_X_CLK: + return clk_get_l4_mp_clk_hz(plat); + case N5X_NAND_CLK: + return clk_get_l4_mp_clk_hz(plat) / 4; + default: + return -ENXIO; + } +} + +static int socfpga_clk_enable(struct clk *clk) +{ + return 0; +} + +static int socfpga_clk_probe(struct udevice *dev) +{ + const struct cm_config *cm_default_cfg = cm_get_default_config(); + + clk_basic_init(dev, cm_default_cfg); + + return 0; +} + +static int socfpga_clk_of_to_plat(struct udevice *dev) +{ + struct socfpga_clk_plat *plat = dev_get_plat(dev); + fdt_addr_t addr; + + addr = devfdt_get_addr(dev); + if (addr == 
FDT_ADDR_T_NONE) + return -EINVAL; + plat->regs = (void __iomem *)addr; + + return 0; +} + +static struct clk_ops socfpga_clk_ops = { + .enable = socfpga_clk_enable, + .get_rate = socfpga_clk_get_rate, +}; + +static const struct udevice_id socfpga_clk_match[] = { + { .compatible = "intel,n5x-clkmgr" }, + {} +}; + +U_BOOT_DRIVER(socfpga_n5x_clk) = { + .name = "clk-n5x", + .id = UCLASS_CLK, + .of_match = socfpga_clk_match, + .ops = &socfpga_clk_ops, + .probe = socfpga_clk_probe, + .of_to_plat = socfpga_clk_of_to_plat, + .plat_auto = sizeof(struct socfpga_clk_plat), +}; diff --git a/drivers/clk/altera/clk-n5x.h b/drivers/clk/altera/clk-n5x.h new file mode 100644 index 0000000..8c00e90 --- /dev/null +++ b/drivers/clk/altera/clk-n5x.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + */ + +#ifndef _CLK_N5X_ +#define _CLK_N5X_ + +#ifndef __ASSEMBLY__ +#include <linux/bitops.h> +#endif + +#define CM_REG_READL(plat, reg) \ + readl((plat)->regs + (reg)) + +#define CM_REG_WRITEL(plat, data, reg) \ + writel(data, (plat)->regs + (reg)) + +#define CM_REG_CLRBITS(plat, reg, clear) \ + clrbits_le32((plat)->regs + (reg), (clear)) + +#define CM_REG_SETBITS(plat, reg, set) \ + setbits_le32((plat)->regs + (reg), (set)) + +struct cm_config { + /* main group */ + u32 main_pll_mpuclk; + u32 main_pll_nocclk; + u32 main_pll_nocdiv; + u32 main_pll_pllglob; + u32 main_pll_plldiv; + u32 main_pll_plloutdiv; + u32 spare_1[4]; + + /* peripheral group */ + u32 per_pll_emacctl; + u32 per_pll_gpiodiv; + u32 per_pll_pllglob; + u32 per_pll_plldiv; + u32 per_pll_plloutdiv; + u32 spare_2[4]; + + /* altera group */ + u32 alt_emacactr; + u32 alt_emacbctr; + u32 alt_emacptpctr; + u32 alt_gpiodbctr; + u32 alt_sdmmcctr; + u32 alt_s2fuser0ctr; + u32 alt_s2fuser1ctr; + u32 alt_psirefctr; + + /* incoming clock */ + u32 hps_osc_clk_hz; + u32 fpga_clk_hz; + u32 spare_3[3]; + + /* memory clock group */ + u32 mem_memdiv; + u32 
mem_pllglob; + u32 mem_plldiv; + u32 mem_plloutdiv; + u32 spare_4[4]; +}; + +/* Clock Manager registers */ +#define CLKMGR_CTRL 0 +#define CLKMGR_STAT 4 +#define CLKMGR_TESTIOCTRL 8 +#define CLKMGR_INTRGEN 0x0c +#define CLKMGR_INTRMSK 0x10 +#define CLKMGR_INTRCLR 0x14 +#define CLKMGR_INTRSTS 0x18 +#define CLKMGR_INTRSTK 0x1c +#define CLKMGR_INTRRAW 0x20 + +/* Clock Manager Main PPL group registers */ +#define CLKMGR_MAINPLL_EN 0x24 +#define CLKMGR_MAINPLL_ENS 0x28 +#define CLKMGR_MAINPLL_ENR 0x2c +#define CLKMGR_MAINPLL_BYPASS 0x30 +#define CLKMGR_MAINPLL_BYPASSS 0x34 +#define CLKMGR_MAINPLL_BYPASSR 0x38 +#define CLKMGR_MAINPLL_MPUCLK 0x3c +#define CLKMGR_MAINPLL_NOCCLK 0x40 +#define CLKMGR_MAINPLL_NOCDIV 0x44 +#define CLKMGR_MAINPLL_PLLGLOB 0x48 +#define CLKMGR_MAINPLL_PLLCTRL 0x4c +#define CLKMGR_MAINPLL_PLLDIV 0x50 +#define CLKMGR_MAINPLL_PLLOUTDIV 0x54 +#define CLKMGR_MAINPLL_LOSTLOCK 0x58 + +/* Clock Manager Peripheral PPL group registers */ +#define CLKMGR_PERPLL_EN 0x7c +#define CLKMGR_PERPLL_ENS 0x80 +#define CLKMGR_PERPLL_ENR 0x84 +#define CLKMGR_PERPLL_BYPASS 0x88 +#define CLKMGR_PERPLL_BYPASSS 0x8c +#define CLKMGR_PERPLL_BYPASSR 0x90 +#define CLKMGR_PERPLL_EMACCTL 0x94 +#define CLKMGR_PERPLL_GPIODIV 0x98 +#define CLKMGR_PERPLL_PLLGLOB 0x9c +#define CLKMGR_PERPLL_PLLCTRL 0xa0 +#define CLKMGR_PERPLL_PLLDIV 0xa4 +#define CLKMGR_PERPLL_PLLOUTDIV 0xa8 +#define CLKMGR_PERPLL_LOSTLOCK 0xac + +/* Clock Manager Altera group registers */ +#define CLKMGR_ALTR_EMACACTR 0xd4 +#define CLKMGR_ALTR_EMACBCTR 0xd8 +#define CLKMGR_ALTR_EMACPTPCTR 0xdc +#define CLKMGR_ALTR_GPIODBCTR 0xe0 +#define CLKMGR_ALTR_SDMMCCTR 0xe4 +#define CLKMGR_ALTR_S2FUSER0CTR 0xe8 +#define CLKMGR_ALTR_S2FUSER1CTR 0xec +#define CLKMGR_ALTR_PSIREFCTR 0xf0 +#define CLKMGR_ALTR_EXTCNTRST 0xf4 + +#define CLKMGR_CTRL_BOOTMODE BIT(0) + +#define CLKMGR_STAT_BUSY BIT(0) +#define CLKMGR_STAT_MAINPLL_LOCKED BIT(8) +#define CLKMGR_STAT_MAIN_TRANS BIT(9) +#define CLKMGR_STAT_PERPLL_LOCKED BIT(16) +#define 
CLKMGR_STAT_PERF_TRANS BIT(17) +#define CLKMGR_STAT_BOOTMODE BIT(24) +#define CLKMGR_STAT_BOOTCLKSRC BIT(25) + +#define CLKMGR_STAT_ALLPLL_LOCKED_MASK \ + (CLKMGR_STAT_MAINPLL_LOCKED | CLKMGR_STAT_PERPLL_LOCKED) + +#define CLKMGR_INTER_MAINPLLLOCKED_MASK BIT(0) +#define CLKMGR_INTER_PERPLLLOCKED_MASK BIT(1) +#define CLKMGR_INTER_MAINPLLLOST_MASK BIT(2) +#define CLKMGR_INTER_PERPLLLOST_MASK BIT(3) + +#define CLKMGR_CLKSRC_MASK GENMASK(18, 16) +#define CLKMGR_CLKSRC_OFFSET 16 +#define CLKMGR_CLKSRC_MAIN 0 +#define CLKMGR_CLKSRC_PER 1 +#define CLKMGR_CLKSRC_OSC1 2 +#define CLKMGR_CLKSRC_INTOSC 3 +#define CLKMGR_CLKSRC_FPGA 4 +#define CLKMGR_CLKCNT_MSK GENMASK(10, 0) + +#define CLKMGR_BYPASS_MAINPLL_ALL 0x7 +#define CLKMGR_BYPASS_PERPLL_ALL 0x7f + +#define CLKMGR_NOCDIV_L4MAIN_OFFSET 0 +#define CLKMGR_NOCDIV_L4MPCLK_OFFSET 8 +#define CLKMGR_NOCDIV_L4SPCLK_OFFSET 16 +#define CLKMGR_NOCDIV_CSATCLK_OFFSET 24 +#define CLKMGR_NOCDIV_CSTRACECLK_OFFSET 26 +#define CLKMGR_NOCDIV_CSPDBGCLK_OFFSET 28 +#define CLKMGR_NOCDIV_DIVIDER_MASK 0x3 + +#define CLKMGR_PLLGLOB_VCO_PSRC_MASK GENMASK(17, 16) +#define CLKMGR_PLLGLOB_VCO_PSRC_OFFSET 16 +#define CLKMGR_PLLGLOB_LOSTLOCK_BYPASS_EN_MASK BIT(28) +#define CLKMGR_PLLGLOB_CLR_LOSTLOCK_BYPASS_MASK BIT(29) + +#define CLKMGR_VCO_PSRC_EOSC1 0 +#define CLKMGR_VCO_PSRC_INTOSC 1 +#define CLKMGR_VCO_PSRC_F2S 2 + +#define CLKMGR_PLLCTRL_BYPASS_MASK BIT(0) +#define CLKMGR_PLLCTRL_RST_N_MASK BIT(1) + +#define CLKMGR_PLLDIV_REFCLKDIV_MASK GENMASK(5, 0) +#define CLKMGR_PLLDIV_FDIV_MASK GENMASK(16, 8) +#define CLKMGR_PLLDIV_OUTDIV_QDIV_MASK GENMASK(26, 24) +#define CLKMGR_PLLDIV_RANGE_MASK GENMASK(30, 28) + +#define CLKMGR_PLLDIV_REFCLKDIV_OFFSET 0 +#define CLKMGR_PLLDIV_FDIV_OFFSET 8 +#define CLKMGR_PLLDIV_OUTDIV_QDIV_OFFSET 24 +#define CLKMGR_PLLDIV_RANGE_OFFSET 28 + +#define CLKMGR_PLLOUTDIV_C0CNT_MASK GENMASK(4, 0) +#define CLKMGR_PLLOUTDIV_C1CNT_MASK GENMASK(12, 8) +#define CLKMGR_PLLOUTDIV_C2CNT_MASK GENMASK(20, 16) +#define 
CLKMGR_PLLOUTDIV_C3CNT_MASK GENMASK(28, 24) + +#define CLKMGR_PLLOUTDIV_C0CNT_OFFSET 0 +#define CLKMGR_PLLOUTDIV_C1CNT_OFFSET 8 +#define CLKMGR_PLLOUTDIV_C2CNT_OFFSET 16 +#define CLKMGR_PLLOUTDIV_C3CNT_OFFSET 24 + +#define CLKMGR_PLLCX_EN_SET_MSK BIT(27) +#define CLKMGR_PLLCX_MUTE_SET_MSK BIT(28) + +#define CLKMGR_VCOCALIB_MSCNT_MASK GENMASK(23, 16) +#define CLKMGR_VCOCALIB_MSCNT_OFFSET 16 +#define CLKMGR_VCOCALIB_HSCNT_MASK GENMASK(9, 0) +#define CLKMGR_VCOCALIB_MSCNT_CONST 100 +#define CLKMGR_VCOCALIB_HSCNT_CONST 4 + +#define CLKMGR_PLLM_MDIV_MASK GENMASK(9, 0) + +#define CLKMGR_LOSTLOCK_SET_MASK BIT(0) + +#define CLKMGR_PERPLLGRP_EN_SDMMCCLK_MASK BIT(5) +#define CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_OFFSET 26 +#define CLKMGR_PERPLLGRP_EMACCTL_EMAC0SELB_MASK BIT(26) +#define CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_OFFSET 27 +#define CLKMGR_PERPLLGRP_EMACCTL_EMAC1SELB_MASK BIT(27) +#define CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_OFFSET 28 +#define CLKMGR_PERPLLGRP_EMACCTL_EMAC2SELB_MASK BIT(28) + +#define CLKMGR_ALT_EMACCTR_SRC_OFFSET 16 +#define CLKMGR_ALT_EMACCTR_SRC_MASK GENMASK(18, 16) +#define CLKMGR_ALT_EMACCTR_CNT_OFFSET 0 +#define CLKMGR_ALT_EMACCTR_CNT_MASK GENMASK(10, 0) + +#define CLKMGR_ALT_EXTCNTRST_ALLCNTRST_MASK GENMASK(15, 0) + +#endif /* _CLK_N5X_ */ diff --git a/drivers/ddr/altera/Makefile b/drivers/ddr/altera/Makefile index 39dfee5..9fa5d85 100644 --- a/drivers/ddr/altera/Makefile +++ b/drivers/ddr/altera/Makefile @@ -4,11 +4,12 @@ # Wolfgang Denk, DENX Software Engineering, wd@denx.de. 
# # (C) Copyright 2010, Thomas Chou <thomas@wytron.com.tw> -# Copyright (C) 2014 Altera Corporation <www.altera.com> +# Copyright (C) 2014-2021 Altera Corporation <www.altera.com> ifdef CONFIG_$(SPL_)ALTERA_SDRAM obj-$(CONFIG_TARGET_SOCFPGA_GEN5) += sdram_gen5.o sequencer.o obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += sdram_arria10.o obj-$(CONFIG_TARGET_SOCFPGA_STRATIX10) += sdram_soc64.o sdram_s10.o obj-$(CONFIG_TARGET_SOCFPGA_AGILEX) += sdram_soc64.o sdram_agilex.o +obj-$(CONFIG_TARGET_SOCFPGA_N5X) += sdram_soc64.o sdram_n5x.o endif diff --git a/drivers/ddr/altera/sdram_n5x.c b/drivers/ddr/altera/sdram_n5x.c new file mode 100644 index 0000000..ac13ac4 --- /dev/null +++ b/drivers/ddr/altera/sdram_n5x.c @@ -0,0 +1,2298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> +#include <clk.h> +#include <div64.h> +#include <dm.h> +#include <errno.h> +#include <fdtdec.h> +#include <hang.h> +#include <ram.h> +#include <reset.h> +#include "sdram_soc64.h" +#include <wait_bit.h> +#include <asm/arch/firewall.h> +#include <asm/arch/handoff_soc64.h> +#include <asm/arch/misc.h> +#include <asm/arch/reset_manager.h> +#include <asm/arch/system_manager.h> +#include <asm/io.h> +#include <linux/err.h> +#include <linux/sizes.h> + +DECLARE_GLOBAL_DATA_PTR; + +/* MPFE NOC registers */ +#define FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0 0xF8024050 + +/* Memory reset manager */ +#define MEM_RST_MGR_STATUS 0x8 + +/* Register and bit in memory reset manager */ +#define MEM_RST_MGR_STATUS_RESET_COMPLETE BIT(0) +#define MEM_RST_MGR_STATUS_PWROKIN_STATUS BIT(1) +#define MEM_RST_MGR_STATUS_CONTROLLER_RST BIT(2) +#define MEM_RST_MGR_STATUS_AXI_RST BIT(3) + +#define TIMEOUT_200MS 200 +#define TIMEOUT_5000MS 5000 + +/* DDR4 umctl2 */ +#define DDR4_MSTR_OFFSET 0x0 +#define DDR4_FREQ_RATIO BIT(22) + +#define DDR4_STAT_OFFSET 0x4 +#define DDR4_STAT_SELFREF_TYPE GENMASK(5, 4) +#define DDR4_STAT_SELFREF_TYPE_SHIFT 4 
+#define DDR4_STAT_OPERATING_MODE GENMASK(2, 0) + +#define DDR4_MRCTRL0_OFFSET 0x10 +#define DDR4_MRCTRL0_MR_TYPE BIT(0) +#define DDR4_MRCTRL0_MPR_EN BIT(1) +#define DDR4_MRCTRL0_MR_RANK GENMASK(5, 4) +#define DDR4_MRCTRL0_MR_RANK_SHIFT 4 +#define DDR4_MRCTRL0_MR_ADDR GENMASK(15, 12) +#define DDR4_MRCTRL0_MR_ADDR_SHIFT 12 +#define DDR4_MRCTRL0_MR_WR BIT(31) + +#define DDR4_MRCTRL1_OFFSET 0x14 +#define DDR4_MRCTRL1_MR_DATA 0x3FFFF + +#define DDR4_MRSTAT_OFFSET 0x18 +#define DDR4_MRSTAT_MR_WR_BUSY BIT(0) + +#define DDR4_MRCTRL2_OFFSET 0x1C + +#define DDR4_PWRCTL_OFFSET 0x30 +#define DDR4_PWRCTL_SELFREF_EN BIT(0) +#define DDR4_PWRCTL_POWERDOWN_EN BIT(1) +#define DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE BIT(3) +#define DDR4_PWRCTL_SELFREF_SW BIT(5) + +#define DDR4_PWRTMG_OFFSET 0x34 +#define DDR4_HWLPCTL_OFFSET 0x38 +#define DDR4_RFSHCTL0_OFFSET 0x50 +#define DDR4_RFSHCTL1_OFFSET 0x54 + +#define DDR4_RFSHCTL3_OFFSET 0x60 +#define DDR4_RFSHCTL3_DIS_AUTO_REFRESH BIT(0) +#define DDR4_RFSHCTL3_REFRESH_MODE GENMASK(6, 4) +#define DDR4_RFSHCTL3_REFRESH_MODE_SHIFT 4 + +#define DDR4_ECCCFG0_OFFSET 0x70 +#define DDR4_ECC_MODE GENMASK(2, 0) +#define DDR4_DIS_SCRUB BIT(4) +#define LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT 30 +#define LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT 8 + +#define DDR4_ECCCFG1_OFFSET 0x74 +#define LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK BIT(4) + +#define DDR4_CRCPARCTL0_OFFSET 0xC0 +#define DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR BIT(1) + +#define DDR4_CRCPARCTL1_OFFSET 0xC4 +#define DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE BIT(8) +#define DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW BIT(9) + +#define DDR4_CRCPARSTAT_OFFSET 0xCC +#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT BIT(16) +#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT BIT(17) +#define DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW BIT(19) +#define DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW BIT(29) + +#define DDR4_INIT0_OFFSET 0xD0 +#define DDR4_INIT0_SKIP_RAM_INIT GENMASK(31, 30) + +#define DDR4_RANKCTL_OFFSET 0xF4 +#define 
DDR4_RANKCTL_DIFF_RANK_RD_GAP GENMASK(7, 4) +#define DDR4_RANKCTL_DIFF_RANK_WR_GAP GENMASK(11, 8) +#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB BIT(24) +#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB BIT(26) +#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT 4 +#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT 8 +#define DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB_SHIFT 24 +#define DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB_SHIFT 26 + +#define DDR4_RANKCTL1_OFFSET 0xF8 +#define DDR4_RANKCTL1_WR2RD_DR GENMASK(5, 0) + +#define DDR4_DRAMTMG2_OFFSET 0x108 +#define DDR4_DRAMTMG2_WR2RD GENMASK(5, 0) +#define DDR4_DRAMTMG2_RD2WR GENMASK(13, 8) +#define DDR4_DRAMTMG2_RD2WR_SHIFT 8 + +#define DDR4_DRAMTMG9_OFFSET 0x124 +#define DDR4_DRAMTMG9_W2RD_S GENMASK(5, 0) + +#define DDR4_DFITMG1_OFFSET 0x194 +#define DDR4_DFITMG1_DFI_T_WRDATA_DELAY GENMASK(20, 16) +#define DDR4_DFITMG1_DFI_T_WRDATA_SHIFT 16 + +#define DDR4_DFIMISC_OFFSET 0x1B0 +#define DDR4_DFIMISC_DFI_INIT_COMPLETE_EN BIT(0) +#define DDR4_DFIMISC_DFI_INIT_START BIT(5) + +#define DDR4_DFISTAT_OFFSET 0x1BC +#define DDR4_DFI_INIT_COMPLETE BIT(0) + +#define DDR4_DBG0_OFFSET 0x300 + +#define DDR4_DBG1_OFFSET 0x304 +#define DDR4_DBG1_DISDQ BIT(0) +#define DDR4_DBG1_DIS_HIF BIT(1) + +#define DDR4_DBGCAM_OFFSET 0x308 +#define DDR4_DBGCAM_DBG_RD_Q_EMPTY BIT(25) +#define DDR4_DBGCAM_DBG_WR_Q_EMPTY BIT(26) +#define DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY BIT(28) +#define DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY BIT(29) + +#define DDR4_SWCTL_OFFSET 0x320 +#define DDR4_SWCTL_SW_DONE BIT(0) + +#define DDR4_SWSTAT_OFFSET 0x324 +#define DDR4_SWSTAT_SW_DONE_ACK BIT(0) + +#define DDR4_PSTAT_OFFSET 0x3FC +#define DDR4_PSTAT_RD_PORT_BUSY_0 BIT(0) +#define DDR4_PSTAT_WR_PORT_BUSY_0 BIT(16) + +#define DDR4_PCTRL0_OFFSET 0x490 +#define DDR4_PCTRL0_PORT_EN BIT(0) + +#define DDR4_SBRCTL_OFFSET 0xF24 +#define DDR4_SBRCTL_SCRUB_INTERVAL 0x1FFF00 +#define DDR4_SBRCTL_SCRUB_EN BIT(0) +#define DDR4_SBRCTL_SCRUB_WRITE BIT(2) +#define DDR4_SBRCTL_SCRUB_BURST_1 BIT(4) + +#define 
DDR4_SBRSTAT_OFFSET 0xF28 +#define DDR4_SBRSTAT_SCRUB_BUSY BIT(0) +#define DDR4_SBRSTAT_SCRUB_DONE BIT(1) + +#define DDR4_SBRWDATA0_OFFSET 0xF2C +#define DDR4_SBRWDATA1_OFFSET 0xF30 +#define DDR4_SBRSTART0_OFFSET 0xF38 +#define DDR4_SBRSTART1_OFFSET 0xF3C +#define DDR4_SBRRANGE0_OFFSET 0xF40 +#define DDR4_SBRRANGE1_OFFSET 0xF44 + +/* DDR PHY */ +#define DDR_PHY_TXODTDRVSTREN_B0_P0 0x2009A +#define DDR_PHY_RXPBDLYTG0_R0 0x200D0 +#define DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0 0x201A0 + +#define DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0 0x203A0 +#define DDR_PHY_DBYTE1_TXDQDLYTG0_U0_P0 0x221A0 +#define DDR_PHY_DBYTE1_TXDQDLYTG0_U1_P0 0x223A0 +#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY GENMASK(9, 6) +#define DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT 6 + +#define DDR_PHY_CALRATE_OFFSET 0x40110 +#define DDR_PHY_CALZAP_OFFSET 0x40112 +#define DDR_PHY_SEQ0BDLY0_P0_OFFSET 0x40016 +#define DDR_PHY_SEQ0BDLY1_P0_OFFSET 0x40018 +#define DDR_PHY_SEQ0BDLY2_P0_OFFSET 0x4001A +#define DDR_PHY_SEQ0BDLY3_P0_OFFSET 0x4001C + +#define DDR_PHY_MEMRESETL_OFFSET 0x400C0 +#define DDR_PHY_MEMRESETL_VALUE BIT(0) +#define DDR_PHY_PROTECT_MEMRESET BIT(1) + +#define DDR_PHY_CALBUSY_OFFSET 0x4012E +#define DDR_PHY_CALBUSY BIT(0) + +#define DDR_PHY_TRAIN_IMEM_OFFSET 0xA0000 +#define DDR_PHY_TRAIN_DMEM_OFFSET 0xA8000 + +#define DMEM_MB_CDD_RR_1_0_OFFSET 0xA802C +#define DMEM_MB_CDD_RR_0_1_OFFSET 0xA8030 +#define DMEM_MB_CDD_WW_1_0_OFFSET 0xA8038 +#define DMEM_MB_CDD_WW_0_1_OFFSET 0xA803C +#define DMEM_MB_CDD_RW_1_1_OFFSET 0xA8046 +#define DMEM_MB_CDD_RW_1_0_OFFSET 0xA8048 +#define DMEM_MB_CDD_RW_0_1_OFFSET 0xA804A +#define DMEM_MB_CDD_RW_0_0_OFFSET 0xA804C + +#define DMEM_MB_CDD_CHA_RR_1_0_OFFSET 0xA8026 +#define DMEM_MB_CDD_CHA_RR_0_1_OFFSET 0xA8026 +#define DMEM_MB_CDD_CHB_RR_1_0_OFFSET 0xA8058 +#define DMEM_MB_CDD_CHB_RR_0_1_OFFSET 0xA805A +#define DMEM_MB_CDD_CHA_WW_1_0_OFFSET 0xA8030 +#define DMEM_MB_CDD_CHA_WW_0_1_OFFSET 0xA8030 +#define DMEM_MB_CDD_CHB_WW_1_0_OFFSET 0xA8062 +#define DMEM_MB_CDD_CHB_WW_0_1_OFFSET 
0xA8064 + +#define DMEM_MB_CDD_CHA_RW_1_1_OFFSET 0xA8028 +#define DMEM_MB_CDD_CHA_RW_1_0_OFFSET 0xA8028 +#define DMEM_MB_CDD_CHA_RW_0_1_OFFSET 0xA802A +#define DMEM_MB_CDD_CHA_RW_0_0_OFFSET 0xA802A + +#define DMEM_MB_CDD_CHB_RW_1_1_OFFSET 0xA805A +#define DMEM_MB_CDD_CHB_RW_1_0_OFFSET 0xA805C +#define DMEM_MB_CDD_CHB_RW_0_1_OFFSET 0xA805c +#define DMEM_MB_CDD_CHB_RW_0_0_OFFSET 0xA805E + +#define DDR_PHY_SEQ0DISABLEFLAG0_OFFSET 0x120018 +#define DDR_PHY_SEQ0DISABLEFLAG1_OFFSET 0x12001A +#define DDR_PHY_SEQ0DISABLEFLAG2_OFFSET 0x12001C +#define DDR_PHY_SEQ0DISABLEFLAG3_OFFSET 0x12001E +#define DDR_PHY_SEQ0DISABLEFLAG4_OFFSET 0x120020 +#define DDR_PHY_SEQ0DISABLEFLAG5_OFFSET 0x120022 +#define DDR_PHY_SEQ0DISABLEFLAG6_OFFSET 0x120024 +#define DDR_PHY_SEQ0DISABLEFLAG7_OFFSET 0x120026 + +#define DDR_PHY_UCCLKHCLKENABLES_OFFSET 0x180100 +#define DDR_PHY_UCCLKHCLKENABLES_UCCLKEN BIT(0) +#define DDR_PHY_UCCLKHCLKENABLES_HCLKEN BIT(1) + +#define DDR_PHY_UCTWRITEPROT_OFFSET 0x180066 +#define DDR_PHY_UCTWRITEPROT BIT(0) + +#define DDR_PHY_APBONLY0_OFFSET 0x1A0000 +#define DDR_PHY_MICROCONTMUXSEL BIT(0) + +#define DDR_PHY_UCTSHADOWREGS_OFFSET 0x1A0008 +#define DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW BIT(0) + +#define DDR_PHY_DCTWRITEPROT_OFFSET 0x1A0062 +#define DDR_PHY_DCTWRITEPROT BIT(0) + +#define DDR_PHY_UCTWRITEONLYSHADOW_OFFSET 0x1A0064 +#define DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET 0x1A0068 + +#define DDR_PHY_MICRORESET_OFFSET 0x1A0132 +#define DDR_PHY_MICRORESET_STALL BIT(0) +#define DDR_PHY_MICRORESET_RESET BIT(3) + +#define DDR_PHY_TXODTDRVSTREN_B0_P1 0x22009A + +/* For firmware training */ +#define HW_DBG_TRACE_CONTROL_OFFSET 0x18 +#define FW_TRAINING_COMPLETED_STAT 0x07 +#define FW_TRAINING_FAILED_STAT 0xFF +#define FW_COMPLETION_MSG_ONLY_MODE 0xFF +#define FW_STREAMING_MSG_ID 0x08 +#define GET_LOWHW_DATA(x) ((x) & 0xFFFF) +#define GET_LOWB_DATA(x) ((x) & 0xFF) +#define GET_HIGHB_DATA(x) (((x) & 0xFF00) >> 8) + +/* Operating mode */ +#define OPM_INIT 0x000 +#define 
OPM_NORMAL 0x001 +#define OPM_PWR_D0WN 0x010 +#define OPM_SELF_SELFREF 0x011 +#define OPM_DDR4_DEEP_PWR_DOWN 0x100 + +/* Refresh mode */ +#define FIXED_1X 0 +#define FIXED_2X BIT(0) +#define FIXED_4X BIT(4) + +/* Address of mode register */ +#define MR0 0x0000 +#define MR1 0x0001 +#define MR2 0x0010 +#define MR3 0x0011 +#define MR4 0x0100 +#define MR5 0x0101 +#define MR6 0x0110 +#define MR7 0x0111 + +/* MR rank */ +#define RANK0 0x1 +#define RANK1 0x2 +#define ALL_RANK 0x3 + +#define MR5_BIT4 BIT(4) + +/* Value for ecc_region_map */ +#define ALL_PROTECTED 0x7F + +/* Region size for ECCCFG0.ecc_region_map */ +enum region_size { + ONE_EIGHT, + ONE_SIXTEENTH, + ONE_THIRTY_SECOND, + ONE_SIXTY_FOURTH +}; + +enum ddr_type { + DDRTYPE_LPDDR4_0, + DDRTYPE_LPDDR4_1, + DDRTYPE_DDR4, + DDRTYPE_UNKNOWN +}; + +/* Reset type */ +enum reset_type { + POR_RESET, + WARM_RESET, + COLD_RESET +}; + +/* DDR handoff structure */ +struct ddr_handoff { + /* Memory reset manager base */ + phys_addr_t mem_reset_base; + + /* First controller attributes */ + phys_addr_t cntlr_handoff_base; + phys_addr_t cntlr_base; + size_t cntlr_total_length; + enum ddr_type cntlr_t; + size_t cntlr_handoff_length; + + /* Second controller attributes*/ + phys_addr_t cntlr2_handoff_base; + phys_addr_t cntlr2_base; + size_t cntlr2_total_length; + enum ddr_type cntlr2_t; + size_t cntlr2_handoff_length; + + /* PHY attributes */ + phys_addr_t phy_handoff_base; + phys_addr_t phy_base; + size_t phy_total_length; + size_t phy_handoff_length; + + /* PHY engine attributes */ + phys_addr_t phy_engine_handoff_base; + size_t phy_engine_total_length; + size_t phy_engine_handoff_length; + + /* Calibration attributes */ + phys_addr_t train_imem_base; + phys_addr_t train_dmem_base; + size_t train_imem_length; + size_t train_dmem_length; +}; + +/* Message mode */ +enum message_mode { + MAJOR_MESSAGE, + STREAMING_MESSAGE +}; + +static int clr_ca_parity_error_status(phys_addr_t umctl2_base) +{ + int ret; + + debug("%s: Clear C/A 
parity error status in MR5[4]\n", __func__); + + /* Set mode register MRS */ + clrbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MPR_EN); + + /* Set mode register to write operation */ + setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_TYPE); + + /* Set the address of mode rgister to 0x101(MR5) */ + setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, + (MR5 << DDR4_MRCTRL0_MR_ADDR_SHIFT) & + DDR4_MRCTRL0_MR_ADDR); + + /* Set MR rank to rank 1 */ + setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, + (RANK1 << DDR4_MRCTRL0_MR_RANK_SHIFT) & + DDR4_MRCTRL0_MR_RANK); + + /* Clear C/A parity error status in MR5[4] */ + clrbits_le32(umctl2_base + DDR4_MRCTRL1_OFFSET, MR5_BIT4); + + /* Trigger mode register read or write operation */ + setbits_le32(umctl2_base + DDR4_MRCTRL0_OFFSET, DDR4_MRCTRL0_MR_WR); + + /* Wait for retry done */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_MRSTAT_OFFSET), DDR4_MRSTAT_MR_WR_BUSY, + false, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" no outstanding MR transaction\n"); + return ret; + } + + return 0; +} + +static int ddr_retry_software_sequence(phys_addr_t umctl2_base) +{ + u32 value; + int ret; + + /* Check software can perform MRS/MPR/PDA? 
*/ + value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) & + DDR4_CRCPARSTAT_DFI_ALERT_ERR_NO_SW; + + if (value) { + /* Clear interrupt bit for DFI alert error */ + setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET, + DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR); + } + + debug("%s: Software can perform MRS/MPR/PDA\n", __func__); + + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_MRSTAT_OFFSET), + DDR4_MRSTAT_MR_WR_BUSY, + false, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" no outstanding MR transaction\n"); + return ret; + } + + ret = clr_ca_parity_error_status(umctl2_base); + if (ret) + return ret; + + if (!value) { + /* Clear interrupt bit for DFI alert error */ + setbits_le32(umctl2_base + DDR4_CRCPARCTL0_OFFSET, + DDR4_CRCPARCTL0_DFI_ALERT_ERR_INIT_CLR); + } + + return 0; +} + +static int ensure_retry_procedure_complete(phys_addr_t umctl2_base) +{ + u32 value; + u32 start = get_timer(0); + int ret; + + /* Check parity/crc/error window is emptied ? */ + value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) & + DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW; + + /* Polling until parity/crc/error window is emptied */ + while (value) { + if (get_timer(start) > TIMEOUT_200MS) { + debug("%s: Timeout while waiting for", + __func__); + debug(" parity/crc/error window empty\n"); + return -ETIMEDOUT; + } + + /* Check software intervention is enabled? */ + value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) & + DDR4_CRCPARCTL1_ALERT_WAIT_FOR_SW; + if (value) { + debug("%s: Software intervention is enabled\n", + __func__); + + /* Check dfi alert error interrupt is set? */ + value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) & + DDR4_CRCPARSTAT_DFI_ALERT_ERR_INT; + + if (value) { + ret = ddr_retry_software_sequence(umctl2_base); + debug("%s: DFI alert error interrupt ", + __func__); + debug("is set\n"); + + if (ret) + return ret; + } + + /* + * Check fatal parity error interrupt is set? 
+ */ + value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) & + DDR4_CRCPARSTAT_DFI_ALERT_ERR_FATL_INT; + if (value) { + printf("%s: Fatal parity error ", + __func__); + printf("interrupt is set, Hang it!!\n"); + hang(); + } + } + + value = readl(umctl2_base + DDR4_CRCPARSTAT_OFFSET) & + DDR4_CRCPARSTAT_CMD_IN_ERR_WINDOW; + + udelay(1); + WATCHDOG_RESET(); + } + + return 0; +} + +static int enable_quasi_dynamic_reg_grp3(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + u32 i, value, backup; + int ret = 0; + + /* Disable input traffic per port */ + clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN); + + /* Polling AXI port until idle */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_PSTAT_OFFSET), + DDR4_PSTAT_WR_PORT_BUSY_0 | + DDR4_PSTAT_RD_PORT_BUSY_0, false, + TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" controller idle\n"); + return ret; + } + + /* Backup user setting */ + backup = readl(umctl2_base + DDR4_DBG1_OFFSET); + + /* Disable input traffic to the controller */ + setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DIS_HIF); + + /* + * Ensure CAM/data pipelines are empty. + * Poll until CAM/data pipelines are set at least twice, + * timeout at 200ms + */ + for (i = 0; i < 2; i++) { + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_DBGCAM_OFFSET), + DDR4_DBGCAM_WR_DATA_PIPELINE_EMPTY | + DDR4_DBGCAM_RD_DATA_PIPELINE_EMPTY | + DDR4_DBGCAM_DBG_WR_Q_EMPTY | + DDR4_DBGCAM_DBG_RD_Q_EMPTY, true, + TIMEOUT_200MS, false); + if (ret) { + debug("%s: loop(%u): Timeout while waiting for", + __func__, i + 1); + debug(" CAM/data pipelines are empty\n"); + + goto out; + } + } + + if (umctl2_type == DDRTYPE_DDR4) { + /* Check DDR4 retry is enabled ? 
*/ + value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) & + DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE; + + if (value) { + debug("%s: DDR4 retry is enabled\n", __func__); + + ret = ensure_retry_procedure_complete(umctl2_base); + if (ret) { + debug("%s: Timeout while waiting for", + __func__); + debug(" retry procedure complete\n"); + + goto out; + } + } + } + + debug("%s: Quasi-dynamic group 3 registers are enabled\n", __func__); + +out: + /* Restore user setting */ + writel(backup, umctl2_base + DDR4_DBG1_OFFSET); + + return ret; +} + +static enum ddr_type get_ddr_type(phys_addr_t ddr_type_location) +{ + u32 ddr_type_magic = readl(ddr_type_location); + + if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_DDR4_TYPE) + return DDRTYPE_DDR4; + + if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_0_TYPE) + return DDRTYPE_LPDDR4_0; + + if (ddr_type_magic == SOC64_HANDOFF_DDR_UMCTL2_LPDDR4_1_TYPE) + return DDRTYPE_LPDDR4_1; + + return DDRTYPE_UNKNOWN; +} + +static void use_lpddr4_interleaving(bool set) +{ + if (set) { + printf("Starting LPDDR4 interleaving configuration ...\n"); + setbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0, + BIT(5)); + } else { + printf("Starting LPDDR4 non-interleaving configuration ...\n"); + clrbits_le32(FPGA2SDRAM_MGR_MAIN_SIDEBANDMGR_FLAGOUTSET0, + BIT(5)); + } +} + +static void use_ddr4(enum ddr_type type) +{ + if (type == DDRTYPE_DDR4) { + printf("Starting DDR4 configuration ...\n"); + setbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE, + SYSMGR_SOC64_DDR_MODE_MSK); + } else if (type == DDRTYPE_LPDDR4_0) { + printf("Starting LPDDR4 configuration ...\n"); + clrbits_le32(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_DDR_MODE, + SYSMGR_SOC64_DDR_MODE_MSK); + + use_lpddr4_interleaving(false); + } +} + +static int scrubber_ddr_config(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + u32 backup[9]; + int ret; + + /* Reset to default value, prevent scrubber stop due to lower power */ + writel(0, umctl2_base + 
DDR4_PWRCTL_OFFSET); + + /* Backup user settings */ + backup[0] = readl(umctl2_base + DDR4_SBRCTL_OFFSET); + backup[1] = readl(umctl2_base + DDR4_SBRWDATA0_OFFSET); + backup[2] = readl(umctl2_base + DDR4_SBRSTART0_OFFSET); + if (umctl2_type == DDRTYPE_DDR4) { + backup[3] = readl(umctl2_base + DDR4_SBRWDATA1_OFFSET); + backup[4] = readl(umctl2_base + DDR4_SBRSTART1_OFFSET); + } + backup[5] = readl(umctl2_base + DDR4_SBRRANGE0_OFFSET); + backup[6] = readl(umctl2_base + DDR4_SBRRANGE1_OFFSET); + backup[7] = readl(umctl2_base + DDR4_ECCCFG0_OFFSET); + backup[8] = readl(umctl2_base + DDR4_ECCCFG1_OFFSET); + + if (umctl2_type != DDRTYPE_DDR4) { + /* Lock ECC region, ensure this regions is not being accessed */ + setbits_le32(umctl2_base + DDR4_ECCCFG1_OFFSET, + LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK); + } + /* Disable input traffic per port */ + clrbits_le32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN); + /* Disables scrubber */ + clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN); + /* Polling all scrub writes data have been sent */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY, + false, TIMEOUT_5000MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" sending all scrub data\n"); + return ret; + } + + /* LPDDR4 supports inline ECC only */ + if (umctl2_type != DDRTYPE_DDR4) { + /* + * Setting all regions for protected, this is required for + * srubber to init whole LPDDR4 expect ECC region + */ + writel(((ONE_EIGHT << + LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) | + (ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT)), + umctl2_base + DDR4_ECCCFG0_OFFSET); + } + + /* Scrub_burst = 1, scrub_mode = 1(performs writes) */ + writel(DDR4_SBRCTL_SCRUB_BURST_1 | DDR4_SBRCTL_SCRUB_WRITE, + umctl2_base + DDR4_SBRCTL_OFFSET); + + /* Zeroing whole DDR */ + writel(0, umctl2_base + DDR4_SBRWDATA0_OFFSET); + writel(0, umctl2_base + DDR4_SBRSTART0_OFFSET); + if 
(umctl2_type == DDRTYPE_DDR4) { + writel(0, umctl2_base + DDR4_SBRWDATA1_OFFSET); + writel(0, umctl2_base + DDR4_SBRSTART1_OFFSET); + } + writel(0, umctl2_base + DDR4_SBRRANGE0_OFFSET); + writel(0, umctl2_base + DDR4_SBRRANGE1_OFFSET); + + /* Enables scrubber */ + setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN); + /* Polling all scrub writes commands have been sent */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_DONE, + true, TIMEOUT_5000MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" sending all scrub commands\n"); + return ret; + } + + /* Polling all scrub writes data have been sent */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SBRSTAT_OFFSET), DDR4_SBRSTAT_SCRUB_BUSY, + false, TIMEOUT_5000MS, false); + if (ret) { + printf("%s: Timeout while waiting for", __func__); + printf(" sending all scrub data\n"); + return ret; + } + + /* Disables scrubber */ + clrbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN); + + /* Restore user settings */ + writel(backup[0], umctl2_base + DDR4_SBRCTL_OFFSET); + writel(backup[1], umctl2_base + DDR4_SBRWDATA0_OFFSET); + writel(backup[2], umctl2_base + DDR4_SBRSTART0_OFFSET); + if (umctl2_type == DDRTYPE_DDR4) { + writel(backup[3], umctl2_base + DDR4_SBRWDATA1_OFFSET); + writel(backup[4], umctl2_base + DDR4_SBRSTART1_OFFSET); + } + writel(backup[5], umctl2_base + DDR4_SBRRANGE0_OFFSET); + writel(backup[6], umctl2_base + DDR4_SBRRANGE1_OFFSET); + writel(backup[7], umctl2_base + DDR4_ECCCFG0_OFFSET); + writel(backup[8], umctl2_base + DDR4_ECCCFG1_OFFSET); + + /* Enables ECC scrub on scrubber */ + if (!(readl(umctl2_base + DDR4_SBRCTL_OFFSET) & + DDR4_SBRCTL_SCRUB_WRITE)) { + /* Enables scrubber */ + setbits_le32(umctl2_base + DDR4_SBRCTL_OFFSET, + DDR4_SBRCTL_SCRUB_EN); + } + + return 0; +} + +static void handoff_process(struct ddr_handoff *ddr_handoff_info, + phys_addr_t handoff_base, size_t 
length, + phys_addr_t base) +{ + u32 handoff_table[length]; + u32 i, value = 0; + + /* Execute configuration handoff */ + socfpga_handoff_read((void *)handoff_base, handoff_table, length); + + for (i = 0; i < length; i = i + 2) { + debug("%s: wr = 0x%08x ", __func__, handoff_table[i + 1]); + if (ddr_handoff_info && base == ddr_handoff_info->phy_base) { + /* + * Convert PHY odd offset to even offset that + * supported by ARM processor. + */ + value = handoff_table[i] << 1; + + writew(handoff_table[i + 1], + (uintptr_t)(value + base)); + debug("rd = 0x%08x ", + readw((uintptr_t)(value + base))); + debug("PHY offset: 0x%08x ", handoff_table[i + 1]); + } else { + value = handoff_table[i]; + writel(handoff_table[i + 1], (uintptr_t)(value + + base)); + debug("rd = 0x%08x ", + readl((uintptr_t)(value + base))); + } + + debug("Absolute addr: 0x%08llx, APB offset: 0x%08x\n", + value + base, value); + } +} + +static int init_umctl2(phys_addr_t umctl2_handoff_base, + phys_addr_t umctl2_base, enum ddr_type umctl2_type, + size_t umctl2_handoff_length, + u32 *user_backup) +{ + int ret; + + if (umctl2_type == DDRTYPE_DDR4) + printf("Initializing DDR4 controller ...\n"); + else if (umctl2_type == DDRTYPE_LPDDR4_0) + printf("Initializing LPDDR4_0 controller ...\n"); + else if (umctl2_type == DDRTYPE_LPDDR4_1) + printf("Initializing LPDDR4_1 controller ...\n"); + + /* Prevent controller from issuing read/write to SDRAM */ + setbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ); + + /* Put SDRAM into self-refresh */ + setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN); + + /* Enable quasi-dynamic programing of the controller registers */ + clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Ensure the controller is in initialization mode */ + ret = wait_for_bit_le32((const void *)(umctl2_base + DDR4_STAT_OFFSET), + DDR4_STAT_OPERATING_MODE, false, TIMEOUT_200MS, + false); + if (ret) { + debug("%s: Timeout while waiting for", 
__func__); + debug(" init operating mode\n"); + return ret; + } + + debug("%s: UMCTL2 handoff base address = 0x%p table length = 0x%08x\n", + __func__, (u32 *)umctl2_handoff_base, + (u32)umctl2_handoff_length); + + handoff_process(NULL, umctl2_handoff_base, umctl2_handoff_length, + umctl2_base); + + /* Backup user settings, restore after DDR up running */ + *user_backup = readl(umctl2_base + DDR4_PWRCTL_OFFSET); + + /* Disable self resfresh */ + clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_EN); + + if (umctl2_type == DDRTYPE_LPDDR4_0 || + umctl2_type == DDRTYPE_LPDDR4_1) { + /* Setting selfref_sw to 1, based on lpddr4 requirement */ + setbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, + DDR4_PWRCTL_SELFREF_SW); + + /* Backup user settings, restore after DDR up running */ + user_backup++; + *user_backup = readl(umctl2_base + DDR4_INIT0_OFFSET) & + DDR4_INIT0_SKIP_RAM_INIT; + + /* + * Setting INIT0.skip_dram_init to 0x3, based on lpddr4 + * requirement + */ + setbits_le32(umctl2_base + DDR4_INIT0_OFFSET, + DDR4_INIT0_SKIP_RAM_INIT); + } + + /* Complete quasi-dynamic register programming */ + setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Enable controller from issuing read/write to SDRAM */ + clrbits_le32(umctl2_base + DDR4_DBG1_OFFSET, DDR4_DBG1_DISDQ); + + return 0; +} + +static int phy_pre_handoff_config(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + int ret; + u32 value; + + if (umctl2_type == DDRTYPE_DDR4) { + /* Check DDR4 retry is enabled ? 
*/ + value = readl(umctl2_base + DDR4_CRCPARCTL1_OFFSET) & + DDR4_CRCPARCTL1_CRC_PARITY_RETRY_ENABLE; + + if (value) { + debug("%s: DDR4 retry is enabled\n", __func__); + debug("%s: Disable auto refresh is not supported\n", + __func__); + } else { + /* Disable auto refresh */ + setbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET, + DDR4_RFSHCTL3_DIS_AUTO_REFRESH); + } + } + + /* Disable selfref_en & powerdown_en, nvr disable dfi dram clk */ + clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, + DDR4_PWRCTL_EN_DFI_DRAM_CLK_DISABLE | + DDR4_PWRCTL_POWERDOWN_EN | DDR4_PWRCTL_SELFREF_EN); + + /* Enable quasi-dynamic programing of the controller registers */ + clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type); + if (ret) + return ret; + + /* Masking dfi init complete */ + clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET, + DDR4_DFIMISC_DFI_INIT_COMPLETE_EN); + + /* Complete quasi-dynamic register programming */ + setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Polling programming done */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK, + true, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" programming done\n"); + } + + return ret; +} + +static int init_phy(struct ddr_handoff *ddr_handoff_info) +{ + int ret; + + printf("Initializing DDR PHY ...\n"); + + if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 || + ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) { + ret = phy_pre_handoff_config(ddr_handoff_info->cntlr_base, + ddr_handoff_info->cntlr_t); + if (ret) + return ret; + } + + if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) { + ret = phy_pre_handoff_config + (ddr_handoff_info->cntlr2_base, + ddr_handoff_info->cntlr2_t); + if (ret) + return ret; + } + + /* Execute PHY configuration handoff */ + handoff_process(ddr_handoff_info, ddr_handoff_info->phy_handoff_base, + 
ddr_handoff_info->phy_handoff_length, + ddr_handoff_info->phy_base); + + printf("DDR PHY configuration is completed\n"); + + return 0; +} + +static void phy_init_engine(struct ddr_handoff *handoff) +{ + printf("Load PHY Init Engine ...\n"); + + /* Execute PIE production code handoff */ + handoff_process(handoff, handoff->phy_engine_handoff_base, + handoff->phy_engine_handoff_length, handoff->phy_base); + + printf("End of loading PHY Init Engine\n"); +} + +int populate_ddr_handoff(struct ddr_handoff *handoff) +{ + phys_addr_t next_section_header; + + /* DDR handoff */ + handoff->mem_reset_base = SOC64_HANDOFF_DDR_MEMRESET_BASE; + debug("%s: DDR memory reset base = 0x%x\n", __func__, + (u32)handoff->mem_reset_base); + debug("%s: DDR memory reset address = 0x%x\n", __func__, + readl(handoff->mem_reset_base)); + + /* Beginning of DDR controller handoff */ + handoff->cntlr_handoff_base = SOC64_HANDOFF_DDR_UMCTL2_SECTION; + debug("%s: cntlr handoff base = 0x%x\n", __func__, + (u32)handoff->cntlr_handoff_base); + + /* Get 1st DDR type */ + handoff->cntlr_t = get_ddr_type(handoff->cntlr_handoff_base + + SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET); + if (handoff->cntlr_t == DDRTYPE_LPDDR4_1 || + handoff->cntlr_t == DDRTYPE_UNKNOWN) { + debug("%s: Wrong DDR handoff format, the 1st DDR ", __func__); + debug("type must be DDR4 or LPDDR4_0\n"); + return -ENOEXEC; + } + + /* 1st cntlr base physical address */ + handoff->cntlr_base = readl(handoff->cntlr_handoff_base + + SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET); + debug("%s: cntlr base = 0x%x\n", __func__, (u32)handoff->cntlr_base); + + /* Get the total length of DDR cntlr handoff section */ + handoff->cntlr_total_length = readl(handoff->cntlr_handoff_base + + SOC64_HANDOFF_OFFSET_LENGTH); + debug("%s: Umctl2 total length in byte = 0x%x\n", __func__, + (u32)handoff->cntlr_total_length); + + /* Get the length of user setting data in DDR cntlr handoff section */ + handoff->cntlr_handoff_length = socfpga_get_handoff_size((void *) + 
handoff->cntlr_handoff_base); + debug("%s: Umctl2 handoff length in word(32-bit) = 0x%x\n", __func__, + (u32)handoff->cntlr_handoff_length); + + /* Wrong format on user setting data */ + if (handoff->cntlr_handoff_length < 0) { + debug("%s: Wrong format on user setting data\n", __func__); + return -ENOEXEC; + } + + /* Get the next handoff section address */ + next_section_header = handoff->cntlr_handoff_base + + handoff->cntlr_total_length; + debug("%s: Next handoff section header location = 0x%llx\n", __func__, + next_section_header); + + /* + * Checking next section handoff is cntlr or PHY, and changing + * subsequent implementation accordingly + */ + if (readl(next_section_header) == SOC64_HANDOFF_DDR_UMCTL2_MAGIC) { + /* Get the next cntlr handoff section address */ + handoff->cntlr2_handoff_base = next_section_header; + debug("%s: umctl2 2nd handoff base = 0x%x\n", __func__, + (u32)handoff->cntlr2_handoff_base); + + /* Get 2nd DDR type */ + handoff->cntlr2_t = get_ddr_type(handoff->cntlr2_handoff_base + + SOC64_HANDOFF_DDR_UMCTL2_TYPE_OFFSET); + if (handoff->cntlr2_t == DDRTYPE_LPDDR4_0 || + handoff->cntlr2_t == DDRTYPE_UNKNOWN) { + debug("%s: Wrong DDR handoff format, the 2nd DDR ", + __func__); + debug("type must be LPDDR4_1\n"); + return -ENOEXEC; + } + + /* 2nd umctl2 base physical address */ + handoff->cntlr2_base = + readl(handoff->cntlr2_handoff_base + + SOC64_HANDOFF_DDR_UMCTL2_BASE_ADDR_OFFSET); + debug("%s: cntlr2 base = 0x%x\n", __func__, + (u32)handoff->cntlr2_base); + + /* Get the total length of 2nd DDR umctl2 handoff section */ + handoff->cntlr2_total_length = + readl(handoff->cntlr2_handoff_base + + SOC64_HANDOFF_OFFSET_LENGTH); + debug("%s: Umctl2_2nd total length in byte = 0x%x\n", __func__, + (u32)handoff->cntlr2_total_length); + + /* + * Get the length of user setting data in DDR umctl2 handoff + * section + */ + handoff->cntlr2_handoff_length = + socfpga_get_handoff_size((void *) + handoff->cntlr2_handoff_base); + debug("%s: cntlr2 handoff 
length in word(32-bit) = 0x%x\n", + __func__, + (u32)handoff->cntlr2_handoff_length); + + /* Wrong format on user setting data */ + if (handoff->cntlr2_handoff_length < 0) { + debug("%s: Wrong format on umctl2 user setting data\n", + __func__); + return -ENOEXEC; + } + + /* Get the next handoff section address */ + next_section_header = handoff->cntlr2_handoff_base + + handoff->cntlr2_total_length; + debug("%s: Next handoff section header location = 0x%llx\n", + __func__, next_section_header); + } + + /* Checking next section handoff is PHY ? */ + if (readl(next_section_header) == SOC64_HANDOFF_DDR_PHY_MAGIC) { + /* DDR PHY handoff */ + handoff->phy_handoff_base = next_section_header; + debug("%s: PHY handoff base = 0x%x\n", __func__, + (u32)handoff->phy_handoff_base); + + /* PHY base physical address */ + handoff->phy_base = readl(handoff->phy_handoff_base + + SOC64_HANDOFF_DDR_PHY_BASE_OFFSET); + debug("%s: PHY base = 0x%x\n", __func__, + (u32)handoff->phy_base); + + /* Get the total length of PHY handoff section */ + handoff->phy_total_length = readl(handoff->phy_handoff_base + + SOC64_HANDOFF_OFFSET_LENGTH); + debug("%s: PHY total length in byte = 0x%x\n", __func__, + (u32)handoff->phy_total_length); + + /* + * Get the length of user setting data in DDR PHY handoff + * section + */ + handoff->phy_handoff_length = socfpga_get_handoff_size((void *) + handoff->phy_handoff_base); + debug("%s: PHY handoff length in word(32-bit) = 0x%x\n", + __func__, (u32)handoff->phy_handoff_length); + + /* Wrong format on PHY user setting data */ + if (handoff->phy_handoff_length < 0) { + debug("%s: Wrong format on PHY user setting data\n", + __func__); + return -ENOEXEC; + } + + /* Get the next handoff section address */ + next_section_header = handoff->phy_handoff_base + + handoff->phy_total_length; + debug("%s: Next handoff section header location = 0x%llx\n", + __func__, next_section_header); + } else { + debug("%s: Wrong format for DDR handoff, expect PHY", + __func__); + 
debug(" handoff section after umctl2 handoff section\n"); + return -ENOEXEC; + } + + /* Checking next section handoff is PHY init Engine ? */ + if (readl(next_section_header) == + SOC64_HANDOFF_DDR_PHY_INIT_ENGINE_MAGIC) { + /* DDR PHY Engine handoff */ + handoff->phy_engine_handoff_base = next_section_header; + debug("%s: PHY init engine handoff base = 0x%x\n", __func__, + (u32)handoff->phy_engine_handoff_base); + + /* Get the total length of PHY init engine handoff section */ + handoff->phy_engine_total_length = + readl(handoff->phy_engine_handoff_base + + SOC64_HANDOFF_OFFSET_LENGTH); + debug("%s: PHY engine total length in byte = 0x%x\n", __func__, + (u32)handoff->phy_engine_total_length); + + /* + * Get the length of user setting data in DDR PHY init engine + * handoff section + */ + handoff->phy_engine_handoff_length = + socfpga_get_handoff_size((void *) + handoff->phy_engine_handoff_base); + debug("%s: PHY engine handoff length in word(32-bit) = 0x%x\n", + __func__, (u32)handoff->phy_engine_handoff_length); + + /* Wrong format on PHY init engine setting data */ + if (handoff->phy_engine_handoff_length < 0) { + debug("%s: Wrong format on PHY init engine ", + __func__); + debug("user setting data\n"); + return -ENOEXEC; + } + } else { + debug("%s: Wrong format for DDR handoff, expect PHY", + __func__); + debug(" init engine handoff section after PHY handoff\n"); + debug(" section\n"); + return -ENOEXEC; + } + + handoff->train_imem_base = handoff->phy_base + + DDR_PHY_TRAIN_IMEM_OFFSET; + debug("%s: PHY train IMEM base = 0x%x\n", + __func__, (u32)handoff->train_imem_base); + + handoff->train_dmem_base = handoff->phy_base + + DDR_PHY_TRAIN_DMEM_OFFSET; + debug("%s: PHY train DMEM base = 0x%x\n", + __func__, (u32)handoff->train_dmem_base); + + handoff->train_imem_length = SOC64_HANDOFF_DDR_TRAIN_IMEM_LENGTH; + debug("%s: PHY train IMEM length = 0x%x\n", + __func__, (u32)handoff->train_imem_length); + + handoff->train_dmem_length = 
SOC64_HANDOFF_DDR_TRAIN_DMEM_LENGTH; + debug("%s: PHY train DMEM length = 0x%x\n", + __func__, (u32)handoff->train_dmem_length); + + return 0; +} + +int enable_ddr_clock(struct udevice *dev) +{ + struct clk *ddr_clk; + int ret; + + /* Enable clock before init DDR */ + ddr_clk = devm_clk_get(dev, "mem_clk"); + if (!IS_ERR(ddr_clk)) { + ret = clk_enable(ddr_clk); + if (ret) { + printf("%s: Failed to enable DDR clock\n", __func__); + return ret; + } + } else { + ret = PTR_ERR(ddr_clk); + debug("%s: Failed to get DDR clock from dts\n", __func__); + return ret; + } + + printf("%s: DDR clock is enabled\n", __func__); + + return 0; +} + +static int ddr_start_dfi_init(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + int ret; + + debug("%s: Start DFI init\n", __func__); + + /* Enable quasi-dynamic programing of controller registers */ + clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type); + if (ret) + return ret; + + /* Start DFI init sequence */ + setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET, + DDR4_DFIMISC_DFI_INIT_START); + + /* Complete quasi-dynamic register programming */ + setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Polling programming done */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SWSTAT_OFFSET), + DDR4_SWSTAT_SW_DONE_ACK, true, + TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" programming done\n"); + } + + return ret; +} + +static int ddr_check_dfi_init_complete(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + int ret; + + /* Polling DFI init complete */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_DFISTAT_OFFSET), + DDR4_DFI_INIT_COMPLETE, true, + TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" DFI init done\n"); + return ret; + } + + debug("%s: DFI init completed.\n", __func__); + + /* Enable 
quasi-dynamic programing of controller registers */ + clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type); + if (ret) + return ret; + + /* Stop DFI init sequence */ + clrbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET, + DDR4_DFIMISC_DFI_INIT_START); + + /* Complete quasi-dynamic register programming */ + setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Polling programming done */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SWSTAT_OFFSET), + DDR4_SWSTAT_SW_DONE_ACK, true, + TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" programming done\n"); + return ret; + } + + debug("%s:DDR programming done\n", __func__); + + return ret; +} + +static int ddr_trigger_sdram_init(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + int ret; + + /* Enable quasi-dynamic programing of controller registers */ + clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type); + if (ret) + return ret; + + /* Unmasking dfi init complete */ + setbits_le32(umctl2_base + DDR4_DFIMISC_OFFSET, + DDR4_DFIMISC_DFI_INIT_COMPLETE_EN); + + /* Software exit from self-refresh */ + clrbits_le32(umctl2_base + DDR4_PWRCTL_OFFSET, DDR4_PWRCTL_SELFREF_SW); + + /* Complete quasi-dynamic register programming */ + setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Polling programming done */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SWSTAT_OFFSET), + DDR4_SWSTAT_SW_DONE_ACK, true, + TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" programming done\n"); + return ret; + } + + debug("%s:DDR programming done\n", __func__); + return ret; +} + +static int ddr_post_handoff_config(phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + int ret = 0; + u32 value; + u32 start = 
get_timer(0); + + do { + if (get_timer(start) > TIMEOUT_200MS) { + debug("%s: Timeout while waiting for", + __func__); + debug(" DDR enters normal operating mode\n"); + return -ETIMEDOUT; + } + + udelay(1); + WATCHDOG_RESET(); + + /* Polling until SDRAM entered normal operating mode */ + value = readl(umctl2_base + DDR4_STAT_OFFSET) & + DDR4_STAT_OPERATING_MODE; + } while (value != OPM_NORMAL); + + printf("DDR entered normal operating mode\n"); + + /* Enabling auto refresh */ + clrbits_le32(umctl2_base + DDR4_RFSHCTL3_OFFSET, + DDR4_RFSHCTL3_DIS_AUTO_REFRESH); + + /* Checking ECC is enabled? */ + value = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE; + if (value) { + printf("ECC is enabled\n"); + ret = scrubber_ddr_config(umctl2_base, umctl2_type); + if (ret) + printf("Failed to enable ECC\n"); + } + + return ret; +} + +static int configure_training_firmware(struct ddr_handoff *ddr_handoff_info, + const void *train_imem, + const void *train_dmem) +{ + int ret = 0; + + printf("Configuring training firmware ...\n"); + + /* Reset SDRAM */ + writew(DDR_PHY_PROTECT_MEMRESET, + (uintptr_t)(ddr_handoff_info->phy_base + + DDR_PHY_MEMRESETL_OFFSET)); + + /* Enable access to the PHY configuration registers */ + clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET, + DDR_PHY_MICROCONTMUXSEL); + + /* Copy train IMEM bin */ + memcpy((void *)ddr_handoff_info->train_imem_base, train_imem, + ddr_handoff_info->train_imem_length); + + ret = memcmp((void *)ddr_handoff_info->train_imem_base, train_imem, + ddr_handoff_info->train_imem_length); + if (ret) { + debug("%s: Failed to copy train IMEM binary\n", __func__); + /* Isolate the APB access from internal CSRs */ + setbits_le16(ddr_handoff_info->phy_base + + DDR_PHY_APBONLY0_OFFSET, DDR_PHY_MICROCONTMUXSEL); + return ret; + } + + memcpy((void *)ddr_handoff_info->train_dmem_base, train_dmem, + ddr_handoff_info->train_dmem_length); + + ret = memcmp((void *)ddr_handoff_info->train_dmem_base, train_dmem, + 
ddr_handoff_info->train_dmem_length); + if (ret) + debug("%s: Failed to copy train DMEM binary\n", __func__); + + /* Isolate the APB access from internal CSRs */ + setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_APBONLY0_OFFSET, + DDR_PHY_MICROCONTMUXSEL); + + return ret; +} + +static void calibrating_sdram(struct ddr_handoff *ddr_handoff_info) +{ + /* Init mailbox protocol - set 1 to DCTWRITEPROT[0] */ + setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET, + DDR_PHY_DCTWRITEPROT); + + /* Init mailbox protocol - set 1 to UCTWRITEPROT[0] */ + setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_UCTWRITEPROT_OFFSET, + DDR_PHY_UCTWRITEPROT); + + /* Reset and stalling ARC processor */ + setbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET, + DDR_PHY_MICRORESET_RESET | DDR_PHY_MICRORESET_STALL); + + /* Release ARC processor */ + clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET, + DDR_PHY_MICRORESET_RESET); + + /* Starting PHY firmware execution */ + clrbits_le16(ddr_handoff_info->phy_base + DDR_PHY_MICRORESET_OFFSET, + DDR_PHY_MICRORESET_STALL); +} + +static int get_mail(struct ddr_handoff *handoff, enum message_mode mode, + u32 *message_id) +{ + int ret; + + /* Polling major messages from PMU */ + ret = wait_for_bit_le16((const void *)(handoff->phy_base + + DDR_PHY_UCTSHADOWREGS_OFFSET), + DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW, + false, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", + __func__); + debug(" major messages from PMU\n"); + return ret; + } + + *message_id = readw((uintptr_t)(handoff->phy_base + + DDR_PHY_UCTWRITEONLYSHADOW_OFFSET)); + + if (mode == STREAMING_MESSAGE) + *message_id |= readw((uintptr_t)((handoff->phy_base + + DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) << + SZ_16; + + /* Ack the receipt of the major message */ + clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET, + DDR_PHY_DCTWRITEPROT); + + ret = wait_for_bit_le16((const void *)(handoff->phy_base + + 
DDR_PHY_UCTSHADOWREGS_OFFSET), + DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW, + true, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", + __func__); + debug(" ack the receipt of the major message completed\n"); + return ret; + } + + /* Complete protocol */ + setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET, + DDR_PHY_DCTWRITEPROT); + + return ret; +} + +static int get_mail_streaming(struct ddr_handoff *handoff, + enum message_mode mode, u32 *index) +{ + int ret; + + *index = readw((uintptr_t)(handoff->phy_base + + DDR_PHY_UCTWRITEONLYSHADOW_OFFSET)); + + if (mode == STREAMING_MESSAGE) + *index |= readw((uintptr_t)((handoff->phy_base + + DDR_PHY_UCTDATWRITEONLYSHADOW_OFFSET))) << + SZ_16; + + /* Ack the receipt of the major message */ + clrbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET, + DDR_PHY_DCTWRITEPROT); + + ret = wait_for_bit_le16((const void *)(handoff->phy_base + + DDR_PHY_UCTSHADOWREGS_OFFSET), + DDR_PHY_UCTSHADOWREGS_UCTWRITEPROTESHADOW, + true, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", + __func__); + debug(" ack the receipt of the major message completed\n"); + return ret; + } + + /* Complete protocol */ + setbits_le16(handoff->phy_base + DDR_PHY_DCTWRITEPROT_OFFSET, + DDR_PHY_DCTWRITEPROT); + + return 0; +} + +static int decode_streaming_message(struct ddr_handoff *ddr_handoff_info, + u32 *streaming_index) +{ + int i = 0, ret; + u32 temp; + + temp = *streaming_index; + + while (i < GET_LOWHW_DATA(temp)) { + ret = get_mail(ddr_handoff_info, STREAMING_MESSAGE, + streaming_index); + if (ret) + return ret; + + printf("args[%d]: 0x%x ", i, *streaming_index); + i++; + } + + return 0; +} + +static int poll_for_training_complete(struct ddr_handoff *ddr_handoff_info) +{ + int ret; + u32 message_id = 0; + u32 streaming_index = 0; + + do { + ret = get_mail(ddr_handoff_info, MAJOR_MESSAGE, &message_id); + if (ret) + return ret; + + printf("Major message id = 0%x\n", message_id); + + 
if (message_id == FW_STREAMING_MSG_ID) { + ret = get_mail_streaming(ddr_handoff_info, + STREAMING_MESSAGE, + &streaming_index); + if (ret) + return ret; + + printf("streaming index 0%x : ", streaming_index); + + decode_streaming_message(ddr_handoff_info, + &streaming_index); + + printf("\n"); + } + } while ((message_id != FW_TRAINING_COMPLETED_STAT) && + (message_id != FW_TRAINING_FAILED_STAT)); + + if (message_id == FW_TRAINING_COMPLETED_STAT) { + printf("DDR firmware training completed\n"); + } else if (message_id == FW_TRAINING_FAILED_STAT) { + printf("DDR firmware training failed\n"); + hang(); + } + + return 0; +} + +static void enable_phy_clk_for_csr_access(struct ddr_handoff *handoff, + bool enable) +{ + if (enable) { + /* Enable PHY clk */ + setbits_le16((uintptr_t)(handoff->phy_base + + DDR_PHY_UCCLKHCLKENABLES_OFFSET), + DDR_PHY_UCCLKHCLKENABLES_UCCLKEN | + DDR_PHY_UCCLKHCLKENABLES_HCLKEN); + } else { + /* Disable PHY clk */ + clrbits_le16((uintptr_t)(handoff->phy_base + + DDR_PHY_UCCLKHCLKENABLES_OFFSET), + DDR_PHY_UCCLKHCLKENABLES_UCCLKEN | + DDR_PHY_UCCLKHCLKENABLES_HCLKEN); + } +} + +/* helper function for updating train result to umctl2 RANKCTL register */ +static void set_cal_res_to_rankctrl(u32 reg_addr, u16 update_value, + u32 mask, u32 msb_mask, u32 shift) +{ + u32 reg, value; + + reg = readl((uintptr_t)reg_addr); + + debug("max value divided by 2 is 0x%x\n", update_value); + debug("umclt2 register 0x%x value is 0%x before ", reg_addr, reg); + debug("update with train result\n"); + + value = (reg & mask) >> shift; + + value += update_value + 3; + + /* reg value greater than 0xF, set one to diff_rank_wr_gap_msb */ + if (value > 0xF) + setbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask); + else + clrbits_le32((u32 *)(uintptr_t)reg_addr, msb_mask); + + reg = readl((uintptr_t)reg_addr); + + value = (value << shift) & mask; + + /* update register */ + writel((reg & (~mask)) | value, (uintptr_t)reg_addr); + + reg = readl((uintptr_t)reg_addr); + 
debug("umclt2 register 0x%x value is 0%x before ", reg_addr, reg); + debug("update with train result\n"); +} + +/* helper function for updating train result to register */ +static void set_cal_res_to_reg(u32 reg_addr, u16 update_value, u32 mask, + u32 shift) +{ + u32 reg, value; + + reg = readl((uintptr_t)reg_addr); + + debug("max value divided by 2 is 0x%x\n", update_value); + debug("umclt2 register 0x%x value is 0%x before ", reg_addr, reg); + debug("update with train result\n"); + + value = (reg & mask) >> shift; + + value = ((value + update_value + 3) << shift) & mask; + + /* update register */ + writel((reg & (~mask)) | value, (uintptr_t)reg_addr); + + reg = readl((uintptr_t)reg_addr); + debug("umclt2 register 0x%x value is 0%x before ", reg_addr, reg); + debug("update with train result\n"); +} + +static u16 get_max_txdqsdlytg0_ux_p0(struct ddr_handoff *handoff, u32 reg, + u8 numdbyte, u16 upd_val) +{ + u32 b_addr; + u16 val; + u8 byte; + + /* Getting max value from DBYTEx TxDqsDlyTg0_ux_p0 */ + for (byte = 0; byte < numdbyte; byte++) { + b_addr = byte << 13; + + /* TxDqsDlyTg0[9:6] is the coarse delay */ + val = (readw((uintptr_t)(handoff->phy_base + + reg + b_addr)) & + DDR_PHY_TXDQDLYTG0_COARSE_DELAY) >> + DDR_PHY_TXDQDLYTG0_COARSE_DELAY_SHIFT; + + upd_val = max(val, upd_val); + } + + return upd_val; +} + +static int set_cal_res_to_umctl2(struct ddr_handoff *handoff, + phys_addr_t umctl2_base, + enum ddr_type umctl2_type) +{ + int ret; + u8 numdbyte = 0x8; + u16 upd_val, val; + u32 dramtmg2_reg_addr, rankctl_reg_addr, reg_addr; + + /* Enable quasi-dynamic programing of the controller registers */ + clrbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + ret = enable_quasi_dynamic_reg_grp3(umctl2_base, umctl2_type); + if (ret) + return ret; + + /* Enable access to the PHY configuration registers */ + clrbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET, + DDR_PHY_MICROCONTMUXSEL); + + if (umctl2_type == DDRTYPE_DDR4) { + val = 
GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_WW_1_0_OFFSET))); + + upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_WW_0_1_OFFSET))); + } else if (umctl2_type == DDRTYPE_LPDDR4_0) { + val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_WW_1_0_OFFSET))); + + upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_WW_0_1_OFFSET))); + } else if (umctl2_type == DDRTYPE_LPDDR4_1) { + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_WW_1_0_OFFSET))); + + upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_WW_0_1_OFFSET))); + } + + upd_val = max(val, upd_val); + debug("max value is 0x%x\n", upd_val); + + /* Divided by two is required when running in freq ratio 1:2 */ + if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO)) + upd_val = DIV_ROUND_CLOSEST(upd_val, 2); + + debug("Update train value to umctl2 RANKCTL.diff_rank_wr_gap\n"); + rankctl_reg_addr = umctl2_base + DDR4_RANKCTL_OFFSET; + /* Update train value to umctl2 RANKCTL.diff_rank_wr_gap */ + set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val, + DDR4_RANKCTL_DIFF_RANK_WR_GAP, + DDR4_RANKCTL_DIFF_RANK_WR_GAP_MSB, + DDR4_RANKCTL_DIFF_RANK_WR_GAP_SHIFT); + + debug("Update train value to umctl2 DRAMTMG2.W2RD\n"); + dramtmg2_reg_addr = umctl2_base + DDR4_DRAMTMG2_OFFSET; + /* Update train value to umctl2 dramtmg2.wr2rd */ + set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_WR2RD, 0); + + if (umctl2_type == DDRTYPE_DDR4) { + debug("Update train value to umctl2 DRAMTMG9.W2RD_S\n"); + reg_addr = umctl2_base + DDR4_DRAMTMG9_OFFSET; + /* Update train value to umctl2 dramtmg9.wr2rd_s */ + set_cal_res_to_reg(reg_addr, upd_val, DDR4_DRAMTMG9_W2RD_S, 0); + } + + if (umctl2_type == DDRTYPE_DDR4) { + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_RR_1_0_OFFSET))); + + upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + 
DMEM_MB_CDD_RR_0_1_OFFSET))); + } else if (umctl2_type == DDRTYPE_LPDDR4_0) { + val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_RR_1_0_OFFSET))); + + upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_RR_0_1_OFFSET))); + } else if (umctl2_type == DDRTYPE_LPDDR4_1) { + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_RR_1_0_OFFSET))); + + upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_RR_0_1_OFFSET))); + } + + upd_val = max(val, upd_val); + debug("max value is 0x%x\n", upd_val); + + /* Divided by two is required when running in freq ratio 1:2 */ + if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO)) + upd_val = DIV_ROUND_CLOSEST(upd_val, 2); + + debug("Update train value to umctl2 RANKCTL.diff_rank_rd_gap\n"); + /* Update train value to umctl2 RANKCTL.diff_rank_rd_gap */ + set_cal_res_to_rankctrl(rankctl_reg_addr, upd_val, + DDR4_RANKCTL_DIFF_RANK_RD_GAP, + DDR4_RANKCTL_DIFF_RANK_RD_GAP_MSB, + DDR4_RANKCTL_DIFF_RANK_RD_GAP_SHIFT); + + if (umctl2_type == DDRTYPE_DDR4) { + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_RW_1_1_OFFSET))); + + upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_RW_1_0_OFFSET))); + + upd_val = max(val, upd_val); + + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_RW_0_1_OFFSET))); + + upd_val = max(val, upd_val); + + val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_RW_0_0_OFFSET))); + + upd_val = max(val, upd_val); + } else if (umctl2_type == DDRTYPE_LPDDR4_0) { + val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_RW_1_1_OFFSET))); + + upd_val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_RW_1_0_OFFSET))); + + upd_val = max(val, upd_val); + + val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_RW_0_1_OFFSET))); + + upd_val = max(val, 
upd_val); + + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHA_RW_0_0_OFFSET))); + + upd_val = max(val, upd_val); + } else if (umctl2_type == DDRTYPE_LPDDR4_1) { + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_RW_1_1_OFFSET))); + + upd_val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_RW_1_0_OFFSET))); + + upd_val = max(val, upd_val); + + val = GET_HIGHB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_RW_0_1_OFFSET))); + + upd_val = max(val, upd_val); + + val = GET_LOWB_DATA(readw((uintptr_t)(handoff->phy_base + + DMEM_MB_CDD_CHB_RW_0_0_OFFSET))); + + upd_val = max(val, upd_val); + } + + debug("max value is 0x%x\n", upd_val); + + /* Divided by two is required when running in freq ratio 1:2 */ + if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO)) + upd_val = DIV_ROUND_CLOSEST(upd_val, 2); + + debug("Update train value to umctl2 dramtmg2.rd2wr\n"); + /* Update train value to umctl2 dramtmg2.rd2wr */ + set_cal_res_to_reg(dramtmg2_reg_addr, upd_val, DDR4_DRAMTMG2_RD2WR, + DDR4_DRAMTMG2_RD2WR_SHIFT); + + /* Checking ECC is enabled?, lpddr4 using inline ECC */ + val = readl(umctl2_base + DDR4_ECCCFG0_OFFSET) & DDR4_ECC_MODE; + if (val && umctl2_type == DDRTYPE_DDR4) + numdbyte = 0x9; + + upd_val = 0; + + /* Getting max value from DBYTEx TxDqsDlyTg0_u0_p0 */ + upd_val = get_max_txdqsdlytg0_ux_p0(handoff, + DDR_PHY_DBYTE0_TXDQDLYTG0_U0_P0, + numdbyte, upd_val); + + /* Getting max value from DBYTEx TxDqsDlyTg0_u1_p0 */ + upd_val = get_max_txdqsdlytg0_ux_p0(handoff, + DDR_PHY_DBYTE0_TXDQDLYTG0_U1_P0, + numdbyte, upd_val); + + debug("TxDqsDlyTg0 max value is 0x%x\n", upd_val); + + /* Divided by two is required when running in freq ratio 1:2 */ + if (!(readl(umctl2_base + DDR4_MSTR_OFFSET) & DDR4_FREQ_RATIO)) + upd_val = DIV_ROUND_CLOSEST(upd_val, 2); + + reg_addr = umctl2_base + DDR4_DFITMG1_OFFSET; + /* Update train value to umctl2 dfitmg1.dfi_wrdata_delay */ + 
set_cal_res_to_reg(reg_addr, upd_val, DDR4_DFITMG1_DFI_T_WRDATA_DELAY, + DDR4_DFITMG1_DFI_T_WRDATA_SHIFT); + + /* Complete quasi-dynamic register programming */ + setbits_le32(umctl2_base + DDR4_SWCTL_OFFSET, DDR4_SWCTL_SW_DONE); + + /* Polling programming done */ + ret = wait_for_bit_le32((const void *)(umctl2_base + + DDR4_SWSTAT_OFFSET), DDR4_SWSTAT_SW_DONE_ACK, + true, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" programming done\n"); + } + + /* Isolate the APB access from internal CSRs */ + setbits_le16(handoff->phy_base + DDR_PHY_APBONLY0_OFFSET, + DDR_PHY_MICROCONTMUXSEL); + + return ret; +} + +static int update_training_result(struct ddr_handoff *ddr_handoff_info) +{ + int ret = 0; + + /* Updating training result to first DDR controller */ + if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 || + ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) { + ret = set_cal_res_to_umctl2(ddr_handoff_info, + ddr_handoff_info->cntlr_base, + ddr_handoff_info->cntlr_t); + if (ret) { + debug("%s: Failed to update train result to ", + __func__); + debug("first DDR controller\n"); + return ret; + } + } + + /* Updating training result to 2nd DDR controller */ + if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) { + ret = set_cal_res_to_umctl2(ddr_handoff_info, + ddr_handoff_info->cntlr2_base, + ddr_handoff_info->cntlr2_t); + if (ret) { + debug("%s: Failed to update train result to ", + __func__); + debug("2nd DDR controller\n"); + } + } + + return ret; +} + +static int start_ddr_calibration(struct ddr_handoff *ddr_handoff_info) +{ + int ret; + + /* Implement 1D training firmware */ + ret = configure_training_firmware(ddr_handoff_info, + (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_1D_SECTION, + (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_1D_SECTION); + if (ret) { + debug("%s: Failed to configure 1D training firmware\n", + __func__); + return ret; + } + + calibrating_sdram(ddr_handoff_info); + + ret = 
poll_for_training_complete(ddr_handoff_info); + if (ret) { + debug("%s: Failed to get FW training completed\n", + __func__); + return ret; + } + + /* Updating training result to DDR controller */ + ret = update_training_result(ddr_handoff_info); + if (ret) + return ret; + + /* Implement 2D training firmware */ + ret = configure_training_firmware(ddr_handoff_info, + (const void *)SOC64_HANDOFF_DDR_TRAIN_IMEM_2D_SECTION, + (const void *)SOC64_HANDOFF_DDR_TRAIN_DMEM_2D_SECTION); + if (ret) { + debug("%s: Failed to update train result to ", __func__); + debug("DDR controller\n"); + return ret; + } + + calibrating_sdram(ddr_handoff_info); + + ret = poll_for_training_complete(ddr_handoff_info); + if (ret) + debug("%s: Failed to get FW training completed\n", + __func__); + + return ret; +} + +static int init_controller(struct ddr_handoff *ddr_handoff_info, + u32 *user_backup, u32 *user_backup_2nd) +{ + int ret = 0; + + if (ddr_handoff_info->cntlr_t == DDRTYPE_DDR4 || + ddr_handoff_info->cntlr_t == DDRTYPE_LPDDR4_0) { + /* Initialize 1st DDR controller */ + ret = init_umctl2(ddr_handoff_info->cntlr_handoff_base, + ddr_handoff_info->cntlr_base, + ddr_handoff_info->cntlr_t, + ddr_handoff_info->cntlr_handoff_length, + user_backup); + if (ret) { + debug("%s: Failed to inilialize first controller\n", + __func__); + return ret; + } + } + + if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) { + /* Initialize 2nd DDR controller */ + ret = init_umctl2(ddr_handoff_info->cntlr2_handoff_base, + ddr_handoff_info->cntlr2_base, + ddr_handoff_info->cntlr2_t, + ddr_handoff_info->cntlr2_handoff_length, + user_backup_2nd); + if (ret) + debug("%s: Failed to inilialize 2nd controller\n", + __func__); + } + + return ret; +} + +static int dfi_init(struct ddr_handoff *ddr_handoff_info) +{ + int ret; + + ret = ddr_start_dfi_init(ddr_handoff_info->cntlr_base, + ddr_handoff_info->cntlr_t); + if (ret) + return ret; + + if (ddr_handoff_info->cntlr2_t == DDRTYPE_LPDDR4_1) + ret = 
ddr_start_dfi_init(ddr_handoff_info->cntlr2_base, + ddr_handoff_info->cntlr2_t); + + return ret; +} + +static int check_dfi_init(struct ddr_handoff *handoff) +{ + int ret; + + ret = ddr_check_dfi_init_complete(handoff->cntlr_base, + handoff->cntlr_t); + if (ret) + return ret; + + if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1) + ret = ddr_check_dfi_init_complete(handoff->cntlr2_base, + handoff->cntlr2_t); + + return ret; +} + +static int trigger_sdram_init(struct ddr_handoff *handoff) +{ + int ret; + + ret = ddr_trigger_sdram_init(handoff->cntlr_base, + handoff->cntlr_t); + if (ret) + return ret; + + if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1) + ret = ddr_trigger_sdram_init(handoff->cntlr2_base, + handoff->cntlr2_t); + + return ret; +} + +static int ddr_post_config(struct ddr_handoff *handoff) +{ + int ret; + + ret = ddr_post_handoff_config(handoff->cntlr_base, + handoff->cntlr_t); + if (ret) + return ret; + + if (handoff->cntlr2_t == DDRTYPE_LPDDR4_1) + ret = ddr_post_handoff_config(handoff->cntlr2_base, + handoff->cntlr2_t); + + return ret; +} + +static bool is_ddr_retention_enabled(u32 boot_scratch_cold0_reg) +{ + return boot_scratch_cold0_reg & + ALT_SYSMGR_SCRATCH_REG_0_DDR_RETENTION_MASK; +} + +static bool is_ddr_bitstream_sha_matching(u32 boot_scratch_cold0_reg) +{ + return boot_scratch_cold0_reg & ALT_SYSMGR_SCRATCH_REG_0_DDR_SHA_MASK; +} + +static enum reset_type get_reset_type(u32 boot_scratch_cold0_reg) +{ + return (boot_scratch_cold0_reg & + ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_MASK) >> + ALT_SYSMGR_SCRATCH_REG_0_DDR_RESET_TYPE_SHIFT; +} + +void reset_type_debug_print(u32 boot_scratch_cold0_reg) +{ + switch (get_reset_type(boot_scratch_cold0_reg)) { + case POR_RESET: + debug("%s: POR is triggered\n", __func__); + break; + case WARM_RESET: + debug("%s: Warm reset is triggered\n", __func__); + break; + case COLD_RESET: + debug("%s: Cold reset is triggered\n", __func__); + break; + default: + debug("%s: Invalid reset type\n", __func__); + } +} + +bool 
is_ddr_init(void) +{ + u32 reg = readl(socfpga_get_sysmgr_addr() + + SYSMGR_SOC64_BOOT_SCRATCH_COLD0); + + reset_type_debug_print(reg); + + if (get_reset_type(reg) == POR_RESET) { + debug("%s: DDR init is required\n", __func__); + return true; + } + + if (get_reset_type(reg) == WARM_RESET) { + debug("%s: DDR init is skipped\n", __func__); + return false; + } + + if (get_reset_type(reg) == COLD_RESET) { + if (is_ddr_retention_enabled(reg) && + is_ddr_bitstream_sha_matching(reg)) { + debug("%s: DDR retention bit is set\n", __func__); + debug("%s: Matching in DDR bistream\n", __func__); + debug("%s: DDR init is skipped\n", __func__); + return false; + } + } + + debug("%s: DDR init is required\n", __func__); + return true; +} + +int sdram_mmr_init_full(struct udevice *dev) +{ + u32 user_backup[2], user_backup_2nd[2]; + int ret; + struct bd_info bd; + struct ddr_handoff ddr_handoff_info; + struct altera_sdram_priv *priv = dev_get_priv(dev); + + printf("Checking SDRAM configuration in progress ...\n"); + ret = populate_ddr_handoff(&ddr_handoff_info); + if (ret) { + debug("%s: Failed to populate DDR handoff\n", + __func__); + return ret; + } + + /* Set the MPFE NoC mux to correct DDR controller type */ + use_ddr4(ddr_handoff_info.cntlr_t); + + if (is_ddr_init()) { + printf("SDRAM init in progress ...\n"); + + /* + * Polling reset complete, must be high to ensure DDR subsystem + * in complete reset state before init DDR clock and DDR + * controller + */ + ret = wait_for_bit_le32((const void *)((uintptr_t)(readl + (ddr_handoff_info.mem_reset_base) + + MEM_RST_MGR_STATUS)), + MEM_RST_MGR_STATUS_RESET_COMPLETE, + true, TIMEOUT_200MS, false); + if (ret) { + debug("%s: Timeout while waiting for", __func__); + debug(" reset complete done\n"); + return ret; + } + + ret = enable_ddr_clock(dev); + if (ret) + return ret; + + ret = init_controller(&ddr_handoff_info, user_backup, + user_backup_2nd); + if (ret) { + debug("%s: Failed to inilialize DDR controller\n", + __func__); + 
return ret; + } + + /* Release the controller from reset */ + setbits_le32((uintptr_t) + (readl(ddr_handoff_info.mem_reset_base) + + MEM_RST_MGR_STATUS), MEM_RST_MGR_STATUS_AXI_RST | + MEM_RST_MGR_STATUS_CONTROLLER_RST | + MEM_RST_MGR_STATUS_RESET_COMPLETE); + + printf("DDR controller configuration is completed\n"); + + /* Initialize DDR PHY */ + ret = init_phy(&ddr_handoff_info); + if (ret) { + debug("%s: Failed to initialize DDR PHY\n", __func__); + return ret; + } + + enable_phy_clk_for_csr_access(&ddr_handoff_info, true); + + ret = start_ddr_calibration(&ddr_handoff_info); + if (ret) { + debug("%s: Failed to calibrate DDR\n", __func__); + return ret; + } + + enable_phy_clk_for_csr_access(&ddr_handoff_info, false); + + /* Reset ARC processor when not in use, for security purpose */ + setbits_le16(ddr_handoff_info.phy_base + + DDR_PHY_MICRORESET_OFFSET, + DDR_PHY_MICRORESET_RESET); + + /* DDR freq set to support DDR4-3200 */ + phy_init_engine(&ddr_handoff_info); + + ret = dfi_init(&ddr_handoff_info); + if (ret) + return ret; + + ret = check_dfi_init(&ddr_handoff_info); + if (ret) + return ret; + + ret = trigger_sdram_init(&ddr_handoff_info); + if (ret) + return ret; + + ret = ddr_post_config(&ddr_handoff_info); + if (ret) + return ret; + + /* Restore user settings */ + writel(user_backup[0], ddr_handoff_info.cntlr_base + + DDR4_PWRCTL_OFFSET); + + if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_0) + setbits_le32(ddr_handoff_info.cntlr_base + + DDR4_INIT0_OFFSET, user_backup[1]); + + if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) { + /* Restore user settings */ + writel(user_backup_2nd[0], + ddr_handoff_info.cntlr2_base + + DDR4_PWRCTL_OFFSET); + + setbits_le32(ddr_handoff_info.cntlr2_base + + DDR4_INIT0_OFFSET, user_backup_2nd[1]); + } + + /* Enable input traffic per port */ + setbits_le32(ddr_handoff_info.cntlr_base + DDR4_PCTRL0_OFFSET, + DDR4_PCTRL0_PORT_EN); + + if (ddr_handoff_info.cntlr2_t == DDRTYPE_LPDDR4_1) { + /* Enable input traffic per port */ + 
setbits_le32(ddr_handoff_info.cntlr2_base + + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN); + } + + printf("DDR init success\n"); + } + + /* Get bank configuration from devicetree */ + ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL, + (phys_size_t *)&gd->ram_size, &bd); + if (ret) { + debug("%s: Failed to decode memory node\n", __func__); + return -1; + } + + printf("DDR: %lld MiB\n", gd->ram_size >> 20); + + priv->info.base = bd.bi_dram[0].start; + priv->info.size = gd->ram_size; + + sdram_size_check(&bd); + + sdram_set_firewall(&bd); + + return 0; +} diff --git a/drivers/ddr/altera/sdram_soc64.c b/drivers/ddr/altera/sdram_soc64.c index a08f095..d6baac2 100644 --- a/drivers/ddr/altera/sdram_soc64.c +++ b/drivers/ddr/altera/sdram_soc64.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2016-2019 Intel Corporation <www.intel.com> + * Copyright (C) 2016-2021 Intel Corporation <www.intel.com> * */ @@ -100,12 +100,14 @@ int emif_reset(struct altera_sdram_plat *plat) return 0; } +#if !IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) int poll_hmc_clock_status(void) { return wait_for_bit_le32((const void *)(socfpga_get_sysmgr_addr() + SYSMGR_SOC64_HMC_CLK), SYSMGR_HMC_CLK_STATUS_MSK, true, 1000, false); } +#endif void sdram_clear_mem(phys_addr_t addr, phys_size_t size) { @@ -182,6 +184,7 @@ void sdram_size_check(struct bd_info *bd) phys_size_t total_ram_check = 0; phys_size_t ram_check = 0; phys_addr_t start = 0; + phys_size_t size, remaining_size; int bank; /* Sanity check ensure correct SDRAM size specified */ @@ -189,10 +192,27 @@ void sdram_size_check(struct bd_info *bd) for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) { start = bd->bi_dram[bank].start; + remaining_size = bd->bi_dram[bank].size; while (ram_check < bd->bi_dram[bank].size) { - ram_check += get_ram_size((void *)(start + ram_check), - (phys_size_t)SZ_1G); + size = min((phys_addr_t)SZ_1G, + (phys_addr_t)remaining_size); + + /* + * Ensure the size is power of two, this is requirement + 
* to run get_ram_size() / memory test + */ + if (size != 0 && ((size & (size - 1)) == 0)) { + ram_check += get_ram_size((void *) + (start + ram_check), size); + remaining_size = bd->bi_dram[bank].size - + ram_check; + } else { + puts("DDR: Memory test requires SDRAM size "); + puts("in power of two!\n"); + hang(); + } + } + + total_ram_check += ram_check; + ram_check = 0; + } @@ -231,11 +251,78 @@ phys_size_t sdram_calculate_size(struct altera_sdram_plat *plat) return size; } +void sdram_set_firewall(struct bd_info *bd) +{ + u32 i; + phys_size_t value; + u32 lower, upper; + + for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { + if (!bd->bi_dram[i].size) + continue; + + value = bd->bi_dram[i].start; + + /* Keep first 1MB of SDRAM memory region as secure region when + * using ATF flow, where the ATF code is located. + */ + if (IS_ENABLED(CONFIG_SPL_ATF) && i == 0) + value += SZ_1M; + + /* Setting non-secure MPU region base and base extended */ + lower = lower_32_bits(value); + upper = upper_32_bits(value); + FW_MPU_DDR_SCR_WRITEL(lower, + FW_MPU_DDR_SCR_MPUREGION0ADDR_BASE + + (i * 4 * sizeof(u32))); + FW_MPU_DDR_SCR_WRITEL(upper & 0xff, + FW_MPU_DDR_SCR_MPUREGION0ADDR_BASEEXT + + (i * 4 * sizeof(u32))); + + /* Setting non-secure Non-MPU region base and base extended */ + FW_MPU_DDR_SCR_WRITEL(lower, + FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASE + + (i * 4 * sizeof(u32))); + FW_MPU_DDR_SCR_WRITEL(upper & 0xff, + FW_MPU_DDR_SCR_NONMPUREGION0ADDR_BASEEXT + + (i * 4 * sizeof(u32))); + + /* Setting non-secure MPU limit and limit extended */ + value = bd->bi_dram[i].start + bd->bi_dram[i].size - 1; + + lower = lower_32_bits(value); + upper = upper_32_bits(value); + + FW_MPU_DDR_SCR_WRITEL(lower, + FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT + + (i * 4 * sizeof(u32))); + FW_MPU_DDR_SCR_WRITEL(upper & 0xff, + FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT + + (i * 4 * sizeof(u32))); + + /* Setting non-secure Non-MPU limit and limit extended */ + FW_MPU_DDR_SCR_WRITEL(lower, + 
FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT + + (i * 4 * sizeof(u32))); + FW_MPU_DDR_SCR_WRITEL(upper & 0xff, + FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT + + (i * 4 * sizeof(u32))); + + FW_MPU_DDR_SCR_WRITEL(BIT(i) | BIT(i + 8), + FW_MPU_DDR_SCR_EN_SET); + } +} + static int altera_sdram_of_to_plat(struct udevice *dev) { struct altera_sdram_plat *plat = dev_get_plat(dev); fdt_addr_t addr; + /* These regs info are part of DDR handoff in bitstream */ +#if IS_ENABLED(CONFIG_TARGET_SOCFPGA_N5X) + return 0; +#endif + addr = dev_read_addr_index(dev, 0); if (addr == FDT_ADDR_T_NONE) return -EINVAL; @@ -296,6 +383,7 @@ static struct ram_ops altera_sdram_ops = { static const struct udevice_id altera_sdram_ids[] = { { .compatible = "altr,sdr-ctl-s10" }, { .compatible = "intel,sdr-ctl-agilex" }, + { .compatible = "intel,sdr-ctl-n5x" }, { /* sentinel */ } }; diff --git a/drivers/ddr/altera/sdram_soc64.h b/drivers/ddr/altera/sdram_soc64.h index 8af0afc..7460f8c 100644 --- a/drivers/ddr/altera/sdram_soc64.h +++ b/drivers/ddr/altera/sdram_soc64.h @@ -180,6 +180,7 @@ int emif_reset(struct altera_sdram_plat *plat); int poll_hmc_clock_status(void); void sdram_clear_mem(phys_addr_t addr, phys_size_t size); void sdram_init_ecc_bits(struct bd_info *bd); +void sdram_set_firewall(struct bd_info *bd); void sdram_size_check(struct bd_info *bd); phys_size_t sdram_calculate_size(struct altera_sdram_plat *plat); int sdram_mmr_init_full(struct udevice *dev); diff --git a/include/configs/socfpga_n5x_socdk.h b/include/configs/socfpga_n5x_socdk.h new file mode 100644 index 0000000..c295e91 --- /dev/null +++ b/include/configs/socfpga_n5x_socdk.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020-2021 Intel Corporation <www.intel.com> + * + */ + +#ifndef __CONFIG_SOCFGPA_N5X_H__ +#define __CONFIG_SOCFGPA_N5X_H__ + +#include <configs/socfpga_soc64_common.h> + +#undef CONFIG_EXTRA_ENV_SETTINGS +#define CONFIG_EXTRA_ENV_SETTINGS \ + "loadaddr=" __stringify(CONFIG_SYS_LOAD_ADDR) 
"\0" \ + "bootfile=" CONFIG_BOOTFILE "\0" \ + "fdt_addr=1100000\0" \ + "fdtimage=" CONFIG_DEFAULT_DEVICE_TREE ".dtb\0" \ + "mmcroot=/dev/mmcblk0p2\0" \ + "mmcboot=setenv bootargs " CONFIG_BOOTARGS \ + " root=${mmcroot} rw rootwait;" \ + "booti ${loadaddr} - ${fdt_addr}\0" \ + "mmcload=mmc rescan;" \ + "load mmc 0:1 ${loadaddr} ${bootfile};" \ + "load mmc 0:1 ${fdt_addr} ${fdtimage}\0" \ + "mmcfitboot=setenv bootargs " CONFIG_BOOTARGS \ + " root=${mmcroot} rw rootwait;" \ + "bootm ${loadaddr}\0" \ + "mmcfitload=mmc rescan;" \ + "load mmc 0:1 ${loadaddr} ${bootfile}\0" \ + "ramboot=setenv bootargs " CONFIG_BOOTARGS";" \ + "booti ${loadaddr} - ${fdt_addr}\0" \ + "linux_qspi_enable=if sf probe; then " \ + "echo Enabling QSPI at Linux DTB...;" \ + "fdt addr ${fdt_addr}; fdt resize;" \ + "fdt set /soc/spi@ff8d2000 status okay;" \ + "if fdt set /soc/clocks/qspi-clk clock-frequency" \ + " ${qspi_clock}; then" \ + " else fdt set /soc/clkmgr/clocks/qspi_clk clock-frequency" \ + " ${qspi_clock}; fi; fi\0" \ + "scriptaddr=0x02100000\0" \ + "scriptfile=u-boot.scr\0" \ + "fatscript=if fatload mmc 0:1 ${scriptaddr} ${scriptfile};" \ + "then source ${scriptaddr}; fi\0" + +#endif /* __CONFIG_SOCFGPA_N5X_H__ */ diff --git a/include/configs/socfpga_soc64_common.h b/include/configs/socfpga_soc64_common.h index 38fd775..0b0470e 100644 --- a/include/configs/socfpga_soc64_common.h +++ b/include/configs/socfpga_soc64_common.h @@ -7,7 +7,7 @@ #ifndef __CONFIG_SOCFPGA_SOC64_COMMON_H__ #define __CONFIG_SOCFPGA_SOC64_COMMON_H__ -#include <asm/arch/base_addr_s10.h> +#include <asm/arch/base_addr_soc64.h> #include <asm/arch/handoff_soc64.h> #include <linux/stringify.h> diff --git a/include/dt-bindings/clock/n5x-clock.h b/include/dt-bindings/clock/n5x-clock.h new file mode 100644 index 0000000..a56e4db --- /dev/null +++ b/include/dt-bindings/clock/n5x-clock.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2021, Intel Corporation + */ + +#ifndef __N5X_CLOCK_H 
+#define __N5X_CLOCK_H + +/* fixed rate clocks */ +#define N5X_OSC1 0 +#define N5X_CB_INTOSC_HS_DIV2_CLK 1 +#define N5X_CB_INTOSC_LS_CLK 2 +#define N5X_L4_SYS_FREE_CLK 3 +#define N5X_F2S_FREE_CLK 4 + +/* PLL clocks */ +#define N5X_MAIN_PLL_CLK 5 +#define N5X_MAIN_PLL_C0_CLK 6 +#define N5X_MAIN_PLL_C1_CLK 7 +#define N5X_MAIN_PLL_C2_CLK 8 +#define N5X_MAIN_PLL_C3_CLK 9 +#define N5X_PERIPH_PLL_CLK 10 +#define N5X_PERIPH_PLL_C0_CLK 11 +#define N5X_PERIPH_PLL_C1_CLK 12 +#define N5X_PERIPH_PLL_C2_CLK 13 +#define N5X_PERIPH_PLL_C3_CLK 14 +#define N5X_MPU_FREE_CLK 15 +#define N5X_MPU_CCU_CLK 16 +#define N5X_BOOT_CLK 17 + +/* fixed factor clocks */ +#define N5X_L3_MAIN_FREE_CLK 18 +#define N5X_NOC_FREE_CLK 19 +#define N5X_S2F_USR0_CLK 20 +#define N5X_NOC_CLK 21 +#define N5X_EMAC_A_FREE_CLK 22 +#define N5X_EMAC_B_FREE_CLK 23 +#define N5X_EMAC_PTP_FREE_CLK 24 +#define N5X_GPIO_DB_FREE_CLK 25 +#define N5X_SDMMC_FREE_CLK 26 +#define N5X_S2F_USER0_FREE_CLK 27 +#define N5X_S2F_USER1_FREE_CLK 28 +#define N5X_PSI_REF_FREE_CLK 29 + +/* Gate clocks */ +#define N5X_MPU_CLK 30 +#define N5X_MPU_PERIPH_CLK 31 +#define N5X_L4_MAIN_CLK 32 +#define N5X_L4_MP_CLK 33 +#define N5X_L4_SP_CLK 34 +#define N5X_CS_AT_CLK 35 +#define N5X_CS_TRACE_CLK 36 +#define N5X_CS_PDBG_CLK 37 +#define N5X_CS_TIMER_CLK 38 +#define N5X_S2F_USER0_CLK 39 +#define N5X_EMAC0_CLK 40 +#define N5X_EMAC1_CLK 41 +#define N5X_EMAC2_CLK 42 +#define N5X_EMAC_PTP_CLK 43 +#define N5X_GPIO_DB_CLK 44 +#define N5X_NAND_CLK 45 +#define N5X_PSI_REF_CLK 46 +#define N5X_S2F_USER1_CLK 47 +#define N5X_SDMMC_CLK 48 +#define N5X_SPI_M_CLK 49 +#define N5X_USB_CLK 50 +#define N5X_NAND_X_CLK 51 +#define N5X_NAND_ECC_CLK 52 +#define N5X_NUM_CLKS 53 + +#endif /* __N5X_CLOCK_H */ |