author | Tom Rini <trini@konsulko.com> | 2021-07-29 08:20:06 -0400
committer | Tom Rini <trini@konsulko.com> | 2021-07-29 08:20:06 -0400
commit | 38436abd5e58044eccddbcd7ec3610a9104e86b6
tree | 328cb2b1b355964ab196f3d5ba4bb7134d74d8a3 /drivers
parent | bbcacdf4cd23f2e5deb9ef916096c956c955243d
parent | a6c64d255e5117bcf78aec6911d7c034fbfe46f7
Merge tag 'ti-v2021.10-rc2' of https://source.denx.de/u-boot/custodians/u-boot-ti (WIP/29Jul2021)
- Add MMC High speed modes for AM64 and J7200
- Add Sierra/Torrent SERDES driver (consumer-side usage sketch below)
- Minor clean-ups for R5F boot from SPL
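For context, a minimal consumer-side sketch of how the new Sierra/Torrent SERDES PHYs are driven through U-Boot's generic PHY uclass. The host-controller device, the "serdes0" phy name and the helper function are illustrative assumptions; only the generic_phy_* calls and the phy_ops they invoke (.init, .power_on) come from this merge.

#include <common.h>
#include <dm.h>
#include <generic-phy.h>

/* Hypothetical helper in a host controller driver that uses one SERDES lane */
static int example_enable_serdes(struct udevice *dev)
{
	struct phy serdes;
	int ret;

	/* Look up the PHY by the name used in the consumer's DT "phy-names" */
	ret = generic_phy_get_by_name(dev, "serdes0", &serdes);
	if (ret)
		return ret;

	/* Invokes the driver's .init op (cdns_sierra/torrent_phy_init) */
	ret = generic_phy_init(&serdes);
	if (ret)
		return ret;

	/* .power_on deasserts the lane resets and waits for PLL lock / cmn_ready */
	return generic_phy_power_on(&serdes);
}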
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/core/ofnode.c | 13
-rw-r--r-- | drivers/mmc/am654_sdhci.c | 1
-rw-r--r-- | drivers/mmc/sdhci.c | 3
-rw-r--r-- | drivers/phy/Kconfig | 3
-rw-r--r-- | drivers/phy/Makefile | 2
-rw-r--r-- | drivers/phy/cadence/Kconfig | 11
-rw-r--r-- | drivers/phy/cadence/Makefile | 2
-rw-r--r-- | drivers/phy/cadence/phy-cadence-sierra.c | 751
-rw-r--r-- | drivers/phy/cadence/phy-cadence-torrent.c | 2463
-rw-r--r-- | drivers/phy/ti/Kconfig | 9
-rw-r--r-- | drivers/phy/ti/Makefile | 1
-rw-r--r-- | drivers/phy/ti/phy-j721e-wiz.c | 1156
12 files changed, 4415 insertions, 0 deletions
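The first hunk below adds ofnode_name_eq() to drivers/core/ofnode.c; it compares a node name up to any '@unit-address' suffix, which is how phy-cadence-torrent.c picks out its "link" subnodes. A minimal usage sketch follows (the surrounding driver and the counting loop are illustrative, not part of this merge):

#include <dm.h>
#include <dm/ofnode.h>

/* Hypothetical example: count the "link" subnodes of a device node */
static int example_count_links(struct udevice *dev)
{
	ofnode child;
	int links = 0;

	ofnode_for_each_subnode(child, dev_ofnode(dev)) {
		/* Matches "link", "link@0", "link@1", ... by name only */
		if (ofnode_name_eq(child, "link"))
			links++;
	}

	return links;
}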
diff --git a/drivers/core/ofnode.c b/drivers/core/ofnode.c index dda6c76..701b23e 100644 --- a/drivers/core/ofnode.c +++ b/drivers/core/ofnode.c @@ -18,6 +18,19 @@ #include <linux/ioport.h> #include <asm/global_data.h> +bool ofnode_name_eq(ofnode node, const char *name) +{ + const char *node_name; + size_t len; + + assert(ofnode_valid(node)); + + node_name = ofnode_get_name(node); + len = strchrnul(node_name, '@') - node_name; + + return (strlen(name) == len) && !strncmp(node_name, name, len); +} + int ofnode_read_u32(ofnode node, const char *propname, u32 *outp) { return ofnode_read_u32_index(node, propname, 0, outp); diff --git a/drivers/mmc/am654_sdhci.c b/drivers/mmc/am654_sdhci.c index a86d96a..4305967 100644 --- a/drivers/mmc/am654_sdhci.c +++ b/drivers/mmc/am654_sdhci.c @@ -619,6 +619,7 @@ static int am654_sdhci_of_to_plat(struct udevice *dev) } } + dev_read_u32(dev, "ti,strobe-sel", &plat->strb_sel); dev_read_u32(dev, "ti,clkbuf-sel", &plat->clkbuf_sel); ret = mmc_of_parse(dev, cfg); diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c index d9ab6a0..eea4701 100644 --- a/drivers/mmc/sdhci.c +++ b/drivers/mmc/sdhci.c @@ -507,6 +507,9 @@ void sdhci_set_uhs_timing(struct sdhci_host *host) case MMC_HS_200: reg |= SDHCI_CTRL_UHS_SDR104; break; + case MMC_HS_400: + reg |= SDHCI_CTRL_HS400; + break; default: reg |= SDHCI_CTRL_UHS_SDR12; } diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 80ae1af..4767d21 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -282,4 +282,7 @@ config PHY_IMX8MQ_USB Support the USB3.0 PHY in NXP i.MX8MQ SoC source "drivers/phy/rockchip/Kconfig" +source "drivers/phy/cadence/Kconfig" +source "drivers/phy/ti/Kconfig" + endmenu diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index b2285c4..13a8ade 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -38,3 +38,5 @@ obj-$(CONFIG_MT76X8_USB_PHY) += mt76x8-usb-phy.o obj-$(CONFIG_PHY_DA8XX_USB) += phy-da8xx-usb.o obj-$(CONFIG_PHY_MTK_TPHY) += phy-mtk-tphy.o obj-$(CONFIG_PHY_IMX8MQ_USB) += phy-imx8mq-usb.o +obj-y += cadence/ +obj-y += ti/ diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig new file mode 100644 index 0000000..549ddbf --- /dev/null +++ b/drivers/phy/cadence/Kconfig @@ -0,0 +1,11 @@ +config PHY_CADENCE_SIERRA + tristate "Cadence Sierra PHY Driver" + depends on DM_RESET + help + Enable this to support the Cadence Sierra PHY driver + +config PHY_CADENCE_TORRENT + tristate "Cadence Torrent PHY Driver" + depends on DM_RESET + help + Enable this to support the Cadence Torrent PHY driver diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile new file mode 100644 index 0000000..af63b32 --- /dev/null +++ b/drivers/phy/cadence/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_$(SPL_)PHY_CADENCE_SIERRA) += phy-cadence-sierra.o +obj-$(CONFIG_$(SPL_)PHY_CADENCE_TORRENT) += phy-cadence-torrent.o diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c new file mode 100644 index 0000000..715def6 --- /dev/null +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -0,0 +1,751 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence Sierra PHY Driver + * + * Based on the linux driver provided by Cadence + * + * Copyright (c) 2018 Cadence Design Systems + * Author: Alan Douglas <adouglas@cadence.com> + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Jean-Jacques Hiblot <jjhiblot@ti.com> + * + */ +#include <common.h> +#include <clk.h> +#include <generic-phy.h> +#include <reset.h> +#include 
<dm/device.h> +#include <dm/device-internal.h> +#include <dm/device_compat.h> +#include <dm/lists.h> +#include <dm/read.h> +#include <dm/uclass.h> +#include <dm/devres.h> +#include <linux/io.h> +#include <dt-bindings/phy/phy.h> +#include <regmap.h> + +/* PHY register offsets */ +#define SIERRA_COMMON_CDB_OFFSET 0x0 +#define SIERRA_MACRO_ID_REG 0x0 +#define SIERRA_CMN_PLLLC_MODE_PREG 0x48 +#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49 +#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A +#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B +#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F +#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50 +#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62 + +#define SIERRA_LANE_CDB_OFFSET(ln, offset) \ + (0x4000 + ((ln) * (0x800 >> (2 - (offset))))) + +#define SIERRA_DET_STANDEC_A_PREG 0x000 +#define SIERRA_DET_STANDEC_B_PREG 0x001 +#define SIERRA_DET_STANDEC_C_PREG 0x002 +#define SIERRA_DET_STANDEC_D_PREG 0x003 +#define SIERRA_DET_STANDEC_E_PREG 0x004 +#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008 +#define SIERRA_PSM_A0IN_TMR_PREG 0x009 +#define SIERRA_PSM_DIAG_PREG 0x015 +#define SIERRA_PSC_TX_A0_PREG 0x028 +#define SIERRA_PSC_TX_A1_PREG 0x029 +#define SIERRA_PSC_TX_A2_PREG 0x02A +#define SIERRA_PSC_TX_A3_PREG 0x02B +#define SIERRA_PSC_RX_A0_PREG 0x030 +#define SIERRA_PSC_RX_A1_PREG 0x031 +#define SIERRA_PSC_RX_A2_PREG 0x032 +#define SIERRA_PSC_RX_A3_PREG 0x033 +#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A +#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E +#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F +#define SIERRA_PLLCTRL_STATUS_PREG 0x044 +#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B +#define SIERRA_DFE_BIASTRIM_PREG 0x04C +#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A +#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081 +#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085 +#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086 +#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087 +#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088 +#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E +#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091 +#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092 +#define SIERRA_CREQ_EQ_CTRL_PREG 0x093 +#define SIERRA_CREQ_SPARE_PREG 0x096 +#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097 +#define SIERRA_CTLELUT_CTRL_PREG 0x098 +#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0 +#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1 +#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4 +#define SIERRA_DEQ_CONCUR_CTRL1_PREG 0x0C8 +#define SIERRA_DEQ_CONCUR_CTRL2_PREG 0x0C9 +#define SIERRA_DEQ_EPIPWR_CTRL2_PREG 0x0CD +#define SIERRA_DEQ_FAST_MAINT_CYCLES_PREG 0x0CE +#define SIERRA_DEQ_ERRCMP_CTRL_PREG 0x0D0 +#define SIERRA_DEQ_OFFSET_CTRL_PREG 0x0D8 +#define SIERRA_DEQ_GAIN_CTRL_PREG 0x0E0 +#define SIERRA_DEQ_VGATUNE_CTRL_PREG 0x0E1 +#define SIERRA_DEQ_GLUT0 0x0E8 +#define SIERRA_DEQ_GLUT1 0x0E9 +#define SIERRA_DEQ_GLUT2 0x0EA +#define SIERRA_DEQ_GLUT3 0x0EB +#define SIERRA_DEQ_GLUT4 0x0EC +#define SIERRA_DEQ_GLUT5 0x0ED +#define SIERRA_DEQ_GLUT6 0x0EE +#define SIERRA_DEQ_GLUT7 0x0EF +#define SIERRA_DEQ_GLUT8 0x0F0 +#define SIERRA_DEQ_GLUT9 0x0F1 +#define SIERRA_DEQ_GLUT10 0x0F2 +#define SIERRA_DEQ_GLUT11 0x0F3 +#define SIERRA_DEQ_GLUT12 0x0F4 +#define SIERRA_DEQ_GLUT13 0x0F5 +#define SIERRA_DEQ_GLUT14 0x0F6 +#define SIERRA_DEQ_GLUT15 0x0F7 +#define SIERRA_DEQ_GLUT16 0x0F8 +#define SIERRA_DEQ_ALUT0 0x108 +#define SIERRA_DEQ_ALUT1 0x109 +#define SIERRA_DEQ_ALUT2 0x10A +#define SIERRA_DEQ_ALUT3 0x10B +#define SIERRA_DEQ_ALUT4 0x10C +#define SIERRA_DEQ_ALUT5 0x10D +#define SIERRA_DEQ_ALUT6 0x10E +#define SIERRA_DEQ_ALUT7 0x10F 
+#define SIERRA_DEQ_ALUT8 0x110 +#define SIERRA_DEQ_ALUT9 0x111 +#define SIERRA_DEQ_ALUT10 0x112 +#define SIERRA_DEQ_ALUT11 0x113 +#define SIERRA_DEQ_ALUT12 0x114 +#define SIERRA_DEQ_ALUT13 0x115 +#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128 +#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134 +#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150 +#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151 +#define SIERRA_DEQ_PICTRL_PREG 0x161 +#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170 +#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171 +#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174 +#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C +#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183 +#define SIERRA_LFPSDET_SUPPORT_PREG 0x188 +#define SIERRA_LFPSFILT_NS_PREG 0x18A +#define SIERRA_LFPSFILT_RD_PREG 0x18B +#define SIERRA_LFPSFILT_MP_PREG 0x18C +#define SIERRA_SIGDET_SUPPORT_PREG 0x190 +#define SIERRA_SDFILT_H2L_A_PREG 0x191 +#define SIERRA_SDFILT_L2H_PREG 0x193 +#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E +#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F +#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0 +#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F +#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150 + +#define SIERRA_PHY_CONFIG_CTRL_OFFSET 0xc000 +#define SIERRA_PHY_PLL_CFG 0xe + +#define SIERRA_MACRO_ID 0x00007364 +#define SIERRA_MAX_LANES 16 +#define PLL_LOCK_TIME 100 + +static const struct reg_field macro_id_type = + REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15); +static const struct reg_field phy_pll_cfg_1 = + REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1); +static const struct reg_field pllctrl_lock = + REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0); + +#define reset_control_assert(rst) cdns_reset_assert(rst) +#define reset_control_deassert(rst) cdns_reset_deassert(rst) +#define reset_control reset_ctl + +struct cdns_sierra_inst { + u32 phy_type; + u32 num_lanes; + u32 mlane; + struct reset_ctl_bulk *lnk_rst; +}; + +struct cdns_reg_pairs { + u16 val; + u32 off; +}; + +struct cdns_sierra_data { + u32 id_value; + u8 block_offset_shift; + u8 reg_offset_shift; + u32 pcie_cmn_regs; + u32 pcie_ln_regs; + u32 usb_cmn_regs; + u32 usb_ln_regs; + struct cdns_reg_pairs *pcie_cmn_vals; + struct cdns_reg_pairs *pcie_ln_vals; + struct cdns_reg_pairs *usb_cmn_vals; + struct cdns_reg_pairs *usb_ln_vals; +}; + +struct cdns_regmap_cdb_context { + struct udevice *dev; + void __iomem *base; + u8 reg_offset_shift; +}; + +struct cdns_sierra_phy { + struct udevice *dev; + void *base; + size_t size; + struct regmap *regmap; + struct cdns_sierra_data *init_data; + struct cdns_sierra_inst phys[SIERRA_MAX_LANES]; + struct reset_control *phy_rst; + struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES]; + struct regmap *regmap_phy_config_ctrl; + struct regmap *regmap_common_cdb; + struct regmap_field *macro_id_type; + struct regmap_field *phy_pll_cfg_1; + struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES]; + struct clk *clk; + struct clk *cmn_refclk; + struct clk *cmn_refclk1; + int nsubnodes; + u32 num_lanes; + bool autoconf; +}; + +static inline int cdns_reset_assert(struct reset_control *rst) +{ + if (rst) + return reset_assert(rst); + else + return 0; +} + +static inline int cdns_reset_deassert(struct reset_control *rst) +{ + if (rst) + return reset_deassert(rst); + else + return 0; +} + +static inline struct cdns_sierra_inst *phy_get_drvdata(struct phy *phy) +{ + struct cdns_sierra_phy *sp = dev_get_priv(phy->dev); + int index; + + if (phy->id >= SIERRA_MAX_LANES) + return NULL; + + for (index = 0; index < sp->nsubnodes; index++) { + if (phy->id == sp->phys[index].mlane) + 
return &sp->phys[index]; + } + + return NULL; +} + +static int cdns_sierra_phy_init(struct phy *gphy) +{ + struct cdns_sierra_inst *ins = phy_get_drvdata(gphy); + struct cdns_sierra_phy *phy = dev_get_priv(gphy->dev); + struct regmap *regmap = phy->regmap; + int i, j; + struct cdns_reg_pairs *cmn_vals, *ln_vals; + u32 num_cmn_regs, num_ln_regs; + + /* Initialise the PHY registers, unless auto configured */ + if (phy->autoconf) + return 0; + + clk_set_rate(phy->cmn_refclk, 25000000); + clk_set_rate(phy->cmn_refclk1, 25000000); + + if (ins->phy_type == PHY_TYPE_PCIE) { + num_cmn_regs = phy->init_data->pcie_cmn_regs; + num_ln_regs = phy->init_data->pcie_ln_regs; + cmn_vals = phy->init_data->pcie_cmn_vals; + ln_vals = phy->init_data->pcie_ln_vals; + } else if (ins->phy_type == PHY_TYPE_USB3) { + num_cmn_regs = phy->init_data->usb_cmn_regs; + num_ln_regs = phy->init_data->usb_ln_regs; + cmn_vals = phy->init_data->usb_cmn_vals; + ln_vals = phy->init_data->usb_ln_vals; + } else { + return -EINVAL; + } + + regmap = phy->regmap_common_cdb; + for (j = 0; j < num_cmn_regs ; j++) + regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val); + + for (i = 0; i < ins->num_lanes; i++) { + for (j = 0; j < num_ln_regs ; j++) { + regmap = phy->regmap_lane_cdb[i + ins->mlane]; + regmap_write(regmap, ln_vals[j].off, ln_vals[j].val); + } + } + + return 0; +} + +static int cdns_sierra_phy_on(struct phy *gphy) +{ + struct cdns_sierra_inst *ins = phy_get_drvdata(gphy); + struct cdns_sierra_phy *sp = dev_get_priv(gphy->dev); + struct udevice *dev = gphy->dev; + u32 val; + int ret; + + /* Take the PHY lane group out of reset */ + ret = reset_deassert_bulk(ins->lnk_rst); + if (ret) { + dev_err(dev, "Failed to take the PHY lane out of reset\n"); + return ret; + } + + ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane], + val, val, 1000, PLL_LOCK_TIME); + if (ret < 0) + dev_err(dev, "PLL lock of lane failed\n"); + + reset_control_assert(sp->phy_rst); + reset_control_deassert(sp->phy_rst); + + return ret; +} + +static int cdns_sierra_phy_off(struct phy *gphy) +{ + struct cdns_sierra_inst *ins = phy_get_drvdata(gphy); + + return reset_assert_bulk(ins->lnk_rst); +} + +static int cdns_sierra_phy_reset(struct phy *gphy) +{ + struct cdns_sierra_phy *sp = dev_get_priv(gphy->dev); + + reset_control_assert(sp->phy_rst); + reset_control_deassert(sp->phy_rst); + return 0; +}; + +static const struct phy_ops ops = { + .init = cdns_sierra_phy_init, + .power_on = cdns_sierra_phy_on, + .power_off = cdns_sierra_phy_off, + .reset = cdns_sierra_phy_reset, +}; + +static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst, + ofnode child) +{ + if (ofnode_read_u32(child, "reg", &inst->mlane)) + return -EINVAL; + + if (ofnode_read_u32(child, "cdns,num-lanes", &inst->num_lanes)) + return -EINVAL; + + if (ofnode_read_u32(child, "cdns,phy-type", &inst->phy_type)) + return -EINVAL; + + return 0; +} + +static struct regmap *cdns_regmap_init(struct udevice *dev, void __iomem *base, + u32 block_offset, u8 block_offset_shift, + u8 reg_offset_shift) +{ + struct cdns_sierra_phy *sp = dev_get_priv(dev); + struct regmap_config config; + + config.r_start = (ulong)(base + (block_offset << block_offset_shift)); + config.r_size = sp->size - (block_offset << block_offset_shift); + config.reg_offset_shift = reg_offset_shift; + config.width = REGMAP_SIZE_16; + + return devm_regmap_init(dev, NULL, NULL, &config); +} + +static int cdns_regfield_init(struct cdns_sierra_phy *sp) +{ + struct udevice *dev = sp->dev; + struct regmap_field *field; + 
struct regmap *regmap; + int i; + + regmap = sp->regmap_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, macro_id_type); + if (IS_ERR(field)) { + dev_err(dev, "MACRO_ID_TYPE reg field init failed\n"); + return PTR_ERR(field); + } + sp->macro_id_type = field; + + regmap = sp->regmap_phy_config_ctrl; + field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n"); + return PTR_ERR(field); + } + sp->phy_pll_cfg_1 = field; + + for (i = 0; i < SIERRA_MAX_LANES; i++) { + regmap = sp->regmap_lane_cdb[i]; + field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock); + if (IS_ERR(field)) { + dev_err(dev, "P%d_ENABLE reg field init failed\n", i); + return PTR_ERR(field); + } + sp->pllctrl_lock[i] = field; + } + + return 0; +} + +static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp, + void __iomem *base, u8 block_offset_shift, + u8 reg_offset_shift) +{ + struct udevice *dev = sp->dev; + struct regmap *regmap; + u32 block_offset; + int i; + + for (i = 0; i < SIERRA_MAX_LANES; i++) { + block_offset = SIERRA_LANE_CDB_OFFSET(i, reg_offset_shift); + regmap = cdns_regmap_init(dev, base, block_offset, + block_offset_shift, reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init lane CDB regmap\n"); + return PTR_ERR(regmap); + } + sp->regmap_lane_cdb[i] = regmap; + } + + regmap = cdns_regmap_init(dev, base, SIERRA_COMMON_CDB_OFFSET, + block_offset_shift, reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init common CDB regmap\n"); + return PTR_ERR(regmap); + } + sp->regmap_common_cdb = regmap; + + regmap = cdns_regmap_init(dev, base, SIERRA_PHY_CONFIG_CTRL_OFFSET, + block_offset_shift, reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY config and control regmap\n"); + return PTR_ERR(regmap); + } + sp->regmap_phy_config_ctrl = regmap; + + return 0; +} + +static int cdns_sierra_phy_probe(struct udevice *dev) +{ + struct cdns_sierra_phy *sp = dev_get_priv(dev); + struct cdns_sierra_data *data; + unsigned int id_value; + int ret, node = 0; + struct clk *clk; + ofnode child; + + sp->dev = dev; + + sp->base = devfdt_remap_addr_index(dev, 0); + if (!sp->base) { + dev_err(dev, "unable to map regs\n"); + return -ENOMEM; + } + devfdt_get_addr_size_index(dev, 0, (fdt_size_t *)&sp->size); + + /* Get init data for this PHY */ + data = (struct cdns_sierra_data *)dev_get_driver_data(dev); + sp->init_data = data; + + ret = cdns_regmap_init_blocks(sp, sp->base, data->block_offset_shift, + data->reg_offset_shift); + if (ret) + return ret; + + ret = cdns_regfield_init(sp); + if (ret) + return ret; + + sp->clk = devm_clk_get_optional(dev, "phy_clk"); + if (IS_ERR(sp->clk)) { + dev_err(dev, "failed to get clock phy_clk\n"); + return PTR_ERR(sp->clk); + } + + sp->phy_rst = devm_reset_control_get(dev, "sierra_reset"); + if (IS_ERR(sp->phy_rst)) { + dev_err(dev, "failed to get reset\n"); + return PTR_ERR(sp->phy_rst); + } + + clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div"); + if (IS_ERR(clk)) { + dev_err(dev, "cmn_refclk clock not found\n"); + ret = PTR_ERR(clk); + return ret; + } + sp->cmn_refclk = clk; + + clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div"); + if (IS_ERR(clk)) { + dev_err(dev, "cmn_refclk1 clock not found\n"); + ret = PTR_ERR(clk); + return ret; + } + sp->cmn_refclk1 = clk; + + ret = clk_prepare_enable(sp->clk); + if (ret) + return ret; + + /* Check that PHY is present */ + regmap_field_read(sp->macro_id_type, &id_value); + if 
(sp->init_data->id_value != id_value) { + dev_err(dev, "PHY not found 0x%x vs 0x%x\n", + sp->init_data->id_value, id_value); + ret = -EINVAL; + goto clk_disable; + } + + sp->autoconf = dev_read_bool(dev, "cdns,autoconf"); + + ofnode_for_each_subnode(child, dev_ofnode(dev)) { + sp->phys[node].lnk_rst = devm_reset_bulk_get_by_node(dev, + child); + if (IS_ERR(sp->phys[node].lnk_rst)) { + ret = PTR_ERR(sp->phys[node].lnk_rst); + dev_err(dev, "failed to get reset %s\n", + ofnode_get_name(child)); + goto put_child2; + } + + if (!sp->autoconf) { + ret = cdns_sierra_get_optional(&sp->phys[node], child); + if (ret) { + dev_err(dev, "missing property in node %s\n", + ofnode_get_name(child)); + goto put_child; + } + } + sp->num_lanes += sp->phys[node].num_lanes; + + node++; + } + sp->nsubnodes = node; + + /* If more than one subnode, configure the PHY as multilink */ + if (!sp->autoconf && sp->nsubnodes > 1) + regmap_field_write(sp->phy_pll_cfg_1, 0x1); + + reset_control_deassert(sp->phy_rst); + dev_info(dev, "sierra probed\n"); + return 0; + +put_child: + node++; +put_child2: + +clk_disable: + clk_disable_unprepare(sp->clk); + return ret; +} + +static int cdns_sierra_phy_remove(struct udevice *dev) +{ + struct cdns_sierra_phy *phy = dev_get_priv(dev); + int i; + + reset_control_assert(phy->phy_rst); + + /* + * The device level resets will be put automatically. + * Need to put the subnode resets here though. + */ + for (i = 0; i < phy->nsubnodes; i++) + reset_assert_bulk(phy->phys[i].lnk_rst); + + return 0; +} + +/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */ +static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = { + {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, + {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}, + {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG} +}; + +/* refclk100MHz_32b_PCIe_ln_ext_ssc */ +static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { + {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG} +}; + +/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */ +static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = { + {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, + {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG} +}; + +/* refclk100MHz_20b_USB_ln_ext_ssc */ +static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = { + {0xFE0A, SIERRA_DET_STANDEC_A_PREG}, + {0x000F, SIERRA_DET_STANDEC_B_PREG}, + {0x00A5, SIERRA_DET_STANDEC_C_PREG}, + {0x69ad, SIERRA_DET_STANDEC_D_PREG}, + {0x0241, SIERRA_DET_STANDEC_E_PREG}, + {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG}, + {0x0014, SIERRA_PSM_A0IN_TMR_PREG}, + {0xCF00, SIERRA_PSM_DIAG_PREG}, + {0x001F, SIERRA_PSC_TX_A0_PREG}, + {0x0007, SIERRA_PSC_TX_A1_PREG}, + {0x0003, SIERRA_PSC_TX_A2_PREG}, + {0x0003, SIERRA_PSC_TX_A3_PREG}, + {0x0FFF, SIERRA_PSC_RX_A0_PREG}, + {0x0619, SIERRA_PSC_RX_A1_PREG}, + {0x0003, SIERRA_PSC_RX_A2_PREG}, + {0x0001, SIERRA_PSC_RX_A3_PREG}, + {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG}, + {0x0406, SIERRA_PLLCTRL_GEN_D_PREG}, + {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG}, + {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG}, + {0x2512, SIERRA_DFE_BIASTRIM_PREG}, + {0x0000, 
SIERRA_DRVCTRL_ATTEN_PREG}, + {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG}, + {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG}, + {0x0000, SIERRA_CREQ_EQ_CTRL_PREG}, + {0x8000, SIERRA_CREQ_SPARE_PREG}, + {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x8453, SIERRA_CTLELUT_CTRL_PREG}, + {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG}, + {0x4110, SIERRA_DFE_SMP_RATESEL_PREG}, + {0x0002, SIERRA_DEQ_PHALIGN_CTRL}, + {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG}, + {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x0048, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG}, + {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG}, + {0x0014, SIERRA_DEQ_GLUT0}, + {0x0014, SIERRA_DEQ_GLUT1}, + {0x0014, SIERRA_DEQ_GLUT2}, + {0x0014, SIERRA_DEQ_GLUT3}, + {0x0014, SIERRA_DEQ_GLUT4}, + {0x0014, SIERRA_DEQ_GLUT5}, + {0x0014, SIERRA_DEQ_GLUT6}, + {0x0014, SIERRA_DEQ_GLUT7}, + {0x0014, SIERRA_DEQ_GLUT8}, + {0x0014, SIERRA_DEQ_GLUT9}, + {0x0014, SIERRA_DEQ_GLUT10}, + {0x0014, SIERRA_DEQ_GLUT11}, + {0x0014, SIERRA_DEQ_GLUT12}, + {0x0014, SIERRA_DEQ_GLUT13}, + {0x0014, SIERRA_DEQ_GLUT14}, + {0x0014, SIERRA_DEQ_GLUT15}, + {0x0014, SIERRA_DEQ_GLUT16}, + {0x0BAE, SIERRA_DEQ_ALUT0}, + {0x0AEB, SIERRA_DEQ_ALUT1}, + {0x0A28, SIERRA_DEQ_ALUT2}, + {0x0965, SIERRA_DEQ_ALUT3}, + {0x08A2, SIERRA_DEQ_ALUT4}, + {0x07DF, SIERRA_DEQ_ALUT5}, + {0x071C, SIERRA_DEQ_ALUT6}, + {0x0659, SIERRA_DEQ_ALUT7}, + {0x0596, SIERRA_DEQ_ALUT8}, + {0x0514, SIERRA_DEQ_ALUT9}, + {0x0492, SIERRA_DEQ_ALUT10}, + {0x0410, SIERRA_DEQ_ALUT11}, + {0x038E, SIERRA_DEQ_ALUT12}, + {0x030C, SIERRA_DEQ_ALUT13}, + {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG}, + {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG}, + {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG}, + {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0033, SIERRA_DEQ_PICTRL_PREG}, + {0x0400, SIERRA_CPICAL_TMRVAL_MODE1_PREG}, + {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG}, + {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG}, + {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG}, + {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG}, + {0x0005, SIERRA_LFPSDET_SUPPORT_PREG}, + {0x000F, SIERRA_LFPSFILT_NS_PREG}, + {0x0009, SIERRA_LFPSFILT_RD_PREG}, + {0x0001, SIERRA_LFPSFILT_MP_PREG}, + {0x8013, SIERRA_SDFILT_H2L_A_PREG}, + {0x8009, SIERRA_SDFILT_L2H_PREG}, + {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static const struct cdns_sierra_data cdns_map_sierra = { + SIERRA_MACRO_ID, + 0x2, + 0x2, + ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), + ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), + ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), + ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), + cdns_pcie_cmn_regs_ext_ssc, + cdns_pcie_ln_regs_ext_ssc, + cdns_usb_cmn_regs_ext_ssc, + cdns_usb_ln_regs_ext_ssc, +}; + +static const struct cdns_sierra_data cdns_ti_map_sierra = { + SIERRA_MACRO_ID, + 0x0, + 0x1, + ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), + ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), + ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), + ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), + cdns_pcie_cmn_regs_ext_ssc, + cdns_pcie_ln_regs_ext_ssc, + cdns_usb_cmn_regs_ext_ssc, + cdns_usb_ln_regs_ext_ssc, +}; + +static const struct udevice_id cdns_sierra_id_table[] = { + { + .compatible = 
"cdns,sierra-phy-t0", + .data = (ulong)&cdns_map_sierra, + }, + { + .compatible = "ti,sierra-phy-t0", + .data = (ulong)&cdns_ti_map_sierra, + }, + {} +}; + +U_BOOT_DRIVER(sierra_phy_provider) = { + .name = "cdns,sierra", + .id = UCLASS_PHY, + .of_match = cdns_sierra_id_table, + .probe = cdns_sierra_phy_probe, + .remove = cdns_sierra_phy_remove, + .ops = &ops, + .priv_auto = sizeof(struct cdns_sierra_phy), +}; diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c new file mode 100644 index 0000000..141ece4 --- /dev/null +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -0,0 +1,2463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Cadence Torrent SD0801 PHY driver. + * + * Based on the linux driver provided by Cadence + * + * Copyright (c) 2018 Cadence Design Systems + * + * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <common.h> +#include <clk.h> +#include <generic-phy.h> +#include <reset.h> +#include <dm/device.h> +#include <dm/device_compat.h> +#include <dm/device-internal.h> +#include <dm/lists.h> +#include <dm/read.h> +#include <dm/uclass.h> +#include <linux/io.h> +#include <dt-bindings/phy/phy.h> +#include <regmap.h> +#include <linux/delay.h> +#include <linux/string.h> + +#define REF_CLK_19_2MHz 19200000 +#define REF_CLK_25MHz 25000000 + +#define MAX_NUM_LANES 4 +#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps*/ + +#define NUM_SSC_MODE 3 +#define NUM_PHY_TYPE 6 + +#define POLL_TIMEOUT_US 5000 +#define PLL_LOCK_TIMEOUT 100000 + +#define TORRENT_COMMON_CDB_OFFSET 0x0 + +#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0x4000 << (block_offset)) + \ + (((ln) << 9) << (reg_offset))) +#define TORRENT_RX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \ + ((0x8000 << (block_offset)) + \ + (((ln) << 9) << (reg_offset))) + +#define TORRENT_PHY_PCS_COMMON_OFFSET(block_offset) \ + (0xC000 << (block_offset)) + +#define TORRENT_PHY_PMA_COMMON_OFFSET(block_offset) \ + (0xE000 << (block_offset)) + +/* + * register offsets from SD0801 PHY register block base (i.e MHDP + * register base + 0x500000) + */ +#define CMN_SSM_BANDGAP_TMR 0x0021U +#define CMN_SSM_BIAS_TMR 0x0022U +#define CMN_PLLSM0_PLLPRE_TMR 0x002AU +#define CMN_PLLSM0_PLLLOCK_TMR 0x002CU +#define CMN_PLLSM1_PLLPRE_TMR 0x0032U +#define CMN_PLLSM1_PLLLOCK_TMR 0x0034U +#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U +#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U +#define CMN_BGCAL_INIT_TMR 0x0064U +#define CMN_BGCAL_ITER_TMR 0x0065U +#define CMN_IBCAL_INIT_TMR 0x0074U +#define CMN_PLL0_VCOCAL_TCTRL 0x0082U +#define CMN_PLL0_VCOCAL_INIT_TMR 0x0084U +#define CMN_PLL0_VCOCAL_ITER_TMR 0x0085U +#define CMN_PLL0_VCOCAL_REFTIM_START 0x0086U +#define CMN_PLL0_VCOCAL_PLLCNT_START 0x0088U +#define CMN_PLL0_INTDIV_M0 0x0090U +#define CMN_PLL0_FRACDIVL_M0 0x0091U +#define CMN_PLL0_FRACDIVH_M0 0x0092U +#define CMN_PLL0_HIGH_THR_M0 0x0093U +#define CMN_PLL0_DSM_DIAG_M0 0x0094U +#define CMN_PLL0_SS_CTRL1_M0 0x0098U +#define CMN_PLL0_SS_CTRL2_M0 0x0099U +#define CMN_PLL0_SS_CTRL3_M0 0x009AU +#define CMN_PLL0_SS_CTRL4_M0 0x009BU +#define CMN_PLL0_LOCK_REFCNT_START 0x009CU +#define CMN_PLL0_LOCK_PLLCNT_START 0x009EU +#define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU +#define CMN_PLL0_INTDIV_M1 0x00A0U +#define CMN_PLL0_FRACDIVH_M1 0x00A2U +#define CMN_PLL0_HIGH_THR_M1 0x00A3U +#define CMN_PLL0_DSM_DIAG_M1 0x00A4U +#define CMN_PLL0_SS_CTRL1_M1 0x00A8U +#define CMN_PLL0_SS_CTRL2_M1 0x00A9U +#define CMN_PLL0_SS_CTRL3_M1 0x00AAU +#define CMN_PLL0_SS_CTRL4_M1 0x00ABU +#define 
CMN_PLL1_VCOCAL_TCTRL 0x00C2U +#define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U +#define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U +#define CMN_PLL1_VCOCAL_REFTIM_START 0x00C6U +#define CMN_PLL1_VCOCAL_PLLCNT_START 0x00C8U +#define CMN_PLL1_INTDIV_M0 0x00D0U +#define CMN_PLL1_FRACDIVL_M0 0x00D1U +#define CMN_PLL1_FRACDIVH_M0 0x00D2U +#define CMN_PLL1_HIGH_THR_M0 0x00D3U +#define CMN_PLL1_DSM_DIAG_M0 0x00D4U +#define CMN_PLL1_DSM_FBH_OVRD_M0 0x00D5U +#define CMN_PLL1_DSM_FBL_OVRD_M0 0x00D6U +#define CMN_PLL1_SS_CTRL1_M0 0x00D8U +#define CMN_PLL1_SS_CTRL2_M0 0x00D9U +#define CMN_PLL1_SS_CTRL3_M0 0x00DAU +#define CMN_PLL1_SS_CTRL4_M0 0x00DBU +#define CMN_PLL1_LOCK_REFCNT_START 0x00DCU +#define CMN_PLL1_LOCK_PLLCNT_START 0x00DEU +#define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU +#define CMN_TXPUCAL_TUNE 0x0103U +#define CMN_TXPUCAL_INIT_TMR 0x0104U +#define CMN_TXPUCAL_ITER_TMR 0x0105U +#define CMN_CMN_TXPDCAL_OVRD 0x0109U +#define CMN_TXPDCAL_TUNE 0x010BU +#define CMN_TXPDCAL_INIT_TMR 0x010CU +#define CMN_TXPDCAL_ITER_TMR 0x010DU +#define CMN_RXCAL_INIT_TMR 0x0114U +#define CMN_RXCAL_ITER_TMR 0x0115U +#define CMN_SD_CAL_INIT_TMR 0x0124U +#define CMN_SD_CAL_ITER_TMR 0x0125U +#define CMN_SD_CAL_REFTIM_START 0x0126U +#define CMN_SD_CAL_PLLCNT_START 0x0128U +#define CMN_PDIAG_PLL0_CTRL_M0 0x01A0U +#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x01A1U +#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U +#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U +#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U +#define CMN_PDIAG_PLL0_CTRL_M1 0x01B0U +#define CMN_PDIAG_PLL0_CLK_SEL_M1 0x01B1U +#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U +#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U +#define CMN_PDIAG_PLL0_FILT_PADJ_M1 0x01B6U +#define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U +#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U +#define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U +#define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U +#define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U +#define CMN_DIAG_BIAS_OVRD1 0x01E1U + +/* PMA TX Lane registers */ +#define TX_TXCC_CTRL 0x0040U +#define TX_TXCC_CPOST_MULT_00 0x004CU +#define TX_TXCC_CPOST_MULT_01 0x004DU +#define TX_TXCC_MGNFS_MULT_000 0x0050U +#define TX_TXCC_MGNFS_MULT_100 0x0054U +#define DRV_DIAG_TX_DRV 0x00C6U +#define XCVR_DIAG_PLLDRC_CTRL 0x00E5U +#define XCVR_DIAG_HSCLK_SEL 0x00E6U +#define XCVR_DIAG_HSCLK_DIV 0x00E7U +#define XCVR_DIAG_RXCLK_CTRL 0x00E9U +#define XCVR_DIAG_BIDI_CTRL 0x00EAU +#define XCVR_DIAG_PSC_OVRD 0x00EBU +#define TX_PSC_A0 0x0100U +#define TX_PSC_A1 0x0101U +#define TX_PSC_A2 0x0102U +#define TX_PSC_A3 0x0103U +#define TX_RCVDET_ST_TMR 0x0123U +#define TX_DIAG_ACYA 0x01E7U +#define TX_DIAG_ACYA_HBDC_MASK 0x0001U + +/* PMA RX Lane registers */ +#define RX_PSC_A0 0x0000U +#define RX_PSC_A1 0x0001U +#define RX_PSC_A2 0x0002U +#define RX_PSC_A3 0x0003U +#define RX_PSC_CAL 0x0006U +#define RX_CDRLF_CNFG 0x0080U +#define RX_CDRLF_CNFG3 0x0082U +#define RX_SIGDET_HL_FILT_TMR 0x0090U +#define RX_REE_GCSM1_CTRL 0x0108U +#define RX_REE_GCSM1_EQENM_PH1 0x0109U +#define RX_REE_GCSM1_EQENM_PH2 0x010AU +#define RX_REE_GCSM2_CTRL 0x0110U +#define RX_REE_PERGCSM_CTRL 0x0118U +#define RX_REE_ATTEN_THR 0x0149U +#define RX_REE_TAP1_CLIP 0x0171U +#define RX_REE_TAP2TON_CLIP 0x0172U +#define RX_REE_SMGM_CTRL1 0x0177U +#define RX_REE_SMGM_CTRL2 0x0178U +#define RX_DIAG_DFE_CTRL 0x01E0U +#define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U +#define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U +#define RX_DIAG_NQST_CTRL 0x01E5U +#define RX_DIAG_SIGDET_TUNE 0x01E8U +#define RX_DIAG_PI_RATE 0x01F4U +#define RX_DIAG_PI_CAP 0x01F5U +#define RX_DIAG_ACYA 0x01FFU + +/* PHY PCS common 
registers */ +#define PHY_PLL_CFG 0x000EU +#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U +#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U +#define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U + +/* PHY PMA common registers */ +#define PHY_PMA_CMN_CTRL1 0x0000U +#define PHY_PMA_CMN_CTRL2 0x0001U +#define PHY_PMA_PLL_RAW_CTRL 0x0003U + +static const struct reg_field phy_pll_cfg = REG_FIELD(PHY_PLL_CFG, 0, 1); +static const struct reg_field phy_pma_cmn_ctrl_1 = + REG_FIELD(PHY_PMA_CMN_CTRL1, 0, 0); +static const struct reg_field phy_pma_cmn_ctrl_2 = + REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7); +static const struct reg_field phy_pma_pll_raw_ctrl = + REG_FIELD(PHY_PMA_PLL_RAW_CTRL, 0, 1); + +#define reset_control_assert reset_assert +#define reset_control_deassert reset_deassert +#define reset_control reset_ctl +#define reset_control_put reset_free + +enum cdns_torrent_phy_type { + TYPE_NONE, + TYPE_DP, + TYPE_PCIE, + TYPE_SGMII, + TYPE_QSGMII, + TYPE_USB, +}; + +enum cdns_torrent_ssc_mode { + NO_SSC, + EXTERNAL_SSC, + INTERNAL_SSC +}; + +struct cdns_torrent_inst { + struct phy *phy; + u32 mlane; + enum cdns_torrent_phy_type phy_type; + u32 num_lanes; + struct reset_ctl_bulk *lnk_rst; + enum cdns_torrent_ssc_mode ssc_mode; +}; + +struct cdns_torrent_phy { + void __iomem *sd_base; /* SD0801 register base */ + size_t size; + struct reset_control *phy_rst; + struct udevice *dev; + struct cdns_torrent_inst phys[MAX_NUM_LANES]; + int nsubnodes; + const struct cdns_torrent_data *init_data; + struct regmap *regmap; + struct regmap *regmap_common_cdb; + struct regmap *regmap_phy_pcs_common_cdb; + struct regmap *regmap_phy_pma_common_cdb; + struct regmap *regmap_tx_lane_cdb[MAX_NUM_LANES]; + struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES]; + struct regmap_field *phy_pll_cfg; + struct regmap_field *phy_pma_cmn_ctrl_1; + struct regmap_field *phy_pma_cmn_ctrl_2; + struct regmap_field *phy_pma_pll_raw_ctrl; +}; + +struct cdns_reg_pairs { + u32 val; + u32 off; +}; + +struct cdns_torrent_vals { + struct cdns_reg_pairs *reg_pairs; + u32 num_regs; +}; + +struct cdns_torrent_data { + u8 block_offset_shift; + u8 reg_offset_shift; + struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *tx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *rx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; +}; + +static inline struct cdns_torrent_inst *phy_get_drvdata(struct phy *phy) +{ + struct cdns_torrent_phy *sp = dev_get_priv(phy->dev); + int index; + + if (phy->id >= MAX_NUM_LANES) + return NULL; + + for (index = 0; index < sp->nsubnodes; index++) { + if (phy->id == sp->phys[index].mlane) + return &sp->phys[index]; + } + + return NULL; +} + +static struct regmap *cdns_regmap_init(struct udevice *dev, void __iomem *base, + u32 block_offset, + u8 reg_offset_shift) +{ + struct cdns_torrent_phy *sp = dev_get_priv(dev); + struct regmap_config config; + + config.r_start = (ulong)(base + block_offset); + config.r_size = sp->size - block_offset; + config.reg_offset_shift = reg_offset_shift; + config.width = REGMAP_SIZE_16; + + return devm_regmap_init(dev, NULL, NULL, &config); +} + +static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy) +{ + struct udevice *dev = cdns_phy->dev; + 
struct regmap_field *field; + struct regmap *regmap; + + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PLL_CFG reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pll_cfg = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_1); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_CMN_CTRL1 reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_cmn_ctrl_1 = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_cmn_ctrl_2 = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_pll_raw_ctrl); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_PLL_RAW_CTRL reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_pll_raw_ctrl = field; + + return 0; +} + +static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy) +{ + void __iomem *sd_base = cdns_phy->sd_base; + u8 block_offset_shift, reg_offset_shift; + struct udevice *dev = cdns_phy->dev; + struct regmap *regmap; + u32 block_offset; + int i; + + block_offset_shift = cdns_phy->init_data->block_offset_shift; + reg_offset_shift = cdns_phy->init_data->reg_offset_shift; + + for (i = 0; i < MAX_NUM_LANES; i++) { + block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init tx lane CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_tx_lane_cdb[i] = regmap; + block_offset = TORRENT_RX_LANE_CDB_OFFSET(i, block_offset_shift, + reg_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init rx lane CDB regmap"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_rx_lane_cdb[i] = regmap; + } + + block_offset = TORRENT_COMMON_CDB_OFFSET; + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_common_cdb = regmap; + + block_offset = TORRENT_PHY_PCS_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PCS common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pcs_common_cdb = regmap; + + block_offset = TORRENT_PHY_PMA_COMMON_OFFSET(block_offset_shift); + regmap = cdns_regmap_init(dev, sd_base, block_offset, + reg_offset_shift); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init PHY PMA common CDB regmap\n"); + return PTR_ERR(regmap); + } + cdns_phy->regmap_phy_pma_common_cdb = regmap; + + return 0; +} + +static int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) +{ + const struct cdns_torrent_data *init_data = cdns_phy->init_data; + struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; + enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type; + struct cdns_torrent_vals *pcs_cmn_vals; + int i, j, node, mlane, num_lanes, ret; + struct 
cdns_reg_pairs *reg_pairs; + enum cdns_torrent_ssc_mode ssc; + struct regmap *regmap; + u32 num_regs; + + /* Maximum 2 links (subnodes) are supported */ + if (cdns_phy->nsubnodes != 2) + return -EINVAL; + + phy_t1 = cdns_phy->phys[0].phy_type; + phy_t2 = cdns_phy->phys[1].phy_type; + + /* + * First configure the PHY for first link with phy_t1. Geth the array + * values are [phy_t1][phy_t2][ssc]. + */ + for (node = 0; node < cdns_phy->nsubnodes; node++) { + if (node == 1) { + /* + * If fist link with phy_t1 is configured, then + * configure the PHY for second link with phy_t2. + * Get the array values as [phy_t2][phy_t1][ssc] + */ + tmp_phy_type = phy_t1; + phy_t1 = phy_t2; + phy_t2 = tmp_phy_type; + } + + mlane = cdns_phy->phys[node].mlane; + ssc = cdns_phy->phys[node].ssc_mode; + num_lanes = cdns_phy->phys[node].num_lanes; + + /** + * PHY configuration specific registers: + * link_cmn_vals depend on combination of PHY types being + * configured and are common for both PHY types, so array + * values should be same for [phy_t1][phy_t2][ssc] and + * [phy_t2][phy_t1][ssc]. + * xcvr_diag_vals also depend on combination of PHY types + * being configured, but these can be different for particular + * PHY type and are per lane. + */ + link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc]; + if (link_cmn_vals) { + reg_pairs = link_cmn_vals->reg_pairs; + num_regs = link_cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + + /** + * First array value in link_cmn_vals must be of + * PHY_PLL_CFG register + */ + regmap_field_write(cdns_phy->phy_pll_cfg, + reg_pairs[0].val); + + for (i = 1; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc]; + if (xcvr_diag_vals) { + reg_pairs = xcvr_diag_vals->reg_pairs; + num_regs = xcvr_diag_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PHY PCS common registers configurations */ + pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc]; + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA common registers configurations */ + cmn_vals = init_data->cmn_vals[phy_t1][phy_t2][ssc]; + if (cmn_vals) { + reg_pairs = cmn_vals->reg_pairs; + num_regs = cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA TX lane registers configurations */ + tx_ln_vals = init_data->tx_ln_vals[phy_t1][phy_t2][ssc]; + if (tx_ln_vals) { + reg_pairs = tx_ln_vals->reg_pairs; + num_regs = tx_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PMA RX lane registers configurations */ + rx_ln_vals = init_data->rx_ln_vals[phy_t1][phy_t2][ssc]; + if (rx_ln_vals) { + reg_pairs = rx_ln_vals->reg_pairs; + num_regs = rx_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + 
reset_deassert_bulk(cdns_phy->phys[node].lnk_rst); + } + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; + + return 0; +} + +static int cdns_torrent_phy_probe(struct udevice *dev) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_priv(dev); + int ret, subnodes = 0, node = 0, i; + struct cdns_torrent_data *data; + u32 total_num_lanes = 0; + struct clk *clk; + ofnode child; + u32 phy_type; + + cdns_phy->dev = dev; + + /* Get init data for this phy */ + data = (struct cdns_torrent_data *)dev_get_driver_data(dev); + cdns_phy->init_data = data; + + cdns_phy->phy_rst = devm_reset_control_get_by_index(dev, 0); + if (IS_ERR(cdns_phy->phy_rst)) { + dev_err(dev, "failed to get reset\n"); + return PTR_ERR(cdns_phy->phy_rst); + } + + clk = devm_clk_get(dev, "refclk"); + if (IS_ERR(clk)) { + dev_err(dev, "phy ref clock not found\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(cdns_phy->dev, "Failed to prepare ref clock\n"); + return ret; + } + + cdns_phy->sd_base = devfdt_remap_addr_index(dev, 0); + if (IS_ERR(cdns_phy->sd_base)) + return PTR_ERR(cdns_phy->sd_base); + devfdt_get_addr_size_index(dev, 0, (fdt_size_t *)&cdns_phy->size); + + dev_for_each_subnode(child, dev) + subnodes++; + if (subnodes == 0) { + dev_err(dev, "No available link subnodes found\n"); + return -EINVAL; + } + ret = cdns_torrent_regmap_init(cdns_phy); + if (ret) + return ret; + + ret = cdns_torrent_regfield_init(cdns_phy); + if (ret) + return ret; + + /* Going through all the available subnodes or children*/ + ofnode_for_each_subnode(child, dev_ofnode(dev)) { + /* PHY subnode name must be a 'link' */ + if (!ofnode_name_eq(child, "link")) + continue; + cdns_phy->phys[node].lnk_rst = + devm_reset_bulk_get_by_node(dev, child); + if (IS_ERR(cdns_phy->phys[node].lnk_rst)) { + dev_err(dev, "%s: failed to get reset\n", + ofnode_get_name(child)); + ret = PTR_ERR(cdns_phy->phys[node].lnk_rst); + goto put_lnk_rst; + } + + if (ofnode_read_u32(child, "reg", + &cdns_phy->phys[node].mlane)) { + dev_err(dev, "%s: No \"reg \" - property.\n", + ofnode_get_name(child)); + ret = -EINVAL; + goto put_child; + } + + if (ofnode_read_u32(child, "cdns,phy-type", &phy_type)) { + dev_err(dev, "%s: No \"cdns,phy-type \" - property.\n", + ofnode_get_name(child)); + ret = -EINVAL; + goto put_child; + } + + switch (phy_type) { + case PHY_TYPE_PCIE: + cdns_phy->phys[node].phy_type = TYPE_PCIE; + break; + case PHY_TYPE_DP: + cdns_phy->phys[node].phy_type = TYPE_DP; + break; + case PHY_TYPE_SGMII: + cdns_phy->phys[node].phy_type = TYPE_SGMII; + break; + case PHY_TYPE_QSGMII: + cdns_phy->phys[node].phy_type = TYPE_QSGMII; + break; + case PHY_TYPE_USB3: + cdns_phy->phys[node].phy_type = TYPE_USB; + break; + default: + dev_err(dev, "Unsupported protocol\n"); + ret = -EINVAL; + goto put_child; + } + + if (ofnode_read_u32(child, "cdns,num-lanes", + &cdns_phy->phys[node].num_lanes)) { + dev_err(dev, "%s: No \"cdns,num-lanes \" - property.\n", + ofnode_get_name(child)); + ret = -EINVAL; + goto put_child; + } + + total_num_lanes += cdns_phy->phys[node].num_lanes; + + /* Get SSC mode */ + ofnode_read_u32(child, "cdns,ssc-mode", + &cdns_phy->phys[node].ssc_mode); + node++; + } + + cdns_phy->nsubnodes = node; + + if (total_num_lanes > MAX_NUM_LANES) { + dev_err(dev, "Invalid lane configuration\n"); + goto put_lnk_rst; + } + + if (cdns_phy->nsubnodes > 1) { + ret = cdns_torrent_phy_configure_multilink(cdns_phy); + if (ret) + goto put_lnk_rst; + } + + 
reset_control_deassert(cdns_phy->phy_rst); + return 0; + +put_child: + node++; +put_lnk_rst: + for (i = 0; i < node; i++) + reset_release_bulk(cdns_phy->phys[i].lnk_rst); + return ret; +} + +static int cdns_torrent_phy_on(struct phy *gphy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(gphy); + struct cdns_torrent_phy *cdns_phy = dev_get_priv(gphy->dev); + u32 read_val; + int ret; + + if (cdns_phy->nsubnodes == 1) { + /* Take the PHY lane group out of reset */ + reset_deassert_bulk(inst->lnk_rst); + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; + } + + /* + * Wait for cmn_ready assertion + * PHY_PMA_CMN_CTRL1[0] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1, + read_val, read_val, 1000, + PLL_LOCK_TIMEOUT); + if (ret) { + dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n"); + return ret; + } + mdelay(10); + + return 0; +} + +static int cdns_torrent_phy_init(struct phy *phy) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_priv(phy->dev); + const struct cdns_torrent_data *init_data = cdns_phy->init_data; + struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + enum cdns_torrent_phy_type phy_type = inst->phy_type; + enum cdns_torrent_ssc_mode ssc = inst->ssc_mode; + struct cdns_torrent_vals *pcs_cmn_vals; + struct cdns_reg_pairs *reg_pairs; + struct regmap *regmap; + u32 num_regs; + int i, j; + + if (cdns_phy->nsubnodes > 1) + return 0; + + /** + * Spread spectrum generation is not required or supported + * for SGMII/QSGMII + */ + if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII) + ssc = NO_SSC; + + /* PHY configuration specific registers for single link */ + link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc]; + if (link_cmn_vals) { + reg_pairs = link_cmn_vals->reg_pairs; + num_regs = link_cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + + /** + * First array value in link_cmn_vals must be of + * PHY_PLL_CFG register + */ + regmap_field_write(cdns_phy->phy_pll_cfg, reg_pairs[0].val); + + for (i = 1; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc]; + if (xcvr_diag_vals) { + reg_pairs = xcvr_diag_vals->reg_pairs; + num_regs = xcvr_diag_vals->num_regs; + for (i = 0; i < inst->num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PHY PCS common registers configurations */ + pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc]; + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA common registers configurations */ + cmn_vals = init_data->cmn_vals[phy_type][TYPE_NONE][ssc]; + if (cmn_vals) { + reg_pairs = cmn_vals->reg_pairs; + num_regs = cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA TX lane registers configurations */ + tx_ln_vals = init_data->tx_ln_vals[phy_type][TYPE_NONE][ssc]; + if (tx_ln_vals) { + reg_pairs = tx_ln_vals->reg_pairs; + num_regs = 
tx_ln_vals->num_regs; + for (i = 0; i < inst->num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PMA RX lane registers configurations */ + rx_ln_vals = init_data->rx_ln_vals[phy_type][TYPE_NONE][ssc]; + if (rx_ln_vals) { + reg_pairs = rx_ln_vals->reg_pairs; + num_regs = rx_ln_vals->num_regs; + for (i = 0; i < inst->num_lanes; i++) { + regmap = cdns_phy->regmap_rx_lane_cdb[i + inst->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + return 0; +} + +static int cdns_torrent_phy_off(struct phy *gphy) +{ + struct cdns_torrent_inst *inst = phy_get_drvdata(gphy); + struct cdns_torrent_phy *cdns_phy = dev_get_priv(gphy->dev); + int ret; + + if (cdns_phy->nsubnodes != 1) + return 0; + + ret = reset_control_assert(cdns_phy->phy_rst); + if (ret) + return ret; + + return reset_assert_bulk(inst->lnk_rst); +} + +static int cdns_torrent_phy_remove(struct udevice *dev) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_priv(dev); + int i; + + reset_control_assert(cdns_phy->phy_rst); + for (i = 0; i < cdns_phy->nsubnodes; i++) + reset_release_bulk(cdns_phy->phys[i].lnk_rst); + + return 0; +} + +/* USB and SGMII/QSGMII link configuration */ +static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { + {0x0002, PHY_PLL_CFG}, + {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}, + {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0041, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = { + {0x0011, XCVR_DIAG_HSCLK_SEL}, + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x009B, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = { + .reg_pairs = usb_sgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = { + .reg_pairs = usb_sgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs), +}; + +static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = { + .reg_pairs = sgmii_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs), +}; + +/* PCIe and USB Unique SSC link configuration */ +static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = { + {0x0003, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, + {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, + {0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0012, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = { + {0x0011, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x00C9, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals pcie_usb_link_cmn_vals = { + .reg_pairs = pcie_usb_link_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs), +}; + +static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = { + .reg_pairs = pcie_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs), +}; + +static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = { + .reg_pairs = usb_pcie_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs), +}; + +/* USB 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { + {0x0004, 
CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}, + {0x007F, CMN_TXPUCAL_TUNE}, + {0x007F, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = { + .reg_pairs = usb_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs), +}; + +/* Single USB link configuration */ +static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = { + {0x0000, PHY_PLL_CFG}, + {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0041, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals sl_usb_link_cmn_vals = { + .reg_pairs = sl_usb_link_cmn_regs, + .num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs), +}; + +static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = { + .reg_pairs = sl_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs), +}; + +/* USB PHY PCS common configuration */ +static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = { + {0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0}, + {0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0}, + {0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1} +}; + +static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = { + .reg_pairs = usb_phy_pcs_cmn_regs, + .num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs), +}; + +/* USB 100 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = { + {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, + {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} +}; + +static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = { + .reg_pairs = sl_usb_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs), +}; + +static struct cdns_reg_pairs 
usb_100_no_ssc_cmn_regs[] = { + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}, + {0x007F, CMN_TXPUCAL_TUNE}, + {0x007F, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { + {0x02FF, TX_PSC_A0}, + {0x06AF, TX_PSC_A1}, + {0x06AE, TX_PSC_A2}, + {0x06AE, TX_PSC_A3}, + {0x2A82, TX_TXCC_CTRL}, + {0x0014, TX_TXCC_CPOST_MULT_01}, + {0x0003, XCVR_DIAG_PSC_OVRD} +}; + +static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { + {0x0D1D, RX_PSC_A0}, + {0x0D1D, RX_PSC_A1}, + {0x0D00, RX_PSC_A2}, + {0x0500, RX_PSC_A3}, + {0x0013, RX_SIGDET_HL_FILT_TMR}, + {0x0000, RX_REE_GCSM1_CTRL}, + {0x0C02, RX_REE_ATTEN_THR}, + {0x0330, RX_REE_SMGM_CTRL1}, + {0x0300, RX_REE_SMGM_CTRL2}, + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x1004, RX_DIAG_SIGDET_TUNE}, + {0x00F9, RX_DIAG_NQST_CTRL}, + {0x0C01, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0002, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0000, RX_DIAG_PI_CAP}, + {0x0031, RX_DIAG_PI_RATE}, + {0x0001, RX_DIAG_ACYA}, + {0x018C, RX_CDRLF_CNFG}, + {0x0003, RX_CDRLF_CNFG3} +}; + +static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = { + .reg_pairs = usb_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = { + .reg_pairs = usb_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = { + .reg_pairs = usb_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs), +}; + +/* Single link USB, 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} +}; + +static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = { + .reg_pairs = sl_usb_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs), +}; + +/* PCIe and SGMII/QSGMII Unique SSC link configuration */ +static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = { + {0x0003, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, + {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, + {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs 
pcie_sgmii_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0012, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = { + {0x0011, XCVR_DIAG_HSCLK_SEL}, + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x009B, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = { + .reg_pairs = pcie_sgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = { + .reg_pairs = pcie_sgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs), +}; + +static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = { + .reg_pairs = sgmii_pcie_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs), +}; + +/* SGMII 100 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = { + {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, + {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = { + .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs), +}; + +static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = { + {0x007F, CMN_TXPUCAL_TUNE}, + {0x007F, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { + {0x00F3, TX_PSC_A0}, + {0x04A2, TX_PSC_A2}, + {0x04A2, TX_PSC_A3}, + {0x0000, TX_TXCC_CPOST_MULT_00}, + {0x00B3, DRV_DIAG_TX_DRV} +}; + +static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { + {0x00F3, TX_PSC_A0}, + {0x04A2, TX_PSC_A2}, + {0x04A2, TX_PSC_A3}, + {0x0000, TX_TXCC_CPOST_MULT_00}, + {0x00B3, DRV_DIAG_TX_DRV}, + {0x4000, XCVR_DIAG_RXCLK_CTRL}, +}; + +static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { + {0x091D, RX_PSC_A0}, + {0x0900, RX_PSC_A2}, + {0x0100, RX_PSC_A3}, + {0x03C7, RX_REE_GCSM1_EQENM_PH1}, + {0x01C7, RX_REE_GCSM1_EQENM_PH2}, + {0x0000, RX_DIAG_DFE_CTRL}, + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0098, RX_DIAG_NQST_CTRL}, + {0x0C01, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0000, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0000, RX_DIAG_PI_CAP}, + {0x0010, RX_DIAG_PI_RATE}, + {0x0001, RX_DIAG_ACYA}, + {0x018C, RX_CDRLF_CNFG}, +}; + +static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = { + .reg_pairs = sgmii_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = { + .reg_pairs = sgmii_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = { + .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = { + .reg_pairs = sgmii_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs), +}; + +/* SGMII 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, 
CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x007F, CMN_TXPUCAL_TUNE}, + {0x007F, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = { + .reg_pairs = sgmii_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs), +}; + +/* QSGMII 100 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = { + {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, + {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = { + .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs), +}; + +static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = { + {0x007F, CMN_TXPUCAL_TUNE}, + {0x007F, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { + {0x00F3, TX_PSC_A0}, + {0x04A2, TX_PSC_A2}, + {0x04A2, TX_PSC_A3}, + {0x0000, TX_TXCC_CPOST_MULT_00}, + {0x0011, TX_TXCC_MGNFS_MULT_100}, + {0x0003, DRV_DIAG_TX_DRV} +}; + +static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { + {0x00F3, TX_PSC_A0}, + {0x04A2, TX_PSC_A2}, + {0x04A2, TX_PSC_A3}, + {0x0000, TX_TXCC_CPOST_MULT_00}, + {0x0011, TX_TXCC_MGNFS_MULT_100}, + {0x0003, DRV_DIAG_TX_DRV}, + {0x4000, XCVR_DIAG_RXCLK_CTRL}, +}; + +static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { + {0x091D, RX_PSC_A0}, + {0x0900, RX_PSC_A2}, + {0x0100, RX_PSC_A3}, + {0x03C7, RX_REE_GCSM1_EQENM_PH1}, + {0x01C7, RX_REE_GCSM1_EQENM_PH2}, + {0x0000, RX_DIAG_DFE_CTRL}, + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0098, RX_DIAG_NQST_CTRL}, + {0x0C01, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0000, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0000, RX_DIAG_PI_CAP}, + {0x0010, RX_DIAG_PI_RATE}, + {0x0001, RX_DIAG_ACYA}, + {0x018C, RX_CDRLF_CNFG}, +}; + +static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = { + .reg_pairs = qsgmii_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = { + .reg_pairs = qsgmii_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals 
ti_qsgmii_100_no_ssc_tx_ln_vals = { + .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = { + .reg_pairs = qsgmii_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs), +}; + +/* QSGMII 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x007F, CMN_TXPUCAL_TUNE}, + {0x007F, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = { + .reg_pairs = qsgmii_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs), +}; + +/* Single SGMII/QSGMII link configuration */ +static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = { + {0x0000, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x0013, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = { + .reg_pairs = sl_sgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { + .reg_pairs = sl_sgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs), +}; + +/* Multi link PCIe, 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, 
CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} +}; + +static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = { + .reg_pairs = pcie_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs), +}; + +/* Single link PCIe, 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0050, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0036, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x0058, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x0012, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} +}; + +static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = { + .reg_pairs = sl_pcie_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs), +}; + +/* PCIe, 100 MHz Ref clk, no SSC & external SSC */ +static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = { + {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, + 
{0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, + {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0} +}; + +static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = { + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0001, RX_DIAG_ACYA} +}; + +static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = { + .reg_pairs = pcie_100_ext_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = { + .reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs), +}; + +static const struct cdns_torrent_data cdns_map_torrent = { + .block_offset_shift = 0x2, + .reg_offset_shift = 0x2, + .link_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_link_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_link_cmn_vals, + [INTERNAL_SSC] = &sl_usb_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + }, + .xcvr_diag_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = 
&sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + }, + }, + .pcs_cmn_vals = { + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + }, + }, + .cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + 
[TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + }, + }, + .tx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_QSGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_USB] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + }, + }, + .rx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + 
[NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + }, + }, +}; + +static const struct cdns_torrent_data ti_j721e_map_torrent = { + .block_offset_shift = 0x0, + .reg_offset_shift = 0x1, + .link_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_link_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_link_cmn_vals, + [INTERNAL_SSC] = &sl_usb_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + }, + .xcvr_diag_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + 
[EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + }, + }, + .pcs_cmn_vals = { + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + }, + }, + .cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = 
&qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + }, + }, + .tx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_QSGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_USB] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + }, + }, + .rx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + 
[INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + }, + }, +}; + +static int cdns_torrent_phy_reset(struct phy *gphy) +{ + struct cdns_torrent_phy *sp = dev_get_priv(gphy->dev); + + reset_control_assert(sp->phy_rst); + reset_control_deassert(sp->phy_rst); + return 0; +} + +static const struct udevice_id cdns_torrent_id_table[] = { + { + .compatible = "cdns,torrent-phy", + .data = (ulong)&cdns_map_torrent, + }, + { + .compatible = "ti,j721e-serdes-10g", + .data = (ulong)&ti_j721e_map_torrent, + }, + {} +}; + +static const struct phy_ops cdns_torrent_phy_ops = { + .init = cdns_torrent_phy_init, + .power_on = cdns_torrent_phy_on, + .power_off = cdns_torrent_phy_off, + .reset = cdns_torrent_phy_reset, +}; + +U_BOOT_DRIVER(torrent_phy_provider) = { + .name = "cdns,torrent", + .id = UCLASS_PHY, + .of_match = cdns_torrent_id_table, + .probe = cdns_torrent_phy_probe, + .remove = cdns_torrent_phy_remove, + .ops = &cdns_torrent_phy_ops, + .priv_auto = sizeof(struct cdns_torrent_phy), +}; diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig new file mode 100644 index 0000000..111085f --- /dev/null +++ b/drivers/phy/ti/Kconfig @@ -0,0 +1,9 @@ +config PHY_J721E_WIZ + tristate "TI J721E WIZ (SERDES Wrapper) support" + depends on ARCH_K3 + help + This option enables support for WIZ module present in TI's J721E + SoC. WIZ is a serdes wrapper used to configure some of the input + signals to the SERDES (Sierra/Torrent). This driver configures + three clock selects (pll0, pll1, dig) and resets for each of the + lanes. 
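For orientation, a board or peripheral driver consumes the WIZ wrapper and the SERDES it fronts through U-Boot's generic PHY uclass rather than poking these registers directly. The sketch below is illustrative only: the example_serdes_consumer_probe() function and the "serdes0" phy-name are invented for this example, and the comments describe the ops wired up by the Torrent driver above (.init and .power_on) under that assumption.

#include <dm.h>
#include <dm/device_compat.h>
#include <generic-phy.h>

/*
 * Illustrative consumer probe (not part of this series): "serdes0" is an
 * assumed phy-names entry in the consumer's device tree node.
 */
static int example_serdes_consumer_probe(struct udevice *dev)
{
	struct phy serdes;
	int ret;

	/* Resolve the PHY handle; the WIZ muxes/resets and the PMA sit behind it */
	ret = generic_phy_get_by_name(dev, "serdes0", &serdes);
	if (ret) {
		dev_err(dev, "failed to get SERDES phy: %d\n", ret);
		return ret;
	}

	/* Calls the provider's .init op (cdns_torrent_phy_init above) */
	ret = generic_phy_init(&serdes);
	if (ret)
		return ret;

	/* Calls the provider's .power_on op (cdns_torrent_phy_on above) */
	ret = generic_phy_power_on(&serdes);
	if (ret)
		generic_phy_exit(&serdes);

	return ret;
}

The same flow applies whichever provider backs the lane, since both the Sierra and Torrent drivers register with UCLASS_PHY; only the device tree wiring selects which one the phandle resolves to.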
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile new file mode 100644 index 0000000..873ddbf --- /dev/null +++ b/drivers/phy/ti/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_$(SPL_)PHY_J721E_WIZ) += phy-j721e-wiz.o diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c new file mode 100644 index 0000000..d74efcd --- /dev/null +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -0,0 +1,1156 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Jean-Jacques Hiblot <jjhiblot@ti.com> + */ + +#include <common.h> +#include <clk-uclass.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <asm/gpio.h> +#include <dm/lists.h> +#include <dm/device-internal.h> +#include <regmap.h> +#include <reset-uclass.h> +#include <dt-bindings/phy/phy.h> + +#include <dt-bindings/phy/phy-ti.h> + +#define WIZ_MAX_INPUT_CLOCKS 4 +/* To include mux clocks, divider clocks and gate clocks */ +#define WIZ_MAX_OUTPUT_CLOCKS 32 + +#define WIZ_MAX_LANES 4 +#define WIZ_MUX_NUM_CLOCKS 3 +#define WIZ_DIV_NUM_CLOCKS_16G 2 +#define WIZ_DIV_NUM_CLOCKS_10G 1 + +#define WIZ_SERDES_CTRL 0x404 +#define WIZ_SERDES_TOP_CTRL 0x408 +#define WIZ_SERDES_RST 0x40c +#define WIZ_SERDES_TYPEC 0x410 +#define WIZ_LANECTL(n) (0x480 + (0x40 * (n))) +#define WIZ_LANEDIV(n) (0x484 + (0x40 * (n))) + +#define WIZ_MAX_LANES 4 +#define WIZ_MUX_NUM_CLOCKS 3 +#define WIZ_DIV_NUM_CLOCKS_16G 2 +#define WIZ_DIV_NUM_CLOCKS_10G 1 + +#define WIZ_SERDES_TYPEC_LN10_SWAP BIT(30) + +enum wiz_lane_standard_mode { + LANE_MODE_GEN1, + LANE_MODE_GEN2, + LANE_MODE_GEN3, + LANE_MODE_GEN4, +}; + +enum wiz_refclk_mux_sel { + PLL0_REFCLK, + PLL1_REFCLK, + REFCLK_DIG, +}; + +enum wiz_refclk_div_sel { + CMN_REFCLK, + CMN_REFCLK1, +}; + +enum wiz_clock_input { + WIZ_CORE_REFCLK, + WIZ_EXT_REFCLK, + WIZ_CORE_REFCLK1, + WIZ_EXT_REFCLK1, +}; + +static const struct reg_field por_en = REG_FIELD(WIZ_SERDES_CTRL, 31, 31); +static const struct reg_field phy_reset_n = REG_FIELD(WIZ_SERDES_RST, 31, 31); +static const struct reg_field pll1_refclk_mux_sel = + REG_FIELD(WIZ_SERDES_RST, 29, 29); +static const struct reg_field pll0_refclk_mux_sel = + REG_FIELD(WIZ_SERDES_RST, 28, 28); +static const struct reg_field refclk_dig_sel_16g = + REG_FIELD(WIZ_SERDES_RST, 24, 25); +static const struct reg_field refclk_dig_sel_10g = + REG_FIELD(WIZ_SERDES_RST, 24, 24); +static const struct reg_field pma_cmn_refclk_int_mode = + REG_FIELD(WIZ_SERDES_TOP_CTRL, 28, 29); +static const struct reg_field pma_cmn_refclk_mode = + REG_FIELD(WIZ_SERDES_TOP_CTRL, 30, 31); +static const struct reg_field pma_cmn_refclk_dig_div = + REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27); +static const struct reg_field pma_cmn_refclk1_dig_div = + REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25); + +static const struct reg_field p_enable[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANECTL(0), 30, 31), + REG_FIELD(WIZ_LANECTL(1), 30, 31), + REG_FIELD(WIZ_LANECTL(2), 30, 31), + REG_FIELD(WIZ_LANECTL(3), 30, 31), +}; + +enum p_enable { P_ENABLE = 2, P_ENABLE_FORCE = 1, P_ENABLE_DISABLE = 0 }; + +static const struct reg_field p_align[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANECTL(0), 29, 29), + REG_FIELD(WIZ_LANECTL(1), 29, 29), + REG_FIELD(WIZ_LANECTL(2), 29, 29), + REG_FIELD(WIZ_LANECTL(3), 29, 29), +}; + +static const struct reg_field p_raw_auto_start[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANECTL(0), 28, 28), + REG_FIELD(WIZ_LANECTL(1), 28, 28), + REG_FIELD(WIZ_LANECTL(2), 28, 28), + REG_FIELD(WIZ_LANECTL(3), 28, 28), +}; + +static const struct reg_field 
p_standard_mode[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANECTL(0), 24, 25), + REG_FIELD(WIZ_LANECTL(1), 24, 25), + REG_FIELD(WIZ_LANECTL(2), 24, 25), + REG_FIELD(WIZ_LANECTL(3), 24, 25), +}; + +static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANECTL(0), 22, 23), + REG_FIELD(WIZ_LANECTL(1), 22, 23), + REG_FIELD(WIZ_LANECTL(2), 22, 23), + REG_FIELD(WIZ_LANECTL(3), 22, 23), +}; + +static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANEDIV(0), 16, 22), + REG_FIELD(WIZ_LANEDIV(1), 16, 22), + REG_FIELD(WIZ_LANEDIV(2), 16, 22), + REG_FIELD(WIZ_LANEDIV(3), 16, 22), +}; + +static const struct reg_field p_mac_div_sel1[WIZ_MAX_LANES] = { + REG_FIELD(WIZ_LANEDIV(0), 0, 8), + REG_FIELD(WIZ_LANEDIV(1), 0, 8), + REG_FIELD(WIZ_LANEDIV(2), 0, 8), + REG_FIELD(WIZ_LANEDIV(3), 0, 8), +}; + +struct wiz_clk_mux_sel { + enum wiz_refclk_mux_sel mux_sel; + u32 table[WIZ_MAX_INPUT_CLOCKS]; + const char *node_name; + u32 num_parents; + u32 parents[WIZ_MAX_INPUT_CLOCKS]; +}; + +struct wiz_clk_div_sel { + enum wiz_refclk_div_sel div_sel; + const char *node_name; +}; + +static struct wiz_clk_mux_sel clk_mux_sel_16g[] = { + { + /* + * Mux value to be configured for each of the input clocks + * in the order populated in device tree + */ + .num_parents = 2, + .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK }, + .mux_sel = PLL0_REFCLK, + .table = { 1, 0 }, + .node_name = "pll0-refclk", + }, + { + .num_parents = 2, + .parents = { WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK1 }, + .mux_sel = PLL1_REFCLK, + .table = { 1, 0 }, + .node_name = "pll1-refclk", + }, + { + .num_parents = 4, + .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK, WIZ_EXT_REFCLK1 }, + .mux_sel = REFCLK_DIG, + .table = { 1, 3, 0, 2 }, + .node_name = "refclk-dig", + }, +}; + +static struct wiz_clk_mux_sel clk_mux_sel_10g[] = { + { + /* + * Mux value to be configured for each of the input clocks + * in the order populated in device tree + */ + .num_parents = 2, + .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK }, + .mux_sel = PLL0_REFCLK, + .table = { 1, 0 }, + .node_name = "pll0-refclk", + }, + { + .num_parents = 2, + .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK }, + .mux_sel = PLL1_REFCLK, + .table = { 1, 0 }, + .node_name = "pll1-refclk", + }, + { + .num_parents = 2, + .parents = { WIZ_CORE_REFCLK, WIZ_EXT_REFCLK }, + .mux_sel = REFCLK_DIG, + .table = { 1, 0 }, + .node_name = "refclk-dig", + }, +}; + +static struct wiz_clk_div_sel clk_div_sel[] = { + { + .div_sel = CMN_REFCLK, + .node_name = "cmn-refclk-dig-div", + }, + { + .div_sel = CMN_REFCLK1, + .node_name = "cmn-refclk1-dig-div", + }, +}; + +enum wiz_type { + J721E_WIZ_16G, + J721E_WIZ_10G, + AM64_WIZ_10G, +}; + +#define WIZ_TYPEC_DIR_DEBOUNCE_MIN 100 /* ms */ +#define WIZ_TYPEC_DIR_DEBOUNCE_MAX 1000 + +struct wiz { + struct regmap *regmap; + enum wiz_type type; + struct wiz_clk_mux_sel *clk_mux_sel; + struct wiz_clk_div_sel *clk_div_sel; + unsigned int clk_div_sel_num; + struct regmap_field *por_en; + struct regmap_field *phy_reset_n; + struct regmap_field *phy_en_refclk; + struct regmap_field *p_enable[WIZ_MAX_LANES]; + struct regmap_field *p_align[WIZ_MAX_LANES]; + struct regmap_field *p_raw_auto_start[WIZ_MAX_LANES]; + struct regmap_field *p_standard_mode[WIZ_MAX_LANES]; + struct regmap_field *p_mac_div_sel0[WIZ_MAX_LANES]; + struct regmap_field *p_mac_div_sel1[WIZ_MAX_LANES]; + struct regmap_field *p0_fullrt_div[WIZ_MAX_LANES]; + struct regmap_field *pma_cmn_refclk_int_mode; + struct regmap_field *pma_cmn_refclk_mode; + struct regmap_field 
*pma_cmn_refclk_dig_div; + struct regmap_field *pma_cmn_refclk1_dig_div; + struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G]; + struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS]; + + struct udevice *dev; + u32 num_lanes; + struct gpio_desc *gpio_typec_dir; + u32 lane_phy_type[WIZ_MAX_LANES]; + struct clk *input_clks[WIZ_MAX_INPUT_CLOCKS]; + unsigned int id; +}; + +struct wiz_div_clk { + struct clk parent_clk; + struct wiz *wiz; +}; + +struct wiz_mux_clk { + struct clk parent_clks[4]; + struct wiz *wiz; +}; + +struct wiz_clk { + struct wiz *wiz; +}; + +struct wiz_reset { + struct wiz *wiz; +}; + +static ulong wiz_div_clk_get_rate(struct clk *clk) +{ + struct udevice *dev = clk->dev; + struct wiz_div_clk *priv = dev_get_priv(dev); + struct wiz_clk_div_sel *data = dev_get_plat(dev); + struct wiz *wiz = priv->wiz; + ulong parent_rate = clk_get_rate(&priv->parent_clk); + u32 val; + + regmap_field_read(wiz->div_sel_field[data->div_sel], &val); + + return parent_rate >> val; +} + +static ulong wiz_div_clk_set_rate(struct clk *clk, ulong rate) +{ + struct udevice *dev = clk->dev; + struct wiz_div_clk *priv = dev_get_priv(dev); + struct wiz_clk_div_sel *data = dev_get_plat(dev); + struct wiz *wiz = priv->wiz; + ulong parent_rate = clk_get_rate(&priv->parent_clk); + u32 div = parent_rate / rate; + + div = __ffs(div); + regmap_field_write(wiz->div_sel_field[data->div_sel], div); + + return parent_rate >> div; +} + +const struct clk_ops wiz_div_clk_ops = { + .get_rate = wiz_div_clk_get_rate, + .set_rate = wiz_div_clk_set_rate, +}; + +int wiz_div_clk_probe(struct udevice *dev) +{ + struct wiz_div_clk *priv = dev_get_priv(dev); + struct clk parent_clk; + int rc; + + rc = clk_get_by_index(dev, 0, &parent_clk); + if (rc) { + dev_err(dev, "unable to get parent clock. 
ret %d\n", rc); + return rc; + } + priv->parent_clk = parent_clk; + priv->wiz = dev_get_priv(dev->parent); + return 0; +} + +U_BOOT_DRIVER(wiz_div_clk) = { + .name = "wiz_div_clk", + .id = UCLASS_CLK, + .priv_auto = sizeof(struct wiz_div_clk), + .ops = &wiz_div_clk_ops, + .probe = wiz_div_clk_probe, +}; + +static int wiz_clk_mux_set_parent(struct clk *clk, struct clk *parent) +{ + struct udevice *dev = clk->dev; + struct wiz_mux_clk *priv = dev_get_priv(dev); + struct wiz_clk_mux_sel *data = dev_get_plat(dev); + struct wiz *wiz = priv->wiz; + int i; + + for (i = 0; i < ARRAY_SIZE(priv->parent_clks); i++) + if (parent->dev == priv->parent_clks[i].dev) + break; + + if (i == ARRAY_SIZE(priv->parent_clks)) + return -EINVAL; + + regmap_field_write(wiz->mux_sel_field[data->mux_sel], data->table[i]); + return 0; +} + +static int wiz_clk_xlate(struct clk *clk, struct ofnode_phandle_args *args) +{ + struct udevice *dev = clk->dev; + struct wiz_mux_clk *priv = dev_get_priv(dev); + struct wiz *wiz = priv->wiz; + + clk->id = wiz->id; + + return 0; +} + +static const struct clk_ops wiz_clk_mux_ops = { + .set_parent = wiz_clk_mux_set_parent, + .of_xlate = wiz_clk_xlate, +}; + +int wiz_mux_clk_probe(struct udevice *dev) +{ + struct wiz_mux_clk *priv = dev_get_priv(dev); + int rc; + int i; + + for (i = 0; i < ARRAY_SIZE(priv->parent_clks); i++) { + rc = clk_get_by_index(dev, i, &priv->parent_clks[i]); + if (rc) + priv->parent_clks[i].dev = NULL; + } + priv->wiz = dev_get_priv(dev->parent); + return 0; +} + +U_BOOT_DRIVER(wiz_mux_clk) = { + .name = "wiz_mux_clk", + .id = UCLASS_CLK, + .priv_auto = sizeof(struct wiz_mux_clk), + .ops = &wiz_clk_mux_ops, + .probe = wiz_mux_clk_probe, +}; + +static int wiz_clk_set_parent(struct clk *clk, struct clk *parent) +{ + struct udevice *dev = clk->dev; + struct wiz_clk *priv = dev_get_priv(dev); + const struct wiz_clk_mux_sel *mux_sel; + struct wiz *wiz = priv->wiz; + int num_parents; + int i, j, id; + + id = clk->id >> 10; + + /* set_parent is applicable only for MUX clocks */ + if (id > TI_WIZ_REFCLK_DIG) + return 0; + + for (i = 0; i < WIZ_MAX_INPUT_CLOCKS; i++) + if (wiz->input_clks[i]->dev == parent->dev) + break; + + if (i == WIZ_MAX_INPUT_CLOCKS) + return -EINVAL; + + mux_sel = &wiz->clk_mux_sel[id]; + num_parents = mux_sel->num_parents; + for (j = 0; j < num_parents; j++) + if (mux_sel->parents[j] == i) + break; + + if (j == num_parents) + return -EINVAL; + + regmap_field_write(wiz->mux_sel_field[id], mux_sel->table[j]); + + return 0; +} + +static int wiz_clk_of_xlate(struct clk *clk, struct ofnode_phandle_args *args) +{ + struct udevice *dev = clk->dev; + struct wiz_clk *priv = dev_get_priv(dev); + struct wiz *wiz = priv->wiz; + + clk->id = args->args[0] << 10 | wiz->id; + + return 0; +} + +static const struct clk_ops wiz_clk_ops = { + .set_parent = wiz_clk_set_parent, + .of_xlate = wiz_clk_of_xlate, +}; + +int wiz_clk_probe(struct udevice *dev) +{ + struct wiz_clk *priv = dev_get_priv(dev); + + priv->wiz = dev_get_priv(dev->parent); + + return 0; +} + +U_BOOT_DRIVER(wiz_clk) = { + .name = "wiz_clk", + .id = UCLASS_CLK, + .priv_auto = sizeof(struct wiz_clk), + .ops = &wiz_clk_ops, + .probe = wiz_clk_probe, +}; + +static int wiz_reset_request(struct reset_ctl *reset_ctl) +{ + return 0; +} + +static int wiz_reset_free(struct reset_ctl *reset_ctl) +{ + return 0; +} + +static int wiz_reset_assert(struct reset_ctl *reset_ctl) +{ + struct wiz_reset *priv = dev_get_priv(reset_ctl->dev); + struct wiz *wiz = priv->wiz; + int ret; + int id = reset_ctl->id; + + if (id == 
0) { + ret = regmap_field_write(wiz->phy_reset_n, false); + return ret; + } + + ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_DISABLE); + return ret; +} + +static int wiz_phy_fullrt_div(struct wiz *wiz, int lane) +{ + if (wiz->type != AM64_WIZ_10G) + return 0; + + if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE) + return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1); + + return 0; +} + +static int wiz_reset_deassert(struct reset_ctl *reset_ctl) +{ + struct wiz_reset *priv = dev_get_priv(reset_ctl->dev); + struct wiz *wiz = priv->wiz; + int ret; + int id = reset_ctl->id; + + ret = wiz_phy_fullrt_div(wiz, id - 1); + if (ret) + return ret; + + /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */ + if (id == 0 && wiz->gpio_typec_dir) { + if (dm_gpio_get_value(wiz->gpio_typec_dir)) { + regmap_update_bits(wiz->regmap, WIZ_SERDES_TYPEC, + WIZ_SERDES_TYPEC_LN10_SWAP, + WIZ_SERDES_TYPEC_LN10_SWAP); + } else { + regmap_update_bits(wiz->regmap, WIZ_SERDES_TYPEC, + WIZ_SERDES_TYPEC_LN10_SWAP, 0); + } + } + + if (id == 0) { + ret = regmap_field_write(wiz->phy_reset_n, true); + return ret; + } + + if (wiz->lane_phy_type[id - 1] == PHY_TYPE_PCIE) + ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE); + else + ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_FORCE); + + return ret; +} + +static struct reset_ops wiz_reset_ops = { + .request = wiz_reset_request, + .rfree = wiz_reset_free, + .rst_assert = wiz_reset_assert, + .rst_deassert = wiz_reset_deassert, +}; + +int wiz_reset_probe(struct udevice *dev) +{ + struct wiz_reset *priv = dev_get_priv(dev); + + priv->wiz = dev_get_priv(dev->parent); + + return 0; +} + +U_BOOT_DRIVER(wiz_reset) = { + .name = "wiz-reset", + .id = UCLASS_RESET, + .probe = wiz_reset_probe, + .ops = &wiz_reset_ops, + .flags = DM_FLAG_LEAVE_PD_ON, +}; + +static int wiz_reset(struct wiz *wiz) +{ + int ret; + + ret = regmap_field_write(wiz->por_en, 0x1); + if (ret) + return ret; + + mdelay(1); + + ret = regmap_field_write(wiz->por_en, 0x0); + if (ret) + return ret; + + return 0; +} + +static int wiz_p_mac_div_sel(struct wiz *wiz) +{ + u32 num_lanes = wiz->num_lanes; + int ret; + int i; + + for (i = 0; i < num_lanes; i++) { + if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) { + ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1); + if (ret) + return ret; + + ret = regmap_field_write(wiz->p_mac_div_sel1[i], 2); + if (ret) + return ret; + } + } + + return 0; +} + +static int wiz_mode_select(struct wiz *wiz) +{ + u32 num_lanes = wiz->num_lanes; + int ret; + int i; + + for (i = 0; i < num_lanes; i++) { + if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) { + ret = regmap_field_write(wiz->p_standard_mode[i], + LANE_MODE_GEN2); + if (ret) + return ret; + } + } + + return 0; +} + +static int wiz_init_raw_interface(struct wiz *wiz, bool enable) +{ + u32 num_lanes = wiz->num_lanes; + int i; + int ret; + + for (i = 0; i < num_lanes; i++) { + ret = regmap_field_write(wiz->p_align[i], enable); + if (ret) + return ret; + + ret = regmap_field_write(wiz->p_raw_auto_start[i], enable); + if (ret) + return ret; + } + + return 0; +} + +static int wiz_init(struct wiz *wiz) +{ + struct udevice *dev = wiz->dev; + int ret; + + ret = wiz_reset(wiz); + if (ret) { + dev_err(dev, "WIZ reset failed\n"); + return ret; + } + + ret = wiz_mode_select(wiz); + if (ret) { + dev_err(dev, "WIZ mode select failed\n"); + return ret; + } + + ret = wiz_p_mac_div_sel(wiz); + if (ret) { + dev_err(dev, "Configuring P0 MAC DIV SEL failed\n"); + return ret; + } + + ret = 
wiz_init_raw_interface(wiz, true); + if (ret) { + dev_err(dev, "WIZ interface initialization failed\n"); + return ret; + } + + return 0; +} + +static int wiz_regfield_init(struct wiz *wiz) +{ + struct regmap *regmap = wiz->regmap; + int num_lanes = wiz->num_lanes; + struct udevice *dev = wiz->dev; + enum wiz_type type; + int i; + + wiz->por_en = devm_regmap_field_alloc(dev, regmap, por_en); + if (IS_ERR(wiz->por_en)) { + dev_err(dev, "POR_EN reg field init failed\n"); + return PTR_ERR(wiz->por_en); + } + + wiz->phy_reset_n = devm_regmap_field_alloc(dev, regmap, + phy_reset_n); + if (IS_ERR(wiz->phy_reset_n)) { + dev_err(dev, "PHY_RESET_N reg field init failed\n"); + return PTR_ERR(wiz->phy_reset_n); + } + + wiz->pma_cmn_refclk_int_mode = + devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_int_mode); + if (IS_ERR(wiz->pma_cmn_refclk_int_mode)) { + dev_err(dev, "PMA_CMN_REFCLK_INT_MODE reg field init failed\n"); + return PTR_ERR(wiz->pma_cmn_refclk_int_mode); + } + + wiz->pma_cmn_refclk_mode = + devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_mode); + if (IS_ERR(wiz->pma_cmn_refclk_mode)) { + dev_err(dev, "PMA_CMN_REFCLK_MODE reg field init failed\n"); + return PTR_ERR(wiz->pma_cmn_refclk_mode); + } + + wiz->div_sel_field[CMN_REFCLK] = + devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk_dig_div); + if (IS_ERR(wiz->div_sel_field[CMN_REFCLK])) { + dev_err(dev, "PMA_CMN_REFCLK_DIG_DIV reg field init failed\n"); + return PTR_ERR(wiz->div_sel_field[CMN_REFCLK]); + } + + wiz->div_sel_field[CMN_REFCLK1] = + devm_regmap_field_alloc(dev, regmap, pma_cmn_refclk1_dig_div); + if (IS_ERR(wiz->div_sel_field[CMN_REFCLK1])) { + dev_err(dev, "PMA_CMN_REFCLK1_DIG_DIV reg field init failed\n"); + return PTR_ERR(wiz->div_sel_field[CMN_REFCLK1]); + } + + wiz->mux_sel_field[PLL0_REFCLK] = + devm_regmap_field_alloc(dev, regmap, pll0_refclk_mux_sel); + if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) { + dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n"); + return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]); + } + + wiz->mux_sel_field[PLL1_REFCLK] = + devm_regmap_field_alloc(dev, regmap, pll1_refclk_mux_sel); + if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) { + dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n"); + return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]); + } + + type = dev_get_driver_data(dev); + if (type == J721E_WIZ_10G || type == AM64_WIZ_10G) + wiz->mux_sel_field[REFCLK_DIG] = + devm_regmap_field_alloc(dev, regmap, + refclk_dig_sel_10g); + else + wiz->mux_sel_field[REFCLK_DIG] = + devm_regmap_field_alloc(dev, regmap, + refclk_dig_sel_16g); + if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) { + dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n"); + return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]); + } + + for (i = 0; i < num_lanes; i++) { + wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap, + p_enable[i]); + if (IS_ERR(wiz->p_enable[i])) { + dev_err(dev, "P%d_ENABLE reg field init failed\n", i); + return PTR_ERR(wiz->p_enable[i]); + } + + wiz->p_align[i] = devm_regmap_field_alloc(dev, regmap, + p_align[i]); + if (IS_ERR(wiz->p_align[i])) { + dev_err(dev, "P%d_ALIGN reg field init failed\n", i); + return PTR_ERR(wiz->p_align[i]); + } + + wiz->p_raw_auto_start[i] = + devm_regmap_field_alloc(dev, regmap, p_raw_auto_start[i]); + if (IS_ERR(wiz->p_raw_auto_start[i])) { + dev_err(dev, "P%d_RAW_AUTO_START reg field init fail\n", + i); + return PTR_ERR(wiz->p_raw_auto_start[i]); + } + + wiz->p_standard_mode[i] = + devm_regmap_field_alloc(dev, regmap, p_standard_mode[i]); + if 
(IS_ERR(wiz->p_standard_mode[i])) { + dev_err(dev, "P%d_STANDARD_MODE reg field init fail\n", + i); + return PTR_ERR(wiz->p_standard_mode[i]); + } + + wiz->p0_fullrt_div[i] = devm_regmap_field_alloc(dev, regmap, p0_fullrt_div[i]); + if (IS_ERR(wiz->p0_fullrt_div[i])) { + dev_err(dev, "P%d_FULLRT_DIV reg field init failed\n", i); + return PTR_ERR(wiz->p0_fullrt_div[i]); + } + + wiz->p_mac_div_sel0[i] = + devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]); + if (IS_ERR(wiz->p_mac_div_sel0[i])) { + dev_err(dev, "P%d_MAC_DIV_SEL0 reg field init fail\n", + i); + return PTR_ERR(wiz->p_mac_div_sel0[i]); + } + + wiz->p_mac_div_sel1[i] = + devm_regmap_field_alloc(dev, regmap, p_mac_div_sel1[i]); + if (IS_ERR(wiz->p_mac_div_sel1[i])) { + dev_err(dev, "P%d_MAC_DIV_SEL1 reg field init fail\n", + i); + return PTR_ERR(wiz->p_mac_div_sel1[i]); + } + } + + return 0; +} + +static int wiz_clock_init(struct wiz *wiz) +{ + struct udevice *dev = wiz->dev; + unsigned long rate; + struct clk *clk; + int ret; + + clk = devm_clk_get(dev, "core_ref_clk"); + if (IS_ERR(clk)) { + dev_err(dev, "core_ref_clk clock not found\n"); + ret = PTR_ERR(clk); + return ret; + } + wiz->input_clks[WIZ_CORE_REFCLK] = clk; + /* Initialize CORE_REFCLK1 to the same clock reference to maintain old DT compatibility */ + wiz->input_clks[WIZ_CORE_REFCLK1] = clk; + + rate = clk_get_rate(clk); + if (rate >= 100000000) + regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1); + else + regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3); + + clk = devm_clk_get(dev, "ext_ref_clk"); + if (IS_ERR(clk)) { + dev_err(dev, "ext_ref_clk clock not found\n"); + ret = PTR_ERR(clk); + return ret; + } + + wiz->input_clks[WIZ_EXT_REFCLK] = clk; + /* Initialize EXT_REFCLK1 to the same clock reference to maintain old DT compatibility */ + wiz->input_clks[WIZ_EXT_REFCLK1] = clk; + + rate = clk_get_rate(clk); + if (rate >= 100000000) + regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0); + else + regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2); + + return 0; +} + +static ofnode get_child_by_name(struct udevice *dev, const char *name) +{ + int l = strlen(name); + ofnode node = dev_read_first_subnode(dev); + + while (ofnode_valid(node)) { + const char *child_name = ofnode_get_name(node); + + if (!strncmp(child_name, name, l)) { + if (child_name[l] == '\0' || child_name[l] == '@') + return node; + } + node = dev_read_next_subnode(node); + } + return node; +} + +static int j721e_wiz_bind_clocks(struct wiz *wiz) +{ + struct udevice *dev = wiz->dev; + struct driver *wiz_clk_drv; + int i, rc; + + wiz_clk_drv = lists_driver_lookup_name("wiz_clk"); + if (!wiz_clk_drv) { + dev_err(dev, "Cannot find driver 'wiz_clk'\n"); + return -ENOENT; + } + + for (i = 0; i < WIZ_DIV_NUM_CLOCKS_10G; i++) { + rc = device_bind(dev, wiz_clk_drv, clk_div_sel[i].node_name, + &clk_div_sel[i], dev_ofnode(dev), NULL); + if (rc) { + dev_err(dev, "cannot bind driver for clock %s\n", + clk_div_sel[i].node_name); + } + } + + for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) { + rc = device_bind(dev, wiz_clk_drv, clk_mux_sel_10g[i].node_name, + &clk_mux_sel_10g[i], dev_ofnode(dev), NULL); + if (rc) { + dev_err(dev, "cannot bind driver for clock %s\n", + clk_mux_sel_10g[i].node_name); + } + } + + return 0; +} + +static int j721e_wiz_bind_of_clocks(struct wiz *wiz) +{ + struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel; + struct udevice *dev = wiz->dev; + enum wiz_type type = wiz->type; + struct driver *div_clk_drv; + struct driver *mux_clk_drv; + ofnode node; + int i, rc; + + if (type == 
AM64_WIZ_10G) + return j721e_wiz_bind_clocks(wiz); + + div_clk_drv = lists_driver_lookup_name("wiz_div_clk"); + if (!div_clk_drv) { + dev_err(dev, "Cannot find driver 'wiz_div_clk'\n"); + return -ENOENT; + } + + mux_clk_drv = lists_driver_lookup_name("wiz_mux_clk"); + if (!mux_clk_drv) { + dev_err(dev, "Cannot find driver 'wiz_mux_clk'\n"); + return -ENOENT; + } + + for (i = 0; i < wiz->clk_div_sel_num; i++) { + node = get_child_by_name(dev, clk_div_sel[i].node_name); + if (!ofnode_valid(node)) { + dev_err(dev, "cannot find node for clock %s\n", + clk_div_sel[i].node_name); + continue; + } + rc = device_bind(dev, div_clk_drv, clk_div_sel[i].node_name, + &clk_div_sel[i], node, NULL); + if (rc) { + dev_err(dev, "cannot bind driver for clock %s\n", + clk_div_sel[i].node_name); + } + } + + for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) { + node = get_child_by_name(dev, clk_mux_sel[i].node_name); + if (!ofnode_valid(node)) { + dev_err(dev, "cannot find node for clock %s\n", + clk_mux_sel[i].node_name); + continue; + } + rc = device_bind(dev, mux_clk_drv, clk_mux_sel[i].node_name, + &clk_mux_sel[i], node, NULL); + if (rc) { + dev_err(dev, "cannot bind driver for clock %s\n", + clk_mux_sel[i].node_name); + } + } + + return 0; +} + +static int j721e_wiz_bind_reset(struct udevice *dev) +{ + int rc; + struct driver *drv; + + drv = lists_driver_lookup_name("wiz-reset"); + if (!drv) { + dev_err(dev, "Cannot find driver 'wiz-reset'\n"); + return -ENOENT; + } + + rc = device_bind(dev, drv, "wiz-reset", NULL, dev_ofnode(dev), NULL); + if (rc) { + dev_err(dev, "cannot bind driver for wiz-reset\n"); + return rc; + } + + return 0; +} + +static int j721e_wiz_bind(struct udevice *dev) +{ + dm_scan_fdt_dev(dev); + + return 0; +} + +static int wiz_get_lane_phy_types(struct udevice *dev, struct wiz *wiz) +{ + ofnode child, serdes; + + serdes = get_child_by_name(dev, "serdes"); + if (!ofnode_valid(serdes)) { + dev_err(dev, "%s: Failed to get \"serdes\" node\n", __func__); + return -EINVAL; + } + + ofnode_for_each_subnode(child, serdes) { + u32 reg, num_lanes = 1, phy_type = PHY_NONE; + int ret, i; + + ret = ofnode_read_u32(child, "reg", &reg); + if (ret) { + dev_err(dev, "%s: Failed to read \"reg\": %d\n", + __func__, ret); + return ret; + } + ofnode_read_u32(child, "cdns,num-lanes", &num_lanes); + ofnode_read_u32(child, "cdns,phy-type", &phy_type); + + dev_dbg(dev, "%s: Lanes %u-%u have phy-type %u\n", __func__, + reg, reg + num_lanes - 1, phy_type); + + for (i = reg; i < reg + num_lanes; i++) + wiz->lane_phy_type[i] = phy_type; + } + + return 0; +} + +static int j721e_wiz_probe(struct udevice *dev) +{ + struct wiz *wiz = dev_get_priv(dev); + struct ofnode_phandle_args args; + unsigned int val; + int rc, i; + ofnode node; + struct regmap *regmap; + u32 num_lanes; + + node = get_child_by_name(dev, "serdes"); + + if (!ofnode_valid(node)) { + dev_err(dev, "Failed to get SERDES child DT node\n"); + return -ENODEV; + } + + rc = regmap_init_mem(node, &regmap); + if (rc) { + dev_err(dev, "Failed to get memory resource\n"); + return rc; + } + rc = dev_read_u32(dev, "num-lanes", &num_lanes); + if (rc) { + dev_err(dev, "Failed to read num-lanes property\n"); + goto err_addr_to_resource; + } + + if (num_lanes > WIZ_MAX_LANES) { + dev_err(dev, "Cannot support %d lanes\n", num_lanes); + rc = -EINVAL; + goto err_addr_to_resource; + } + + wiz->gpio_typec_dir = devm_gpiod_get_optional(dev, "typec-dir", + GPIOD_IS_IN); + if (IS_ERR(wiz->gpio_typec_dir)) { + rc = PTR_ERR(wiz->gpio_typec_dir); + dev_err(dev, "Failed to request typec-dir gpio: %d\n",
rc); + goto err_addr_to_resource; + } + + rc = dev_read_phandle_with_args(dev, "power-domains", "#power-domain-cells", 0, 0, &args); + if (rc) { + dev_err(dev, "Failed to get power domain: %d\n", rc); + goto err_addr_to_resource; + } + + wiz->id = args.args[0]; + wiz->regmap = regmap; + wiz->num_lanes = num_lanes; + wiz->dev = dev; + wiz->clk_div_sel = clk_div_sel; + wiz->type = dev_get_driver_data(dev); + if (wiz->type == J721E_WIZ_10G || wiz->type == AM64_WIZ_10G) { + wiz->clk_mux_sel = clk_mux_sel_10g; + wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G; + } else { + wiz->clk_mux_sel = clk_mux_sel_16g; + wiz->clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_16G; + } + + rc = wiz_get_lane_phy_types(dev, wiz); + if (rc) { + dev_err(dev, "Failed to get lane PHY types\n"); + goto err_addr_to_resource; + } + + rc = wiz_regfield_init(wiz); + if (rc) { + dev_err(dev, "Failed to initialize regfields\n"); + goto err_addr_to_resource; + } + + for (i = 0; i < wiz->num_lanes; i++) { + regmap_field_read(wiz->p_enable[i], &val); + if (val & (P_ENABLE | P_ENABLE_FORCE)) { + dev_err(dev, "SERDES already configured\n"); + rc = -EBUSY; + goto err_addr_to_resource; + } + } + + rc = j721e_wiz_bind_of_clocks(wiz); + if (rc) { + dev_err(dev, "Failed to bind clocks\n"); + goto err_addr_to_resource; + } + + rc = j721e_wiz_bind_reset(dev); + if (rc) { + dev_err(dev, "Failed to bind reset\n"); + goto err_addr_to_resource; + } + + rc = wiz_clock_init(wiz); + if (rc) { + dev_warn(dev, "Failed to initialize clocks\n"); + goto err_addr_to_resource; + } + + rc = wiz_init(wiz); + if (rc) { + dev_err(dev, "WIZ initialization failed\n"); + goto err_addr_to_resource; + } + + return 0; + +err_addr_to_resource: + free(regmap); + + return rc; +} + +static int j721e_wiz_remove(struct udevice *dev) +{ + struct wiz *wiz = dev_get_priv(dev); + + if (wiz->regmap) + free(wiz->regmap); + + return 0; +} + +static const struct udevice_id j721e_wiz_ids[] = { + { + .compatible = "ti,j721e-wiz-16g", .data = J721E_WIZ_16G, + }, + { + .compatible = "ti,j721e-wiz-10g", .data = J721E_WIZ_10G, + }, + { + .compatible = "ti,am64-wiz-10g", .data = AM64_WIZ_10G, + }, + {} +}; + +U_BOOT_DRIVER(phy_j721e_wiz) = { + .name = "phy-j721e-wiz", + .id = UCLASS_NOP, + .of_match = j721e_wiz_ids, + .bind = j721e_wiz_bind, + .probe = j721e_wiz_probe, + .remove = j721e_wiz_remove, + .priv_auto = sizeof(struct wiz), + .flags = DM_FLAG_LEAVE_PD_ON, +};
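
Note on the clock xlate/set_parent pair in this file: wiz_clk_of_xlate() packs the DT clock index and the WIZ instance ID into a single clk->id (index << 10 | wiz->id), and wiz_clk_set_parent() recovers the index with a right shift, re-parenting only the mux clocks (index up to TI_WIZ_REFCLK_DIG). The stand-alone C sketch below illustrates that packing; the TI_WIZ_* values are assumed to match dt-bindings/phy/phy-ti.h and the sample instance ID is purely illustrative, not taken from this driver.

/*
 * Minimal sketch of the clk->id packing used by wiz_clk_of_xlate() and
 * wiz_clk_set_parent(). The 10-bit shift comes from the driver above;
 * the TI_WIZ_* indices are assumed to follow dt-bindings/phy/phy-ti.h
 * and the instance ID below is hypothetical.
 */
#include <stdio.h>

#define TI_WIZ_PLL0_REFCLK	0
#define TI_WIZ_PLL1_REFCLK	1
#define TI_WIZ_REFCLK_DIG	2

int main(void)
{
	unsigned long wiz_id = 3;			/* hypothetical WIZ instance ID */
	unsigned long dt_index = TI_WIZ_REFCLK_DIG;	/* clock cell from the consumer phandle */

	/* wiz_clk_of_xlate(): clk->id = args->args[0] << 10 | wiz->id */
	unsigned long clk_id = dt_index << 10 | wiz_id;

	/* wiz_clk_set_parent(): id = clk->id >> 10; only mux clocks are re-parented */
	unsigned long mux_id = clk_id >> 10;

	printf("clk->id = 0x%lx, clock index = %lu, mux clock: %s\n",
	       clk_id, mux_id,
	       mux_id <= TI_WIZ_REFCLK_DIG ? "yes" : "no");

	return 0;
}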