Diffstat (limited to 'drivers/net')
28 files changed, 3183 insertions, 124 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index b4ff033..576cd2d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -48,6 +48,7 @@ config DM_DSA bool "Enable Driver Model for DSA switches" depends on DM_MDIO depends on PHY_FIXED + depends on !NET_LWIP help Enable driver model for DSA switches @@ -96,7 +97,7 @@ config DSA_SANDBOX menuconfig NETDEVICES bool "Network device support" - depends on NET + depends on NET || NET_LWIP select DM_ETH help You must select Y to enable any network device support @@ -243,6 +244,13 @@ config DWC_ETH_QOS_IMX The Synopsys Designware Ethernet QOS IP block with the specific configuration used in IMX soc. +config DWC_ETH_QOS_INTEL + bool "Synopsys DWC Ethernet QOS device support for Intel" + depends on DWC_ETH_QOS + help + The Synopsys Designware Ethernet QOS IP block with the specific + configuration used in the Intel Elkhart-Lake soc. + config DWC_ETH_QOS_ROCKCHIP bool "Synopsys DWC Ethernet QOS device support for Rockchip SoCs" depends on DWC_ETH_QOS @@ -325,8 +333,17 @@ config EEPRO100 This driver supports Intel(R) PRO/100 82557/82559/82559ER fast ethernet family of adapters. +config ESSEDMA + bool "Qualcomm ESS Edma support" + depends on DM_ETH && ARCH_IPQ40XX + select PHYLIB + help + This driver supports ethernet DMA adapter found in + Qualcomm IPQ40xx series SoC-s. + config ETH_SANDBOX depends on SANDBOX + depends on NET default y bool "Sandbox: Mocked Ethernet driver" help @@ -335,8 +352,20 @@ config ETH_SANDBOX This driver is particularly useful in the test/dm/eth.c tests +config ETH_SANDBOX_LWIP + depends on SANDBOX + depends on NET_LWIP + default y + bool "Sandbox: Mocked Ethernet driver (for NET_LWIP)" + help + This driver is meant as a replacement for ETH_SANDBOX when + the network stack is NET_LWIP rather than NET. It currently + does nothing, i.e. it drops the sent packets and never receives + data. + config ETH_SANDBOX_RAW depends on SANDBOX + depends on NET default y bool "Sandbox: Bridge to Linux Raw Sockets" help @@ -453,6 +482,7 @@ config FTMAC100 config FTGMAC100 bool "Ftgmac100 Ethernet Support" select PHYLIB + depends on NET help This driver supports the Faraday's FTGMAC100 Gigabit SoC Ethernet controller that can be found on Aspeed SoCs (which @@ -495,11 +525,11 @@ config KS8851_MLL The Microchip KS8851 parallel bus external ethernet interface chip. config KSZ9477 - bool "Microchip KSZ9477 I2C controller driver" - depends on DM_DSA && DM_I2C + bool "Microchip KSZ9477 controller driver" + depends on DM_DSA && (DM_I2C || DM_SPI) help This driver implements a DSA switch driver for the KSZ9477 family - of GbE switches using the I2C interface. + of GbE switches using the I2C or SPI interface. config LITEETH bool "LiteX LiteEth Ethernet MAC" @@ -732,6 +762,38 @@ config TULIP help This driver supports DEC DC2114x Fast ethernet chips. +config TULIP_SUPPORT_NON_PCI + bool "No PCI controller" + depends on TULIP + default n + help + Say Y to this and you can run this driver on platforms that do not + have PCI controllers. + +config TULIP_IGNORE_TX_NO_CARRIER + bool "Ignore tx no carrier error" + depends on TULIP + default n + help + Some IP cores of dc2114x or its variants do not comply so well with + the behaviors described by the official document. A packet could be + sent successfully but reported with No Carrier error. Latest drivers + of this IP core do not detect this error anymore. Say Y to this could + disable handling of this error. 
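Note on how the new TULIP_IGNORE_TX_NO_CARRIER option is consumed: it is a compile-time switch, so the driver tests it with CONFIG_IS_ENABLED() rather than reading anything at run time. A minimal sketch of the pattern, mirroring the dc21x4x_send_common() hunk further down in this diff (the surrounding function is omitted):

	/* Sketch only: with the option enabled, a TD_ES (error summary) status
	 * is still cleared, but it no longer aborts the transmit path.
	 */
	if (le32_to_cpu(priv->tx_ring[priv->tx_new].status) & TD_ES) {
		priv->tx_ring[priv->tx_new].status = 0x0;
#if !CONFIG_IS_ENABLED(TULIP_IGNORE_TX_NO_CARRIER)
		goto done;
#endif
	}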
+ +config TULIP_MULTIPLE_TX_DESC + bool "Use multiple tx descriptors" + depends on TULIP + default n + help + Some IP cores of dc2114x or its variants do not comply so well with + the behaviors described by the official document. Originally this + driver uses only one tx descriptor and organizes it as a ring buffer, + which would lead to a problem that one packet would be sent twice. + Say Y to this could prevent this bug if you are using IP cores with + this issue, by using multiple tx descriptors and organizing them as + a real well-defined ring buffer. + config XILINX_AXIEMAC select PHYLIB select MII @@ -875,7 +937,7 @@ config FEC2_PHY_NORXERR config SYS_DPAA_QBMAN bool "Device tree fixup for QBMan on freescale SOCs" - depends on (ARM || PPC) && !SPL_BUILD + depends on ARM || PPC default y if ARCH_B4860 || \ ARCH_B4420 || \ ARCH_P1023 || \ diff --git a/drivers/net/Makefile b/drivers/net/Makefile index dce7168..f5ab1f5 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_DRIVER_DM9000) += dm9000x.o obj-$(CONFIG_DSA_SANDBOX) += dsa_sandbox.o obj-$(CONFIG_DWC_ETH_QOS) += dwc_eth_qos.o obj-$(CONFIG_DWC_ETH_QOS_IMX) += dwc_eth_qos_imx.o +obj-$(CONFIG_DWC_ETH_QOS_INTEL) += dwc_eth_qos_intel.o obj-$(CONFIG_DWC_ETH_QOS_ROCKCHIP) += dwc_eth_qos_rockchip.o obj-$(CONFIG_DWC_ETH_QOS_QCOM) += dwc_eth_qos_qcom.o obj-$(CONFIG_DWC_ETH_XGMAC) += dwc_eth_xgmac.o @@ -29,6 +30,7 @@ obj-$(CONFIG_DWC_ETH_QOS_STM32) += dwc_eth_qos_stm32.o obj-$(CONFIG_E1000) += e1000.o obj-$(CONFIG_E1000_SPI) += e1000_spi.o obj-$(CONFIG_EEPRO100) += eepro100.o +obj-$(CONFIG_ESSEDMA) += essedma.o obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_ETH_DESIGNWARE) += designware.o obj-$(CONFIG_ETH_DESIGNWARE_MESON8B) += dwmac_meson8b.o @@ -37,6 +39,7 @@ obj-$(CONFIG_ETH_DESIGNWARE_SOCFPGA) += dwmac_socfpga.o obj-$(CONFIG_ETH_SANDBOX) += sandbox.o obj-$(CONFIG_ETH_SANDBOX_RAW) += sandbox-raw-bus.o obj-$(CONFIG_ETH_SANDBOX_RAW) += sandbox-raw.o +obj-$(CONFIG_ETH_SANDBOX_LWIP) += sandbox-lwip.o obj-$(CONFIG_FEC_MXC) += fec_mxc.o obj-$(CONFIG_FMAN_ENET) += fm/ obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o diff --git a/drivers/net/aspeed_mdio.c b/drivers/net/aspeed_mdio.c index f2e4392..2e1f3cd 100644 --- a/drivers/net/aspeed_mdio.c +++ b/drivers/net/aspeed_mdio.c @@ -113,6 +113,7 @@ static int aspeed_mdio_probe(struct udevice *dev) static const struct udevice_id aspeed_mdio_ids[] = { { .compatible = "aspeed,ast2600-mdio" }, + { .compatible = "aspeed,ast2700-mdio" }, { } }; diff --git a/drivers/net/dc2114x.c b/drivers/net/dc2114x.c index ce028f4..7c0665f 100644 --- a/drivers/net/dc2114x.c +++ b/drivers/net/dc2114x.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ #include <asm/io.h> +#include <cpu_func.h> #include <dm.h> #include <malloc.h> #include <net.h> @@ -72,10 +73,20 @@ #define POLL_DEMAND 1 +#if CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) +#define phys_to_bus(dev, a) virt_to_phys((volatile const void *)(a)) +#else #define phys_to_bus(dev, a) dm_pci_phys_to_mem((dev), (a)) +#endif + +/* Number of TX descriptors */ +#if CONFIG_IS_ENABLED(TULIP_MULTIPLE_TX_DESC) +#define NUM_TX_DESC 4 +#else +#define NUM_TX_DESC 1 +#endif #define NUM_RX_DESC PKTBUFSRX -#define NUM_TX_DESC 1 /* Number of TX descriptors */ #define RX_BUFF_SZ PKTSIZE_ALIGN #define TOUT_LOOP 1000000 @@ -89,9 +100,17 @@ struct de4x5_desc { u32 next; }; +/* Assigned for network card's ring buffer: + * Some CPU might treat these memories as cached, and changes to these memories + * won't immediately be visible to each other. 
It is necessary to ensure that + * these memories between the CPU and the network card are marked as uncached. + */ +static struct de4x5_desc rx_ring[NUM_RX_DESC] __aligned(32); +static struct de4x5_desc tx_ring[NUM_TX_DESC] __aligned(32); + struct dc2114x_priv { - struct de4x5_desc rx_ring[NUM_RX_DESC] __aligned(32); - struct de4x5_desc tx_ring[NUM_TX_DESC] __aligned(32); + struct de4x5_desc *rx_ring; /* Must be uncached to CPU */ + struct de4x5_desc *tx_ring; /* Must be uncached to CPU */ int rx_new; /* RX descriptor ring pointer */ int tx_new; /* TX descriptor ring pointer */ char rx_ring_size; @@ -271,7 +290,12 @@ static int read_srom(struct dc2114x_priv *priv, u_long ioaddr, int index) static void send_setup_frame(struct dc2114x_priv *priv) { - char setup_frame[SETUP_FRAME_LEN]; + /* We are writing setup frame and these changes should be visible to the + * network card immediately. So let's directly read/write through the + * uncached window. + */ + char __setup_frame[SETUP_FRAME_LEN] __aligned(32); + char *setup_frame = (char *)map_physmem((phys_addr_t)virt_to_phys(__setup_frame), 0, MAP_NOCACHE); char *pa = &setup_frame[0]; int i; @@ -292,8 +316,13 @@ static void send_setup_frame(struct dc2114x_priv *priv) } priv->tx_ring[priv->tx_new].buf = cpu_to_le32(phys_to_bus(priv->devno, - (u32)&setup_frame[0])); + (phys_addr_t)&setup_frame[0])); +#if CONFIG_IS_ENABLED(TULIP_MULTIPLE_TX_DESC) + priv->tx_ring[priv->tx_new].des1 = cpu_to_le32(TD_SET | SETUP_FRAME_LEN); + priv->tx_ring[priv->tx_ring_size - 1].des1 |= cpu_to_le32(TD_TER); +#else priv->tx_ring[priv->tx_new].des1 = cpu_to_le32(TD_TER | TD_SET | SETUP_FRAME_LEN); +#endif priv->tx_ring[priv->tx_new].status = cpu_to_le32(T_OWN); dc2114x_outl(priv, POLL_DEMAND, DE4X5_TPD); @@ -307,7 +336,7 @@ static void send_setup_frame(struct dc2114x_priv *priv) } if (le32_to_cpu(priv->tx_ring[priv->tx_new].status) != 0x7FFFFFFF) { - printf("TX error status2 = 0x%08X\n", + debug("TX error status2 = 0x%08X\n", le32_to_cpu(priv->tx_ring[priv->tx_new].status)); } @@ -332,9 +361,17 @@ static int dc21x4x_send_common(struct dc2114x_priv *priv, void *packet, int leng goto done; } + /* Packet should be visible to the network card */ + flush_dcache_range((phys_addr_t)packet, (phys_addr_t)(packet + RX_BUFF_SZ)); + priv->tx_ring[priv->tx_new].buf = cpu_to_le32(phys_to_bus(priv->devno, - (u32)packet)); + (phys_addr_t)packet)); +#if CONFIG_IS_ENABLED(TULIP_MULTIPLE_TX_DESC) + priv->tx_ring[priv->tx_new].des1 = cpu_to_le32(TD_LS | TD_FS | length); + priv->tx_ring[priv->tx_ring_size - 1].des1 |= cpu_to_le32(TD_TER); +#else priv->tx_ring[priv->tx_new].des1 = cpu_to_le32(TD_TER | TD_LS | TD_FS | length); +#endif priv->tx_ring[priv->tx_new].status = cpu_to_le32(T_OWN); dc2114x_outl(priv, POLL_DEMAND, DE4X5_TPD); @@ -349,7 +386,9 @@ static int dc21x4x_send_common(struct dc2114x_priv *priv, void *packet, int leng if (le32_to_cpu(priv->tx_ring[priv->tx_new].status) & TD_ES) { priv->tx_ring[priv->tx_new].status = 0x0; +#if !CONFIG_IS_ENABLED(TULIP_IGNORE_TX_NO_CARRIER) goto done; +#endif } status = length; @@ -398,13 +437,22 @@ static int dc21x4x_init_common(struct dc2114x_priv *priv) return -1; } - dc2114x_outl(priv, OMR_SDP | OMR_PS | OMR_PM, DE4X5_OMR); + /* 2024-07: + * Remove the OMR_PM flag and choose 16 perfect filtering mode since in + * modern networks there're plenty of multicasts and set ORM_PM flag will + * increase the dc2114x's workload and ask the U-Boot to handle packets + * not related to itself. 
And most of the time, U-Boot does not need this + * feature. + * + * A better way: let user to decide whether to have this flag. + */ + dc2114x_outl(priv, OMR_SDP | OMR_PS, DE4X5_OMR); for (i = 0; i < NUM_RX_DESC; i++) { priv->rx_ring[i].status = cpu_to_le32(R_OWN); priv->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); priv->rx_ring[i].buf = cpu_to_le32(phys_to_bus(priv->devno, - (u32)net_rx_packets[i])); + (phys_addr_t)net_rx_packets[i])); priv->rx_ring[i].next = 0; } @@ -423,9 +471,9 @@ static int dc21x4x_init_common(struct dc2114x_priv *priv) priv->tx_ring[priv->tx_ring_size - 1].des1 |= cpu_to_le32(TD_TER); /* Tell the adapter where the TX/RX rings are located. */ - dc2114x_outl(priv, phys_to_bus(priv->devno, (u32)&priv->rx_ring), + dc2114x_outl(priv, phys_to_bus(priv->devno, (phys_addr_t)priv->rx_ring), DE4X5_RRBA); - dc2114x_outl(priv, phys_to_bus(priv->devno, (u32)&priv->tx_ring), + dc2114x_outl(priv, phys_to_bus(priv->devno, (phys_addr_t)priv->tx_ring), DE4X5_TRBA); start_de4x5(priv); @@ -461,21 +509,32 @@ static void read_hw_addr(struct dc2114x_priv *priv) } } +#if !CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) static struct pci_device_id supported[] = { { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST) }, { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142) }, { } }; +#endif static int dc2114x_start(struct udevice *dev) { - struct eth_pdata *plat = dev_get_plat(dev); struct dc2114x_priv *priv = dev_get_priv(dev); + int rval; - memcpy(priv->enetaddr, plat->enetaddr, sizeof(plat->enetaddr)); + if (!priv->enetaddr) { + rval = eth_env_get_enetaddr("ethaddr", priv->enetaddr); + if (!rval) { + printf("dc2114x: Err: please set a valid MAC address\n"); + return -EINVAL; + } + } + +#if !CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) /* Ensure we're not sleeping. 
*/ dm_pci_write_config8(dev, PCI_CFDA_PSM, WAKEUP); +#endif return dc21x4x_init_common(priv); } @@ -485,8 +544,9 @@ static void dc2114x_stop(struct udevice *dev) struct dc2114x_priv *priv = dev_get_priv(dev); dc21x4x_halt_common(priv); - +#if !CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) dm_pci_write_config8(dev, PCI_CFDA_PSM, SLEEP); +#endif } static int dc2114x_send(struct udevice *dev, void *packet, int length) @@ -515,7 +575,8 @@ static int dc2114x_recv(struct udevice *dev, int flags, uchar **packetp) if (!ret) return 0; - *packetp = net_rx_packets[priv->rx_new]; + invalidate_dcache_range((phys_addr_t)net_rx_packets[priv->rx_new], (phys_addr_t)(net_rx_packets[priv->rx_new] + RX_BUFF_SZ)); + *packetp = (uchar *)net_rx_packets[priv->rx_new]; return ret - 4; } @@ -543,7 +604,7 @@ static int dc2114x_read_rom_hwaddr(struct udevice *dev) static int dc2114x_bind(struct udevice *dev) { - static int card_number; + static int card_number = 0; char name[16]; sprintf(name, "dc2114x#%u", card_number++); @@ -555,6 +616,8 @@ static int dc2114x_probe(struct udevice *dev) { struct eth_pdata *plat = dev_get_plat(dev); struct dc2114x_priv *priv = dev_get_priv(dev); + +#if !CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) u16 command, status; u32 iobase; @@ -562,9 +625,6 @@ static int dc2114x_probe(struct udevice *dev) iobase &= ~0xf; debug("dc2114x: DEC 2114x PCI Device @0x%x\n", iobase); - - priv->devno = dev; - priv->enetaddr = plat->enetaddr; priv->iobase = (void __iomem *)dm_pci_mem_to_phys(dev, iobase); command = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; @@ -576,10 +636,29 @@ static int dc2114x_probe(struct udevice *dev) } dm_pci_write_config8(dev, PCI_LATENCY_TIMER, 0x60); +#endif + + priv->devno = dev; + priv->enetaddr = plat->enetaddr; + priv->rx_ring = (struct de4x5_desc *)map_physmem((phys_addr_t)virt_to_phys(rx_ring), 0, MAP_NOCACHE); + priv->tx_ring = (struct de4x5_desc *)map_physmem((phys_addr_t)virt_to_phys(tx_ring), 0, MAP_NOCACHE); return 0; } +#if CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) +static int dc2114x_of_to_plat(struct udevice *dev) +{ + struct eth_pdata *plat = dev_get_plat(dev); + struct dc2114x_priv *priv = dev_get_priv(dev); + + plat->iobase = (phys_addr_t)map_physmem((phys_addr_t)devfdt_get_addr(dev), 0, MAP_NOCACHE); + priv->iobase = (void *)plat->iobase; + + return 0; +} +#endif + static const struct eth_ops dc2114x_ops = { .start = dc2114x_start, .send = dc2114x_send, @@ -589,9 +668,23 @@ static const struct eth_ops dc2114x_ops = { .read_rom_hwaddr = dc2114x_read_rom_hwaddr, }; +#if CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) +static const struct udevice_id dc2114x_eth_ids[] = { + { .compatible = "dec,dmfe" }, + { .compatible = "tulip,dmfe" }, + { .compatible = "dec,dc2114x" }, + { .compatible = "tulip,dc2114x" }, + { } +}; +#endif + U_BOOT_DRIVER(eth_dc2114x) = { .name = "eth_dc2114x", .id = UCLASS_ETH, +#if CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) + .of_match = dc2114x_eth_ids, + .of_to_plat = dc2114x_of_to_plat, +#endif .bind = dc2114x_bind, .probe = dc2114x_probe, .ops = &dc2114x_ops, @@ -599,4 +692,6 @@ U_BOOT_DRIVER(eth_dc2114x) = { .plat_auto = sizeof(struct eth_pdata), }; +#if !CONFIG_IS_ENABLED(TULIP_SUPPORT_NON_PCI) U_BOOT_PCI_DEVICE(eth_dc2114x, supported); +#endif diff --git a/drivers/net/dwc_eth_qos.c b/drivers/net/dwc_eth_qos.c index 67ac86f..2279481 100644 --- a/drivers/net/dwc_eth_qos.c +++ b/drivers/net/dwc_eth_qos.c @@ -32,6 +32,7 @@ #include <clk.h> #include <cpu_func.h> #include <dm.h> +#include <dm/device_compat.h> #include <errno.h> #include <eth_phy.h> #include 
<log.h> @@ -46,10 +47,7 @@ #include <asm/cache.h> #include <asm/gpio.h> #include <asm/io.h> -#ifdef CONFIG_ARCH_IMX8M -#include <asm/arch/clock.h> -#include <asm/mach-imx/sys_proto.h> -#endif +#include <linux/bitfield.h> #include <linux/delay.h> #include <linux/printk.h> @@ -146,6 +144,25 @@ static int eqos_mdio_wait_idle(struct eqos_priv *eqos) 1000000, true); } +/* Bitmask common for mdio_read and mdio_write */ +#define EQOS_MDIO_BITFIELD(pa, rda, cr) \ + FIELD_PREP(EQOS_MAC_MDIO_ADDRESS_PA_MASK, pa) | \ + FIELD_PREP(EQOS_MAC_MDIO_ADDRESS_RDA_MASK, rda) | \ + FIELD_PREP(EQOS_MAC_MDIO_ADDRESS_CR_MASK, cr) | \ + EQOS_MAC_MDIO_ADDRESS_GB + +static u32 eqos_mdio_bitfield(struct eqos_priv *eqos, int addr, int devad, int reg) +{ + int cr = eqos->config->config_mac_mdio; + bool c22 = devad == MDIO_DEVAD_NONE ? true : false; + + if (c22) + return EQOS_MDIO_BITFIELD(addr, reg, cr); + else + return EQOS_MDIO_BITFIELD(addr, devad, cr) | + EQOS_MAC_MDIO_ADDRESS_C45E; +} + static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, int mdio_reg) { @@ -163,15 +180,17 @@ static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, } val = readl(&eqos->mac_regs->mdio_address); - val &= EQOS_MAC_MDIO_ADDRESS_SKAP | - EQOS_MAC_MDIO_ADDRESS_C45E; - val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | - (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | - (eqos->config->config_mac_mdio << - EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | - (EQOS_MAC_MDIO_ADDRESS_GOC_READ << - EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | - EQOS_MAC_MDIO_ADDRESS_GB; + val &= EQOS_MAC_MDIO_ADDRESS_SKAP; + + val |= eqos_mdio_bitfield(eqos, mdio_addr, mdio_devad, mdio_reg) | + FIELD_PREP(EQOS_MAC_MDIO_ADDRESS_GOC_MASK, + EQOS_MAC_MDIO_ADDRESS_GOC_READ); + + if (val & EQOS_MAC_MDIO_ADDRESS_C45E) { + writel(FIELD_PREP(EQOS_MAC_MDIO_DATA_RA_MASK, mdio_reg), + &eqos->mac_regs->mdio_data); + } + writel(val, &eqos->mac_regs->mdio_address); udelay(eqos->config->mdio_wait); @@ -194,7 +213,8 @@ static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, int mdio_reg, u16 mdio_val) { struct eqos_priv *eqos = bus->priv; - u32 val; + u32 v_addr; + u32 v_data; int ret; debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev, @@ -206,20 +226,19 @@ static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, return ret; } - writel(mdio_val, &eqos->mac_regs->mdio_data); + v_addr = readl(&eqos->mac_regs->mdio_address); + v_addr &= EQOS_MAC_MDIO_ADDRESS_SKAP; - val = readl(&eqos->mac_regs->mdio_address); - val &= EQOS_MAC_MDIO_ADDRESS_SKAP | - EQOS_MAC_MDIO_ADDRESS_C45E; - val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) | - (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) | - (eqos->config->config_mac_mdio << - EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) | - (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE << - EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) | - EQOS_MAC_MDIO_ADDRESS_GB; - writel(val, &eqos->mac_regs->mdio_address); + v_addr |= eqos_mdio_bitfield(eqos, mdio_addr, mdio_devad, mdio_reg) | + FIELD_PREP(EQOS_MAC_MDIO_ADDRESS_GOC_MASK, + EQOS_MAC_MDIO_ADDRESS_GOC_WRITE); + v_data = mdio_val; + if (v_addr & EQOS_MAC_MDIO_ADDRESS_C45E) + v_data |= FIELD_PREP(EQOS_MAC_MDIO_DATA_RA_MASK, mdio_reg); + + writel(v_data, &eqos->mac_regs->mdio_data); + writel(v_addr, &eqos->mac_regs->mdio_address); udelay(eqos->config->mdio_wait); ret = eqos_mdio_wait_idle(eqos); @@ -689,6 +708,9 @@ static int eqos_start(struct udevice *dev) */ setbits_le32(&eqos->dma_regs->mode, EQOS_DMA_MODE_SWR); + if 
(eqos->config->ops->eqos_fix_soc_reset) + eqos->config->ops->eqos_fix_soc_reset(dev); + ret = wait_for_bit_le32(&eqos->dma_regs->mode, EQOS_DMA_MODE_SWR, false, eqos->config->swr_wait, false); @@ -1279,6 +1301,13 @@ static int eqos_probe_resources_tegra186(struct udevice *dev) debug("%s(dev=%p):\n", __func__, dev); + ret = eqos_get_base_addr_dt(dev); + if (ret) { + pr_err("eqos_get_base_addr_dt failed: %d\n", ret); + return ret; + } + eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); + ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); if (ret) { pr_err("reset_get_by_name(rst) failed: %d\n", ret); @@ -1353,6 +1382,69 @@ static int eqos_remove_resources_tegra186(struct udevice *dev) return 0; } +static int eqos_bind(struct udevice *dev) +{ + static int dev_num; + const size_t name_sz = 16; + char name[name_sz]; + + /* Device name defaults to DT node name. */ + if (ofnode_valid(dev_ofnode(dev))) + return 0; + + /* Assign unique names in case there is no DT node. */ + snprintf(name, name_sz, "eth_eqos#%d", dev_num++); + return device_set_name(dev, name); +} + +/* + * Get driver data based on the device tree. Boards not using a device tree can + * overwrite this function. + */ +__weak void *eqos_get_driver_data(struct udevice *dev) +{ + return (void *)dev_get_driver_data(dev); +} + +static fdt_addr_t eqos_get_base_addr_common(struct udevice *dev, fdt_addr_t addr) +{ + struct eqos_priv *eqos = dev_get_priv(dev); + + if (addr == FDT_ADDR_T_NONE) { +#if CONFIG_IS_ENABLED(FDT_64BIT) + dev_err(dev, "addr=0x%llx is invalid.\n", addr); +#else + dev_err(dev, "addr=0x%x is invalid.\n", addr); +#endif + return -EINVAL; + } + + eqos->regs = addr; + eqos->mac_regs = (void *)(addr + EQOS_MAC_REGS_BASE); + eqos->mtl_regs = (void *)(addr + EQOS_MTL_REGS_BASE); + eqos->dma_regs = (void *)(addr + EQOS_DMA_REGS_BASE); + + return 0; +} + +int eqos_get_base_addr_dt(struct udevice *dev) +{ + fdt_addr_t addr = dev_read_addr(dev); + return eqos_get_base_addr_common(dev, addr); +} + +int eqos_get_base_addr_pci(struct udevice *dev) +{ + fdt_addr_t addr; + void *paddr; + + paddr = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0, PCI_REGION_TYPE, + PCI_REGION_MEM); + addr = paddr ? 
(fdt_addr_t)paddr : FDT_ADDR_T_NONE; + + return eqos_get_base_addr_common(dev, addr); +} + static int eqos_probe(struct udevice *dev) { struct eqos_priv *eqos = dev_get_priv(dev); @@ -1361,17 +1453,12 @@ static int eqos_probe(struct udevice *dev) debug("%s(dev=%p):\n", __func__, dev); eqos->dev = dev; - eqos->config = (void *)dev_get_driver_data(dev); - eqos->regs = dev_read_addr(dev); - if (eqos->regs == FDT_ADDR_T_NONE) { - pr_err("dev_read_addr() failed\n"); + eqos->config = eqos_get_driver_data(dev); + if (!eqos->config) { + pr_err("Failed to get driver data.\n"); return -ENODEV; } - eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); - eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE); - eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE); - eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE); eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0); @@ -1552,6 +1639,7 @@ U_BOOT_DRIVER(eth_eqos) = { .name = "eth_eqos", .id = UCLASS_ETH, .of_match = of_match_ptr(eqos_ids), + .bind = eqos_bind, .probe = eqos_probe, .remove = eqos_remove, .ops = &eqos_ops, diff --git a/drivers/net/dwc_eth_qos.h b/drivers/net/dwc_eth_qos.h index 8b3d0d4..123f98d 100644 --- a/drivers/net/dwc_eth_qos.h +++ b/drivers/net/dwc_eth_qos.h @@ -3,8 +3,11 @@ * Copyright 2022 NXP */ -#include <phy_interface.h> +#include <asm/gpio.h> +#include <clk.h> #include <linux/bitops.h> +#include <phy_interface.h> +#include <reset.h> /* Core registers */ @@ -79,19 +82,20 @@ struct eqos_mac_regs { #define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28 #define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3 -#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21 -#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16 -#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8 +#define EQOS_MAC_MDIO_ADDRESS_PA_MASK GENMASK(25, 21) +#define EQOS_MAC_MDIO_ADDRESS_RDA_MASK GENMASK(20, 16) +#define EQOS_MAC_MDIO_ADDRESS_CR_MASK GENMASK(11, 8) #define EQOS_MAC_MDIO_ADDRESS_CR_100_150 1 #define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2 #define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5 #define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4) -#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2 +#define EQOS_MAC_MDIO_ADDRESS_GOC_MASK GENMASK(3, 2) #define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3 #define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1 #define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1) #define EQOS_MAC_MDIO_ADDRESS_GB BIT(0) +#define EQOS_MAC_MDIO_DATA_RA_MASK GENMASK(31, 16) #define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff #define EQOS_MTL_REGS_BASE 0xd00 @@ -244,6 +248,7 @@ struct eqos_ops { int (*eqos_set_tx_clk_speed)(struct udevice *dev); int (*eqos_get_enetaddr)(struct udevice *dev); ulong (*eqos_get_tick_clk_rate)(struct udevice *dev); + void (*eqos_fix_soc_reset)(struct udevice *dev); }; struct eqos_priv { @@ -285,7 +290,10 @@ void eqos_inval_desc_generic(void *desc); void eqos_flush_desc_generic(void *desc); void eqos_inval_buffer_generic(void *buf, size_t size); void eqos_flush_buffer_generic(void *buf, size_t size); +int eqos_get_base_addr_dt(struct udevice *dev); +int eqos_get_base_addr_pci(struct udevice *dev); int eqos_null_ops(struct udevice *dev); +void *eqos_get_driver_data(struct udevice *dev); extern struct eqos_config eqos_imx_config; extern struct eqos_config eqos_rockchip_config; diff --git a/drivers/net/dwc_eth_qos_imx.c b/drivers/net/dwc_eth_qos_imx.c index d6bed27..af42f74 100644 --- a/drivers/net/dwc_eth_qos_imx.c +++ b/drivers/net/dwc_eth_qos_imx.c @@ -47,6 +47,12 @@ static int eqos_probe_resources_imx(struct udevice *dev) debug("%s(dev=%p):\n", __func__, dev); + ret = eqos_get_base_addr_dt(dev); + 
if (ret) { + dev_dbg(dev, "eqos_get_base_addr_dt failed: %d", ret); + goto err_probe; + } + interface = eqos->config->interface(dev); if (interface == PHY_INTERFACE_MODE_NA) { @@ -210,6 +216,27 @@ static int eqos_get_enetaddr_imx(struct udevice *dev) return 0; } +static void eqos_fix_soc_reset_imx(struct udevice *dev) +{ + struct eqos_priv *eqos = dev_get_priv(dev); + + if (IS_ENABLED(CONFIG_IMX93)) { + /* + * Workaround for ERR051683 in i.MX93 + * The i.MX93 requires speed configuration bits to be set to + * complete the reset procedure in RMII mode. + * See b536f32b5b03 ("net: stmmac: dwmac-imx: use platform + * specific reset for imx93 SoCs") in linux + */ + if (eqos->config->interface(dev) == PHY_INTERFACE_MODE_RMII) { + udelay(200); + setbits_le32(&eqos->mac_regs->configuration, + EQOS_MAC_CONFIGURATION_PS | + EQOS_MAC_CONFIGURATION_FES); + } + } +} + static struct eqos_ops eqos_imx_ops = { .eqos_inval_desc = eqos_inval_desc_generic, .eqos_flush_desc = eqos_flush_desc_generic, @@ -226,6 +253,7 @@ static struct eqos_ops eqos_imx_ops = { .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx, .eqos_get_enetaddr = eqos_get_enetaddr_imx, .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx, + .eqos_fix_soc_reset = eqos_fix_soc_reset_imx, }; struct eqos_config __maybe_unused eqos_imx_config = { diff --git a/drivers/net/dwc_eth_qos_intel.c b/drivers/net/dwc_eth_qos_intel.c new file mode 100644 index 0000000..a2c6825 --- /dev/null +++ b/drivers/net/dwc_eth_qos_intel.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023-2024 DENX Software Engineering GmbH + * Philip Oberfichtner <pro@denx.de> + * + * Based on linux v6.6.39, especially drivers/net/ethernet/stmicro/stmmac + */ + +#include <asm/io.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <miiphy.h> +#include <net.h> +#include <pci.h> + +#include "dwc_eth_qos.h" +#include "dwc_eth_qos_intel.h" + +static struct pci_device_id intel_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_RGMII1G) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_SGMII1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_SGMII2G5) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5) }, + {} +}; + +static int pci_config(struct udevice *dev) +{ + u32 val; + + /* Try to enable I/O accesses and bus-mastering */ + dm_pci_read_config32(dev, PCI_COMMAND, &val); + val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; + dm_pci_write_config32(dev, PCI_COMMAND, val); + + /* Make sure it worked */ + dm_pci_read_config32(dev, PCI_COMMAND, &val); + if (!(val & PCI_COMMAND_MEMORY)) { + dev_err(dev, "%s: Can't enable I/O memory\n", __func__); + return -ENOSPC; + } + + if (!(val & PCI_COMMAND_MASTER)) { + dev_err(dev, "%s: Can't enable bus-mastering\n", __func__); + return -EPERM; + } + + return 0; +} + +static void limit_fifo_size(struct udevice *dev) +{ + /* + * As described in Intel Erratum EHL22, Document Number: 636674-2.1, + * the PSE GbE Controllers advertise a wrong RX and TX fifo size. + * Software should limit this value to 64KB. 
+ */ + struct eqos_priv *eqos = dev_get_priv(dev); + + eqos->tx_fifo_sz = 0x8000; + eqos->rx_fifo_sz = 0x8000; +} + +static int serdes_status_poll(struct udevice *dev, + unsigned char phyaddr, unsigned char phyreg, + unsigned short mask, unsigned short val) +{ + struct eqos_priv *eqos = dev_get_priv(dev); + unsigned int retries = 10; + unsigned short val_rd; + + do { + miiphy_read(eqos->mii->name, phyaddr, phyreg, &val_rd); + if ((val_rd & mask) == (val & mask)) + return 0; + udelay(POLL_DELAY_US); + } while (--retries); + + return -ETIMEDOUT; +} + + /* Returns -ve if MAC is unknown and 0 on success */ +static int mac_check_pse(const struct udevice *dev, bool *is_pse) +{ + struct pci_child_plat *plat = dev_get_parent_plat(dev); + + if (!plat || plat->vendor != PCI_VENDOR_ID_INTEL) + return -ENXIO; + + switch (plat->device) { + case PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5: + case PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5: + *is_pse = 1; + return 0; + + case PCI_DEVICE_ID_INTEL_EHL_RGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_SGMII1: + case PCI_DEVICE_ID_INTEL_EHL_SGMII2G5: + *is_pse = 0; + return 0; + }; + + return -ENXIO; +} + +/* Check if we're in 2G5 mode */ +static bool serdes_link_mode_2500(struct udevice *dev) +{ + const unsigned char phyad = INTEL_MGBE_ADHOC_ADDR; + struct eqos_priv *eqos = dev_get_priv(dev); + unsigned short data; + + miiphy_read(eqos->mii->name, phyad, SERDES_GCR, &data); + if (FIELD_GET(SERDES_LINK_MODE_MASK, data) == SERDES_LINK_MODE_2G5) + return true; + + return false; +} + +static int serdes_powerup(struct udevice *dev) +{ + /* Based on linux/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c */ + + const unsigned char phyad = INTEL_MGBE_ADHOC_ADDR; + struct eqos_priv *eqos = dev_get_priv(dev); + unsigned short data; + int ret; + bool is_pse; + + /* Set the serdes rate and the PCLK rate */ + miiphy_read(eqos->mii->name, phyad, SERDES_GCR0, &data); + + data &= ~SERDES_RATE_MASK; + data &= ~SERDES_PCLK_MASK; + + if (serdes_link_mode_2500(dev)) + data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT | + SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT; + else + data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT | + SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT; + + miiphy_write(eqos->mii->name, phyad, SERDES_GCR0, data); + + /* assert clk_req */ + miiphy_read(eqos->mii->name, phyad, SERDES_GCR0, &data); + data |= SERDES_PLL_CLK; + miiphy_write(eqos->mii->name, phyad, SERDES_GCR0, data); + + /* check for clk_ack assertion */ + ret = serdes_status_poll(dev, phyad, SERDES_GSR0, + SERDES_PLL_CLK, SERDES_PLL_CLK); + + if (ret) { + dev_err(dev, "Serdes PLL clk request timeout\n"); + return ret; + } + + /* assert lane reset*/ + miiphy_read(eqos->mii->name, phyad, SERDES_GCR0, &data); + data |= SERDES_RST; + miiphy_write(eqos->mii->name, phyad, SERDES_GCR0, data); + + /* check for assert lane reset reflection */ + ret = serdes_status_poll(dev, phyad, SERDES_GSR0, + SERDES_RST, SERDES_RST); + + if (ret) { + dev_err(dev, "Serdes assert lane reset timeout\n"); + return ret; + } + + /* move power state to P0 */ + miiphy_read(eqos->mii->name, phyad, SERDES_GCR0, &data); + data &= ~SERDES_PWR_ST_MASK; + data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT; + miiphy_write(eqos->mii->name, phyad, SERDES_GCR0, data); + + /* Check for P0 state */ + ret = serdes_status_poll(dev, phyad, SERDES_GSR0, + SERDES_PWR_ST_MASK, + 
SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT); + + if (ret) { + dev_err(dev, "Serdes power state P0 timeout.\n"); + return ret; + } + + /* PSE only - ungate SGMII PHY Rx Clock*/ + ret = mac_check_pse(dev, &is_pse); + if (ret) { + dev_err(dev, "Failed to determine MAC type.\n"); + return ret; + } + + if (is_pse) { + miiphy_read(eqos->mii->name, phyad, SERDES_GCR0, &data); + data |= SERDES_PHY_RX_CLK; + miiphy_write(eqos->mii->name, phyad, SERDES_GCR0, data); + } + + return 0; +} + +static int xpcs_access(struct udevice *dev, int reg, int v) +{ + /* + * Common read/write helper function + * + * It may seem a bit odd at a first glance that we use bus->read() + * directly insetad of one of the wrapper functions. But: + * + * (1) phy_read() can't be used because we do not access an acutal PHY, + * but a MAC-internal submodule. + * + * (2) miiphy_read() can't be used because it assumes MDIO_DEVAD_NONE. + */ + + int port = INTEL_MGBE_XPCS_ADDR; + int devad = 0x1f; + u16 val; + struct eqos_priv *eqos; + struct mii_dev *bus; + + eqos = dev_get_priv(dev); + bus = eqos->mii; + + if (v < 0) + return bus->read(bus, port, devad, reg); + + val = v; + return bus->write(bus, port, devad, reg, val); +} + +static int xpcs_read(struct udevice *dev, int reg) +{ + return xpcs_access(dev, reg, -1); +} + +static int xpcs_write(struct udevice *dev, int reg, u16 val) +{ + return xpcs_access(dev, reg, val); +} + +static int xpcs_clr_bits(struct udevice *dev, int reg, u16 bits) +{ + int ret; + + ret = xpcs_read(dev, reg); + if (ret < 0) + return ret; + + ret &= ~bits; + + return xpcs_write(dev, reg, ret); +} + +static int xpcs_set_bits(struct udevice *dev, int reg, u16 bits) +{ + int ret; + + ret = xpcs_read(dev, reg); + if (ret < 0) + return ret; + + ret |= bits; + + return xpcs_write(dev, reg, ret); +} + +static int xpcs_init(struct udevice *dev) +{ + /* Based on linux/drivers/net/pcs/pcs-xpcs.c */ + struct eqos_priv *eqos = dev_get_priv(dev); + phy_interface_t interface = eqos->config->interface(dev); + + if (interface != PHY_INTERFACE_MODE_SGMII) + return 0; + + if (xpcs_clr_bits(dev, VR_MII_MMD_CTRL, XPCS_AN_CL37_EN) || + xpcs_set_bits(dev, VR_MII_AN_CTRL, XPCS_MODE_SGMII) || + xpcs_set_bits(dev, VR_MII_DIG_CTRL1, XPCS_MAC_AUTO_SW) || + xpcs_set_bits(dev, VR_MII_MMD_CTRL, XPCS_AN_CL37_EN)) + return -EIO; + + return 0; +} + +static int eqos_probe_ressources_intel(struct udevice *dev) +{ + int ret; + + ret = eqos_get_base_addr_pci(dev); + if (ret) { + dev_err(dev, "eqos_get_base_addr_pci failed: %d\n", ret); + return ret; + } + + limit_fifo_size(dev); + + ret = pci_config(dev); + if (ret) { + dev_err(dev, "pci_config failed: %d\n", ret); + return ret; + } + + return 0; +} + +struct eqos_config eqos_intel_config; + +/* + * overwrite __weak function from eqos_intel.c + * + * For PCI devices the devcie tree is optional. Choose driver data based on PCI + * IDs instead. 
+ */ +void *eqos_get_driver_data(struct udevice *dev) +{ + const struct pci_device_id *id; + const struct pci_child_plat *plat; + + plat = dev_get_parent_plat(dev); + + if (!plat) + return NULL; + + /* last intel_pci_ids element is zero initialized */ + for (id = intel_pci_ids; id->vendor != 0; id++) { + if (id->vendor == plat->vendor && id->device == plat->device) + return &eqos_intel_config; + } + + return NULL; +} + +static int eqos_start_resets_intel(struct udevice *dev) +{ + int ret; + + ret = xpcs_init(dev); + if (ret) { + dev_err(dev, "xpcs init failed.\n"); + return ret; + } + + ret = serdes_powerup(dev); + if (ret) { + dev_err(dev, "Failed to power up serdes.\n"); + return ret; + } + + return 0; +} + +static ulong eqos_get_tick_clk_rate_intel(struct udevice *dev) +{ + return 0; +} + +static int eqos_get_enetaddr_intel(struct udevice *dev) +{ + /* Assume MAC address is programmed by previous boot stage */ + struct eth_pdata *plat = dev_get_plat(dev); + struct eqos_priv *eqos = dev_get_priv(dev); + u8 *lo = (u8 *)&eqos->mac_regs->address0_low; + u8 *hi = (u8 *)&eqos->mac_regs->address0_high; + + plat->enetaddr[0] = lo[0]; + plat->enetaddr[1] = lo[1]; + plat->enetaddr[2] = lo[2]; + plat->enetaddr[3] = lo[3]; + plat->enetaddr[4] = hi[0]; + plat->enetaddr[5] = hi[1]; + + return 0; +} + +static phy_interface_t eqos_get_interface_intel(const struct udevice *dev) +{ + struct pci_child_plat *plat = dev_get_parent_plat(dev); + + if (!plat || plat->vendor != PCI_VENDOR_ID_INTEL) + return PHY_INTERFACE_MODE_NA; + + switch (plat->device) { + /* The GbE Host Controller has no RGMII interface */ + case PCI_DEVICE_ID_INTEL_EHL_RGMII1G: + return PHY_INTERFACE_MODE_NA; + + case PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G: + return PHY_INTERFACE_MODE_RGMII; + + /* Host SGMII and Host SGMII2G5 share the same device id */ + case PCI_DEVICE_ID_INTEL_EHL_SGMII1: + case PCI_DEVICE_ID_INTEL_EHL_SGMII2G5: + case PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5: + case PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G: + case PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5: + return PHY_INTERFACE_MODE_SGMII; + }; + + return PHY_INTERFACE_MODE_NA; +} + +static struct eqos_ops eqos_intel_ops = { + .eqos_inval_desc = eqos_inval_desc_generic, + .eqos_flush_desc = eqos_flush_desc_generic, + .eqos_inval_buffer = eqos_inval_buffer_generic, + .eqos_flush_buffer = eqos_flush_buffer_generic, + .eqos_probe_resources = eqos_probe_ressources_intel, + .eqos_remove_resources = eqos_null_ops, + .eqos_stop_resets = eqos_null_ops, + .eqos_start_resets = eqos_start_resets_intel, + .eqos_stop_clks = eqos_null_ops, + .eqos_start_clks = eqos_null_ops, + .eqos_calibrate_pads = eqos_null_ops, + .eqos_disable_calibration = eqos_null_ops, + .eqos_set_tx_clk_speed = eqos_null_ops, + .eqos_get_enetaddr = eqos_get_enetaddr_intel, + .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_intel, +}; + +struct eqos_config eqos_intel_config = { + .reg_access_always_ok = false, + .mdio_wait = 10, + .swr_wait = 50, + .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB, + .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300, + .axi_bus_width = EQOS_AXI_WIDTH_64, + .interface = eqos_get_interface_intel, + .ops = &eqos_intel_ops +}; + +extern U_BOOT_DRIVER(eth_eqos); +U_BOOT_PCI_DEVICE(eth_eqos, intel_pci_ids); diff --git a/drivers/net/dwc_eth_qos_intel.h b/drivers/net/dwc_eth_qos_intel.h new file mode 100644 index 0000000..847c75e --- /dev/null +++ b/drivers/net/dwc_eth_qos_intel.h @@ -0,0 +1,57 @@ +/* 
SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2023-2024 DENX Software Engineering GmbH + * Philip Oberfichtner <pro@denx.de> + * + * This header is based on linux v6.6.39, + * + * drivers/net/pcs/pcs-xpcs.h + * drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h, + * + * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates + * Copyright (c) 2020 Intel Corporation + */ + +#ifndef __DWMAC_INTEL_H__ +#define __DWMAC_INTEL_H__ + +#define POLL_DELAY_US 8 + +/* SERDES Register */ +#define SERDES_GCR 0x0 /* Global Conguration */ +#define SERDES_GSR0 0x5 /* Global Status Reg0 */ +#define SERDES_GCR0 0xb /* Global Configuration Reg0 */ + +/* SERDES defines */ +#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */ +#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */ +#define SERDES_RST BIT(2) /* Serdes Reset */ +#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state*/ +#define SERDES_RATE_MASK GENMASK(9, 8) +#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */ +#define SERDES_LINK_MODE_MASK GENMASK(2, 1) +#define SERDES_PWR_ST_SHIFT 4 +#define SERDES_PWR_ST_P0 0x0 +#define SERDES_PWR_ST_P3 0x3 +#define SERDES_LINK_MODE_2G5 0x3 +#define SERSED_LINK_MODE_1G 0x2 +#define SERDES_PCLK_37p5MHZ 0x0 +#define SERDES_PCLK_70MHZ 0x1 +#define SERDES_RATE_PCIE_GEN1 0x0 +#define SERDES_RATE_PCIE_GEN2 0x1 +#define SERDES_RATE_PCIE_SHIFT 8 +#define SERDES_PCLK_SHIFT 12 + +#define INTEL_MGBE_ADHOC_ADDR 0x15 +#define INTEL_MGBE_XPCS_ADDR 0x16 + +/* XPCS defines */ +#define XPCS_MODE_SGMII BIT(2) +#define XPCS_MAC_AUTO_SW BIT(9) +#define XPCS_AN_CL37_EN BIT(12) + +#define VR_MII_MMD_CTRL 0x0000 +#define VR_MII_DIG_CTRL1 0x8000 +#define VR_MII_AN_CTRL 0x8001 + +#endif /* __DWMAC_INTEL_H__ */ diff --git a/drivers/net/dwc_eth_qos_qcom.c b/drivers/net/dwc_eth_qos_qcom.c index 77d6263..de0ae09 100644 --- a/drivers/net/dwc_eth_qos_qcom.c +++ b/drivers/net/dwc_eth_qos_qcom.c @@ -522,6 +522,12 @@ static int eqos_probe_resources_qcom(struct udevice *dev) debug("%s(dev=%p):\n", __func__, dev); + ret = eqos_get_base_addr_dt(dev); + if (ret) { + pr_err("eqos_get_base_addr_dt failed: %d\n", ret); + return ret; + } + interface = eqos->config->interface(dev); if (interface == PHY_INTERFACE_MODE_NA) { diff --git a/drivers/net/dwc_eth_qos_rockchip.c b/drivers/net/dwc_eth_qos_rockchip.c index c4557e5..9fc8c68 100644 --- a/drivers/net/dwc_eth_qos_rockchip.c +++ b/drivers/net/dwc_eth_qos_rockchip.c @@ -311,6 +311,12 @@ static int eqos_probe_resources_rk(struct udevice *dev) int reset_flags = GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE; int ret; + ret = eqos_get_base_addr_dt(dev); + if (ret) { + dev_err(dev, "eqos_get_base_addr_dt failed: %d\n", ret); + return ret; + } + data = calloc(1, sizeof(struct rockchip_platform_data)); if (!data) return -ENOMEM; diff --git a/drivers/net/dwc_eth_qos_starfive.c b/drivers/net/dwc_eth_qos_starfive.c index 09e714c..d9ace43 100644 --- a/drivers/net/dwc_eth_qos_starfive.c +++ b/drivers/net/dwc_eth_qos_starfive.c @@ -183,6 +183,12 @@ static int eqos_probe_resources_jh7110(struct udevice *dev) struct starfive_platform_data *data; int ret; + ret = eqos_get_base_addr_dt(dev); + if (ret) { + pr_err("eqos_get_base_addr_dt failed: %d\n", ret); + return ret; + } + data = calloc(1, sizeof(struct starfive_platform_data)); if (!data) return -ENOMEM; diff --git a/drivers/net/dwc_eth_qos_stm32.c b/drivers/net/dwc_eth_qos_stm32.c index cffaa10..f3a973f 100644 --- a/drivers/net/dwc_eth_qos_stm32.c +++ b/drivers/net/dwc_eth_qos_stm32.c @@ -234,6 +234,12 @@ static int 
eqos_probe_resources_stm32(struct udevice *dev) interface = eqos->config->interface(dev); + ret = eqos_get_base_addr_dt(dev); + if (ret) { + dev_err(dev, "eqos_get_base_addr_dt failed: %d\n", ret); + return ret; + } + if (interface == PHY_INTERFACE_MODE_NA) { dev_err(dev, "Invalid PHY interface\n"); return -EINVAL; diff --git a/drivers/net/dwc_eth_xgmac_socfpga.c b/drivers/net/dwc_eth_xgmac_socfpga.c index 270c1b0..87fb7e8 100644 --- a/drivers/net/dwc_eth_xgmac_socfpga.c +++ b/drivers/net/dwc_eth_xgmac_socfpga.c @@ -37,7 +37,7 @@ static int dwxgmac_socfpga_do_setphy(struct udevice *dev, u32 modereg) u32 modemask = SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << xgmac->syscon_phy_regshift; - if (!(IS_ENABLED(CONFIG_SPL_BUILD)) && IS_ENABLED(CONFIG_SPL_ATF)) { + if (!(IS_ENABLED(CONFIG_XPL_BUILD)) && IS_ENABLED(CONFIG_SPL_ATF)) { u32 index = ((u64)xgmac->syscon_phy - socfpga_get_sysmgr_addr() - SYSMGR_SOC64_EMAC0) >> 2; diff --git a/drivers/net/dwmac_socfpga.c b/drivers/net/dwmac_socfpga.c index bba3fc4..a9e2d8c 100644 --- a/drivers/net/dwmac_socfpga.c +++ b/drivers/net/dwmac_socfpga.c @@ -68,7 +68,7 @@ static int dwmac_socfpga_do_setphy(struct udevice *dev, u32 modereg) struct dwmac_socfpga_plat *pdata = dev_get_plat(dev); u32 modemask = SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << pdata->reg_shift; -#if !defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_ATF) +#if !defined(CONFIG_XPL_BUILD) && defined(CONFIG_SPL_ATF) u32 index = ((u64)pdata->phy_intf - socfpga_get_sysmgr_addr() - SYSMGR_SOC64_EMAC0) >> 2; diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index d18a8d5..f64dbb7 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c @@ -678,7 +678,7 @@ static int eepro100_recv_common(struct eepro100_priv *priv, uchar **packetp) status = le16_to_cpu(desc->status); if (!(status & RFD_STATUS_C)) - return 0; + return -EAGAIN; /* Valid frame status. */ if (status & RFD_STATUS_OK) { diff --git a/drivers/net/essedma.c b/drivers/net/essedma.c new file mode 100644 index 0000000..fccc5f5 --- /dev/null +++ b/drivers/net/essedma.c @@ -0,0 +1,1192 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2020 Sartura Ltd. 
+ * + * Author: Robert Marko <robert.marko@sartura.hr> + * + * Copyright (c) 2021 Toco Technologies FZE <contact@toco.ae> + * Copyright (c) 2021 Gabor Juhos <j4g8y7@gmail.com> + * + * Qualcomm ESS EDMA ethernet driver + */ + +#include <asm/io.h> +#include <clk.h> +#include <cpu_func.h> +#include <dm.h> +#include <dm/device_compat.h> +#include <errno.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <log.h> +#include <miiphy.h> +#include <net.h> +#include <reset.h> + +#include "essedma.h" + +#define EDMA_MAX_PKT_SIZE (PKTSIZE_ALIGN + PKTALIGN) + +#define EDMA_RXQ_ID 0 +#define EDMA_TXQ_ID 0 + +/* descriptor ring */ +struct edma_ring { + u16 count; /* number of descriptors in the ring */ + void *hw_desc; /* descriptor ring virtual address */ + unsigned int hw_size; /* hw descriptor ring length in bytes */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 head; /* next Tx descriptor to fill */ + u16 tail; /* next Tx descriptor to clean */ +}; + +struct ess_switch { + phys_addr_t base; + struct phy_device *phydev[ESS_PORTS_NUM]; + u32 phy_mask; + ofnode ports_node; + phy_interface_t port_wrapper_mode; + int num_phy; +}; + +struct essedma_priv { + phys_addr_t base; + struct udevice *dev; + struct clk ess_clk; + struct reset_ctl ess_rst; + struct udevice *mdio_dev; + struct ess_switch esw; + phys_addr_t psgmii_base; + struct edma_ring tpd_ring; + struct edma_ring rfd_ring; +}; + +static void esw_port_loopback_set(struct ess_switch *esw, int port, + bool enable) +{ + u32 t; + + t = readl(esw->base + ESS_PORT_LOOKUP_CTRL(port)); + if (enable) + t |= ESS_PORT_LOOP_BACK_EN; + else + t &= ~ESS_PORT_LOOP_BACK_EN; + writel(t, esw->base + ESS_PORT_LOOKUP_CTRL(port)); +} + +static void esw_port_loopback_set_all(struct ess_switch *esw, bool enable) +{ + int i; + + for (i = 1; i < ESS_PORTS_NUM; i++) + esw_port_loopback_set(esw, i, enable); +} + +static void ess_reset(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + + reset_assert(&priv->ess_rst); + mdelay(10); + + reset_deassert(&priv->ess_rst); + mdelay(10); +} + +void qca8075_ess_reset(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + struct phy_device *psgmii_phy; + int i, val; + + /* Find the PSGMII PHY */ + psgmii_phy = priv->esw.phydev[priv->esw.num_phy - 1]; + + /* Fix phy psgmii RX 20bit */ + phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x005b); + + /* Reset phy psgmii */ + phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x001b); + + /* Release reset phy psgmii */ + phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x005b); + for (i = 0; i < 100; i++) { + val = phy_read_mmd(psgmii_phy, MDIO_MMD_PMAPMD, 0x28); + if (val & 0x1) + break; + mdelay(1); + } + if (i >= 100) + printf("QCA807x PSGMII PLL_VCO_CALIB Not Ready\n"); + + /* + * Check qca8075 psgmii calibration done end. + * Freeze phy psgmii RX CDR + */ + phy_write(psgmii_phy, MDIO_DEVAD_NONE, 0x1a, 0x2230); + + ess_reset(dev); + + /* Check ipq psgmii calibration done start */ + for (i = 0; i < 100; i++) { + val = readl(priv->psgmii_base + PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_2); + if (val & 0x1) + break; + mdelay(1); + } + if (i >= 100) + printf("PSGMII PLL_VCO_CALIB Not Ready\n"); + + /* + * Check ipq psgmii calibration done end. 
+ * Relesae phy psgmii RX CDR + */ + phy_write(psgmii_phy, MDIO_DEVAD_NONE, 0x1a, 0x3230); + + /* Release phy psgmii RX 20bit */ + phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x005f); +} + +#define PSGMII_ST_NUM_RETRIES 20 +#define PSGMII_ST_PKT_COUNT (4 * 1024) +#define PSGMII_ST_PKT_SIZE 1504 + +/* + * Transmitting one byte over a 1000Mbps link requires 8 ns. + * Additionally, use + 1 ns for safety to compensate latencies + * and such. + */ +#define PSGMII_ST_TRAFFIC_TIMEOUT_NS \ + (PSGMII_ST_PKT_COUNT * PSGMII_ST_PKT_SIZE * (8 + 1)) + +#define PSGMII_ST_TRAFFIC_TIMEOUT \ + DIV_ROUND_UP(PSGMII_ST_TRAFFIC_TIMEOUT_NS, 1000000) + +static bool psgmii_self_test_repeat; + +static void psgmii_st_phy_power_down(struct phy_device *phydev) +{ + int val; + + val = phy_read(phydev, MDIO_DEVAD_NONE, MII_BMCR); + val |= QCA807X_POWER_DOWN; + phy_write(phydev, MDIO_DEVAD_NONE, MII_BMCR, val); +} + +static void psgmii_st_phy_prepare(struct phy_device *phydev) +{ + int val; + + /* check phydev combo port */ + val = phy_read(phydev, MDIO_DEVAD_NONE, + QCA807X_CHIP_CONFIGURATION); + if (val) { + /* Select copper page */ + val |= QCA807X_MEDIA_PAGE_SELECT; + phy_write(phydev, MDIO_DEVAD_NONE, + QCA807X_CHIP_CONFIGURATION, val); + } + + /* Force no link by power down */ + psgmii_st_phy_power_down(phydev); + + /* Packet number (Non documented) */ + phy_write_mmd(phydev, MDIO_MMD_AN, 0x8021, PSGMII_ST_PKT_COUNT); + phy_write_mmd(phydev, MDIO_MMD_AN, 0x8062, PSGMII_ST_PKT_SIZE); + + /* Fix MDI status */ + val = phy_read(phydev, MDIO_DEVAD_NONE, QCA807X_FUNCTION_CONTROL); + val &= ~QCA807X_MDI_CROSSOVER_MODE_MASK; + val |= FIELD_PREP(QCA807X_MDI_CROSSOVER_MODE_MASK, + QCA807X_MDI_CROSSOVER_MODE_MANUAL_MDI); + val &= ~QCA807X_POLARITY_REVERSAL; + phy_write(phydev, MDIO_DEVAD_NONE, QCA807X_FUNCTION_CONTROL, val); +} + +static void psgmii_st_phy_recover(struct phy_device *phydev) +{ + int val; + + /* Packet number (Non documented) */ + phy_write_mmd(phydev, MDIO_MMD_AN, 0x8021, 0x0); + + /* Disable CRC checker and packet counter */ + val = phy_read_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER); + val &= ~QCA807X_MMD7_PACKET_COUNTER_SELFCLR; + val &= ~QCA807X_MMD7_CRC_PACKET_COUNTER_EN; + phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER, val); + + /* Disable traffic (Undocumented) */ + phy_write_mmd(phydev, MDIO_MMD_AN, 0x8020, 0x0); +} + +static void psgmii_st_phy_start_traffic(struct phy_device *phydev) +{ + int val; + + /* Enable CRC checker and packet counter */ + val = phy_read_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER); + val |= QCA807X_MMD7_CRC_PACKET_COUNTER_EN; + phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER, val); + + /* Start traffic (Undocumented) */ + phy_write_mmd(phydev, MDIO_MMD_AN, 0x8020, 0xa000); +} + +static bool psgmii_st_phy_check_counters(struct phy_device *phydev) +{ + u32 tx_ok; + + /* + * The number of test packets is limited to 65535 so + * only read the lower 16 bits of the counter. 
+ */ + tx_ok = phy_read_mmd(phydev, MDIO_MMD_AN, + QCA807X_MMD7_VALID_EGRESS_COUNTER_2); + + return (tx_ok == PSGMII_ST_PKT_COUNT); +} + +static void psgmii_st_phy_reset_loopback(struct phy_device *phydev) +{ + /* reset the PHY */ + phy_write(phydev, MDIO_DEVAD_NONE, MII_BMCR, 0x9000); + + /* enable loopback mode */ + phy_write(phydev, MDIO_DEVAD_NONE, MII_BMCR, 0x4140); +} + +static inline bool psgmii_st_phy_link_is_up(struct phy_device *phydev) +{ + int val; + + val = phy_read(phydev, MDIO_DEVAD_NONE, QCA807X_PHY_SPECIFIC); + return !!(val & QCA807X_PHY_SPECIFIC_LINK); +} + +static bool psgmii_st_phy_wait(struct ess_switch *esw, u32 mask, + int retries, int delay, + bool (*check)(struct phy_device *)) +{ + int i; + + for (i = 0; i < retries; i++) { + int phy; + + for (phy = 0; phy < esw->num_phy - 1; phy++) { + u32 phybit = BIT(phy); + + if (!(mask & phybit)) + continue; + + if (check(esw->phydev[phy])) + mask &= ~phybit; + } + + if (!mask) + break; + + mdelay(delay); + } + + return (!mask); +} + +static bool psgmii_st_phy_wait_link(struct ess_switch *esw, u32 mask) +{ + return psgmii_st_phy_wait(esw, mask, 100, 10, + psgmii_st_phy_link_is_up); +} + +static bool psgmii_st_phy_wait_tx_complete(struct ess_switch *esw, u32 mask) +{ + return psgmii_st_phy_wait(esw, mask, PSGMII_ST_TRAFFIC_TIMEOUT, 1, + psgmii_st_phy_check_counters); +} + +static bool psgmii_st_run_test_serial(struct ess_switch *esw) +{ + bool result = true; + int i; + + for (i = 0; i < esw->num_phy - 1; i++) { + struct phy_device *phydev = esw->phydev[i]; + + psgmii_st_phy_reset_loopback(phydev); + + psgmii_st_phy_wait_link(esw, BIT(i)); + + psgmii_st_phy_start_traffic(phydev); + + /* wait for the traffic to complete */ + result &= psgmii_st_phy_wait_tx_complete(esw, BIT(i)); + + /* Power down */ + psgmii_st_phy_power_down(phydev); + + if (!result) + break; + } + + return result; +} + +static bool psgmii_st_run_test_parallel(struct ess_switch *esw) +{ + bool result; + int i; + + /* enable loopback mode on all PHYs */ + for (i = 0; i < esw->num_phy - 1; i++) + psgmii_st_phy_reset_loopback(esw->phydev[i]); + + psgmii_st_phy_wait_link(esw, esw->phy_mask); + + /* start traffic on all PHYs parallely */ + for (i = 0; i < esw->num_phy - 1; i++) + psgmii_st_phy_start_traffic(esw->phydev[i]); + + /* wait for the traffic to complete on all PHYs */ + result = psgmii_st_phy_wait_tx_complete(esw, esw->phy_mask); + + /* Power down all PHYs */ + for (i = 0; i < esw->num_phy - 1; i++) + psgmii_st_phy_power_down(esw->phydev[i]); + + return result; +} + +struct psgmii_st_stats { + int succeed; + int failed; + int failed_max; + int failed_cont; +}; + +static void psgmii_st_update_stats(struct psgmii_st_stats *stats, + bool success) +{ + if (success) { + stats->succeed++; + stats->failed_cont = 0; + return; + } + + stats->failed++; + stats->failed_cont++; + if (stats->failed_max < stats->failed_cont) + stats->failed_max = stats->failed_cont; +} + +static void psgmii_self_test(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + struct ess_switch *esw = &priv->esw; + struct psgmii_st_stats stats; + bool result = false; + unsigned long tm; + int i; + + memset(&stats, 0, sizeof(stats)); + + tm = get_timer(0); + + for (i = 0; i < esw->num_phy - 1; i++) + psgmii_st_phy_prepare(esw->phydev[i]); + + for (i = 0; i < PSGMII_ST_NUM_RETRIES; i++) { + qca8075_ess_reset(dev); + + /* enable loopback mode on the switch's ports */ + esw_port_loopback_set_all(esw, true); + + /* run test on each PHYs individually after each other */ + 
result = psgmii_st_run_test_serial(esw); + + if (result) { + /* run test on each PHYs parallely */ + result = psgmii_st_run_test_parallel(esw); + } + + psgmii_st_update_stats(&stats, result); + + if (psgmii_self_test_repeat) + continue; + + if (result) + break; + } + + for (i = 0; i < esw->num_phy - 1; i++) { + /* Configuration recover */ + psgmii_st_phy_recover(esw->phydev[i]); + + /* Disable loopback */ + phy_write(esw->phydev[i], MDIO_DEVAD_NONE, + QCA807X_FUNCTION_CONTROL, 0x6860); + phy_write(esw->phydev[i], MDIO_DEVAD_NONE, MII_BMCR, 0x9040); + } + + /* disable loopback mode on the switch's ports */ + esw_port_loopback_set_all(esw, false); + + tm = get_timer(tm); + dev_dbg(priv->dev, "\nPSGMII self-test: succeed %d, failed %d (max %d), duration %lu.%03lu secs\n", + stats.succeed, stats.failed, stats.failed_max, + tm / 1000, tm % 1000); +} + +static int ess_switch_disable_lookup(struct ess_switch *esw) +{ + int val; + int i; + + /* Disable port lookup for all ports*/ + for (i = 0; i < ESS_PORTS_NUM; i++) { + int ess_port_vid; + + val = readl(esw->base + ESS_PORT_LOOKUP_CTRL(i)); + val &= ~ESS_PORT_VID_MEM_MASK; + + switch (i) { + case 0: + fallthrough; + case 5: + /* CPU,WAN port -> nothing */ + ess_port_vid = 0; + break; + case 1 ... 4: + /* LAN ports -> all other LAN ports */ + ess_port_vid = GENMASK(4, 1); + ess_port_vid &= ~BIT(i); + break; + default: + return -EINVAL; + } + + val |= FIELD_PREP(ESS_PORT_VID_MEM_MASK, ess_port_vid); + + writel(val, esw->base + ESS_PORT_LOOKUP_CTRL(i)); + } + + /* Set magic value for the global forwarding register 1 */ + writel(0x3e3e3e, esw->base + ESS_GLOBAL_FW_CTRL1); + + return 0; +} + +static int ess_switch_enable_lookup(struct ess_switch *esw) +{ + int val; + int i; + + /* Enable port lookup for all ports*/ + for (i = 0; i < ESS_PORTS_NUM; i++) { + int ess_port_vid; + + val = readl(esw->base + ESS_PORT_LOOKUP_CTRL(i)); + val &= ~ESS_PORT_VID_MEM_MASK; + + switch (i) { + case 0: + /* CPU port -> all other ports */ + ess_port_vid = GENMASK(5, 1); + break; + case 1 ... 
4: + /* LAN ports -> CPU and all other LAN ports */ + ess_port_vid = GENMASK(4, 0); + ess_port_vid &= ~BIT(i); + break; + case 5: + /* WAN port -> CPU port only */ + ess_port_vid = BIT(0); + break; + default: + return -EINVAL; + } + + val |= FIELD_PREP(ESS_PORT_VID_MEM_MASK, ess_port_vid); + + writel(val, esw->base + ESS_PORT_LOOKUP_CTRL(i)); + } + + /* Set magic value for the global forwarding register 1 */ + writel(0x3f3f3f, esw->base + ESS_GLOBAL_FW_CTRL1); + + return 0; +} + +static void ess_switch_init(struct ess_switch *esw) +{ + int val = 0; + int i; + + /* Set magic value for the global forwarding register 1 */ + writel(0x3e3e3e, esw->base + ESS_GLOBAL_FW_CTRL1); + + /* Set 1000M speed, full duplex and RX/TX flow control for the CPU port*/ + val &= ~ESS_PORT_SPEED_MASK; + val |= FIELD_PREP(ESS_PORT_SPEED_MASK, ESS_PORT_SPEED_1000); + val |= ESS_PORT_DUPLEX_MODE; + val |= ESS_PORT_TX_FLOW_EN; + val |= ESS_PORT_RX_FLOW_EN; + + writel(val, esw->base + ESS_PORT0_STATUS); + + /* Disable port lookup for all ports*/ + for (i = 0; i < ESS_PORTS_NUM; i++) { + val = readl(esw->base + ESS_PORT_LOOKUP_CTRL(i)); + val &= ~ESS_PORT_VID_MEM_MASK; + + writel(val, esw->base + ESS_PORT_LOOKUP_CTRL(i)); + } + + /* Set HOL settings for all ports*/ + for (i = 0; i < ESS_PORTS_NUM; i++) { + val = 0; + + val |= FIELD_PREP(EG_PORT_QUEUE_NUM_MASK, 30); + if (i == 0 || i == 5) { + val |= FIELD_PREP(EG_PRI5_QUEUE_NUM_MASK, 4); + val |= FIELD_PREP(EG_PRI4_QUEUE_NUM_MASK, 4); + } + val |= FIELD_PREP(EG_PRI3_QUEUE_NUM_MASK, 4); + val |= FIELD_PREP(EG_PRI2_QUEUE_NUM_MASK, 4); + val |= FIELD_PREP(EG_PRI1_QUEUE_NUM_MASK, 4); + val |= FIELD_PREP(EG_PRI0_QUEUE_NUM_MASK, 4); + + writel(val, esw->base + ESS_PORT_HOL_CTRL0(i)); + + val = readl(esw->base + ESS_PORT_HOL_CTRL1(i)); + val &= ~ESS_ING_BUF_NUM_0_MASK; + val |= FIELD_PREP(ESS_ING_BUF_NUM_0_MASK, 6); + + writel(val, esw->base + ESS_PORT_HOL_CTRL1(i)); + } + + /* Give switch some time */ + mdelay(1); + + /* Enable RX and TX MAC-s */ + val = readl(esw->base + ESS_PORT0_STATUS); + val |= ESS_PORT_TXMAC_EN; + val |= ESS_PORT_RXMAC_EN; + + writel(val, esw->base + ESS_PORT0_STATUS); + + /* Set magic value for the global forwarding register 1 */ + writel(0x7f7f7f, esw->base + ESS_GLOBAL_FW_CTRL1); +} + +static int essedma_of_phy(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + struct ess_switch *esw = &priv->esw; + int num_phy = 0, ret = 0; + ofnode node; + int i; + + ofnode_for_each_subnode(node, esw->ports_node) { + struct ofnode_phandle_args phandle_args; + struct phy_device *phydev; + u32 phy_addr; + + if (ofnode_is_enabled(node)) { + if (ofnode_parse_phandle_with_args(node, "phy-handle", NULL, 0, 0, + &phandle_args)) { + dev_dbg(priv->dev, "Failed to find phy-handle\n"); + return -ENODEV; + } + + ret = ofnode_read_u32(phandle_args.node, "reg", &phy_addr); + if (ret) { + dev_dbg(priv->dev, "Missing reg property in PHY node %s\n", + ofnode_get_name(phandle_args.node)); + return ret; + } + + phydev = dm_mdio_phy_connect(priv->mdio_dev, phy_addr, + dev, priv->esw.port_wrapper_mode); + if (!phydev) { + dev_dbg(priv->dev, "Failed to find phy on addr %d\n", phy_addr); + return -ENODEV; + } + + phydev->node = phandle_args.node; + ret = phy_config(phydev); + + esw->phydev[num_phy] = phydev; + + num_phy++; + } + } + + esw->num_phy = num_phy; + + for (i = 0; i < esw->num_phy - 1; i++) + esw->phy_mask |= BIT(i); + + return ret; +} + +static int essedma_of_switch(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + int 
port_wrapper_mode = -1; + + priv->esw.ports_node = ofnode_find_subnode(dev_ofnode(dev), "ports"); + if (!ofnode_valid(priv->esw.ports_node)) { + printf("Failed to find ports node\n"); + return -EINVAL; + } + + port_wrapper_mode = ofnode_read_phy_mode(priv->esw.ports_node); + if (port_wrapper_mode == -1) + return -EINVAL; + + priv->esw.port_wrapper_mode = port_wrapper_mode; + + return essedma_of_phy(dev); +} + +static void ipq40xx_edma_start_rx_tx(struct essedma_priv *priv) +{ + volatile u32 data; + + /* enable RX queues */ + data = readl(priv->base + EDMA_REG_RXQ_CTRL); + data |= EDMA_RXQ_CTRL_EN; + writel(data, priv->base + EDMA_REG_RXQ_CTRL); + + /* enable TX queues */ + data = readl(priv->base + EDMA_REG_TXQ_CTRL); + data |= EDMA_TXQ_CTRL_TXQ_EN; + writel(data, priv->base + EDMA_REG_TXQ_CTRL); +} + +/* + * ipq40xx_edma_init_desc() + * Update descriptor ring size, + * Update buffer and producer/consumer index + */ +static void ipq40xx_edma_init_desc(struct essedma_priv *priv) +{ + struct edma_ring *rfd_ring; + struct edma_ring *etdr; + volatile u32 data = 0; + u16 hw_cons_idx = 0; + + /* Set the base address of every TPD ring. */ + etdr = &priv->tpd_ring; + + /* Update TX descriptor ring base address. */ + writel((u32)(etdr->dma & 0xffffffff), + priv->base + EDMA_REG_TPD_BASE_ADDR_Q(EDMA_TXQ_ID)); + data = readl(priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID)); + + /* Calculate hardware consumer index for Tx. */ + hw_cons_idx = FIELD_GET(EDMA_TPD_CONS_IDX_MASK, data); + etdr->head = hw_cons_idx; + etdr->tail = hw_cons_idx; + data &= ~EDMA_TPD_PROD_IDX_MASK; + data |= hw_cons_idx; + + /* Update producer index for Tx. */ + writel(data, priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID)); + + /* Update SW consumer index register for Tx. */ + writel(hw_cons_idx, + priv->base + EDMA_REG_TX_SW_CONS_IDX_Q(EDMA_TXQ_ID)); + + /* Set TPD ring size. */ + writel((u32)(etdr->count & EDMA_TPD_RING_SIZE_MASK), + priv->base + EDMA_REG_TPD_RING_SIZE); + + /* Configure Rx ring. */ + rfd_ring = &priv->rfd_ring; + + /* Update Receive Free descriptor ring base address. */ + writel((u32)(rfd_ring->dma & 0xffffffff), + priv->base + EDMA_REG_RFD_BASE_ADDR_Q(EDMA_RXQ_ID)); + data = readl(priv->base + EDMA_REG_RFD_BASE_ADDR_Q(EDMA_RXQ_ID)); + + /* Update RFD ring size and RX buffer size. */ + data = (rfd_ring->count & EDMA_RFD_RING_SIZE_MASK) + << EDMA_RFD_RING_SIZE_SHIFT; + data |= (EDMA_MAX_PKT_SIZE & EDMA_RX_BUF_SIZE_MASK) + << EDMA_RX_BUF_SIZE_SHIFT; + writel(data, priv->base + EDMA_REG_RX_DESC0); + + /* Disable TX FIFO low watermark and high watermark */ + writel(0, priv->base + EDMA_REG_TXF_WATER_MARK); + + /* Load all of base address above */ + data = readl(priv->base + EDMA_REG_TX_SRAM_PART); + data |= 1 << EDMA_LOAD_PTR_SHIFT; + writel(data, priv->base + EDMA_REG_TX_SRAM_PART); +} + +static void ipq40xx_edma_init_rfd_ring(struct essedma_priv *priv) +{ + struct edma_ring *erdr = &priv->rfd_ring; + struct edma_rfd *rfds = erdr->hw_desc; + int i; + + for (i = 0; i < erdr->count; i++) + rfds[i].buffer_addr = virt_to_phys(net_rx_packets[i]); + + flush_dcache_range(erdr->dma, erdr->dma + erdr->hw_size); + + /* setup producer index */ + erdr->head = erdr->count - 1; + writel(erdr->head, priv->base + EDMA_REG_RFD_IDX_Q(EDMA_RXQ_ID)); +} + +static void ipq40xx_edma_configure(struct essedma_priv *priv) +{ + u32 tmp; + int i; + + /* Set RSS type */ + writel(IPQ40XX_EDMA_RSS_TYPE_NONE, priv->base + EDMA_REG_RSS_TYPE); + + /* Configure RSS indirection table. 
+ * 128 hashes will be configured in the following + * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively + * and so on + */ + for (i = 0; i < EDMA_NUM_IDT; i++) + writel(EDMA_RSS_IDT_VALUE, priv->base + EDMA_REG_RSS_IDT(i)); + + /* Set RFD burst number */ + tmp = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT); + + /* Set RFD prefetch threshold */ + tmp |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT); + + /* Set RFD in host ring low threshold to generate interrupt */ + tmp |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT); + writel(tmp, priv->base + EDMA_REG_RX_DESC1); + + /* configure reception control data. */ + + /* Set Rx FIFO threshold to start DMA of data to the host */ + tmp = EDMA_FIFO_THRESH_128_BYTE; + + /* Set RX remove vlan bit */ + tmp |= EDMA_RXQ_CTRL_RMV_VLAN; + writel(tmp, priv->base + EDMA_REG_RXQ_CTRL); + + /* Configure transmission control data */ + tmp = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT); + tmp |= EDMA_TXQ_CTRL_TPD_BURST_EN; + tmp |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT); + writel(tmp, priv->base + EDMA_REG_TXQ_CTRL); +} + +static void ipq40xx_edma_stop_rx_tx(struct essedma_priv *priv) +{ + volatile u32 data; + + data = readl(priv->base + EDMA_REG_RXQ_CTRL); + data &= ~EDMA_RXQ_CTRL_EN; + writel(data, priv->base + EDMA_REG_RXQ_CTRL); + data = readl(priv->base + EDMA_REG_TXQ_CTRL); + data &= ~EDMA_TXQ_CTRL_TXQ_EN; + writel(data, priv->base + EDMA_REG_TXQ_CTRL); +} + +static int ipq40xx_eth_recv(struct udevice *dev, int flags, uchar **packetp) +{ + struct essedma_priv *priv = dev_get_priv(dev); + struct edma_ring *erdr = &priv->rfd_ring; + struct edma_rrd *rrd; + u32 hw_tail; + u8 *rx_pkt; + + hw_tail = readl(priv->base + EDMA_REG_RFD_IDX_Q(EDMA_RXQ_ID)); + hw_tail = FIELD_GET(EDMA_RFD_CONS_IDX_MASK, hw_tail); + + if (hw_tail == erdr->tail) + return -EAGAIN; + + rx_pkt = net_rx_packets[erdr->tail]; + invalidate_dcache_range((unsigned long)rx_pkt, + (unsigned long)(rx_pkt + EDMA_MAX_PKT_SIZE)); + + rrd = (struct edma_rrd *)rx_pkt; + + /* Check if RRD is valid */ + if (!(rrd->rrd7 & EDMA_RRD7_DESC_VALID)) + return 0; + + *packetp = rx_pkt + EDMA_RRD_SIZE; + + /* get the packet size */ + return rrd->rrd6; +} + +static int ipq40xx_eth_free_pkt(struct udevice *dev, uchar *packet, + int length) +{ + struct essedma_priv *priv = dev_get_priv(dev); + struct edma_ring *erdr; + + erdr = &priv->rfd_ring; + + /* Update the producer index */ + writel(erdr->head, priv->base + EDMA_REG_RFD_IDX_Q(EDMA_RXQ_ID)); + + erdr->head++; + if (erdr->head == erdr->count) + erdr->head = 0; + + /* Update the consumer index */ + erdr->tail++; + if (erdr->tail == erdr->count) + erdr->tail = 0; + + writel(erdr->tail, + priv->base + EDMA_REG_RX_SW_CONS_IDX_Q(EDMA_RXQ_ID)); + + return 0; +} + +static int ipq40xx_eth_start(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + + ipq40xx_edma_init_rfd_ring(priv); + + ipq40xx_edma_start_rx_tx(priv); + ess_switch_enable_lookup(&priv->esw); + + return 0; +} + +/* + * One TPD would be enough for sending a packet; however, because the + * minimal cache line size is larger than the size of a TPD, it is not + * possible to flush only one at a time. To overcome this limitation, + * multiple TPDs are used for sending a single packet. 
+ */ +#define EDMA_TPDS_PER_PACKET 4 +#define EDMA_TPD_MIN_BYTES 4 +#define EDMA_MIN_PKT_SIZE (EDMA_TPDS_PER_PACKET * EDMA_TPD_MIN_BYTES) + +#define EDMA_TX_COMPLETE_TIMEOUT 1000000 + +static int ipq40xx_eth_send(struct udevice *dev, void *packet, int length) +{ + struct essedma_priv *priv = dev_get_priv(dev); + struct edma_tpd *first_tpd; + struct edma_tpd *tpds; + int i; + + if (length < EDMA_MIN_PKT_SIZE) + return 0; + + flush_dcache_range((unsigned long)(packet), + (unsigned long)(packet) + + roundup(length, ARCH_DMA_MINALIGN)); + + tpds = priv->tpd_ring.hw_desc; + for (i = 0; i < EDMA_TPDS_PER_PACKET; i++) { + struct edma_tpd *tpd; + void *frag; + + frag = packet + (i * EDMA_TPD_MIN_BYTES); + + /* get the next TPD */ + tpd = &tpds[priv->tpd_ring.head]; + if (i == 0) + first_tpd = tpd; + + /* update the software index */ + priv->tpd_ring.head++; + if (priv->tpd_ring.head == priv->tpd_ring.count) + priv->tpd_ring.head = 0; + + tpd->svlan_tag = 0; + tpd->addr = virt_to_phys(frag); + tpd->word3 = EDMA_PORT_ENABLE_ALL << EDMA_TPD_PORT_BITMAP_SHIFT; + + if (i < (EDMA_TPDS_PER_PACKET - 1)) { + tpd->len = EDMA_TPD_MIN_BYTES; + tpd->word1 = 0; + } else { + tpd->len = length; + tpd->word1 = 1 << EDMA_TPD_EOP_SHIFT; + } + + length -= EDMA_TPD_MIN_BYTES; + } + + /* make sure that memory writing completes */ + wmb(); + + flush_dcache_range((unsigned long)first_tpd, + (unsigned long)first_tpd + + EDMA_TPDS_PER_PACKET * sizeof(struct edma_tpd)); + + /* update the TX producer index */ + writel(priv->tpd_ring.head, + priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID)); + + /* Wait for TX DMA completion */ + for (i = 0; i < EDMA_TX_COMPLETE_TIMEOUT; i++) { + u32 r, prod, cons; + + r = readl(priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID)); + prod = FIELD_GET(EDMA_TPD_PROD_IDX_MASK, r); + cons = FIELD_GET(EDMA_TPD_CONS_IDX_MASK, r); + + if (cons == prod) + break; + + udelay(1); + } + + if (i == EDMA_TX_COMPLETE_TIMEOUT) + printf("TX timeout: packet not sent!\n"); + + /* update the software TX consumer index register */ + writel(priv->tpd_ring.head, + priv->base + EDMA_REG_TX_SW_CONS_IDX_Q(EDMA_TXQ_ID)); + + return 0; +} + +static void ipq40xx_eth_stop(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + + ess_switch_disable_lookup(&priv->esw); + ipq40xx_edma_stop_rx_tx(priv); +} + +static void ipq40xx_edma_free_ring(struct edma_ring *ring) +{ + free(ring->hw_desc); +} + +/* + * Free Tx and Rx rings + */ +static void ipq40xx_edma_free_rings(struct essedma_priv *priv) +{ + ipq40xx_edma_free_ring(&priv->tpd_ring); + ipq40xx_edma_free_ring(&priv->rfd_ring); +} + +/* + * ipq40xx_edma_alloc_ring() + * allocate edma ring descriptor. 
+ */ +static int ipq40xx_edma_alloc_ring(struct edma_ring *erd, + unsigned int desc_size) +{ + erd->head = 0; + erd->tail = 0; + + /* Alloc HW descriptors */ + erd->hw_size = roundup(desc_size * erd->count, + ARCH_DMA_MINALIGN); + + erd->hw_desc = memalign(CONFIG_SYS_CACHELINE_SIZE, erd->hw_size); + if (!erd->hw_desc) + return -ENOMEM; + + memset(erd->hw_desc, 0, erd->hw_size); + erd->dma = virt_to_phys(erd->hw_desc); + + return 0; + +} + +/* + * ipq40xx_edma_alloc_tx_rx_rings() + */ +static int ipq40xx_edma_alloc_tx_rx_rings(struct essedma_priv *priv) +{ + int ret; + + ret = ipq40xx_edma_alloc_ring(&priv->tpd_ring, + sizeof(struct edma_tpd)); + if (ret) + return ret; + + ret = ipq40xx_edma_alloc_ring(&priv->rfd_ring, + sizeof(struct edma_rfd)); + if (ret) + goto err_free_tpd; + + return 0; + +err_free_tpd: + ipq40xx_edma_free_ring(&priv->tpd_ring); + return ret; +} + +static int ipq40xx_eth_write_hwaddr(struct udevice *dev) +{ + struct eth_pdata *pdata = dev_get_plat(dev); + struct essedma_priv *priv = dev_get_priv(dev); + unsigned char *mac = pdata->enetaddr; + u32 mac_lo, mac_hi; + + mac_hi = ((u32)mac[0]) << 8 | (u32)mac[1]; + mac_lo = ((u32)mac[2]) << 24 | ((u32)mac[3]) << 16 | + ((u32)mac[4]) << 8 | (u32)mac[5]; + + writel(mac_lo, priv->base + REG_MAC_CTRL0); + writel(mac_hi, priv->base + REG_MAC_CTRL1); + + return 0; +} + +static int edma_init(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + int ret; + + priv->tpd_ring.count = IPQ40XX_EDMA_TX_RING_SIZE; + priv->rfd_ring.count = PKTBUFSRX; + + ret = ipq40xx_edma_alloc_tx_rx_rings(priv); + if (ret) + return -ENOMEM; + + ipq40xx_edma_stop_rx_tx(priv); + + /* Configure EDMA. */ + ipq40xx_edma_configure(priv); + + /* Configure descriptor rings */ + ipq40xx_edma_init_desc(priv); + + ess_switch_disable_lookup(&priv->esw); + + return 0; +} + +static int essedma_probe(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + int ret; + + priv->dev = dev; + + priv->base = dev_read_addr_name(dev, "edma"); + if (priv->base == FDT_ADDR_T_NONE) + return -EINVAL; + + priv->psgmii_base = dev_read_addr_name(dev, "psgmii_phy"); + if (priv->psgmii_base == FDT_ADDR_T_NONE) + return -EINVAL; + + priv->esw.base = dev_read_addr_name(dev, "base"); + if (priv->esw.base == FDT_ADDR_T_NONE) + return -EINVAL; + + ret = clk_get_by_name(dev, "ess", &priv->ess_clk); + if (ret) + return ret; + + ret = reset_get_by_name(dev, "ess", &priv->ess_rst); + if (ret) + return ret; + + ret = clk_enable(&priv->ess_clk); + if (ret) + return ret; + + ess_reset(dev); + + ret = uclass_get_device_by_driver(UCLASS_MDIO, + DM_DRIVER_GET(ipq4019_mdio), + &priv->mdio_dev); + if (ret) { + dev_dbg(dev, "Can't find IPQ4019 MDIO: %d\n", ret); + goto err; + } + + /* OF switch and PHY parsing and configuration */ + ret = essedma_of_switch(dev); + if (ret) + goto err; + + switch (priv->esw.port_wrapper_mode) { + case PHY_INTERFACE_MODE_PSGMII: + writel(PSGMIIPHY_PLL_VCO_VAL, + priv->psgmii_base + PSGMIIPHY_PLL_VCO_RELATED_CTRL); + writel(PSGMIIPHY_VCO_VAL, priv->psgmii_base + + PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_1); + /* wait for 10ms */ + mdelay(10); + writel(PSGMIIPHY_VCO_RST_VAL, priv->psgmii_base + + PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_1); + break; + case PHY_INTERFACE_MODE_RGMII: + writel(0x1, RGMII_TCSR_ESS_CFG); + writel(0x400, priv->esw.base + ESS_RGMII_CTRL); + break; + default: + printf("Unknown MII interface\n"); + } + + if (priv->esw.port_wrapper_mode == PHY_INTERFACE_MODE_PSGMII) + psgmii_self_test(dev); + + 
ess_switch_init(&priv->esw); + + ret = edma_init(dev); + if (ret) + goto err; + + return 0; + +err: + reset_assert(&priv->ess_rst); + clk_disable(&priv->ess_clk); + return ret; +} + +static int essedma_remove(struct udevice *dev) +{ + struct essedma_priv *priv = dev_get_priv(dev); + + ipq40xx_edma_free_rings(priv); + + clk_disable(&priv->ess_clk); + reset_assert(&priv->ess_rst); + + return 0; +} + +static const struct eth_ops essedma_eth_ops = { + .start = ipq40xx_eth_start, + .send = ipq40xx_eth_send, + .recv = ipq40xx_eth_recv, + .free_pkt = ipq40xx_eth_free_pkt, + .stop = ipq40xx_eth_stop, + .write_hwaddr = ipq40xx_eth_write_hwaddr, +}; + +static const struct udevice_id essedma_ids[] = { + { .compatible = "qcom,ipq4019-ess", }, + { } +}; + +U_BOOT_DRIVER(essedma) = { + .name = "essedma", + .id = UCLASS_ETH, + .of_match = essedma_ids, + .probe = essedma_probe, + .remove = essedma_remove, + .priv_auto = sizeof(struct essedma_priv), + .plat_auto = sizeof(struct eth_pdata), + .ops = &essedma_eth_ops, + .flags = DM_FLAG_ALLOC_PRIV_DMA, +}; diff --git a/drivers/net/essedma.h b/drivers/net/essedma.h new file mode 100644 index 0000000..067cb44 --- /dev/null +++ b/drivers/net/essedma.h @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2020 Sartura Ltd. + * + * Author: Robert Marko <robert.marko@sartura.hr> + * + * Copyright (c) 2021 Toco Technologies FZE <contact@toco.ae> + * Copyright (c) 2021 Gabor Juhos <j4g8y7@gmail.com> + * + * Qualcomm ESS EDMA ethernet driver + */ + +#ifndef _ESSEDMA_ETH_H +#define _ESSEDMA_ETH_H + +#define ESS_PORTS_NUM 6 + +#define ESS_RGMII_CTRL 0x4 + +#define ESS_GLOBAL_FW_CTRL1 0x624 + +#define ESS_PORT0_STATUS 0x7c +#define ESS_PORT_SPEED_MASK GENMASK(1, 0) +#define ESS_PORT_SPEED_1000 3 +#define ESS_PORT_SPEED_100 2 +#define ESS_PORT_SPEED_10 1 +#define ESS_PORT_TXMAC_EN BIT(2) +#define ESS_PORT_RXMAC_EN BIT(3) +#define ESS_PORT_TX_FLOW_EN BIT(4) +#define ESS_PORT_RX_FLOW_EN BIT(5) +#define ESS_PORT_DUPLEX_MODE BIT(6) + +#define ESS_PORT_LOOKUP_CTRL(_p) (0x660 + (_p) * 12) +#define ESS_PORT_LOOP_BACK_EN BIT(21) +#define ESS_PORT_VID_MEM_MASK GENMASK(6, 0) + +#define ESS_PORT_HOL_CTRL0(_p) (0x970 + (_p) * 8) +#define EG_PORT_QUEUE_NUM_MASK GENMASK(29, 24) + +/* Ports 0 and 5 have queues 0-5 + * Ports 1 to 4 have queues 0-3 + */ +#define EG_PRI5_QUEUE_NUM_MASK GENMASK(23, 20) +#define EG_PRI4_QUEUE_NUM_MASK GENMASK(19, 16) +#define EG_PRI3_QUEUE_NUM_MASK GENMASK(15, 12) +#define EG_PRI2_QUEUE_NUM_MASK GENMASK(11, 8) +#define EG_PRI1_QUEUE_NUM_MASK GENMASK(7, 4) +#define EG_PRI0_QUEUE_NUM_MASK GENMASK(3, 0) + +#define ESS_PORT_HOL_CTRL1(_p) (0x974 + (_p) * 8) +#define ESS_ING_BUF_NUM_0_MASK GENMASK(3, 0) + +/* QCA807x PHY registers */ +#define QCA807X_CHIP_CONFIGURATION 0x1f +#define QCA807X_MEDIA_PAGE_SELECT BIT(15) + +#define QCA807X_POWER_DOWN BIT(11) + +#define QCA807X_FUNCTION_CONTROL 0x10 +#define QCA807X_MDI_CROSSOVER_MODE_MASK GENMASK(6, 5) +#define QCA807X_MDI_CROSSOVER_MODE_MANUAL_MDI 0 +#define QCA807X_POLARITY_REVERSAL BIT(1) + +#define QCA807X_PHY_SPECIFIC 0x11 +#define QCA807X_PHY_SPECIFIC_LINK BIT(10) + +#define QCA807X_MMD7_CRC_PACKET_COUNTER 0x8029 +#define QCA807X_MMD7_PACKET_COUNTER_SELFCLR BIT(1) +#define QCA807X_MMD7_CRC_PACKET_COUNTER_EN BIT(0) +#define QCA807X_MMD7_VALID_EGRESS_COUNTER_2 0x802e + +/* PSGMII specific registers */ +#define PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_1 0x9c +#define PSGMIIPHY_VCO_VAL 0x4ada +#define PSGMIIPHY_VCO_RST_VAL 0xada +#define PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_2 0xa0 + 
+#define PSGMIIPHY_PLL_VCO_RELATED_CTRL 0x78c +#define PSGMIIPHY_PLL_VCO_VAL 0x2803 + +#define RGMII_TCSR_ESS_CFG 0x01953000 + +/* EDMA registers */ +#define IPQ40XX_EDMA_TX_RING_SIZE 8 +#define IPQ40XX_EDMA_RSS_TYPE_NONE 0x1 + +#define EDMA_RSS_TYPE 0 +#define EDMA_TPD_EOP_SHIFT 31 + +/* tpd word 3 bit 18-28 */ +#define EDMA_TPD_PORT_BITMAP_SHIFT 18 + +/* Enable Tx for all ports */ +#define EDMA_PORT_ENABLE_ALL 0x3E + +/* Edma receive consumer index */ +/* x = queue id */ +#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) +/* Edma transmit consumer index */ +#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) +/* TPD Index Register */ +#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) +/* Tx Descriptor Control Register */ +#define EDMA_REG_TPD_RING_SIZE 0x41C +#define EDMA_TPD_RING_SIZE_MASK 0xFFFF + +/* Transmit descriptor base address */ + /* x = queue id */ +#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) +#define EDMA_TPD_PROD_IDX_MASK GENMASK(15, 0) +#define EDMA_TPD_CONS_IDX_MASK GENMASK(31, 16) + +#define EDMA_REG_TX_SRAM_PART 0x400 +#define EDMA_LOAD_PTR_SHIFT 16 + +/* TXQ Control Register */ +#define EDMA_REG_TXQ_CTRL 0x404 +#define EDMA_TXQ_CTRL_TXQ_EN 0x20 +#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100 +#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0 +#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16 +#define EDMA_TXF_BURST 0x100 +#define EDMA_TPD_BURST 5 + +#define EDMA_REG_TXF_WATER_MARK 0x408 + +/* RSS Indirection Register */ +/* x = No. of indirection table */ +#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) +#define EDMA_NUM_IDT 16 +#define EDMA_RSS_IDT_VALUE 0x64206420 + +/* RSS Hash Function Type Register */ +#define EDMA_REG_RSS_TYPE 0x894 + +/* x = queue id */ +#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) +/* RFD Index Register */ +#define EDMA_RFD_BURST 8 +#define EDMA_RFD_THR 16 +#define EDMA_RFD_LTHR 0 +#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) + +#define EDMA_RFD_CONS_IDX_MASK GENMASK(27, 16) + +/* Rx Descriptor Control Register */ +#define EDMA_REG_RX_DESC0 0xA10 +#define EDMA_RFD_RING_SIZE_MASK 0xFFF +#define EDMA_RX_BUF_SIZE_MASK 0xFFFF +#define EDMA_RFD_RING_SIZE_SHIFT 0 +#define EDMA_RX_BUF_SIZE_SHIFT 16 + +#define EDMA_REG_RX_DESC1 0xA14 +#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0 +#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8 +#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16 + +/* RXQ Control Register */ +#define EDMA_REG_RXQ_CTRL 0xA18 +#define EDMA_FIFO_THRESH_128_BYTE 0x0 +#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002 +#define EDMA_RXQ_CTRL_EN 0x0000FF00 + +/* MAC Control Register */ +#define REG_MAC_CTRL0 0xC20 +#define REG_MAC_CTRL1 0xC24 + +/* Transmit Packet Descriptor */ +struct edma_tpd { + u16 len; /* full packet including CRC */ + u16 svlan_tag; /* vlan tag */ + u32 word1; /* byte 4-7 */ + u32 addr; /* address of buffer */ + u32 word3; /* byte 12 */ +}; + +/* Receive Return Descriptor */ +struct edma_rrd { + u16 rrd0; + u16 rrd1; + u16 rrd2; + u16 rrd3; + u16 rrd4; + u16 rrd5; + u16 rrd6; + u16 rrd7; +} __packed; + +#define EDMA_RRD_SIZE sizeof(struct edma_rrd) + +#define EDMA_RRD7_DESC_VALID BIT(15) + +/* Receive Free Descriptor */ +struct edma_rfd { + u32 buffer_addr; /* buffer address */ +}; + +#endif /* _ESSEDMA_ETH_H */ diff --git a/drivers/net/fec_mxc.c b/drivers/net/fec_mxc.c index 0a0d92b..d6d5cb5 100644 --- a/drivers/net/fec_mxc.c +++ b/drivers/net/fec_mxc.c @@ -615,8 +615,7 @@ static int fecmxc_init(struct udevice *dev) if (fec->xcv_type != SEVENWIRE) miiphy_restart_aneg(dev); #endif - fec_open(dev); - return 0; + return fec_open(dev); } 
/** @@ -818,6 +817,9 @@ static int fecmxc_recv(struct udevice *dev, int flags, uchar **packetp) return -ENOMEM; } + if (!(readl(&fec->eth->ecntrl) & FEC_ECNTRL_ETHER_EN)) + return 0; + /* Check if any critical events have happened */ ievent = readl(&fec->eth->ievent); writel(ievent, &fec->eth->ievent); @@ -1210,10 +1212,13 @@ static int fecmxc_set_ref_clk(struct clk *clk_ref, phy_interface_t interface) else if (interface == PHY_INTERFACE_MODE_RGMII || interface == PHY_INTERFACE_MODE_RGMII_ID || interface == PHY_INTERFACE_MODE_RGMII_RXID || - interface == PHY_INTERFACE_MODE_RGMII_TXID) + interface == PHY_INTERFACE_MODE_RGMII_TXID) { freq = 125000000; - else + if (is_imx93()) + freq = freq << 1; + } else { return -EINVAL; + } ret = clk_set_rate(clk_ref, freq); if (ret < 0) diff --git a/drivers/net/fm/eth.c b/drivers/net/fm/eth.c index 19f3f0f..63fe4b2 100644 --- a/drivers/net/fm/eth.c +++ b/drivers/net/fm/eth.c @@ -26,7 +26,8 @@ #include "fm.h" -#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII) +#if ((defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \ + !defined(CONFIG_BITBANGMII)) #define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \ TBIANA_FULL_DUPLEX) @@ -701,8 +702,11 @@ static int init_phy(struct fm_eth *fm_eth) supported |= SUPPORTED_2500baseX_Full; #endif +#if (CONFIG_IS_ENABLED(MII) || CONFIG_IS_ENABLED(CMD_MII)) && \ + !CONFIG_IS_ENABLED(BITBANGMII) if (fm_eth->type == FM_ETH_1G_E) dtsec_init_phy(fm_eth); +#endif #ifdef CONFIG_PHYLIB #ifdef CONFIG_DM_MDIO diff --git a/drivers/net/ftgmac100.c b/drivers/net/ftgmac100.c index 8781e50..f5ea2e7 100644 --- a/drivers/net/ftgmac100.c +++ b/drivers/net/ftgmac100.c @@ -26,6 +26,7 @@ #include <linux/io.h> #include <linux/iopoll.h> #include <linux/printk.h> +#include <linux/bitfield.h> #include "ftgmac100.h" @@ -57,6 +58,15 @@ enum ftgmac100_model { FTGMAC100_MODEL_FARADAY, FTGMAC100_MODEL_ASPEED, + FTGMAC100_MODEL_ASPEED_AST2700, +}; + +union ftgmac100_dma_addr { + dma_addr_t addr; + struct { + u32 lo; + u32 hi; + }; }; /** @@ -96,6 +106,8 @@ struct ftgmac100_data { /* End of RX/TX ring buffer bits. 
Depend on model */ u32 rxdes0_edorr_mask; u32 txdes0_edotr_mask; + + bool is_ast2700; }; /* @@ -222,7 +234,7 @@ static int ftgmac100_phy_init(struct udevice *dev) struct phy_device *phydev; int ret; - if (IS_ENABLED(CONFIG_DM_MDIO)) + if (IS_ENABLED(CONFIG_DM_MDIO) && priv->phy_mode != PHY_INTERFACE_MODE_NCSI) phydev = dm_eth_phy_connect(dev); else phydev = phy_connect(priv->bus, priv->phy_addr, dev, priv->phy_mode); @@ -320,8 +332,9 @@ static int ftgmac100_start(struct udevice *dev) struct eth_pdata *plat = dev_get_plat(dev); struct ftgmac100_data *priv = dev_get_priv(dev); struct ftgmac100 *ftgmac100 = priv->iobase; + union ftgmac100_dma_addr dma_addr = {.hi = 0, .lo = 0}; struct phy_device *phydev = priv->phydev; - unsigned int maccr; + unsigned int maccr, dblac, desc_size; ulong start, end; int ret; int i; @@ -341,6 +354,7 @@ static int ftgmac100_start(struct udevice *dev) priv->rx_index = 0; for (i = 0; i < PKTBUFSTX; i++) { + priv->txdes[i].txdes2 = 0; priv->txdes[i].txdes3 = 0; priv->txdes[i].txdes0 = 0; } @@ -351,7 +365,14 @@ static int ftgmac100_start(struct udevice *dev) flush_dcache_range(start, end); for (i = 0; i < PKTBUFSRX; i++) { - priv->rxdes[i].rxdes3 = (unsigned int)net_rx_packets[i]; + unsigned int ip_align = 0; + + dma_addr.addr = (dma_addr_t)net_rx_packets[i]; + priv->rxdes[i].rxdes2 = FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI, dma_addr.hi); + /* For IP alignment */ + if ((dma_addr.lo & (PKTALIGN - 1)) == 0) + ip_align = 2; + priv->rxdes[i].rxdes3 = dma_addr.lo + ip_align; priv->rxdes[i].rxdes0 = 0; } priv->rxdes[PKTBUFSRX - 1].rxdes0 = priv->rxdes0_edorr_mask; @@ -361,10 +382,25 @@ static int ftgmac100_start(struct udevice *dev) flush_dcache_range(start, end); /* transmit ring */ - writel((u32)priv->txdes, &ftgmac100->txr_badr); + dma_addr.addr = (dma_addr_t)priv->txdes; + writel(dma_addr.lo, &ftgmac100->txr_badr); + writel(dma_addr.hi, &ftgmac100->txr_badr_hi); /* receive ring */ - writel((u32)priv->rxdes, &ftgmac100->rxr_badr); + dma_addr.addr = (dma_addr_t)priv->rxdes; + writel(dma_addr.lo, &ftgmac100->rxr_badr); + writel(dma_addr.hi, &ftgmac100->rxr_badr_hi); + + /* Configure TX/RX descriptor size + * This size is calculated based on the cache line size. + */ + desc_size = ARCH_DMA_MINALIGN / FTGMAC100_DESC_UNIT; + /* The descriptor size is at least 2 descriptor units. */ + if (desc_size < 2) + desc_size = 2; + dblac = readl(&ftgmac100->dblac) & ~GENMASK(19, 12); + dblac |= FTGMAC100_DBLAC_RXDES_SIZE(desc_size) | FTGMAC100_DBLAC_TXDES_SIZE(desc_size); + writel(dblac, &ftgmac100->dblac); /* poll receive descriptor automatically */ writel(FTGMAC100_APTC_RXPOLL_CNT(1), &ftgmac100->aptc); @@ -382,6 +418,10 @@ static int ftgmac100_start(struct udevice *dev) FTGMAC100_MACCR_RX_RUNT | FTGMAC100_MACCR_RX_BROADPKT; + + if (priv->is_ast2700 && (priv->phydev->interface == PHY_INTERFACE_MODE_RMII || + priv->phydev->interface == PHY_INTERFACE_MODE_NCSI)) + maccr |= FTGMAC100_MACCR_RMII_ENABLE; + writel(maccr, &ftgmac100->maccr); ret = phy_startup(phydev); @@ -410,6 +450,14 @@ static int ftgmac100_free_pkt(struct udevice *dev, uchar *packet, int length) ulong des_end = des_start + roundup(sizeof(*curr_des), ARCH_DMA_MINALIGN); + /* + * Make sure there are no stale data in write-back over this area, which + * might get written into the memory while the ftgmac100 also writes + * into the same memory area. 
+ */ + flush_dcache_range((ulong)net_rx_packets[priv->rx_index], + (ulong)net_rx_packets[priv->rx_index] + PKTSIZE_ALIGN); + /* Release buffer to DMA and flush descriptor */ curr_des->rxdes0 &= ~FTGMAC100_RXDES0_RXPKT_RDY; flush_dcache_range(des_start, des_end); @@ -431,9 +479,11 @@ static int ftgmac100_recv(struct udevice *dev, int flags, uchar **packetp) ulong des_start = ((ulong)curr_des) & ~(ARCH_DMA_MINALIGN - 1); ulong des_end = des_start + roundup(sizeof(*curr_des), ARCH_DMA_MINALIGN); - ulong data_start = curr_des->rxdes3; + union ftgmac100_dma_addr data_start = { .lo = 0, .hi = 0 }; ulong data_end; + data_start.hi = FIELD_GET(FTGMAC100_RXDES2_RXBUF_BADR_HI, curr_des->rxdes2); + data_start.lo = curr_des->rxdes3; invalidate_dcache_range(des_start, des_end); if (!(curr_des->rxdes0 & FTGMAC100_RXDES0_RXPKT_RDY)) @@ -453,9 +503,9 @@ static int ftgmac100_recv(struct udevice *dev, int flags, uchar **packetp) __func__, priv->rx_index, rxlen); /* Invalidate received data */ - data_end = data_start + roundup(rxlen, ARCH_DMA_MINALIGN); - invalidate_dcache_range(data_start, data_end); - *packetp = (uchar *)data_start; + data_end = data_start.addr + roundup(rxlen, ARCH_DMA_MINALIGN); + invalidate_dcache_range(data_start.addr, data_end); + *packetp = (uchar *)data_start.addr; return rxlen; } @@ -481,6 +531,7 @@ static int ftgmac100_send(struct udevice *dev, void *packet, int length) struct ftgmac100_data *priv = dev_get_priv(dev); struct ftgmac100 *ftgmac100 = priv->iobase; struct ftgmac100_txdes *curr_des = &priv->txdes[priv->tx_index]; + union ftgmac100_dma_addr dma_addr; ulong des_start = ((ulong)curr_des) & ~(ARCH_DMA_MINALIGN - 1); ulong des_end = des_start + roundup(sizeof(*curr_des), ARCH_DMA_MINALIGN); @@ -499,10 +550,12 @@ static int ftgmac100_send(struct udevice *dev, void *packet, int length) length = (length < ETH_ZLEN) ? 
ETH_ZLEN : length; - curr_des->txdes3 = (unsigned int)packet; + dma_addr.addr = (dma_addr_t)packet; + curr_des->txdes2 = FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI, dma_addr.hi); + curr_des->txdes3 = dma_addr.lo; /* Flush data to be sent */ - data_start = curr_des->txdes3; + data_start = (ulong)dma_addr.addr; data_end = data_start + roundup(length, ARCH_DMA_MINALIGN); flush_dcache_range(data_start, data_end); @@ -565,6 +618,11 @@ static int ftgmac100_of_to_plat(struct udevice *dev) if (dev_get_driver_data(dev) == FTGMAC100_MODEL_ASPEED) { priv->rxdes0_edorr_mask = BIT(30); priv->txdes0_edotr_mask = BIT(30); + priv->is_ast2700 = false; + } else if (dev_get_driver_data(dev) == FTGMAC100_MODEL_ASPEED_AST2700) { + priv->rxdes0_edorr_mask = BIT(30); + priv->txdes0_edotr_mask = BIT(30); + priv->is_ast2700 = true; } else { priv->rxdes0_edorr_mask = BIT(15); priv->txdes0_edotr_mask = BIT(15); @@ -655,10 +713,11 @@ static const struct eth_ops ftgmac100_ops = { }; static const struct udevice_id ftgmac100_ids[] = { - { .compatible = "faraday,ftgmac100", .data = FTGMAC100_MODEL_FARADAY }, - { .compatible = "aspeed,ast2500-mac", .data = FTGMAC100_MODEL_ASPEED }, - { .compatible = "aspeed,ast2600-mac", .data = FTGMAC100_MODEL_ASPEED }, - { } + { .compatible = "faraday,ftgmac100", .data = FTGMAC100_MODEL_FARADAY }, + { .compatible = "aspeed,ast2500-mac", .data = FTGMAC100_MODEL_ASPEED }, + { .compatible = "aspeed,ast2600-mac", .data = FTGMAC100_MODEL_ASPEED }, + { .compatible = "aspeed,ast2700-mac", .data = FTGMAC100_MODEL_ASPEED_AST2700 }, + {} }; U_BOOT_DRIVER(ftgmac100) = { diff --git a/drivers/net/ftgmac100.h b/drivers/net/ftgmac100.h index f7874ae..c38b57c 100644 --- a/drivers/net/ftgmac100.h +++ b/drivers/net/ftgmac100.h @@ -66,6 +66,13 @@ struct ftgmac100 { unsigned int rx_runt; /* 0xc0 */ unsigned int rx_crcer_ftl; /* 0xc4 */ unsigned int rx_col_lost; /* 0xc8 */ + unsigned int reserved[43]; /* 0xcc - 0x174 */ + unsigned int txr_badr_lo; /* 0x178, defined in ast2700 */ + unsigned int txr_badr_hi; /* 0x17c, defined in ast2700 */ + unsigned int hptxr_badr_lo; /* 0x180, defined in ast2700 */ + unsigned int hptxr_badr_hi; /* 0x184, defined in ast2700 */ + unsigned int rxr_badr_lo; /* 0x188, defined in ast2700 */ + unsigned int rxr_badr_hi; /* 0x18c, defined in ast2700 */ }; /* @@ -111,6 +118,7 @@ struct ftgmac100 { #define FTGMAC100_DBLAC_TXBURST_SIZE(x) (((x) & 0x3) << 10) #define FTGMAC100_DBLAC_RXDES_SIZE(x) (((x) & 0xf) << 12) #define FTGMAC100_DBLAC_TXDES_SIZE(x) (((x) & 0xf) << 16) +#define FTGMAC100_DESC_UNIT 8 #define FTGMAC100_DBLAC_IFG_CNT(x) (((x) & 0x7) << 20) #define FTGMAC100_DBLAC_IFG_INC BIT(23) @@ -157,6 +165,7 @@ struct ftgmac100 { #define FTGMAC100_MACCR_RX_BROADPKT BIT(17) #define FTGMAC100_MACCR_DISCARD_CRCERR BIT(18) #define FTGMAC100_MACCR_FAST_MODE BIT(19) +#define FTGMAC100_MACCR_RMII_ENABLE BIT(20) /* defined in ast2700 */ #define FTGMAC100_MACCR_SW_RST BIT(31) /* @@ -183,7 +192,7 @@ struct ftgmac100_txdes { unsigned int txdes1; unsigned int txdes2; /* not used by HW */ unsigned int txdes3; /* TXBUF_BADR */ -} __aligned(16); +} __aligned(ARCH_DMA_MINALIGN); #define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff) #define FTGMAC100_TXDES0_EDOTR BIT(15) @@ -201,6 +210,8 @@ struct ftgmac100_txdes { #define FTGMAC100_TXDES1_TX2FIC BIT(30) #define FTGMAC100_TXDES1_TXIC BIT(31) +#define FTGMAC100_TXDES2_TXBUF_BADR_HI GENMASK(18, 16) + /* * Receive descriptor, aligned to 16 bytes */ @@ -209,7 +220,7 @@ struct ftgmac100_rxdes { unsigned int rxdes1; unsigned int rxdes2; /* not used by HW 
*/ unsigned int rxdes3; /* RXBUF_BADR */ -} __aligned(16); +} __aligned(ARCH_DMA_MINALIGN); #define FTGMAC100_RXDES0_VDBC(x) ((x) & 0x3fff) #define FTGMAC100_RXDES0_EDORR BIT(15) @@ -240,4 +251,6 @@ struct ftgmac100_rxdes { #define FTGMAC100_RXDES1_UDP_CHKSUM_ERR BIT(26) #define FTGMAC100_RXDES1_IP_CHKSUM_ERR BIT(27) +#define FTGMAC100_RXDES2_RXBUF_BADR_HI GENMASK(18, 16) + #endif /* __FTGMAC100_H */ diff --git a/drivers/net/ksz9477.c b/drivers/net/ksz9477.c index 43baa69..7ebbe19 100644 --- a/drivers/net/ksz9477.c +++ b/drivers/net/ksz9477.c @@ -11,7 +11,12 @@ #include <eth_phy.h> #include <linux/delay.h> #include <miiphy.h> -#include <i2c.h> +#if CONFIG_IS_ENABLED(DM_I2C) +# include <i2c.h> +#endif +#if CONFIG_IS_ENABLED(DM_SPI) +# include <spi.h> +#endif #include <net/dsa.h> #include <asm-generic/gpio.h> @@ -71,15 +76,157 @@ #define MMD_SETUP(mode, dev) (((u16)(mode) << PORT_MMD_OP_MODE_S) | (dev)) #define REG_PORT_PHY_MMD_INDEX_DATA 0x011C +/* SPI specific define (opcodes) */ +#define KSZ_SPI_OP_RD 3 +#define KSZ_SPI_OP_WR 2 + +#define KSZ9477_SPI_ADDR_SHIFT 24 +#define KSZ9477_SPI_ADDR_ALIGN 3 +#define KSZ9477_SPI_TURNAROUND_SHIFT 5 + +/** + * struct ksz_phy_ops - low-level KSZ bus operations + */ +struct ksz_phy_ops { + /* read() - Read bytes from the device + * + * @udev: bus device + * @reg: register offset + * @val: data read + * @len: Number of bytes to read + * + * @return: 0 on success, negative on failure + */ + int (*read)(struct udevice *udev, u32 reg, u8 *val, int len); + + /* write() - Write bytes to the device + * + * @udev: bus device + * @reg: register offset + * @val: data to write + * @len: Number of bytes to write + * + * @return: 0 on success, negative on failure + */ + int (*write)(struct udevice *udev, u32 reg, u8 *val, int len); +}; + struct ksz_dsa_priv { struct udevice *dev; + struct ksz_phy_ops *phy_ops; u32 features; /* chip specific features */ }; +#if CONFIG_IS_ENABLED(DM_I2C) +static inline int ksz_i2c_read(struct udevice *dev, u32 reg, u8 *val, int len) +{ + return dm_i2c_read(dev, reg, val, len); +} + +static inline int ksz_i2c_write(struct udevice *dev, u32 reg, u8 *val, int len) +{ + return dm_i2c_write(dev, reg, val, len); +} + +static struct ksz_phy_ops phy_i2c_ops = { + .read = ksz_i2c_read, + .write = ksz_i2c_write, +}; +#endif + +#if CONFIG_IS_ENABLED(DM_SPI) +/** + * ksz_spi_xfer() - only used for 8/16/32 bits bus access + * + * @dev: The SPI slave device which will be sending/receiving the data. + * @reg: register address. + * @out: Pointer to a string of bits to send out. The bits are + * held in a byte array and are sent MSB first. + * @in: Pointer to a string of bits that will be filled in. + * @len: number of bytes to read. + * + * Return: 0 on success, not 0 on failure + */ +static int ksz_spi_xfer(struct udevice *dev, u32 reg, const u8 *out, + u8 *in, u16 len) +{ + int ret; + u32 addr = 0; + u8 opcode; + + if (in && out) { + printf("%s: can't do full duplex\n", __func__); + return -EINVAL; + } + + if (len > 4 || len == 0) { + printf("%s: only 8/16/32 bits bus access supported\n", + __func__); + return -EINVAL; + } + + ret = dm_spi_claim_bus(dev); + if (ret < 0) { + printf("%s: could not claim bus\n", __func__); + return ret; + } + + opcode = (in ? KSZ_SPI_OP_RD : KSZ_SPI_OP_WR); + + /* The actual device address space is 16 bits (A15 - A0), + * so the values of address bits A23 - A16 in the SPI + * command/address phase are “don't care”. 
+ */ + addr |= opcode << (KSZ9477_SPI_ADDR_SHIFT + KSZ9477_SPI_TURNAROUND_SHIFT); + addr |= reg << KSZ9477_SPI_TURNAROUND_SHIFT; + + addr = __swab32(addr); + + ret = dm_spi_xfer(dev, 32, &addr, NULL, SPI_XFER_BEGIN); + if (ret) { + printf("%s ERROR: dm_spi_xfer addr (%u)\n", __func__, ret); + goto release_bus; + } + + ret = dm_spi_xfer(dev, len * 8, out, in, SPI_XFER_END); + if (ret) { + printf("%s ERROR: dm_spi_xfer data (%u)\n", __func__, ret); + goto release_bus; + } + +release_bus: + /* If an error occurred, release the chip by deasserting the CS */ + if (ret < 0) + dm_spi_xfer(dev, 0, NULL, NULL, SPI_XFER_END); + + dm_spi_release_bus(dev); + + return ret; +} + +static inline int ksz_spi_read(struct udevice *dev, u32 reg, u8 *val, int len) +{ + return ksz_spi_xfer(dev, reg, NULL, val, len); +} + +static inline int ksz_spi_write(struct udevice *dev, u32 reg, u8 *val, int len) +{ + return ksz_spi_xfer(dev, reg, val, NULL, len); +} + +static struct ksz_phy_ops phy_spi_ops = { + .read = ksz_spi_read, + .write = ksz_spi_write, +}; +#endif + static inline int ksz_read8(struct udevice *dev, u32 reg, u8 *val) { - int ret = dm_i2c_read(dev, reg, val, 1); + struct ksz_dsa_priv *priv = dev_get_priv(dev); + struct ksz_phy_ops *phy_ops = priv->phy_ops; + + int ret = phy_ops->read(dev, reg, val, 1); dev_dbg(dev, "%s 0x%04x<<0x%02x\n", __func__, reg, *val); @@ -93,8 +240,11 @@ static inline int ksz_pread8(struct udevice *dev, int port, int reg, u8 *val) static inline int ksz_write8(struct udevice *dev, u32 reg, u8 val) { + struct ksz_dsa_priv *priv = dev_get_priv(dev); + struct ksz_phy_ops *phy_ops = priv->phy_ops; + dev_dbg(dev, "%s 0x%04x>>0x%02x\n", __func__, reg, val); - return dm_i2c_write(dev, reg, &val, 1); + return phy_ops->write(dev, reg, &val, 1); } static inline int ksz_pwrite8(struct udevice *dev, int port, int reg, u8 val) @@ -104,13 +254,15 @@ static inline int ksz_pwrite8(struct udevice *dev, int port, int reg, u8 val) static inline int ksz_write16(struct udevice *dev, u32 reg, u16 val) { + struct ksz_dsa_priv *priv = dev_get_priv(dev); + struct ksz_phy_ops *phy_ops = priv->phy_ops; u8 buf[2]; buf[1] = val & 0xff; buf[0] = val >> 8; dev_dbg(dev, "%s 0x%04x>>0x%04x\n", __func__, reg, val); - return dm_i2c_write(dev, reg, buf, 2); + return phy_ops->write(dev, reg, buf, 2); } static inline int ksz_pwrite16(struct udevice *dev, int port, int reg, u16 val) @@ -120,10 +272,12 @@ static inline int ksz_pwrite16(struct udevice *dev, int port, int reg, u16 val) static inline int ksz_read16(struct udevice *dev, u32 reg, u16 *val) { + struct ksz_dsa_priv *priv = dev_get_priv(dev); + struct ksz_phy_ops *phy_ops = priv->phy_ops; u8 buf[2]; int ret; - ret = dm_i2c_read(dev, reg, buf, 2); + ret = phy_ops->read(dev, reg, buf, 2); *val = (buf[0] << 8) | buf[1]; dev_dbg(dev, "%s 0x%04x<<0x%04x\n", __func__, reg, *val); @@ -137,7 +291,10 @@ static inline int ksz_pread16(struct udevice *dev, int port, int reg, u16 *val) static inline int ksz_read32(struct udevice *dev, u32 reg, u32 *val) { - return dm_i2c_read(dev, reg, (u8 *)val, 4); + struct ksz_dsa_priv *priv = dev_get_priv(dev); + struct ksz_phy_ops *phy_ops = priv->phy_ops; + + return phy_ops->read(dev, reg, (u8 *)val, 4); } static inline int ksz_pread32(struct udevice *dev, int port, int reg, u32 *val) @@ -147,6 +304,8 @@ static inline int ksz_pread32(struct udevice *dev, int port, int reg, u32 *val) static inline int ksz_write32(struct udevice *dev, u32 reg, u32 val) { + struct ksz_dsa_priv *priv = dev_get_priv(dev); + struct ksz_phy_ops *phy_ops = 
priv->phy_ops; u8 buf[4]; buf[3] = val & 0xff; @@ -155,7 +314,7 @@ static inline int ksz_write32(struct udevice *dev, u32 reg, u32 val) buf[0] = (val >> 8) & 0xff; dev_dbg(dev, "%s 0x%04x>>0x%04x\n", __func__, reg, val); - return dm_i2c_write(dev, reg, buf, 4); + return phy_ops->write(dev, reg, buf, 4); } static inline int ksz_pwrite32(struct udevice *dev, int port, int reg, u32 val) @@ -276,7 +435,7 @@ static int ksz_mdio_probe(struct udevice *dev) struct ksz_mdio_priv *priv = dev_get_priv(dev); dev_dbg(dev, "%s\n", __func__); - priv->ksz = dev_get_parent_priv(dev->parent); + priv->ksz = dev_get_priv(dev->parent); return 0; } @@ -355,12 +514,12 @@ static int ksz_port_setup(struct udevice *dev, int port, phy_interface_t interface) { struct dsa_pdata *pdata = dev_get_uclass_plat(dev); + struct ksz_dsa_priv *priv = dev_get_priv(dev); u8 data8; dev_dbg(dev, "%s P%d %s\n", __func__, port + 1, (port == pdata->cpu_port) ? "cpu" : ""); - struct ksz_dsa_priv *priv = dev_get_priv(dev); if (port != pdata->cpu_port) { if (priv->features & NEW_XMII) /* phy port: config errata and leds */ @@ -503,23 +662,59 @@ static int ksz_probe_mdio(struct udevice *dev) return 0; } -/* - * I2C driver - */ -static int ksz_i2c_probe(struct udevice *dev) +static void ksz_ops_register(struct udevice *dev, struct ksz_phy_ops *ops) +{ + struct ksz_dsa_priv *priv = dev_get_priv(dev); + + priv->phy_ops = ops; +} + +static bool dsa_ksz_check_ops(struct ksz_phy_ops *phy_ops) +{ + if (!phy_ops || !phy_ops->read || !phy_ops->write) + return false; + + return true; +} + +static int ksz_probe(struct udevice *dev) { struct dsa_pdata *pdata = dev_get_uclass_plat(dev); struct ksz_dsa_priv *priv = dev_get_priv(dev); + enum uclass_id parent_id = UCLASS_INVALID; int i, ret; u8 data8; u32 id; - dev_set_parent_priv(dev, priv); + parent_id = device_get_uclass_id(dev_get_parent(dev)); + switch (parent_id) { +#if CONFIG_IS_ENABLED(DM_I2C) + case UCLASS_I2C: { + ksz_ops_register(dev, &phy_i2c_ops); - ret = i2c_set_chip_offset_len(dev, 2); - if (ret) { - printf("i2c_set_chip_offset_len failed: %d\n", ret); - return ret; + ret = i2c_set_chip_offset_len(dev, 2); + if (ret) { + printf("i2c_set_chip_offset_len failed: %d\n", ret); + return ret; + } + break; + } +#endif +#if CONFIG_IS_ENABLED(DM_SPI) + case UCLASS_SPI: { + ksz_ops_register(dev, &phy_spi_ops); + break; + } +#endif + default: + dev_err(dev, "invalid parent bus (%s)\n", + uclass_get_name(parent_id)); + return -EINVAL; + } + + if (!dsa_ksz_check_ops(priv->phy_ops)) { + printf("Driver bug. 
No bus ops defined\n"); + return -EINVAL; } /* default config */ @@ -543,6 +738,9 @@ static int ksz_i2c_probe(struct udevice *dev) case 0x00956700: puts("KSZ9567R: "); break; + case 0x00989600: + puts("KSZ9896C: "); + break; case 0x00989700: puts("KSZ9897S: "); break; @@ -573,19 +771,20 @@ static int ksz_i2c_probe(struct udevice *dev) return 0; }; -static const struct udevice_id ksz_i2c_ids[] = { +static const struct udevice_id ksz_ids[] = { { .compatible = "microchip,ksz9897" }, { .compatible = "microchip,ksz9477" }, { .compatible = "microchip,ksz9567" }, { .compatible = "microchip,ksz9893" }, + { .compatible = "microchip,ksz9896" }, { } }; U_BOOT_DRIVER(ksz) = { .name = "ksz-switch", .id = UCLASS_DSA, - .of_match = ksz_i2c_ids, - .probe = ksz_i2c_probe, + .of_match = ksz_ids, + .probe = ksz_probe, .ops = &ksz_dsa_ops, .priv_auto = sizeof(struct ksz_dsa_priv), }; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 73064b2..13e7381 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -11,7 +11,7 @@ config MV88E6352_SWITCH menuconfig PHYLIB bool "Ethernet PHY (physical media interface) support" - depends on NET + depends on NET || NET_LWIP help Enable Ethernet PHY (physical media interface) support. @@ -368,6 +368,7 @@ config PHY_FIXED config PHY_NCSI bool "NC-SI based PHY" + depends on NET endif #PHYLIB diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c index a96430c..4d67203 100644 --- a/drivers/net/phy/motorcomm.c +++ b/drivers/net/phy/motorcomm.c @@ -12,6 +12,7 @@ #define PHY_ID_YT8511 0x0000010a #define PHY_ID_YT8531 0x4f51e91b +#define PHY_ID_YT8821 0x4f51ea19 #define PHY_ID_MASK GENMASK(31, 0) /* Extended Register's Address Offset Register */ @@ -102,8 +103,12 @@ #define YTPHY_SPECIFIC_STATUS_REG 0x11 #define YTPHY_DUPLEX_MASK BIT(13) #define YTPHY_DUPLEX_SHIFT 13 -#define YTPHY_SPEED_MODE_MASK GENMASK(15, 14) -#define YTPHY_SPEED_MODE_SHIFT 14 +#define YTPHY_SPEED_MASK ((0x3 << 14) | BIT(9)) +#define YTPHY_SPEED_10M ((0x0 << 14)) +#define YTPHY_SPEED_100M ((0x1 << 14)) +#define YTPHY_SPEED_1000M ((0x2 << 14)) +#define YTPHY_SPEED_10G ((0x3 << 14)) +#define YTPHY_SPEED_2500M ((0x0 << 14) | BIT(9)) #define YT8531_EXTREG_SLEEP_CONTROL1_REG 0x27 #define YT8531_ESC1R_SLEEP_SW BIT(15) @@ -131,6 +136,91 @@ #define TX_CLK_100_INVERTED BIT(4) #define TX_CLK_1000_INVERTED BIT(5) +#define YT8821_SDS_EXT_CSR_CTRL_REG 0x23 +#define YT8821_SDS_EXT_CSR_VCO_LDO_EN BIT(15) +#define YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN BIT(8) + +#define YT8821_UTP_EXT_PI_CTRL_REG 0x56 +#define YT8821_UTP_EXT_PI_RST_N_FIFO BIT(5) +#define YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE BIT(4) +#define YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE BIT(3) +#define YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE BIT(2) +#define YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE BIT(1) +#define YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE BIT(0) + +#define YT8821_UTP_EXT_VCT_CFG6_CTRL_REG 0x97 +#define YT8821_UTP_EXT_FECHO_AMP_TH_HUGE GENMASK(15, 8) + +#define YT8821_UTP_EXT_ECHO_CTRL_REG 0x336 +#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000 GENMASK(14, 8) + +#define YT8821_UTP_EXT_GAIN_CTRL_REG 0x340 +#define YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000 GENMASK(6, 0) + +#define YT8821_UTP_EXT_RPDN_CTRL_REG 0x34E +#define YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 BIT(15) +#define YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 BIT(7) +#define YT8821_UTP_EXT_RPDN_IPR_SHT_2500 GENMASK(6, 0) + +#define YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG 0x36A +#define YT8821_UTP_EXT_TH_20DB_2500 GENMASK(15, 0) + +#define YT8821_UTP_EXT_TRACE_CTRL_REG 0x372 +#define 
YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 GENMASK(14, 8) +#define YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500 GENMASK(6, 0) + +#define YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG 0x374 +#define YT8821_UTP_EXT_ALPHA_SHT_2500 GENMASK(14, 8) +#define YT8821_UTP_EXT_IPR_LNG_2500 GENMASK(6, 0) + +#define YT8821_UTP_EXT_PLL_CTRL_REG 0x450 +#define YT8821_UTP_EXT_PLL_SPARE_CFG GENMASK(7, 0) + +#define YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG 0x466 +#define YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG 0x467 +#define YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG 0x468 +#define YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG 0x469 +#define YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG GENMASK(14, 8) +#define YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG GENMASK(6, 0) + +#define YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG 0x4B3 +#define YT8821_UTP_EXT_MU_COARSE_FR_F_FFE GENMASK(14, 12) +#define YT8821_UTP_EXT_MU_COARSE_FR_F_FBE GENMASK(10, 8) + +#define YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG 0x4B5 +#define YT8821_UTP_EXT_MU_FINE_FR_F_FFE GENMASK(14, 12) +#define YT8821_UTP_EXT_MU_FINE_FR_F_FBE GENMASK(10, 8) + +#define YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG 0x4D2 +#define YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER GENMASK(7, 4) +#define YT8821_UTP_EXT_VGA_LPF1_CAP_2500 GENMASK(3, 0) + +#define YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG 0x4D3 +#define YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER GENMASK(7, 4) +#define YT8821_UTP_EXT_VGA_LPF2_CAP_2500 GENMASK(3, 0) + +#define YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG 0x660 +#define YT8821_UTP_EXT_NFR_TX_ABILITY BIT(3) + +#define YT8821_CHIP_MODE_FORCE_BX2500 1 + +/* chip config register */ +#define YTPHY_CCR_MODE_SEL_MASK GENMASK(2, 0) + +#define YTPHY_REG_SPACE_SELECT_REG 0xA000 +#define YTPHY_RSSR_SPACE_MASK BIT(1) +#define YTPHY_RSSR_FIBER_SPACE (0x1 << 1) +#define YTPHY_RSSR_UTP_SPACE (0x0 << 1) + struct ytphy_plat_priv { u32 rx_delay_ps; u32 tx_delay_ps; @@ -295,15 +385,15 @@ static int yt8531_parse_status(struct phy_device *phydev) if (val < 0) return val; - speed_mode = (val & YTPHY_SPEED_MODE_MASK) >> YTPHY_SPEED_MODE_SHIFT; + speed_mode = (val & YTPHY_SPEED_MASK); switch (speed_mode) { - case 2: + case YTPHY_SPEED_1000M: speed = SPEED_1000; break; - case 1: + case YTPHY_SPEED_100M: speed = SPEED_100; break; - default: + case YTPHY_SPEED_10M: speed = SPEED_10; break; } @@ -632,6 +722,398 @@ static int yt8531_probe(struct phy_device *phydev) return 0; } +static int ytphy_save_page(struct phy_device *phydev) +{ + int old_page; + + old_page = ytphy_read_ext(phydev, YTPHY_REG_SPACE_SELECT_REG); + if (old_page < 0) + return old_page; + + if ((old_page & YTPHY_RSSR_SPACE_MASK) == YTPHY_RSSR_FIBER_SPACE) + return YTPHY_RSSR_FIBER_SPACE; + + return YTPHY_RSSR_UTP_SPACE; +}; + +static int ytphy_restore_page(struct phy_device *phydev, int page, + int ret) +{ + int mask = YTPHY_RSSR_SPACE_MASK; + int set; + int r; + + if ((page & YTPHY_RSSR_SPACE_MASK) == YTPHY_RSSR_FIBER_SPACE) + set = YTPHY_RSSR_FIBER_SPACE; + else + set = YTPHY_RSSR_UTP_SPACE; + + r = ytphy_modify_ext(phydev, YTPHY_REG_SPACE_SELECT_REG, mask, + set); + if (ret >= 0 && r < 0) + ret = r; + + return ret; +}; + +static int ytphy_write_ext(struct phy_device *phydev, u16 regnum, + u16 val) +{ + int ret; + + ret = phy_write(phydev, 
MDIO_DEVAD_NONE, + YTPHY_PAGE_SELECT, regnum); + if (ret < 0) + return ret; + + return phy_write(phydev, MDIO_DEVAD_NONE, YTPHY_PAGE_DATA, val); +} + +static int yt8821_probe(struct phy_device *phydev) +{ + phydev->advertising = PHY_GBIT_FEATURES | + SUPPORTED_2500baseX_Full | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + phydev->supported = phydev->advertising; + + return 0; +} + +static int yt8821_serdes_init(struct phy_device *phydev) +{ + int old_page; + u16 mask; + u16 set; + int ret; + + old_page = ytphy_save_page(phydev); + if (old_page < 0) + return old_page; + + ret = ytphy_modify_ext(phydev, YTPHY_REG_SPACE_SELECT_REG, + YTPHY_RSSR_SPACE_MASK, + YTPHY_RSSR_FIBER_SPACE); + if (ret < 0) + goto err_restore_page; + + ret = phy_modify(phydev, MDIO_DEVAD_NONE, MII_BMCR, + BMCR_ANENABLE, 0); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_SDS_EXT_CSR_VCO_LDO_EN | + YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN; + set = YT8821_SDS_EXT_CSR_VCO_LDO_EN; + ret = ytphy_modify_ext(phydev, YT8821_SDS_EXT_CSR_CTRL_REG, mask, + set); + +err_restore_page: + return ytphy_restore_page(phydev, old_page, ret); +} + +static int yt8821_utp_init(struct phy_device *phydev) +{ + int old_page; + u16 mask; + u16 save; + u16 set; + int ret; + + old_page = ytphy_save_page(phydev); + if (old_page < 0) + return old_page; + + ret = ytphy_modify_ext(phydev, YTPHY_REG_SPACE_SELECT_REG, + YTPHY_RSSR_SPACE_MASK, + YTPHY_RSSR_UTP_SPACE); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 | + YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 | + YT8821_UTP_EXT_RPDN_IPR_SHT_2500; + set = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 | + YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500; + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_RPDN_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER | + YT8821_UTP_EXT_VGA_LPF1_CAP_2500; + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG, + mask, 0); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER | + YT8821_UTP_EXT_VGA_LPF2_CAP_2500; + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG, + mask, 0); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 | + YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500; + set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500, 0x5a) | + FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500, 0x3c); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_TRACE_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_IPR_LNG_2500; + set = FIELD_PREP(YT8821_UTP_EXT_IPR_LNG_2500, 0x6c); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000; + set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000, 0x2a); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_ECHO_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000; + set = FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000, 0x22); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_GAIN_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_TH_20DB_2500; + set = FIELD_PREP(YT8821_UTP_EXT_TH_20DB_2500, 0x8000); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_MU_COARSE_FR_F_FFE | + YT8821_UTP_EXT_MU_COARSE_FR_F_FBE; + set 
= FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FFE, 0x7) | + FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FBE, 0x7); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_MU_FINE_FR_F_FFE | + YT8821_UTP_EXT_MU_FINE_FR_F_FBE; + set = FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FFE, 0x2) | + FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FBE, 0x2); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + /* save YT8821_UTP_EXT_PI_CTRL_REG's val for use later */ + ret = ytphy_read_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG); + if (ret < 0) + goto err_restore_page; + + save = ret; + + mask = YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE | + YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE; + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, + mask, 0); + if (ret < 0) + goto err_restore_page; + + /* restore YT8821_UTP_EXT_PI_CTRL_REG's val */ + ret = ytphy_write_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, save); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_FECHO_AMP_TH_HUGE; + set = FIELD_PREP(YT8821_UTP_EXT_FECHO_AMP_TH_HUGE, 0x38); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_VCT_CFG6_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_NFR_TX_ABILITY; + set = YT8821_UTP_EXT_NFR_TX_ABILITY; + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_PLL_SPARE_CFG; + set = FIELD_PREP(YT8821_UTP_EXT_PLL_SPARE_CFG, 0xe9); + ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PLL_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG | + YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG | + YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG | + YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG, + mask, set); + if (ret < 0) + goto err_restore_page; + + mask = YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG | + YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG; + set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG, 0x64) | + FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG, 0x64); + ret = ytphy_modify_ext(phydev, + YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG, + mask, set); + +err_restore_page: + return ytphy_restore_page(phydev, old_page, ret); +} + +static int yt8821_auto_sleep_config(struct phy_device *phydev, + bool enable) +{ + int old_page; + int ret; + + old_page = ytphy_save_page(phydev); + if (old_page < 0) + return old_page; + + ret = ytphy_modify_ext(phydev, YTPHY_REG_SPACE_SELECT_REG, + YTPHY_RSSR_SPACE_MASK, + YTPHY_RSSR_UTP_SPACE); + if 
(ret < 0) + goto err_restore_page; + + ret = ytphy_modify_ext(phydev, + YT8531_EXTREG_SLEEP_CONTROL1_REG, + YT8531_ESC1R_SLEEP_SW, + enable ? 1 : 0); + +err_restore_page: + return ytphy_restore_page(phydev, old_page, ret); +} + +static int yt8821_soft_reset(struct phy_device *phydev) +{ + return ytphy_modify_ext(phydev, YT8531_CHIP_CONFIG_REG, + YT8531_CCR_SW_RST, 0); +} + +static int yt8821_config(struct phy_device *phydev) +{ + u8 mode = YT8821_CHIP_MODE_FORCE_BX2500; + int ret; + u16 set; + + set = FIELD_PREP(YTPHY_CCR_MODE_SEL_MASK, mode); + ret = ytphy_modify_ext(phydev, + YT8531_CHIP_CONFIG_REG, + YTPHY_CCR_MODE_SEL_MASK, + set); + if (ret < 0) + return ret; + + ret = yt8821_serdes_init(phydev); + if (ret < 0) + return ret; + + ret = yt8821_utp_init(phydev); + if (ret < 0) + return ret; + + ret = yt8821_auto_sleep_config(phydev, false); + if (ret < 0) + return ret; + + return yt8821_soft_reset(phydev); +} + +static void yt8821_parse_status(struct phy_device *phydev, int val) +{ + int speed_mode; + int speed; + + speed_mode = val & YTPHY_SPEED_MASK; + switch (speed_mode) { + case YTPHY_SPEED_2500M: + speed = SPEED_2500; + break; + case YTPHY_SPEED_1000M: + speed = SPEED_1000; + break; + case YTPHY_SPEED_100M: + speed = SPEED_100; + break; + case YTPHY_SPEED_10M: + speed = SPEED_10; + break; + } + + phydev->speed = speed; + phydev->duplex = FIELD_GET(YTPHY_DUPLEX_MASK, val); +} + +static int yt8821_startup(struct phy_device *phydev) +{ + u16 val; + int ret; + + ret = ytphy_modify_ext(phydev, YTPHY_REG_SPACE_SELECT_REG, + YTPHY_RSSR_SPACE_MASK, + YTPHY_RSSR_UTP_SPACE); + if (ret) + return ret; + + ret = genphy_update_link(phydev); + if (ret) + return ret; + + ret = phy_read(phydev, MDIO_DEVAD_NONE, + YTPHY_SPECIFIC_STATUS_REG); + if (ret < 0) + return ret; + + val = ret; + + if (phydev->link) + yt8821_parse_status(phydev, val); + + return 0; +} + U_BOOT_PHY_DRIVER(motorcomm8511) = { .name = "YT8511 Gigabit Ethernet", .uid = PHY_ID_YT8511, @@ -652,3 +1134,14 @@ U_BOOT_PHY_DRIVER(motorcomm8531) = { .startup = &yt8531_startup, .shutdown = &genphy_shutdown, }; + +U_BOOT_PHY_DRIVER(motorcomm8821) = { + .name = "YT8821 2.5G Ethernet", + .uid = PHY_ID_YT8821, + .mask = PHY_ID_MASK, + .mmds = (MDIO_MMD_PMAPMD | MDIO_MMD_PCS | MDIO_MMD_AN), + .probe = &yt8821_probe, + .config = &yt8821_config, + .startup = &yt8821_startup, + .shutdown = &genphy_shutdown, +}; diff --git a/drivers/net/rtl8139.c b/drivers/net/rtl8139.c index 2e0afad..5f4b1e2 100644 --- a/drivers/net/rtl8139.c +++ b/drivers/net/rtl8139.c @@ -433,7 +433,7 @@ static int rtl8139_recv_common(struct rtl8139_priv *priv, unsigned char *rxdata, int length = 0; if (inb(priv->ioaddr + RTL_REG_CHIPCMD) & RTL_REG_CHIPCMD_RXBUFEMPTY) - return 0; + return -EAGAIN; priv->rxstatus = inw(priv->ioaddr + RTL_REG_INTRSTATUS); /* See below for the rest of the interrupt acknowledges. 
*/ diff --git a/drivers/net/sandbox-lwip.c b/drivers/net/sandbox-lwip.c new file mode 100644 index 0000000..3721033 --- /dev/null +++ b/drivers/net/sandbox-lwip.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015 National Instruments + * + * (C) Copyright 2015 + * Joe Hershberger <joe.hershberger@ni.com> + */ + +#include <dm.h> +#include <log.h> +#include <malloc.h> +#include <net.h> +#include <asm/eth.h> +#include <asm/global_data.h> +#include <asm/test.h> + +DECLARE_GLOBAL_DATA_PTR; + +static int sb_lwip_eth_start(struct udevice *dev) +{ + debug("eth_sandbox_lwip: Start\n"); + + return 0; +} + +static int sb_lwip_eth_send(struct udevice *dev, void *packet, int length) +{ + debug("eth_sandbox_lwip: Send packet %d\n", length); + + return -ENOTSUPP; +} + +static int sb_lwip_eth_recv(struct udevice *dev, int flags, uchar **packetp) +{ + return -EAGAIN; +} + +static int sb_lwip_eth_free_pkt(struct udevice *dev, uchar *packet, int length) +{ + return 0; +} + +static void sb_lwip_eth_stop(struct udevice *dev) +{ +} + +static int sb_lwip_eth_write_hwaddr(struct udevice *dev) +{ + return 0; +} + +static const struct eth_ops sb_eth_ops = { + .start = sb_lwip_eth_start, + .send = sb_lwip_eth_send, + .recv = sb_lwip_eth_recv, + .free_pkt = sb_lwip_eth_free_pkt, + .stop = sb_lwip_eth_stop, + .write_hwaddr = sb_lwip_eth_write_hwaddr, +}; + +static int sb_lwip_eth_remove(struct udevice *dev) +{ + return 0; +} + +static int sb_lwip_eth_of_to_plat(struct udevice *dev) +{ + return 0; +} + +static const struct udevice_id sb_eth_ids[] = { + { .compatible = "sandbox,eth" }, + { } +}; + +U_BOOT_DRIVER(eth_sandbox) = { + .name = "eth_lwip_sandbox", + .id = UCLASS_ETH, + .of_match = sb_eth_ids, + .of_to_plat = sb_lwip_eth_of_to_plat, + .remove = sb_lwip_eth_remove, + .ops = &sb_eth_ops, + .priv_auto = 0, + .plat_auto = sizeof(struct eth_pdata), +}; diff --git a/drivers/net/zynq_gem.c b/drivers/net/zynq_gem.c index fe7d108..461805a 100644 --- a/drivers/net/zynq_gem.c +++ b/drivers/net/zynq_gem.c @@ -228,7 +228,6 @@ struct zynq_gem_priv { struct clk tx_clk; struct clk pclk; u32 max_speed; - bool int_pcs; bool dma_64bit; u32 clk_en_info; struct reset_ctl_bulk resets; @@ -504,8 +503,7 @@ static int zynq_gem_init(struct udevice *dev) * Set SGMII enable PCS selection only if internal PCS/PMA * core is used and interface is SGMII. */ - if (priv->interface == PHY_INTERFACE_MODE_SGMII && - priv->int_pcs) { + if (priv->interface == PHY_INTERFACE_MODE_SGMII) { nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL | ZYNQ_GEM_NWCFG_PCS_SEL; } @@ -529,8 +527,7 @@ static int zynq_gem_init(struct udevice *dev) writel(nwcfg, ®s->nwcfg); #ifdef CONFIG_ARM64 - if (priv->interface == PHY_INTERFACE_MODE_SGMII && - priv->int_pcs) { + if (priv->interface == PHY_INTERFACE_MODE_SGMII) { /* * Disable AN for fixed link configuration, enable otherwise. * Must be written after PCS_SEL is set in nwconfig, @@ -992,8 +989,6 @@ static int zynq_gem_of_to_plat(struct udevice *dev) return -EINVAL; priv->interface = pdata->phy_interface; - priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma"); - priv->clk_en_info = dev_get_driver_data(dev); return 0; |