Diffstat (limited to 'roms/u-boot/drivers/net/ti')
-rw-r--r--   roms/u-boot/drivers/net/ti/Kconfig              34
-rw-r--r--   roms/u-boot/drivers/net/ti/Makefile              8
-rw-r--r--   roms/u-boot/drivers/net/ti/am65-cpsw-nuss.c    798
-rw-r--r--   roms/u-boot/drivers/net/ti/cpsw-common.c       107
-rw-r--r--   roms/u-boot/drivers/net/ti/cpsw.c             1388
-rw-r--r--   roms/u-boot/drivers/net/ti/cpsw_mdio.c         207
-rw-r--r--   roms/u-boot/drivers/net/ti/cpsw_mdio.h          18
-rw-r--r--   roms/u-boot/drivers/net/ti/davinci_emac.c      869
-rw-r--r--   roms/u-boot/drivers/net/ti/davinci_emac.h      304
-rw-r--r--   roms/u-boot/drivers/net/ti/keystone_net.c      824
10 files changed, 4557 insertions, 0 deletions
diff --git a/roms/u-boot/drivers/net/ti/Kconfig b/roms/u-boot/drivers/net/ti/Kconfig
new file mode 100644
index 000000000..f2dbbd012
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+
+config DRIVER_TI_CPSW
+ bool "TI Common Platform Ethernet Switch"
+ select PHYLIB
+ help
+ This driver supports the TI three port switch gigabit ethernet
+ subsystem found in the TI SoCs.
+
+config DRIVER_TI_EMAC
+ bool "TI Davinci EMAC"
+ help
+ Support for davinci emac
+
+config DRIVER_TI_EMAC_USE_RMII
+ depends on DRIVER_TI_EMAC
+ bool "Use RMII"
+ help
+ Configure the TI EMAC driver to use RMII
+
+config DRIVER_TI_KEYSTONE_NET
+ bool "TI Keystone 2 Ethernet"
+ help
+ This driver supports the TI Keystone 2 Ethernet subsystem
+
+config TI_AM65_CPSW_NUSS
+ bool "TI K3 AM65x MCU CPSW Nuss Ethernet controller driver"
+ depends on ARCH_K3
+ select PHYLIB
+ help
+ This driver supports TI K3 MCU CPSW Nuss Ethernet controller
+ in Texas Instruments K3 AM65x SoCs.
diff --git a/roms/u-boot/drivers/net/ti/Makefile b/roms/u-boot/drivers/net/ti/Makefile
new file mode 100644
index 000000000..8d3808bb4
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+
+obj-$(CONFIG_DRIVER_TI_CPSW) += cpsw.o cpsw-common.o cpsw_mdio.o
+obj-$(CONFIG_DRIVER_TI_EMAC) += davinci_emac.o
+obj-$(CONFIG_DRIVER_TI_KEYSTONE_NET) += keystone_net.o cpsw_mdio.o
+obj-$(CONFIG_TI_AM65_CPSW_NUSS) += am65-cpsw-nuss.o cpsw_mdio.o
diff --git a/roms/u-boot/drivers/net/ti/am65-cpsw-nuss.c b/roms/u-boot/drivers/net/ti/am65-cpsw-nuss.c
new file mode 100644
index 000000000..3ab6a3082
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/am65-cpsw-nuss.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
+ *
+ * Copyright (C) 2019, Texas Instruments, Incorporated
+ *
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <dm/lists.h>
+#include <dma-uclass.h>
+#include <dm/of_access.h>
+#include <miiphy.h>
+#include <net.h>
+#include <phy.h>
+#include <power-domain.h>
+#include <linux/bitops.h>
+#include <linux/soc/ti/ti-udma.h>
+
+#include "cpsw_mdio.h"
+
+#define AM65_CPSW_CPSWNU_MAX_PORTS 9
+
+#define AM65_CPSW_SS_BASE 0x0
+#define AM65_CPSW_SGMII_BASE 0x100
+#define AM65_CPSW_MDIO_BASE 0xf00
+#define AM65_CPSW_XGMII_BASE 0x2100
+#define AM65_CPSW_CPSW_NU_BASE 0x20000
+#define AM65_CPSW_CPSW_NU_ALE_BASE 0x1e000
+
+#define AM65_CPSW_CPSW_NU_PORTS_OFFSET 0x1000
+#define AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET 0x330
+
+#define AM65_CPSW_MDIO_BUS_FREQ_DEF 1000000
+
+#define AM65_CPSW_CTL_REG 0x4
+#define AM65_CPSW_STAT_PORT_EN_REG 0x14
+#define AM65_CPSW_PTYPE_REG 0x18
+
+#define AM65_CPSW_CTL_REG_P0_ENABLE BIT(2)
+#define AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE BIT(13)
+#define AM65_CPSW_CTL_REG_P0_RX_PAD BIT(14)
+
+#define AM65_CPSW_P0_FLOW_ID_REG 0x8
+#define AM65_CPSW_PN_RX_MAXLEN_REG 0x24
+#define AM65_CPSW_PN_REG_SA_L 0x308
+#define AM65_CPSW_PN_REG_SA_H 0x30c
+
+#define AM65_CPSW_ALE_CTL_REG 0x8
+#define AM65_CPSW_ALE_CTL_REG_ENABLE BIT(31)
+#define AM65_CPSW_ALE_CTL_REG_RESET_TBL BIT(30)
+#define AM65_CPSW_ALE_CTL_REG_BYPASS BIT(4)
+#define AM65_CPSW_ALE_PN_CTL_REG(x) (0x40 + (x) * 4)
+#define AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD 0x3
+#define AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY BIT(11)
+
+#define AM65_CPSW_ALE_THREADMAPDEF_REG 0x134
+#define AM65_CPSW_ALE_DEFTHREAD_EN BIT(15)
+
+#define AM65_CPSW_MACSL_CTL_REG 0x0
+#define AM65_CPSW_MACSL_CTL_REG_IFCTL_A BIT(15)
+#define AM65_CPSW_MACSL_CTL_EXT_EN BIT(18)
+#define AM65_CPSW_MACSL_CTL_REG_GIG BIT(7)
+#define AM65_CPSW_MACSL_CTL_REG_GMII_EN BIT(5)
+#define AM65_CPSW_MACSL_CTL_REG_LOOPBACK BIT(1)
+#define AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX BIT(0)
+#define AM65_CPSW_MACSL_RESET_REG 0x8
+#define AM65_CPSW_MACSL_RESET_REG_RESET BIT(0)
+#define AM65_CPSW_MACSL_STATUS_REG 0x4
+#define AM65_CPSW_MACSL_RESET_REG_PN_IDLE BIT(31)
+#define AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE BIT(30)
+#define AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE BIT(29)
+#define AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE BIT(28)
+#define AM65_CPSW_MACSL_RESET_REG_IDLE_MASK \
+ (AM65_CPSW_MACSL_RESET_REG_PN_IDLE | \
+ AM65_CPSW_MACSL_RESET_REG_PN_E_IDLE | \
+ AM65_CPSW_MACSL_RESET_REG_PN_P_IDLE | \
+ AM65_CPSW_MACSL_RESET_REG_PN_TX_IDLE)
+
+#define AM65_CPSW_CPPI_PKT_TYPE 0x7
+
+struct am65_cpsw_port {
+ fdt_addr_t port_base;
+ fdt_addr_t macsl_base;
+ bool disabled;
+ u32 mac_control;
+};
+
+struct am65_cpsw_common {
+ struct udevice *dev;
+ fdt_addr_t ss_base;
+ fdt_addr_t cpsw_base;
+ fdt_addr_t mdio_base;
+ fdt_addr_t ale_base;
+ fdt_addr_t gmii_sel;
+ fdt_addr_t mac_efuse;
+
+ struct clk fclk;
+ struct power_domain pwrdmn;
+
+ u32 port_num;
+ struct am65_cpsw_port ports[AM65_CPSW_CPSWNU_MAX_PORTS];
+
+ struct mii_dev *bus;
+ u32 bus_freq;
+
+ struct dma dma_tx;
+ struct dma dma_rx;
+ u32 rx_next;
+ u32 rx_pend;
+ bool started;
+};
+
+struct am65_cpsw_priv {
+ struct udevice *dev;
+ struct am65_cpsw_common *cpsw_common;
+ u32 port_id;
+
+ struct phy_device *phydev;
+ bool has_phy;
+ ofnode phy_node;
+ u32 phy_addr;
+};
+
+#ifdef PKTSIZE_ALIGN
+#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN
+#else
+#define UDMA_RX_BUF_SIZE ALIGN(1522, ARCH_DMA_MINALIGN)
+#endif
+
+#ifdef PKTBUFSRX
+#define UDMA_RX_DESC_NUM PKTBUFSRX
+#else
+#define UDMA_RX_DESC_NUM 4
+#endif
+
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+static void am65_cpsw_set_sl_mac(struct am65_cpsw_port *slave,
+ unsigned char *addr)
+{
+ writel(mac_hi(addr),
+ slave->port_base + AM65_CPSW_PN_REG_SA_H);
+ writel(mac_lo(addr),
+ slave->port_base + AM65_CPSW_PN_REG_SA_L);
+}
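For illustration, a standalone sketch (not part of the patch) of how the mac_hi()/mac_lo() macros above pack a 6-byte MAC address into the SA_H/SA_L register pair, least-significant byte first; the address value is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) | \
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

int main(void)
{
	const uint8_t mac[6] = { 0x70, 0xff, 0x76, 0x1c, 0x3a, 0x5e };

	/* SA_H takes bytes 0..3, SA_L takes bytes 4..5 */
	printf("SA_H = 0x%08x\n", (uint32_t)mac_hi(mac)); /* 0x1c76ff70 */
	printf("SA_L = 0x%08x\n", (uint32_t)mac_lo(mac)); /* 0x00005e3a */
	return 0;
}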
+
+int am65_cpsw_macsl_reset(struct am65_cpsw_port *slave)
+{
+ u32 i = 100;
+
+ /* Set the soft reset bit */
+ writel(AM65_CPSW_MACSL_RESET_REG_RESET,
+ slave->macsl_base + AM65_CPSW_MACSL_RESET_REG);
+
+ while ((readl(slave->macsl_base + AM65_CPSW_MACSL_RESET_REG) &
+ AM65_CPSW_MACSL_RESET_REG_RESET) && i--)
+ cpu_relax();
+
+ /* Timeout on the reset */
+ return i;
+}
+
+static int am65_cpsw_macsl_wait_for_idle(struct am65_cpsw_port *slave)
+{
+ u32 i = 100;
+
+ while ((readl(slave->macsl_base + AM65_CPSW_MACSL_STATUS_REG) &
+ AM65_CPSW_MACSL_RESET_REG_IDLE_MASK) && i--)
+ cpu_relax();
+
+ return i;
+}
+
+static int am65_cpsw_update_link(struct am65_cpsw_priv *priv)
+{
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ struct am65_cpsw_port *port = &common->ports[priv->port_id];
+ struct phy_device *phy = priv->phydev;
+ u32 mac_control = 0;
+
+ if (phy->link) { /* link up */
+ mac_control = /*AM65_CPSW_MACSL_CTL_REG_LOOPBACK |*/
+ AM65_CPSW_MACSL_CTL_REG_GMII_EN;
+ if (phy->speed == 1000)
+ mac_control |= AM65_CPSW_MACSL_CTL_REG_GIG;
+ if (phy->speed == 10 && phy_interface_is_rgmii(phy))
+ /* Can be used with in band mode only */
+ mac_control |= AM65_CPSW_MACSL_CTL_EXT_EN;
+ if (phy->duplex == DUPLEX_FULL)
+ mac_control |= AM65_CPSW_MACSL_CTL_REG_FULL_DUPLEX;
+ if (phy->speed == 100)
+ mac_control |= AM65_CPSW_MACSL_CTL_REG_IFCTL_A;
+ }
+
+ if (mac_control == port->mac_control)
+ goto out;
+
+ if (mac_control) {
+ printf("link up on port %d, speed %d, %s duplex\n",
+ priv->port_id, phy->speed,
+ (phy->duplex == DUPLEX_FULL) ? "full" : "half");
+ } else {
+ printf("link down on port %d\n", priv->port_id);
+ }
+
+ writel(mac_control, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
+ port->mac_control = mac_control;
+
+out:
+ return phy->link;
+}
+
+#define AM65_GMII_SEL_MODE_MII 0
+#define AM65_GMII_SEL_MODE_RMII 1
+#define AM65_GMII_SEL_MODE_RGMII 2
+
+#define AM65_GMII_SEL_RGMII_IDMODE BIT(4)
+
+static void am65_cpsw_gmii_sel_k3(struct am65_cpsw_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ u32 reg;
+ u32 mode = 0;
+ bool rgmii_id = false;
+
+ reg = readl(common->gmii_sel);
+
+ dev_dbg(common->dev, "old gmii_sel: %08x\n", reg);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM65_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ mode = AM65_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM65_GMII_SEL_MODE_RGMII;
+ rgmii_id = true;
+ break;
+
+ default:
+ dev_warn(common->dev,
+ "Unsupported PHY mode: %u. Defaulting to MII.\n",
+ phy_mode);
+ /* fallthrough */
+ case PHY_INTERFACE_MODE_MII:
+ mode = AM65_GMII_SEL_MODE_MII;
+ break;
+ };
+
+ if (rgmii_id)
+ mode |= AM65_GMII_SEL_RGMII_IDMODE;
+
+ reg = mode;
+ dev_dbg(common->dev, "gmii_sel PHY mode: %u, new gmii_sel: %08x\n",
+ phy_mode, reg);
+ writel(reg, common->gmii_sel);
+
+ reg = readl(common->gmii_sel);
+ if (reg != mode)
+ dev_err(common->dev,
+ "gmii_sel PHY mode NOT SET!: requested: %08x, gmii_sel: %08x\n",
+ mode, reg);
+}
+
+static int am65_cpsw_start(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ struct am65_cpsw_port *port = &common->ports[priv->port_id];
+ struct am65_cpsw_port *port0 = &common->ports[0];
+ struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data;
+ int ret, i;
+
+ ret = power_domain_on(&common->pwrdmn);
+ if (ret) {
+ dev_err(dev, "power_domain_on() failed %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_enable(&common->fclk);
+ if (ret) {
+ dev_err(dev, "clk enabled failed %d\n", ret);
+ goto err_off_pwrdm;
+ }
+
+ common->rx_next = 0;
+ common->rx_pend = 0;
+ ret = dma_get_by_name(common->dev, "tx0", &common->dma_tx);
+ if (ret) {
+ dev_err(dev, "TX dma get failed %d\n", ret);
+ goto err_off_clk;
+ }
+ ret = dma_get_by_name(common->dev, "rx", &common->dma_rx);
+ if (ret) {
+ dev_err(dev, "RX dma get failed %d\n", ret);
+ goto err_free_tx;
+ }
+
+ for (i = 0; i < UDMA_RX_DESC_NUM; i++) {
+ ret = dma_prepare_rcv_buf(&common->dma_rx,
+ net_rx_packets[i],
+ UDMA_RX_BUF_SIZE);
+ if (ret) {
+ dev_err(dev, "RX dma add buf failed %d\n", ret);
+ goto err_free_tx;
+ }
+ }
+
+ ret = dma_enable(&common->dma_tx);
+ if (ret) {
+ dev_err(dev, "TX dma_enable failed %d\n", ret);
+ goto err_free_rx;
+ }
+ ret = dma_enable(&common->dma_rx);
+ if (ret) {
+ dev_err(dev, "RX dma_enable failed %d\n", ret);
+ goto err_dis_tx;
+ }
+
+ /* Control register */
+ writel(AM65_CPSW_CTL_REG_P0_ENABLE |
+ AM65_CPSW_CTL_REG_P0_TX_CRC_REMOVE |
+ AM65_CPSW_CTL_REG_P0_RX_PAD,
+ common->cpsw_base + AM65_CPSW_CTL_REG);
+
+ /* disable priority elevation */
+ writel(0, common->cpsw_base + AM65_CPSW_PTYPE_REG);
+
+ /* enable statistics */
+ writel(BIT(0) | BIT(priv->port_id),
+ common->cpsw_base + AM65_CPSW_STAT_PORT_EN_REG);
+
+ /* Port 0 length register */
+ writel(PKTSIZE_ALIGN, port0->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);
+
+ /* set base flow_id */
+ dma_get_cfg(&common->dma_rx, 0, (void **)&dma_rx_cfg_data);
+ writel(dma_rx_cfg_data->flow_id_base,
+ port0->port_base + AM65_CPSW_P0_FLOW_ID_REG);
+ dev_info(dev, "K3 CPSW: rflow_id_base: %u\n",
+ dma_rx_cfg_data->flow_id_base);
+
+ /* Reset and enable the ALE */
+ writel(AM65_CPSW_ALE_CTL_REG_ENABLE | AM65_CPSW_ALE_CTL_REG_RESET_TBL |
+ AM65_CPSW_ALE_CTL_REG_BYPASS,
+ common->ale_base + AM65_CPSW_ALE_CTL_REG);
+
+ /* port 0 put into forward mode */
+ writel(AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
+ common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
+
+ writel(AM65_CPSW_ALE_DEFTHREAD_EN,
+ common->ale_base + AM65_CPSW_ALE_THREADMAPDEF_REG);
+
+ /* PORT x configuration */
+
+ /* Port x Max length register */
+ writel(PKTSIZE_ALIGN, port->port_base + AM65_CPSW_PN_RX_MAXLEN_REG);
+
+ /* Port x set mac */
+ am65_cpsw_set_sl_mac(port, pdata->enetaddr);
+
+ /* Port x ALE: mac_only, Forwarding */
+ writel(AM65_CPSW_ALE_PN_CTL_REG_MAC_ONLY |
+ AM65_CPSW_ALE_PN_CTL_REG_MODE_FORWARD,
+ common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
+
+ port->mac_control = 0;
+ if (!am65_cpsw_macsl_reset(port)) {
+ dev_err(dev, "mac_sl reset failed\n");
+ ret = -EFAULT;
+ goto err_dis_rx;
+ }
+
+ ret = phy_startup(priv->phydev);
+ if (ret) {
+ dev_err(dev, "phy_startup failed\n");
+ goto err_dis_rx;
+ }
+
+ ret = am65_cpsw_update_link(priv);
+ if (!ret) {
+ ret = -ENODEV;
+ goto err_phy_shutdown;
+ }
+
+ common->started = true;
+
+ return 0;
+
+err_phy_shutdown:
+ phy_shutdown(priv->phydev);
+err_dis_rx:
+ /* disable ports */
+ writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
+ writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
+ if (!am65_cpsw_macsl_wait_for_idle(port))
+ dev_err(dev, "mac_sl idle timeout\n");
+ writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
+ writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
+ writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);
+
+ dma_disable(&common->dma_rx);
+err_dis_tx:
+ dma_disable(&common->dma_tx);
+err_free_rx:
+ dma_free(&common->dma_rx);
+err_free_tx:
+ dma_free(&common->dma_tx);
+err_off_clk:
+ clk_disable(&common->fclk);
+err_off_pwrdm:
+ power_domain_off(&common->pwrdmn);
+out:
+ dev_err(dev, "%s end error\n", __func__);
+
+ return ret;
+}
+
+static int am65_cpsw_send(struct udevice *dev, void *packet, int length)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ struct ti_udma_drv_packet_data packet_data;
+ int ret;
+
+ packet_data.pkt_type = AM65_CPSW_CPPI_PKT_TYPE;
+ packet_data.dest_tag = priv->port_id;
+ ret = dma_send(&common->dma_tx, packet, length, &packet_data);
+ if (ret) {
+ dev_err(dev, "TX dma_send failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *common = priv->cpsw_common;
+
+ /* try to receive a new packet */
+ return dma_receive(&common->dma_rx, (void **)packetp, NULL);
+}
+
+static int am65_cpsw_free_pkt(struct udevice *dev, uchar *packet, int length)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ int ret;
+
+ if (length > 0) {
+ u32 pkt = common->rx_next % UDMA_RX_DESC_NUM;
+
+ ret = dma_prepare_rcv_buf(&common->dma_rx,
+ net_rx_packets[pkt],
+ UDMA_RX_BUF_SIZE);
+ if (ret)
+ dev_err(dev, "RX dma free_pkt failed %d\n", ret);
+ common->rx_next++;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_stop(struct udevice *dev)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ struct am65_cpsw_port *port = &common->ports[priv->port_id];
+
+ if (!common->started)
+ return;
+
+ phy_shutdown(priv->phydev);
+
+ writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(priv->port_id));
+ writel(0, common->ale_base + AM65_CPSW_ALE_PN_CTL_REG(0));
+ if (!am65_cpsw_macsl_wait_for_idle(port))
+ dev_err(dev, "mac_sl idle timeout\n");
+ writel(0, port->macsl_base + AM65_CPSW_MACSL_CTL_REG);
+ writel(0, common->ale_base + AM65_CPSW_ALE_CTL_REG);
+ writel(0, common->cpsw_base + AM65_CPSW_CTL_REG);
+
+ dma_disable(&common->dma_tx);
+ dma_free(&common->dma_tx);
+
+ dma_disable(&common->dma_rx);
+ dma_free(&common->dma_rx);
+
+ common->started = false;
+}
+
+static int am65_cpsw_read_rom_hwaddr(struct udevice *dev)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *common = priv->cpsw_common;
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ u32 mac_hi, mac_lo;
+
+ if (common->mac_efuse == FDT_ADDR_T_NONE)
+ return -1;
+
+ mac_lo = readl(common->mac_efuse);
+ mac_hi = readl(common->mac_efuse + 4);
+ pdata->enetaddr[0] = (mac_hi >> 8) & 0xff;
+ pdata->enetaddr[1] = mac_hi & 0xff;
+ pdata->enetaddr[2] = (mac_lo >> 24) & 0xff;
+ pdata->enetaddr[3] = (mac_lo >> 16) & 0xff;
+ pdata->enetaddr[4] = (mac_lo >> 8) & 0xff;
+ pdata->enetaddr[5] = mac_lo & 0xff;
+
+ return 0;
+}
+
+static const struct eth_ops am65_cpsw_ops = {
+ .start = am65_cpsw_start,
+ .send = am65_cpsw_send,
+ .recv = am65_cpsw_recv,
+ .free_pkt = am65_cpsw_free_pkt,
+ .stop = am65_cpsw_stop,
+ .read_rom_hwaddr = am65_cpsw_read_rom_hwaddr,
+};
+
+static int am65_cpsw_mdio_init(struct udevice *dev)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
+
+ if (!priv->has_phy || cpsw_common->bus)
+ return 0;
+
+ cpsw_common->bus = cpsw_mdio_init(dev->name,
+ cpsw_common->mdio_base,
+ cpsw_common->bus_freq,
+ clk_get_rate(&cpsw_common->fclk));
+ if (!cpsw_common->bus)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int am65_cpsw_phy_init(struct udevice *dev)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct am65_cpsw_common *cpsw_common = priv->cpsw_common;
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct phy_device *phydev;
+ u32 supported = PHY_GBIT_FEATURES;
+ int ret;
+
+ phydev = phy_connect(cpsw_common->bus,
+ priv->phy_addr,
+ priv->dev,
+ pdata->phy_interface);
+
+ if (!phydev) {
+ dev_err(dev, "phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ phydev->supported &= supported;
+ if (pdata->max_speed) {
+ ret = phy_set_supported(phydev, pdata->max_speed);
+ if (ret)
+ return ret;
+ }
+ phydev->advertising = phydev->supported;
+
+ if (ofnode_valid(priv->phy_node))
+ phydev->node = priv->phy_node;
+
+ priv->phydev = phydev;
+ ret = phy_config(phydev);
+ if (ret < 0)
+ pr_err("phy_config() failed: %d", ret);
+
+ return ret;
+}
+
+static int am65_cpsw_ofdata_parse_phy(struct udevice *dev, ofnode port_np)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct ofnode_phandle_args out_args;
+ const char *phy_mode;
+ int ret = 0;
+
+ phy_mode = ofnode_read_string(port_np, "phy-mode");
+ if (phy_mode) {
+ pdata->phy_interface =
+ phy_get_interface_by_name(phy_mode);
+ if (pdata->phy_interface == -1) {
+ dev_err(dev, "Invalid PHY mode '%s', port %u\n",
+ phy_mode, priv->port_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ofnode_read_u32(port_np, "max-speed", (u32 *)&pdata->max_speed);
+ if (pdata->max_speed)
+ dev_err(dev, "Port %u speed froced to %uMbit\n",
+ priv->port_id, pdata->max_speed);
+
+ priv->has_phy = true;
+ ret = ofnode_parse_phandle_with_args(port_np, "phy-handle",
+ NULL, 0, 0, &out_args);
+ if (ret) {
+ dev_err(dev, "can't parse phy-handle port %u (%d)\n",
+ priv->port_id, ret);
+ priv->has_phy = false;
+ ret = 0;
+ }
+
+ priv->phy_node = out_args.node;
+ if (priv->has_phy) {
+ ret = ofnode_read_u32(priv->phy_node, "reg", &priv->phy_addr);
+ if (ret) {
+ dev_err(dev, "failed to get phy_addr port %u (%d)\n",
+ priv->port_id, ret);
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int am65_cpsw_probe_cpsw(struct udevice *dev)
+{
+ struct am65_cpsw_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct am65_cpsw_common *cpsw_common;
+ ofnode ports_np, node;
+ int ret, i;
+
+ priv->dev = dev;
+
+ cpsw_common = calloc(1, sizeof(*priv->cpsw_common));
+ if (!cpsw_common)
+ return -ENOMEM;
+ priv->cpsw_common = cpsw_common;
+
+ cpsw_common->dev = dev;
+ cpsw_common->ss_base = dev_read_addr(dev);
+ if (cpsw_common->ss_base == FDT_ADDR_T_NONE)
+ return -EINVAL;
+ cpsw_common->mac_efuse = devfdt_get_addr_name(dev, "mac_efuse");
+ /* no err check - optional */
+
+ ret = power_domain_get_by_index(dev, &cpsw_common->pwrdmn, 0);
+ if (ret) {
+ dev_err(dev, "failed to get pwrdmn: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_get_by_name(dev, "fck", &cpsw_common->fclk);
+ if (ret) {
+ power_domain_free(&cpsw_common->pwrdmn);
+ dev_err(dev, "failed to get clock %d\n", ret);
+ return ret;
+ }
+
+ cpsw_common->cpsw_base = cpsw_common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+ cpsw_common->ale_base = cpsw_common->cpsw_base +
+ AM65_CPSW_CPSW_NU_ALE_BASE;
+ cpsw_common->mdio_base = cpsw_common->ss_base + AM65_CPSW_MDIO_BASE;
+
+ ports_np = dev_read_subnode(dev, "ethernet-ports");
+ if (!ofnode_valid(ports_np)) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ofnode_for_each_subnode(node, ports_np) {
+ const char *node_name;
+ u32 port_id;
+ bool disabled;
+
+ node_name = ofnode_get_name(node);
+
+ disabled = !ofnode_is_available(node);
+
+ ret = ofnode_read_u32(node, "reg", &port_id);
+ if (ret) {
+ dev_err(dev, "%s: failed to get port_id (%d)\n",
+ node_name, ret);
+ goto out;
+ }
+
+ if (port_id >= AM65_CPSW_CPSWNU_MAX_PORTS) {
+ dev_err(dev, "%s: invalid port_id (%d)\n",
+ node_name, port_id);
+ ret = -EINVAL;
+ goto out;
+ }
+ cpsw_common->port_num++;
+
+ if (!port_id)
+ continue;
+
+ cpsw_common->ports[port_id].disabled = disabled;
+ if (disabled)
+ continue;
+
+ priv->port_id = port_id;
+ ret = am65_cpsw_ofdata_parse_phy(dev, node);
+ if (ret)
+ goto out;
+ }
+
+ for (i = 0; i < AM65_CPSW_CPSWNU_MAX_PORTS; i++) {
+ struct am65_cpsw_port *port = &cpsw_common->ports[i];
+
+ port->port_base = cpsw_common->cpsw_base +
+ AM65_CPSW_CPSW_NU_PORTS_OFFSET +
+ (i * AM65_CPSW_CPSW_NU_PORTS_OFFSET);
+ port->macsl_base = port->port_base +
+ AM65_CPSW_CPSW_NU_PORT_MACSL_OFFSET;
+ }
+
+ node = dev_read_subnode(dev, "cpsw-phy-sel");
+ if (!ofnode_valid(node)) {
+ dev_err(dev, "can't find cpsw-phy-sel\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ cpsw_common->gmii_sel = ofnode_get_addr(node);
+ if (cpsw_common->gmii_sel == FDT_ADDR_T_NONE) {
+ dev_err(dev, "failed to get gmii_sel base\n");
+ goto out;
+ }
+
+ cpsw_common->bus_freq =
+ dev_read_u32_default(dev, "bus_freq",
+ AM65_CPSW_MDIO_BUS_FREQ_DEF);
+
+ am65_cpsw_gmii_sel_k3(priv, pdata->phy_interface, priv->port_id);
+
+ ret = am65_cpsw_mdio_init(dev);
+ if (ret)
+ goto out;
+
+ ret = am65_cpsw_phy_init(dev);
+ if (ret)
+ goto out;
+
+ dev_info(dev, "K3 CPSW: nuss_ver: 0x%08X cpsw_ver: 0x%08X ale_ver: 0x%08X Ports:%u mdio_freq:%u\n",
+ readl(cpsw_common->ss_base),
+ readl(cpsw_common->cpsw_base),
+ readl(cpsw_common->ale_base),
+ cpsw_common->port_num,
+ cpsw_common->bus_freq);
+
+out:
+ clk_free(&cpsw_common->fclk);
+ power_domain_free(&cpsw_common->pwrdmn);
+ return ret;
+}
+
+static const struct udevice_id am65_cpsw_nuss_ids[] = {
+ { .compatible = "ti,am654-cpsw-nuss" },
+ { .compatible = "ti,j721e-cpsw-nuss" },
+ { .compatible = "ti,am642-cpsw-nuss" },
+ { }
+};
+
+U_BOOT_DRIVER(am65_cpsw_nuss_slave) = {
+ .name = "am65_cpsw_nuss_slave",
+ .id = UCLASS_ETH,
+ .of_match = am65_cpsw_nuss_ids,
+ .probe = am65_cpsw_probe_cpsw,
+ .ops = &am65_cpsw_ops,
+ .priv_auto = sizeof(struct am65_cpsw_priv),
+ .plat_auto = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};
diff --git a/roms/u-boot/drivers/net/ti/cpsw-common.c b/roms/u-boot/drivers/net/ti/cpsw-common.c
new file mode 100644
index 000000000..3140f2515
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/cpsw-common.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPSW common - libs used across TI ethernet devices.
+ *
+ * Copyright (C) 2016, Texas Instruments, Incorporated
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <fdt_support.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <cpsw.h>
+#include <dm/device_compat.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define CTRL_MAC_REG(offset, id) ((offset) + 0x8 * (id))
+
+static void davinci_emac_3517_get_macid(u32 addr, u8 *mac_addr)
+{
+ /* try reading mac address from efuse */
+ u32 macid_lsb = readl(addr);
+ u32 macid_msb = readl(addr + 4);
+
+ mac_addr[0] = (macid_msb >> 16) & 0xff;
+ mac_addr[1] = (macid_msb >> 8) & 0xff;
+ mac_addr[2] = macid_msb & 0xff;
+ mac_addr[3] = (macid_lsb >> 16) & 0xff;
+ mac_addr[4] = (macid_lsb >> 8) & 0xff;
+ mac_addr[5] = macid_lsb & 0xff;
+}
+
+static void cpsw_am33xx_cm_get_macid(u32 addr, u8 *mac_addr)
+{
+ /* try reading mac address from efuse */
+ u32 macid_lo = readl(addr);
+ u32 macid_hi = readl(addr + 4);
+
+ mac_addr[5] = (macid_lo >> 8) & 0xff;
+ mac_addr[4] = macid_lo & 0xff;
+ mac_addr[3] = (macid_hi >> 24) & 0xff;
+ mac_addr[2] = (macid_hi >> 16) & 0xff;
+ mac_addr[1] = (macid_hi >> 8) & 0xff;
+ mac_addr[0] = macid_hi & 0xff;
+}
+
+void ti_cm_get_macid(struct udevice *dev, struct cpsw_platform_data *data,
+ u8 *mac_addr)
+{
+ if (!strcmp(data->macid_sel_compat, "cpsw,am33xx"))
+ cpsw_am33xx_cm_get_macid(data->syscon_addr, mac_addr);
+ else if (!strcmp(data->macid_sel_compat, "davinci,emac"))
+ davinci_emac_3517_get_macid(data->syscon_addr, mac_addr);
+}
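For illustration, a standalone sketch (not part of the patch) showing that the two efuse decoders above unpack the same pair of 32-bit words in different byte orders; the helper names and register values below are arbitrary stand-ins for the functions above:

#include <stdint.h>
#include <stdio.h>

/* same byte order as cpsw_am33xx_cm_get_macid() above */
static void am33xx_unpack(uint32_t lo, uint32_t hi, uint8_t mac[6])
{
	mac[5] = (lo >> 8) & 0xff;
	mac[4] = lo & 0xff;
	mac[3] = (hi >> 24) & 0xff;
	mac[2] = (hi >> 16) & 0xff;
	mac[1] = (hi >> 8) & 0xff;
	mac[0] = hi & 0xff;
}

/* same byte order as davinci_emac_3517_get_macid() above */
static void emac3517_unpack(uint32_t lsb, uint32_t msb, uint8_t mac[6])
{
	mac[0] = (msb >> 16) & 0xff;
	mac[1] = (msb >> 8) & 0xff;
	mac[2] = msb & 0xff;
	mac[3] = (lsb >> 16) & 0xff;
	mac[4] = (lsb >> 8) & 0xff;
	mac[5] = lsb & 0xff;
}

int main(void)
{
	uint8_t a[6], b[6];
	int i;

	am33xx_unpack(0x00001122, 0x33445566, a);
	emac3517_unpack(0x00001122, 0x33445566, b);

	for (i = 0; i < 6; i++)
		printf("%02x%c", a[i], i < 5 ? ':' : '\n');	/* 66:55:44:33:22:11 */
	for (i = 0; i < 6; i++)
		printf("%02x%c", b[i], i < 5 ? ':' : '\n');	/* 44:55:66:00:11:22 */
	return 0;
}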
+
+int ti_cm_get_macid_addr(struct udevice *dev, int slave,
+ struct cpsw_platform_data *data)
+{
+ void *fdt = (void *)gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ fdt32_t gmii = 0;
+ int syscon;
+ u16 offset;
+
+ if (of_machine_is_compatible("ti,dm8148")) {
+ offset = 0x630;
+ data->macid_sel_compat = "cpsw,am33xx";
+ } else if (of_machine_is_compatible("ti,am33xx")) {
+ offset = 0x630;
+ data->macid_sel_compat = "cpsw,am33xx";
+ } else if (device_is_compatible(dev, "ti,am3517-emac")) {
+ offset = 0x110;
+ data->macid_sel_compat = "davinci,emac";
+ } else if (device_is_compatible(dev, "ti,dm816-emac")) {
+ offset = 0x30;
+ data->macid_sel_compat = "cpsw,am33xx";
+ } else if (of_machine_is_compatible("ti,am43")) {
+ offset = 0x630;
+ data->macid_sel_compat = "cpsw,am33xx";
+ } else if (of_machine_is_compatible("ti,dra7")) {
+ offset = 0x514;
+ data->macid_sel_compat = "davinci,emac";
+ } else {
+ dev_err(dev, "incompatible machine/device type for reading mac address\n");
+ return -ENOENT;
+ }
+
+ syscon = fdtdec_lookup_phandle(fdt, node, "syscon");
+ if (syscon < 0) {
+ pr_err("Syscon offset not found\n");
+ return -ENOENT;
+ }
+
+ data->syscon_addr = (u32)map_physmem(fdt_translate_address(fdt, syscon,
+ &gmii),
+ sizeof(u32), MAP_NOCACHE);
+ if (data->syscon_addr == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get syscon address to get mac efuse address\n");
+ return -ENOENT;
+ }
+
+ data->syscon_addr += CTRL_MAC_REG(offset, slave);
+
+ return 0;
+
+}
diff --git a/roms/u-boot/drivers/net/ti/cpsw.c b/roms/u-boot/drivers/net/ti/cpsw.c
new file mode 100644
index 000000000..68f4191fe
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/cpsw.c
@@ -0,0 +1,1388 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPSW Ethernet Switch Driver
+ *
+ * Copyright (C) 2010-2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <common.h>
+#include <command.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <net.h>
+#include <miiphy.h>
+#include <malloc.h>
+#include <net.h>
+#include <netdev.h>
+#include <cpsw.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <asm/gpio.h>
+#include <asm/io.h>
+#include <phy.h>
+#include <asm/arch/cpu.h>
+#include <dm.h>
+
+#include "cpsw_mdio.h"
+
+#define BITMASK(bits) (BIT(bits) - 1)
+#define NUM_DESCS (PKTBUFSRX * 2)
+#define PKT_MIN 60
+#define PKT_MAX (1500 + 14 + 4 + 4)
+#define CLEAR_BIT 1
+#define GIGABITEN BIT(7)
+#define FULLDUPLEXEN BIT(0)
+#define MIIEN BIT(15)
+#define CTL_EXT_EN BIT(18)
+/* DMA Registers */
+#define CPDMA_TXCONTROL 0x004
+#define CPDMA_RXCONTROL 0x014
+#define CPDMA_SOFTRESET 0x01c
+#define CPDMA_RXFREE 0x0e0
+#define CPDMA_TXHDP_VER1 0x100
+#define CPDMA_TXHDP_VER2 0x200
+#define CPDMA_RXHDP_VER1 0x120
+#define CPDMA_RXHDP_VER2 0x220
+#define CPDMA_TXCP_VER1 0x140
+#define CPDMA_TXCP_VER2 0x240
+#define CPDMA_RXCP_VER1 0x160
+#define CPDMA_RXCP_VER2 0x260
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define CPDMA_TIMEOUT 100 /* msecs */
+
+struct cpsw_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+};
+
+struct cpsw_slave_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 flow_thresh;
+ u32 port_vlan;
+ u32 tx_pri_map;
+#ifdef CONFIG_AM33XX
+ u32 gap_thresh;
+#elif defined(CONFIG_TI814X)
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+#endif
+ u32 sa_lo;
+ u32 sa_hi;
+};
+
+struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 flow_thresh;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+ u32 cpdma_rx_chan_map;
+};
+
+struct cpsw_sliver_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 __reserved_1;
+ u32 rx_pri_map;
+};
+
+#define ALE_ENTRY_BITS 68
+#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
+/* ALE Registers */
+#define ALE_CONTROL 0x08
+#define ALE_UNKNOWNVLAN 0x18
+#define ALE_TABLE_CONTROL 0x20
+#define ALE_TABLE 0x34
+#define ALE_PORTCTL 0x40
+
+#define ALE_TABLE_WRITE BIT(31)
+
+#define ALE_TYPE_FREE 0
+#define ALE_TYPE_ADDR 1
+#define ALE_TYPE_VLAN 2
+#define ALE_TYPE_VLAN_ADDR 3
+
+#define ALE_UCAST_PERSISTANT 0
+#define ALE_UCAST_UNTOUCHED 1
+#define ALE_UCAST_OUI 2
+#define ALE_UCAST_TOUCHED 3
+
+#define ALE_MCAST_FWD 0
+#define ALE_MCAST_BLOCK_LEARN_FWD 1
+#define ALE_MCAST_FWD_LEARN 2
+#define ALE_MCAST_FWD_2 3
+
+enum cpsw_ale_port_state {
+ ALE_PORT_STATE_DISABLE = 0x00,
+ ALE_PORT_STATE_BLOCK = 0x01,
+ ALE_PORT_STATE_LEARN = 0x02,
+ ALE_PORT_STATE_FORWARD = 0x03,
+};
+
+/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
+#define ALE_SECURE 1
+#define ALE_BLOCKED 2
+
+struct cpsw_slave {
+ struct cpsw_slave_regs *regs;
+ struct cpsw_sliver_regs *sliver;
+ int slave_num;
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+};
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_chan {
+ struct cpdma_desc *head, *tail;
+ void *hdp, *cp, *rxfree;
+};
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII 0
+#define AM33XX_GMII_SEL_MODE_RMII 1
+#define AM33XX_GMII_SEL_MODE_RGMII 2
+
+#define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
+#define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
+
+#define GMII_SEL_MODE_MASK 0x3
+
+#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))
+
+#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
+
+#define for_active_slave(slave, priv) \
+ slave = (priv)->slaves + ((priv)->data)->active_slave; if (slave)
+#define for_each_slave(slave, priv) \
+ for (slave = (priv)->slaves; slave != (priv)->slaves + \
+ ((priv)->data)->slaves; slave++)
+
+struct cpsw_priv {
+#ifdef CONFIG_DM_ETH
+ struct udevice *dev;
+#else
+ struct eth_device *dev;
+#endif
+ struct cpsw_platform_data *data;
+ int host_port;
+
+ struct cpsw_regs *regs;
+ void *dma_regs;
+ struct cpsw_host_regs *host_port_regs;
+ void *ale_regs;
+
+ struct cpdma_desc *descs;
+ struct cpdma_desc *desc_free;
+ struct cpdma_chan rx_chan, tx_chan;
+
+ struct cpsw_slave *slaves;
+ struct phy_device *phydev;
+ struct mii_dev *bus;
+
+ u32 phy_mask;
+};
+
+static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+{
+ int idx;
+
+ idx = start / 32;
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ return (ale_entry[idx] >> start) & BITMASK(bits);
+}
+
+static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ u32 value)
+{
+ int idx;
+
+ value &= BITMASK(bits);
+ idx = start / 32;
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ ale_entry[idx] &= ~(BITMASK(bits) << start);
+ ale_entry[idx] |= (value << start);
+}
+
+#define DEFINE_ALE_FIELD(name, start, bits) \
+static inline int __maybe_unused cpsw_ale_get_##name(u32 *ale_entry) \
+{ \
+ return cpsw_ale_get_field(ale_entry, start, bits); \
+} \
+static inline void __maybe_unused cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
+{ \
+ cpsw_ale_set_field(ale_entry, start, bits, value); \
+}
+
+DEFINE_ALE_FIELD(entry_type, 60, 2)
+DEFINE_ALE_FIELD(mcast_state, 62, 2)
+DEFINE_ALE_FIELD(port_mask, 66, 3)
+DEFINE_ALE_FIELD(ucast_type, 62, 2)
+DEFINE_ALE_FIELD(port_num, 66, 2)
+DEFINE_ALE_FIELD(blocked, 65, 1)
+DEFINE_ALE_FIELD(secure, 64, 1)
+DEFINE_ALE_FIELD(mcast, 40, 1)
+
+/* The MAC address field in the ALE entry cannot be macroized as above */
+static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
+}
+
+static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
+}
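For illustration, a standalone sketch (not part of the patch) of the bit layout the ALE helpers above rely on: the 68-bit table entry lives in three 32-bit words with the word index flipped, so ale_entry[0] carries bits 64..67 and ale_entry[2] carries bits 0..31. Field positions match the definitions above; the values written are arbitrary examples:

#include <stdint.h>
#include <stdio.h>

#define BITMASK(bits)	((1u << (bits)) - 1)

/* mirrors cpsw_ale_set_field() above */
static void ale_set_field(uint32_t *entry, uint32_t start, uint32_t bits,
			  uint32_t value)
{
	int idx = start / 32;

	value &= BITMASK(bits);
	start -= idx * 32;
	idx = 2 - idx;				/* flip word order */
	entry[idx] &= ~(BITMASK(bits) << start);
	entry[idx] |= value << start;
}

int main(void)
{
	uint32_t entry[3] = { 0, 0, 0 };

	ale_set_field(entry, 60, 2, 1);		/* entry_type = ALE_TYPE_ADDR */
	ale_set_field(entry, 66, 2, 2);		/* port_num = 2 */

	/* entry_type lands in word 1 (bits 28..29), port_num in word 0 */
	printf("%08x %08x %08x\n", entry[0], entry[1], entry[2]);
	/* prints: 00000008 10000000 00000000 */
	return 0;
}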
+
+static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
+{
+ int i;
+
+ __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
+
+ return idx;
+}
+
+static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
+{
+ int i;
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
+
+ __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
+
+ return idx;
+}
+
+static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < priv->data->ale_entries; idx++) {
+ u8 entry_addr[6];
+
+ cpsw_ale_read(priv, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ cpsw_ale_get_addr(ale_entry, entry_addr);
+ if (memcmp(entry_addr, addr, 6) == 0)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_match_free(struct cpsw_priv *priv)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < priv->data->ale_entries; idx++) {
+ cpsw_ale_read(priv, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type == ALE_TYPE_FREE)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < priv->data->ale_entries; idx++) {
+ cpsw_ale_read(priv, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ if (cpsw_ale_get_mcast(ale_entry))
+ continue;
+ type = cpsw_ale_get_ucast_type(ale_entry);
+ if (type != ALE_UCAST_PERSISTANT &&
+ type != ALE_UCAST_OUI)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
+ int port, int flags)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
+ cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
+ cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_port_num(ale_entry, port);
+
+ idx = cpsw_ale_match_addr(priv, addr);
+ if (idx < 0)
+ idx = cpsw_ale_match_free(priv);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(priv);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(priv, idx, ale_entry);
+ return 0;
+}
+
+static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
+ int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx, mask;
+
+ idx = cpsw_ale_match_addr(priv, addr);
+ if (idx >= 0)
+ cpsw_ale_read(priv, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
+
+ mask = cpsw_ale_get_port_mask(ale_entry);
+ port_mask |= mask;
+ cpsw_ale_set_port_mask(ale_entry, port_mask);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(priv);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(priv);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(priv, idx, ale_entry);
+ return 0;
+}
+
+static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
+{
+ u32 tmp, mask = BIT(bit);
+
+ tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
+ tmp &= ~mask;
+ tmp |= val ? mask : 0;
+ __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
+}
+
+#define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
+#define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
+#define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
+
+static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
+ int val)
+{
+ int offset = ALE_PORTCTL + 4 * port;
+ u32 tmp, mask = 0x3;
+
+ tmp = __raw_readl(priv->ale_regs + offset);
+ tmp &= ~mask;
+ tmp |= val & mask;
+ __raw_writel(tmp, priv->ale_regs + offset);
+}
+
+/* Set a self-clearing bit in a register, and wait for it to clear */
+static inline void setbit_and_wait_for_clear32(void *addr)
+{
+ __raw_writel(CLEAR_BIT, addr);
+ while (__raw_readl(addr) & CLEAR_BIT)
+ ;
+}
+
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+static void cpsw_set_slave_mac(struct cpsw_slave *slave,
+ struct cpsw_priv *priv)
+{
+#ifdef CONFIG_DM_ETH
+ struct eth_pdata *pdata = dev_get_plat(priv->dev);
+
+ writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
+ writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
+#else
+ __raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
+ __raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
+#endif
+}
+
+static int cpsw_slave_update_link(struct cpsw_slave *slave,
+ struct cpsw_priv *priv, int *link)
+{
+ struct phy_device *phy;
+ u32 mac_control = 0;
+ int ret = -ENODEV;
+
+ phy = priv->phydev;
+ if (!phy)
+ goto out;
+
+ ret = phy_startup(phy);
+ if (ret)
+ goto out;
+
+ if (link)
+ *link = phy->link;
+
+ if (phy->link) { /* link up */
+ mac_control = priv->data->mac_control;
+ if (phy->speed == 1000)
+ mac_control |= GIGABITEN;
+ if (phy->duplex == DUPLEX_FULL)
+ mac_control |= FULLDUPLEXEN;
+ if (phy->speed == 100)
+ mac_control |= MIIEN;
+ if (phy->speed == 10 && phy_interface_is_rgmii(phy))
+ mac_control |= CTL_EXT_EN;
+ }
+
+ if (mac_control == slave->mac_control)
+ goto out;
+
+ if (mac_control) {
+ printf("link up on port %d, speed %d, %s duplex\n",
+ slave->slave_num, phy->speed,
+ (phy->duplex == DUPLEX_FULL) ? "full" : "half");
+ } else {
+ printf("link down on port %d\n", slave->slave_num);
+ }
+
+ __raw_writel(mac_control, &slave->sliver->mac_control);
+ slave->mac_control = mac_control;
+
+out:
+ return ret;
+}
+
+static int cpsw_update_link(struct cpsw_priv *priv)
+{
+ int ret = -ENODEV;
+ struct cpsw_slave *slave;
+
+ for_active_slave(slave, priv)
+ ret = cpsw_slave_update_link(slave, priv, NULL);
+
+ return ret;
+}
+
+static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
+static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ u32 slave_port;
+
+ setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
+
+ /* setup priority mapping */
+ __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
+ __raw_writel(0x33221100, &slave->regs->tx_pri_map);
+
+ /* setup max packet size, and mac address */
+ __raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ /* enable forwarding */
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+ cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);
+
+ priv->phy_mask |= 1 << slave->data->phy_addr;
+}
+
+static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
+{
+ struct cpdma_desc *desc = priv->desc_free;
+
+ if (desc)
+ priv->desc_free = desc_read_ptr(desc, hw_next);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
+{
+ if (desc) {
+ desc_write(desc, hw_next, priv->desc_free);
+ priv->desc_free = desc;
+ }
+}
+
+static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
+ void *buffer, int len)
+{
+ struct cpdma_desc *desc, *prev;
+ u32 mode;
+
+ desc = cpdma_desc_alloc(priv);
+ if (!desc)
+ return -ENOMEM;
+
+ if (len < PKT_MIN)
+ len = PKT_MIN;
+
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ if (!chan->head) {
+ /* simple case - first packet enqueued */
+ chan->head = desc;
+ chan->tail = desc;
+ chan_write(chan, hdp, desc);
+ goto done;
+ }
+
+ /* not the first packet - enqueue at the tail */
+ prev = chan->tail;
+ desc_write(prev, hw_next, desc);
+ chan->tail = desc;
+
+ /* next check if EOQ has been triggered already */
+ if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
+ chan_write(chan, hdp, desc);
+
+done:
+ if (chan->rxfree)
+ chan_write(chan, rxfree, 1);
+ return 0;
+}
+
+static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
+ void **buffer, int *len)
+{
+ struct cpdma_desc *desc = chan->head;
+ u32 status;
+
+ if (!desc)
+ return -ENOENT;
+
+ status = desc_read(desc, hw_mode);
+
+ if (len)
+ *len = status & 0x7ff;
+
+ if (buffer)
+ *buffer = desc_read_ptr(desc, sw_buffer);
+
+ if (status & CPDMA_DESC_OWNER) {
+ if (chan_read(chan, hdp) == 0) {
+ if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
+ chan_write(chan, hdp, desc);
+ }
+
+ return -EBUSY;
+ }
+
+ chan->head = desc_read_ptr(desc, hw_next);
+ chan_write(chan, cp, desc);
+
+ cpdma_desc_free(priv, desc);
+ return 0;
+}
+
+static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
+{
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ /* soft reset the controller and initialize priv */
+ setbit_and_wait_for_clear32(&priv->regs->soft_reset);
+
+ /* initialize and reset the address lookup engine */
+ cpsw_ale_enable(priv, 1);
+ cpsw_ale_clear(priv, 1);
+ cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
+
+ /* setup host port priority mapping */
+ __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
+ __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
+
+ /* disable priority elevation and enable statistics on all ports */
+ __raw_writel(0, &priv->regs->ptype);
+
+ /* enable statistics collection only on the host port */
+ __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
+ __raw_writel(0x7, &priv->regs->stat_port_en);
+
+ cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
+ cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);
+
+ for_active_slave(slave, priv)
+ cpsw_slave_init(slave, priv);
+
+ ret = cpsw_update_link(priv);
+ if (ret)
+ goto out;
+
+ /* init descriptor pool */
+ for (i = 0; i < NUM_DESCS; i++) {
+ desc_write(&priv->descs[i], hw_next,
+ (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
+ }
+ priv->desc_free = &priv->descs[0];
+
+ /* initialize channels */
+ if (priv->data->version == CPSW_CTRL_VERSION_2) {
+ memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
+ priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
+ priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
+ priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
+
+ memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
+ priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
+ priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
+ } else {
+ memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
+ priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
+ priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
+ priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
+
+ memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
+ priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
+ priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
+ }
+
+ /* clear dma state */
+ setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
+
+ if (priv->data->version == CPSW_CTRL_VERSION_2) {
+ for (i = 0; i < priv->data->channels; i++) {
+ __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
+ * i);
+ }
+ } else {
+ for (i = 0; i < priv->data->channels; i++) {
+ __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
+ * i);
+ __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
+ * i);
+
+ }
+ }
+
+ __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
+ __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
+
+ /* submit rx descs */
+ for (i = 0; i < PKTBUFSRX; i++) {
+ ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
+ PKTSIZE);
+ if (ret < 0) {
+ printf("error %d submitting rx desc\n", ret);
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int cpsw_reap_completed_packets(struct cpsw_priv *priv)
+{
+ int timeout = CPDMA_TIMEOUT;
+
+ /* reap completed packets */
+ while (timeout-- &&
+ (cpdma_process(priv, &priv->tx_chan, NULL, NULL) >= 0))
+ ;
+
+ return timeout;
+}
+
+static void _cpsw_halt(struct cpsw_priv *priv)
+{
+ cpsw_reap_completed_packets(priv);
+
+ writel(0, priv->dma_regs + CPDMA_TXCONTROL);
+ writel(0, priv->dma_regs + CPDMA_RXCONTROL);
+
+ /* soft reset the controller and initialize priv */
+ setbit_and_wait_for_clear32(&priv->regs->soft_reset);
+
+ /* clear dma state */
+ setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
+
+}
+
+static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
+{
+ int timeout;
+
+ flush_dcache_range((unsigned long)packet,
+ (unsigned long)packet + ALIGN(length, PKTALIGN));
+
+ timeout = cpsw_reap_completed_packets(priv);
+ if (timeout == -1) {
+ printf("cpdma_process timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return cpdma_submit(priv, &priv->tx_chan, packet, length);
+}
+
+static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
+{
+ void *buffer;
+ int len;
+ int ret;
+
+ ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
+ if (ret < 0)
+ return ret;
+
+ invalidate_dcache_range((unsigned long)buffer,
+ (unsigned long)buffer + PKTSIZE_ALIGN);
+ *pkt = buffer;
+
+ return len;
+}
+
+static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
+ struct cpsw_priv *priv)
+{
+ void *regs = priv->regs;
+ struct cpsw_slave_data *data = priv->data->slave_data + slave_num;
+ slave->slave_num = slave_num;
+ slave->data = data;
+ slave->regs = regs + data->slave_reg_ofs;
+ slave->sliver = regs + data->sliver_reg_ofs;
+}
+
+static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
+{
+ struct phy_device *phydev;
+ u32 supported = PHY_GBIT_FEATURES;
+ int ret;
+
+ phydev = phy_connect(priv->bus,
+ slave->data->phy_addr,
+ priv->dev,
+ slave->data->phy_if);
+
+ if (!phydev)
+ return -1;
+
+ phydev->supported &= supported;
+ if (slave->data->max_speed) {
+ ret = phy_set_supported(phydev, slave->data->max_speed);
+ if (ret)
+ return ret;
+#if CONFIG_IS_ENABLED(DM_ETH)
+ dev_dbg(priv->dev, "Port %u speed forced to %uMbit\n",
+ slave->slave_num + 1, slave->data->max_speed);
+#else
+ log_debug("%s: Port %u speed forced to %uMbit\n",
+ priv->dev->name, slave->slave_num + 1,
+ slave->data->max_speed);
+#endif
+ }
+ phydev->advertising = phydev->supported;
+
+#ifdef CONFIG_DM_ETH
+ if (ofnode_valid(slave->data->phy_of_handle))
+ phydev->node = slave->data->phy_of_handle;
+#endif
+
+ priv->phydev = phydev;
+ phy_config(phydev);
+
+ return 1;
+}
+
+static void cpsw_phy_addr_update(struct cpsw_priv *priv)
+{
+ struct cpsw_platform_data *data = priv->data;
+ u16 alive = cpsw_mdio_get_alive(priv->bus);
+ int active = data->active_slave;
+ int new_addr = ffs(alive) - 1;
+
+ /*
+ * If there is only one phy alive and its address does not match
+ * that of active slave, then phy address can safely be updated.
+ */
+ if (hweight16(alive) == 1 &&
+ data->slave_data[active].phy_addr != new_addr) {
+ printf("Updated phy address for CPSW#%d, old: %d, new: %d\n",
+ active, data->slave_data[active].phy_addr, new_addr);
+ data->slave_data[active].phy_addr = new_addr;
+ }
+}
+
+int _cpsw_register(struct cpsw_priv *priv)
+{
+ struct cpsw_slave *slave;
+ struct cpsw_platform_data *data = priv->data;
+ void *regs = (void *)data->cpsw_base;
+
+ priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
+ if (!priv->slaves) {
+ return -ENOMEM;
+ }
+
+ priv->host_port = data->host_port_num;
+ priv->regs = regs;
+ priv->host_port_regs = regs + data->host_port_reg_ofs;
+ priv->dma_regs = regs + data->cpdma_reg_ofs;
+ priv->ale_regs = regs + data->ale_reg_ofs;
+ priv->descs = (void *)regs + data->bd_ram_ofs;
+
+ int idx = 0;
+
+ for_each_slave(slave, priv) {
+ cpsw_slave_setup(slave, idx, priv);
+ idx = idx + 1;
+ }
+
+ priv->bus = cpsw_mdio_init(priv->dev->name, data->mdio_base, 0, 0);
+ if (!priv->bus)
+ return -EFAULT;
+
+ cpsw_phy_addr_update(priv);
+
+ for_active_slave(slave, priv)
+ cpsw_phy_init(priv, slave);
+
+ return 0;
+}
+
+#ifndef CONFIG_DM_ETH
+static int cpsw_init(struct eth_device *dev, struct bd_info *bis)
+{
+ struct cpsw_priv *priv = dev->priv;
+
+ return _cpsw_init(priv, dev->enetaddr);
+}
+
+static void cpsw_halt(struct eth_device *dev)
+{
+ struct cpsw_priv *priv = dev->priv;
+
+ return _cpsw_halt(priv);
+}
+
+static int cpsw_send(struct eth_device *dev, void *packet, int length)
+{
+ struct cpsw_priv *priv = dev->priv;
+
+ return _cpsw_send(priv, packet, length);
+}
+
+static int cpsw_recv(struct eth_device *dev)
+{
+ struct cpsw_priv *priv = dev->priv;
+ uchar *pkt = NULL;
+ int len;
+
+ len = _cpsw_recv(priv, &pkt);
+
+ if (len > 0) {
+ net_process_received_packet(pkt, len);
+ cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
+ }
+
+ return len;
+}
+
+int cpsw_register(struct cpsw_platform_data *data)
+{
+ struct cpsw_priv *priv;
+ struct eth_device *dev;
+ int ret;
+
+ dev = calloc(sizeof(*dev), 1);
+ if (!dev)
+ return -ENOMEM;
+
+ priv = calloc(sizeof(*priv), 1);
+ if (!priv) {
+ free(dev);
+ return -ENOMEM;
+ }
+
+ priv->dev = dev;
+ priv->data = data;
+
+ strcpy(dev->name, "cpsw");
+ dev->iobase = 0;
+ dev->init = cpsw_init;
+ dev->halt = cpsw_halt;
+ dev->send = cpsw_send;
+ dev->recv = cpsw_recv;
+ dev->priv = priv;
+
+ eth_register(dev);
+
+ ret = _cpsw_register(priv);
+ if (ret < 0) {
+ eth_unregister(dev);
+ free(dev);
+ free(priv);
+ return ret;
+ }
+
+ return 1;
+}
+#else
+static int cpsw_eth_start(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_init(priv, pdata->enetaddr);
+}
+
+static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_send(priv, packet, length);
+}
+
+static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_recv(priv, packetp);
+}
+
+static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
+ int length)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
+}
+
+static void cpsw_eth_stop(struct udevice *dev)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+
+ return _cpsw_halt(priv);
+}
+
+static const struct eth_ops cpsw_eth_ops = {
+ .start = cpsw_eth_start,
+ .send = cpsw_eth_send,
+ .recv = cpsw_eth_recv,
+ .free_pkt = cpsw_eth_free_pkt,
+ .stop = cpsw_eth_stop,
+};
+
+static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
+ phy_interface_t phy_mode)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+ bool rgmii_id = false;
+ int slave = priv->data->active_slave;
+
+ reg = readl(priv->data->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ rgmii_id = true;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ };
+
+ mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
+ mode <<= slave * 2;
+
+ if (priv->data->rmii_clock_external) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+ else
+ mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+ }
+
+ if (rgmii_id) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
+ else
+ mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
+ }
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->data->gmii_sel);
+}
+
+static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
+ phy_interface_t phy_mode)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+ int slave = priv->data->active_slave;
+
+ reg = readl(priv->data->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ };
+
+ switch (slave) {
+ case 0:
+ mask = GMII_SEL_MODE_MASK;
+ break;
+ case 1:
+ mask = GMII_SEL_MODE_MASK << 4;
+ mode <<= 4;
+ break;
+ default:
+ dev_err(priv->dev, "invalid slave number...\n");
+ return;
+ }
+
+ if (priv->data->rmii_clock_external)
+ dev_err(priv->dev, "RMII External clock is not supported\n");
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->data->gmii_sel);
+}
+
+static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
+ phy_interface_t phy_mode)
+{
+ if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
+ cpsw_gmii_sel_am3352(priv, phy_mode);
+ if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
+ cpsw_gmii_sel_am3352(priv, phy_mode);
+ else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
+ cpsw_gmii_sel_dra7xx(priv, phy_mode);
+}
+
+static int cpsw_eth_probe(struct udevice *dev)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+
+ priv->dev = dev;
+ priv->data = pdata->priv_pdata;
+ ti_cm_get_macid(dev, priv->data, pdata->enetaddr);
+ /* Select phy interface in control module */
+ cpsw_phy_sel(priv, priv->data->phy_sel_compat,
+ pdata->phy_interface);
+
+ return _cpsw_register(priv);
+}
+
+#if CONFIG_IS_ENABLED(OF_CONTROL)
+static void cpsw_eth_of_parse_slave(struct cpsw_platform_data *data,
+ int slave_index, ofnode subnode)
+{
+ struct ofnode_phandle_args out_args;
+ struct cpsw_slave_data *slave_data;
+ const char *phy_mode;
+ u32 phy_id[2];
+ int ret;
+
+ slave_data = &data->slave_data[slave_index];
+
+ phy_mode = ofnode_read_string(subnode, "phy-mode");
+ if (phy_mode)
+ slave_data->phy_if = phy_get_interface_by_name(phy_mode);
+
+ ret = ofnode_parse_phandle_with_args(subnode, "phy-handle",
+ NULL, 0, 0, &out_args);
+ if (!ret) {
+ slave_data->phy_of_handle = out_args.node;
+
+ ret = ofnode_read_s32(slave_data->phy_of_handle, "reg",
+ &slave_data->phy_addr);
+ if (ret)
+ printf("error: phy addr not found in dt\n");
+ } else {
+ ret = ofnode_read_u32_array(subnode, "phy_id", phy_id, 2);
+ if (ret)
+ printf("error: phy_id read failed\n");
+ }
+
+ slave_data->max_speed = ofnode_read_s32_default(subnode,
+ "max-speed", 0);
+}
+
+static int cpsw_eth_of_to_plat(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct cpsw_platform_data *data;
+ struct gpio_desc *mode_gpios;
+ int slave_index = 0;
+ int num_mode_gpios;
+ ofnode subnode;
+ int ret;
+
+ data = calloc(1, sizeof(struct cpsw_platform_data));
+ if (!data)
+ return -ENOMEM;
+
+ pdata->priv_pdata = data;
+ pdata->iobase = dev_read_addr(dev);
+ data->version = CPSW_CTRL_VERSION_2;
+ data->bd_ram_ofs = CPSW_BD_OFFSET;
+ data->ale_reg_ofs = CPSW_ALE_OFFSET;
+ data->cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
+ data->mdio_div = CPSW_MDIO_DIV;
+ data->host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;
+
+ pdata->phy_interface = -1;
+
+ data->cpsw_base = pdata->iobase;
+
+ ret = dev_read_s32(dev, "cpdma_channels", &data->channels);
+ if (ret) {
+ printf("error: cpdma_channels not found in dt\n");
+ return ret;
+ }
+
+ ret = dev_read_s32(dev, "slaves", &data->slaves);
+ if (ret) {
+ printf("error: slaves not found in dt\n");
+ return ret;
+ }
+ data->slave_data = malloc(sizeof(struct cpsw_slave_data) *
+ data->slaves);
+
+ ret = dev_read_s32(dev, "ale_entries", &data->ale_entries);
+ if (ret) {
+ printf("error: ale_entries not found in dt\n");
+ return ret;
+ }
+
+ ret = dev_read_u32(dev, "bd_ram_size", &data->bd_ram_ofs);
+ if (ret) {
+ printf("error: bd_ram_size not found in dt\n");
+ return ret;
+ }
+
+ ret = dev_read_u32(dev, "mac_control", &data->mac_control);
+ if (ret) {
+ printf("error: ale_entries not found in dt\n");
+ return ret;
+ }
+
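+ /*
+ * Request any board-level mode-gpios as outputs; the temporary
+ * descriptor array is freed once the GPIOs have been claimed.
+ */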
+ num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
+ if (num_mode_gpios > 0) {
+ mode_gpios = malloc(sizeof(struct gpio_desc) *
+ num_mode_gpios);
+ gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
+ num_mode_gpios, GPIOD_IS_OUT);
+ free(mode_gpios);
+ }
+
+ data->active_slave = dev_read_u32_default(dev, "active_slave", 0);
+
+ ofnode_for_each_subnode(subnode, dev_ofnode(dev)) {
+ const char *name;
+
+ name = ofnode_get_name(subnode);
+ if (!strncmp(name, "mdio", 4)) {
+ data->mdio_base = ofnode_get_addr(subnode);
+ if (data->mdio_base == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get MDIO address space\n");
+ return -ENOENT;
+ }
+ }
+
+ if (!strncmp(name, "slave", 5)) {
+ if (slave_index >= data->slaves)
+ continue;
+
+ cpsw_eth_of_parse_slave(data, slave_index, subnode);
+ slave_index++;
+ }
+
+ if (!strncmp(name, "cpsw-phy-sel", 12)) {
+ data->gmii_sel = ofnode_get_addr(subnode);
+
+ if (data->gmii_sel == FDT_ADDR_T_NONE) {
+ pr_err("Not able to get gmii_sel reg address\n");
+ return -ENOENT;
+ }
+
+ if (ofnode_read_bool(subnode, "rmii-clock-ext"))
+ data->rmii_clock_external = true;
+
+ data->phy_sel_compat = ofnode_read_string(subnode,
+ "compatible");
+ if (!data->phy_sel_compat) {
+ pr_err("Not able to get gmii_sel compatible\n");
+ return -ENOENT;
+ }
+ }
+ }
+
+ data->slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
+ data->slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;
+
+ if (data->slaves == 2) {
+ data->slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
+ data->slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
+ }
+
+ ret = ti_cm_get_macid_addr(dev, data->active_slave, data);
+ if (ret < 0) {
+ pr_err("cpsw read efuse mac failed\n");
+ return ret;
+ }
+
+ pdata->phy_interface = data->slave_data[data->active_slave].phy_if;
+ if (pdata->phy_interface == -1) {
+ debug("%s: Invalid PHY interface '%s'\n", __func__,
+ phy_string_for_interface(pdata->phy_interface));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct udevice_id cpsw_eth_ids[] = {
+ { .compatible = "ti,cpsw" },
+ { .compatible = "ti,am335x-cpsw" },
+ { }
+};
+#endif
+
+int cpsw_get_slave_phy_addr(struct udevice *dev, int slave)
+{
+ struct cpsw_priv *priv = dev_get_priv(dev);
+ struct cpsw_platform_data *data = priv->data;
+
+ return data->slave_data[slave].phy_addr;
+}
+
+U_BOOT_DRIVER(eth_cpsw) = {
+ .name = "eth_cpsw",
+ .id = UCLASS_ETH,
+#if CONFIG_IS_ENABLED(OF_CONTROL)
+ .of_match = cpsw_eth_ids,
+ .of_to_plat = cpsw_eth_of_to_plat,
+ .plat_auto = sizeof(struct eth_pdata),
+#endif
+ .probe = cpsw_eth_probe,
+ .ops = &cpsw_eth_ops,
+ .priv_auto = sizeof(struct cpsw_priv),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA | DM_FLAG_PRE_RELOC,
+};
+#endif /* CONFIG_DM_ETH */
diff --git a/roms/u-boot/drivers/net/ti/cpsw_mdio.c b/roms/u-boot/drivers/net/ti/cpsw_mdio.c
new file mode 100644
index 000000000..f4cb86d10
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/cpsw_mdio.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPSW MDIO generic driver for TI AMxx/K2x/EMAC devices.
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <common.h>
+#include <log.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <miiphy.h>
+#include <wait_bit.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+struct cpsw_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_FAULT BIT(19)
+#define CONTROL_FAULT_ENABLE BIT(18)
+#define CONTROL_DIV_MASK GENMASK(15, 0)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+ u32 physel;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_PHY_REG_SHIFT (21)
+#define USERACCESS_PHY_ADDR_SHIFT (16)
+#define USERACCESS_DATA GENMASK(15, 0)
+ } user[0];
+};
+
+#define CPSW_MDIO_DIV_DEF 0xff
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define CPSW_MDIO_TIMEOUT 100 /* msecs */
+
+struct cpsw_mdio {
+ struct cpsw_mdio_regs *regs;
+ struct mii_dev *bus;
+ int div;
+};
+
+/* wait until hardware is ready for another user access */
+static int cpsw_mdio_wait_for_user_access(struct cpsw_mdio *mdio)
+{
+ return wait_for_bit_le32(&mdio->regs->user[0].access,
+ USERACCESS_GO, false,
+ CPSW_MDIO_TIMEOUT, false);
+}
+
+static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
+ int dev_addr, int phy_reg)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ int data, ret;
+ u32 reg;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ ret = cpsw_mdio_wait_for_user_access(mdio);
+ if (ret)
+ return ret;
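+ /*
+ * Trigger the read: GO + register (bits 25:21) + PHY address
+ * (bits 20:16); the result is latched in the same register once
+ * the GO bit clears.
+ */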
+ reg = (USERACCESS_GO | USERACCESS_READ |
+ (phy_reg << USERACCESS_PHY_REG_SHIFT) |
+ (phy_id << USERACCESS_PHY_ADDR_SHIFT));
+ writel(reg, &mdio->regs->user[0].access);
+ ret = cpsw_mdio_wait_for_user_access(mdio);
+ if (ret)
+ return ret;
+
+ reg = readl(&mdio->regs->user[0].access);
+ data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
+ return data;
+}
+
+static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
+ int phy_reg, u16 data)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ ret = cpsw_mdio_wait_for_user_access(mdio);
+ if (ret)
+ return ret;
+ reg = (USERACCESS_GO | USERACCESS_WRITE |
+ (phy_reg << USERACCESS_PHY_REG_SHIFT) |
+ (phy_id << USERACCESS_PHY_ADDR_SHIFT) |
+ (data & USERACCESS_DATA));
+ writel(reg, &mdio->regs->user[0].access);
+
+ return cpsw_mdio_wait_for_user_access(mdio);
+}
+
+u32 cpsw_mdio_get_alive(struct mii_dev *bus)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ u32 val;
+
+ val = readl(&mdio->regs->alive);
+ return val & GENMASK(15, 0);
+}
+
+struct mii_dev *cpsw_mdio_init(const char *name, phys_addr_t mdio_base,
+ u32 bus_freq, int fck_freq)
+{
+ struct cpsw_mdio *cpsw_mdio;
+ int ret;
+
+ cpsw_mdio = calloc(1, sizeof(*cpsw_mdio));
+ if (!cpsw_mdio) {
+ debug("failed to alloc cpsw_mdio\n");
+ return NULL;
+ }
+
+ cpsw_mdio->bus = mdio_alloc();
+ if (!cpsw_mdio->bus) {
+ debug("failed to alloc mii bus\n");
+ free(cpsw_mdio);
+ return NULL;
+ }
+
+ cpsw_mdio->regs = (struct cpsw_mdio_regs *)(uintptr_t)mdio_base;
+
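+ /*
+ * MDIO clock = fck / (div + 1); fall back to the default divider
+ * when either frequency is unknown and clamp to the 16-bit field.
+ */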
+ if (!bus_freq || !fck_freq)
+ cpsw_mdio->div = CPSW_MDIO_DIV_DEF;
+ else
+ cpsw_mdio->div = (fck_freq / bus_freq) - 1;
+ cpsw_mdio->div &= CONTROL_DIV_MASK;
+
+ /* set enable and clock divider */
+ writel(cpsw_mdio->div | CONTROL_ENABLE | CONTROL_FAULT |
+ CONTROL_FAULT_ENABLE, &cpsw_mdio->regs->control);
+ wait_for_bit_le32(&cpsw_mdio->regs->control,
+ CONTROL_IDLE, false, CPSW_MDIO_TIMEOUT, true);
+
+ /*
+ * wait for scan logic to settle:
+ * the scan time consists of (a) a large fixed component, and (b) a
+ * small component that varies with the mii bus frequency. These
+ * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
+ * silicon. Since the effect of (b) was found to be largely
+ * negligible, we keep things simple here.
+ */
+ mdelay(1);
+
+ cpsw_mdio->bus->read = cpsw_mdio_read;
+ cpsw_mdio->bus->write = cpsw_mdio_write;
+ cpsw_mdio->bus->priv = cpsw_mdio;
+ snprintf(cpsw_mdio->bus->name, sizeof(cpsw_mdio->bus->name), "%s", name);
+
+ ret = mdio_register(cpsw_mdio->bus);
+ if (ret < 0) {
+ debug("failed to register mii bus\n");
+ goto free_bus;
+ }
+
+ return cpsw_mdio->bus;
+
+free_bus:
+ mdio_free(cpsw_mdio->bus);
+ free(cpsw_mdio);
+ return NULL;
+}
+
+void cpsw_mdio_free(struct mii_dev *bus)
+{
+ struct cpsw_mdio *mdio = bus->priv;
+ u32 reg;
+
+ /* disable mdio */
+ reg = readl(&mdio->regs->control);
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &mdio->regs->control);
+
+ mdio_unregister(bus);
+ mdio_free(bus);
+ free(mdio);
+}
diff --git a/roms/u-boot/drivers/net/ti/cpsw_mdio.h b/roms/u-boot/drivers/net/ti/cpsw_mdio.h
new file mode 100644
index 000000000..dbf4a2dca
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/cpsw_mdio.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * CPSW MDIO generic driver API for TI AMxx/K2x/EMAC devices.
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef CPSW_MDIO_H_
+#define CPSW_MDIO_H_
+
+struct cpsw_mdio;
+
+struct mii_dev *cpsw_mdio_init(const char *name, phys_addr_t mdio_base,
+ u32 bus_freq, int fck_freq);
+void cpsw_mdio_free(struct mii_dev *bus);
+u32 cpsw_mdio_get_alive(struct mii_dev *bus);
+
+#endif /* CPSW_MDIO_H_ */
diff --git a/roms/u-boot/drivers/net/ti/davinci_emac.c b/roms/u-boot/drivers/net/ti/davinci_emac.c
new file mode 100644
index 000000000..bfe1b84cd
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/davinci_emac.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ethernet driver for TI TMS320DM644x (DaVinci) chips.
+ *
+ * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net>
+ *
+ * Parts shamelessly stolen from TI's dm644x_emac.c. Original copyright
+ * follows:
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * dm644x_emac.c
+ *
+ * TI DaVinci (DM644X) EMAC peripheral driver source for DV-EVM
+ *
+ * Copyright (C) 2005 Texas Instruments.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * Modifications:
+ * ver. 1.0: Sep 2005, Anant Gole - Created EMAC version for uBoot.
+ * ver 1.1: Nov 2005, Anant Gole - Extended the RX logic for multiple descriptors
+ */
+#include <common.h>
+#include <command.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <net.h>
+#include <miiphy.h>
+#include <malloc.h>
+#include <asm/cache.h>
+#include <linux/compiler.h>
+#include <asm/arch/emac_defs.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include "davinci_emac.h"
+
+unsigned int emac_dbg = 0;
+#define debug_emac(fmt,args...) if (emac_dbg) printf(fmt,##args)
+
+#ifdef EMAC_HW_RAM_ADDR
+static inline unsigned long BD_TO_HW(unsigned long x)
+{
+ if (x == 0)
+ return 0;
+
+ return x - EMAC_WRAPPER_RAM_ADDR + EMAC_HW_RAM_ADDR;
+}
+
+static inline unsigned long HW_TO_BD(unsigned long x)
+{
+ if (x == 0)
+ return 0;
+
+ return x - EMAC_HW_RAM_ADDR + EMAC_WRAPPER_RAM_ADDR;
+}
+#else
+#define BD_TO_HW(x) (x)
+#define HW_TO_BD(x) (x)
+#endif
+
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+#define emac_gigabit_enable(phy_addr) davinci_eth_gigabit_enable(phy_addr)
+#else
+#define emac_gigabit_enable(phy_addr) /* no gigabit to enable */
+#endif
+
+#if !defined(CONFIG_SYS_EMAC_TI_CLKDIV)
+#define CONFIG_SYS_EMAC_TI_CLKDIV ((EMAC_MDIO_BUS_FREQ / \
+ EMAC_MDIO_CLOCK_FREQ) - 1)
+#endif
+
+static void davinci_eth_mdio_enable(void);
+
+static int gen_init_phy(int phy_addr);
+static int gen_is_phy_connected(int phy_addr);
+static int gen_get_link_speed(int phy_addr);
+static int gen_auto_negotiate(int phy_addr);
+
+void eth_mdio_enable(void)
+{
+ davinci_eth_mdio_enable();
+}
+
+/* EMAC Addresses */
+static volatile emac_regs *adap_emac = (emac_regs *)EMAC_BASE_ADDR;
+static volatile ewrap_regs *adap_ewrap = (ewrap_regs *)EMAC_WRAPPER_BASE_ADDR;
+static volatile mdio_regs *adap_mdio = (mdio_regs *)EMAC_MDIO_BASE_ADDR;
+
+/* EMAC descriptors */
+static volatile emac_desc *emac_rx_desc = (emac_desc *)(EMAC_WRAPPER_RAM_ADDR + EMAC_RX_DESC_BASE);
+static volatile emac_desc *emac_tx_desc = (emac_desc *)(EMAC_WRAPPER_RAM_ADDR + EMAC_TX_DESC_BASE);
+static volatile emac_desc *emac_rx_active_head = 0;
+static volatile emac_desc *emac_rx_active_tail = 0;
+static int emac_rx_queue_active = 0;
+
+/* Receive packet buffers */
+static unsigned char emac_rx_buffers[EMAC_MAX_RX_BUFFERS * EMAC_RXBUF_SIZE]
+ __aligned(ARCH_DMA_MINALIGN);
+
+#ifndef CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT
+#define CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT 3
+#endif
+
+/* PHY address for a discovered PHY (0xff - not found) */
+static u_int8_t active_phy_addr[CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT];
+
+/* number of PHY found active */
+static u_int8_t num_phy;
+
+phy_t phy[CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT];
+
+static int davinci_emac_write_hwaddr(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ unsigned long mac_hi;
+ unsigned long mac_lo;
+
+ /*
+ * Set MAC Addresses & Init multicast Hash to 0 (disable any multicast
+ * receive)
+ * Using channel 0 only - other channels are disabled
+ */
+ writel(0, &adap_emac->MACINDEX);
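+ /* The HI registers hold bytes 0-3 of the MAC address, the LO registers bytes 4-5 */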
+ mac_hi = (pdata->enetaddr[3] << 24) |
+ (pdata->enetaddr[2] << 16) |
+ (pdata->enetaddr[1] << 8) |
+ (pdata->enetaddr[0]);
+ mac_lo = (pdata->enetaddr[5] << 8) |
+ (pdata->enetaddr[4]);
+
+ writel(mac_hi, &adap_emac->MACADDRHI);
+#if defined(DAVINCI_EMAC_VERSION2)
+ writel(mac_lo | EMAC_MAC_ADDR_IS_VALID | EMAC_MAC_ADDR_MATCH,
+ &adap_emac->MACADDRLO);
+#else
+ writel(mac_lo, &adap_emac->MACADDRLO);
+#endif
+
+ writel(0, &adap_emac->MACHASH1);
+ writel(0, &adap_emac->MACHASH2);
+
+ /* Set source MAC address - REQUIRED */
+ writel(mac_hi, &adap_emac->MACSRCADDRHI);
+ writel(mac_lo, &adap_emac->MACSRCADDRLO);
+
+
+ return 0;
+}
+
+static void davinci_eth_mdio_enable(void)
+{
+ u_int32_t clkdiv;
+
+ clkdiv = CONFIG_SYS_EMAC_TI_CLKDIV;
+
+ writel((clkdiv & 0xff) |
+ MDIO_CONTROL_ENABLE |
+ MDIO_CONTROL_FAULT |
+ MDIO_CONTROL_FAULT_ENABLE,
+ &adap_mdio->CONTROL);
+
+ while (readl(&adap_mdio->CONTROL) & MDIO_CONTROL_IDLE)
+ ;
+}
+
+/*
+ * Tries to find active connected PHYs and records their addresses in the
+ * active_phy_addr[] table. Returns the number of active PHYs found, or 0
+ * if none (or more than the supported number) are detected.
+ */
+static int davinci_eth_phy_detect(void)
+{
+ u_int32_t phy_act_state;
+ int i;
+ int j;
+ unsigned int count = 0;
+
+ for (i = 0; i < CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT; i++)
+ active_phy_addr[i] = 0xff;
+
+ udelay(1000);
+ phy_act_state = readl(&adap_mdio->ALIVE);
+
+ if (phy_act_state == 0)
+ return 0; /* No active PHYs */
+
+ debug_emac("davinci_eth_phy_detect(), ALIVE = 0x%08x\n", phy_act_state);
+
+ for (i = 0, j = 0; i < 32; i++)
+ if (phy_act_state & (1 << i)) {
+ count++;
+ if (count <= CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT) {
+ active_phy_addr[j++] = i;
+ } else {
+ printf("%s: to many PHYs detected.\n",
+ __func__);
+ count = 0;
+ break;
+ }
+ }
+
+ num_phy = count;
+
+ return count;
+}
+
+
+/* Read a PHY register via the MDIO interface. Returns 1 on success, 0 otherwise */
+int davinci_eth_phy_read(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t *data)
+{
+ int tmp;
+
+ while (readl(&adap_mdio->USERACCESS0) & MDIO_USERACCESS0_GO)
+ ;
+
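+ /* Issue the read: GO + register (bits 25:21) + PHY address (bits 20:16) */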
+ writel(MDIO_USERACCESS0_GO |
+ MDIO_USERACCESS0_WRITE_READ |
+ ((reg_num & 0x1f) << 21) |
+ ((phy_addr & 0x1f) << 16),
+ &adap_mdio->USERACCESS0);
+
+ /* Wait for command to complete */
+ while ((tmp = readl(&adap_mdio->USERACCESS0)) & MDIO_USERACCESS0_GO)
+ ;
+
+ if (tmp & MDIO_USERACCESS0_ACK) {
+ *data = tmp & 0xffff;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write to a PHY register via the MDIO interface. Blocks until the operation completes. */
+int davinci_eth_phy_write(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t data)
+{
+
+ while (readl(&adap_mdio->USERACCESS0) & MDIO_USERACCESS0_GO)
+ ;
+
+ writel(MDIO_USERACCESS0_GO |
+ MDIO_USERACCESS0_WRITE_WRITE |
+ ((reg_num & 0x1f) << 21) |
+ ((phy_addr & 0x1f) << 16) |
+ (data & 0xffff),
+ &adap_mdio->USERACCESS0);
+
+ /* Wait for command to complete */
+ while (readl(&adap_mdio->USERACCESS0) & MDIO_USERACCESS0_GO)
+ ;
+
+ return 1;
+}
+
+/* PHY functions for a generic PHY */
+static int gen_init_phy(int phy_addr)
+{
+ int ret = 1;
+
+ if (gen_get_link_speed(phy_addr)) {
+ /* Try another time */
+ ret = gen_get_link_speed(phy_addr);
+ }
+
+ return(ret);
+}
+
+static int gen_is_phy_connected(int phy_addr)
+{
+ u_int16_t dummy;
+
+ return davinci_eth_phy_read(phy_addr, MII_PHYSID1, &dummy);
+}
+
+static int get_active_phy(void)
+{
+ int i;
+
+ for (i = 0; i < num_phy; i++)
+ if (phy[i].get_link_speed(active_phy_addr[i]))
+ return i;
+
+ return -1; /* Return error if no link */
+}
+
+static int gen_get_link_speed(int phy_addr)
+{
+ u_int16_t tmp;
+
+ if (davinci_eth_phy_read(phy_addr, MII_STATUS_REG, &tmp) &&
+ (tmp & 0x04)) {
+#if defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM)
+ davinci_eth_phy_read(phy_addr, MII_LPA, &tmp);
+
+ /* Speed doesn't matter, there is no setting for it in EMAC. */
+ if (tmp & (LPA_100FULL | LPA_10FULL)) {
+ /* set EMAC for Full Duplex */
+ writel(EMAC_MACCONTROL_MIIEN_ENABLE |
+ EMAC_MACCONTROL_FULLDUPLEX_ENABLE,
+ &adap_emac->MACCONTROL);
+ } else {
+ /* set EMAC for Half Duplex */
+ writel(EMAC_MACCONTROL_MIIEN_ENABLE,
+ &adap_emac->MACCONTROL);
+ }
+
+ if (tmp & (LPA_100FULL | LPA_100HALF))
+ writel(readl(&adap_emac->MACCONTROL) |
+ EMAC_MACCONTROL_RMIISPEED_100,
+ &adap_emac->MACCONTROL);
+ else
+ writel(readl(&adap_emac->MACCONTROL) &
+ ~EMAC_MACCONTROL_RMIISPEED_100,
+ &adap_emac->MACCONTROL);
+#endif
+ return(1);
+ }
+
+ return(0);
+}
+
+static int gen_auto_negotiate(int phy_addr)
+{
+ u_int16_t tmp;
+ u_int16_t val;
+ unsigned long cntr = 0;
+
+ if (!davinci_eth_phy_read(phy_addr, MII_BMCR, &tmp))
+ return 0;
+
+ val = tmp | BMCR_FULLDPLX | BMCR_ANENABLE |
+ BMCR_SPEED100;
+ davinci_eth_phy_write(phy_addr, MII_BMCR, val);
+
+ if (!davinci_eth_phy_read(phy_addr, MII_ADVERTISE, &val))
+ return 0;
+
+ val |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL |
+ ADVERTISE_10HALF);
+ davinci_eth_phy_write(phy_addr, MII_ADVERTISE, val);
+
+ if (!davinci_eth_phy_read(phy_addr, MII_BMCR, &tmp))
+ return(0);
+
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+ davinci_eth_phy_read(phy_addr, MII_CTRL1000, &val);
+ val |= PHY_1000BTCR_1000FD;
+ val &= ~PHY_1000BTCR_1000HD;
+ davinci_eth_phy_write(phy_addr, MII_CTRL1000, val);
+ davinci_eth_phy_read(phy_addr, MII_CTRL1000, &val);
+#endif
+
+ /* Restart Auto_negotiation */
+ tmp |= BMCR_ANRESTART;
+ davinci_eth_phy_write(phy_addr, MII_BMCR, tmp);
+
+ /* Wait (up to 200 * 40 ms = 8 s) for auto-negotiation to complete */
+ do {
+ udelay(40000);
+ if (!davinci_eth_phy_read(phy_addr, MII_BMSR, &tmp))
+ return 0;
+
+ if (tmp & BMSR_ANEGCOMPLETE)
+ break;
+
+ cntr++;
+ } while (cntr < 200);
+
+ if (!davinci_eth_phy_read(phy_addr, MII_BMSR, &tmp))
+ return(0);
+
+ if (!(tmp & BMSR_ANEGCOMPLETE))
+ return(0);
+
+ return(gen_get_link_speed(phy_addr));
+}
+/* End of generic PHY functions */
+
+
+#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
+static int davinci_mii_phy_read(struct mii_dev *bus, int addr, int devad,
+ int reg)
+{
+ unsigned short value = 0;
+ int retval = davinci_eth_phy_read(addr, reg, &value);
+
+ return retval ? value : -EIO;
+}
+
+static int davinci_mii_phy_write(struct mii_dev *bus, int addr, int devad,
+ int reg, u16 value)
+{
+ return davinci_eth_phy_write(addr, reg, value) ? 0 : 1;
+}
+#endif
+
+static void __attribute__((unused)) davinci_eth_gigabit_enable(int phy_addr)
+{
+ u_int16_t data;
+
+ if (davinci_eth_phy_read(phy_addr, 0, &data)) {
+ if (data & (1 << 6)) { /* speed selection MSB */
+ /*
+ * Check whether the negotiated link is gigabit;
+ * if so, enable gigabit mode in the MAC.
+ */
+ writel(readl(&adap_emac->MACCONTROL) |
+ EMAC_MACCONTROL_GIGFORCE |
+ EMAC_MACCONTROL_GIGABIT_ENABLE,
+ &adap_emac->MACCONTROL);
+ }
+ }
+}
+
+/* Eth device open */
+static int davinci_emac_start(struct udevice *dev)
+{
+ dv_reg_p addr;
+ u_int32_t clkdiv, cnt, mac_control;
+ uint16_t __maybe_unused lpa_val;
+ volatile emac_desc *rx_desc;
+ int index;
+
+ debug_emac("+ emac_open\n");
+
+ /* Reset EMAC module and disable interrupts in wrapper */
+ writel(1, &adap_emac->SOFTRESET);
+ while (readl(&adap_emac->SOFTRESET) != 0)
+ ;
+#if defined(DAVINCI_EMAC_VERSION2)
+ writel(1, &adap_ewrap->softrst);
+ while (readl(&adap_ewrap->softrst) != 0)
+ ;
+#else
+ writel(0, &adap_ewrap->EWCTL);
+ for (cnt = 0; cnt < 5; cnt++) {
+ clkdiv = readl(&adap_ewrap->EWCTL);
+ }
+#endif
+
+#if defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM)
+ adap_ewrap->c0rxen = adap_ewrap->c1rxen = adap_ewrap->c2rxen = 0;
+ adap_ewrap->c0txen = adap_ewrap->c1txen = adap_ewrap->c2txen = 0;
+ adap_ewrap->c0miscen = adap_ewrap->c1miscen = adap_ewrap->c2miscen = 0;
+#endif
+ rx_desc = emac_rx_desc;
+
+ writel(1, &adap_emac->TXCONTROL);
+ writel(1, &adap_emac->RXCONTROL);
+
+ davinci_emac_write_hwaddr(dev);
+
+ /* Set DMA 8 TX / 8 RX Head pointers to 0 */
+ addr = &adap_emac->TX0HDP;
+ for (cnt = 0; cnt < 8; cnt++)
+ writel(0, addr++);
+
+ addr = &adap_emac->RX0HDP;
+ for (cnt = 0; cnt < 8; cnt++)
+ writel(0, addr++);
+
+ /* Clear Statistics (do this before setting MacControl register) */
+ addr = &adap_emac->RXGOODFRAMES;
+ for(cnt = 0; cnt < EMAC_NUM_STATS; cnt++)
+ writel(0, addr++);
+
+ /* No multicast addressing */
+ writel(0, &adap_emac->MACHASH1);
+ writel(0, &adap_emac->MACHASH2);
+
+ /* Create RX queue and set receive process in place */
+ emac_rx_active_head = emac_rx_desc;
+ for (cnt = 0; cnt < EMAC_MAX_RX_BUFFERS; cnt++) {
+ rx_desc->next = BD_TO_HW((u_int32_t)(rx_desc + 1));
+ rx_desc->buffer = &emac_rx_buffers[cnt * EMAC_RXBUF_SIZE];
+ rx_desc->buff_off_len = EMAC_MAX_ETHERNET_PKT_SIZE;
+ rx_desc->pkt_flag_len = EMAC_CPPI_OWNERSHIP_BIT;
+ rx_desc++;
+ }
+
+ /* Finalize the rx desc list */
+ rx_desc--;
+ rx_desc->next = 0;
+ emac_rx_active_tail = rx_desc;
+ emac_rx_queue_active = 1;
+
+ /* Enable TX/RX */
+ writel(EMAC_MAX_ETHERNET_PKT_SIZE, &adap_emac->RXMAXLEN);
+ writel(0, &adap_emac->RXBUFFEROFFSET);
+
+ /*
+ * No fancy configs - use this for promiscuous debug
+ * - EMAC_RXMBPENABLE_RXCAFEN_ENABLE
+ */
+ writel(EMAC_RXMBPENABLE_RXBROADEN, &adap_emac->RXMBPENABLE);
+
+ /* Enable ch 0 only */
+ writel(1, &adap_emac->RXUNICASTSET);
+
+ /* Init MDIO & get link state */
+ clkdiv = CONFIG_SYS_EMAC_TI_CLKDIV;
+ writel((clkdiv & 0xff) | MDIO_CONTROL_ENABLE | MDIO_CONTROL_FAULT,
+ &adap_mdio->CONTROL);
+
+ /* We need to wait for MDIO to start */
+ udelay(1000);
+
+ index = get_active_phy();
+ if (index == -1)
+ return(0);
+
+ /* Enable MII interface */
+ mac_control = EMAC_MACCONTROL_MIIEN_ENABLE;
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+ davinci_eth_phy_read(active_phy_addr[index], MII_STAT1000, &lpa_val);
+ if (lpa_val & PHY_1000BTSR_1000FD) {
+ debug_emac("eth_open : gigabit negotiated\n");
+ mac_control |= EMAC_MACCONTROL_FULLDUPLEX_ENABLE;
+ mac_control |= EMAC_MACCONTROL_GIGABIT_ENABLE;
+ }
+#endif
+
+ davinci_eth_phy_read(active_phy_addr[index], MII_LPA, &lpa_val);
+ if (lpa_val & (LPA_100FULL | LPA_10FULL))
+ /* set EMAC for Full Duplex */
+ mac_control |= EMAC_MACCONTROL_FULLDUPLEX_ENABLE;
+#if defined(CONFIG_SOC_DA8XX) || \
+ (defined(CONFIG_OMAP34XX) && defined(CONFIG_DRIVER_TI_EMAC_USE_RMII))
+ mac_control |= EMAC_MACCONTROL_RMIISPEED_100;
+#endif
+ writel(mac_control, &adap_emac->MACCONTROL);
+ /* Start receive process */
+ writel(BD_TO_HW((u_int32_t)emac_rx_desc), &adap_emac->RX0HDP);
+
+ debug_emac("- emac_open\n");
+
+ return(1);
+}
+
+/* EMAC Channel Teardown */
+static void davinci_eth_ch_teardown(int ch)
+{
+ dv_reg dly = 0xff;
+ dv_reg cnt;
+
+ debug_emac("+ emac_ch_teardown\n");
+
+ if (ch == EMAC_CH_TX) {
+ /* Init TX channel teardown */
+ writel(0, &adap_emac->TXTEARDOWN);
+ do {
+ /*
+ * Wait here for Tx teardown completion interrupt to
+ * occur. Note: A task delay can be called here to pend
+ * rather than occupying CPU cycles - anyway it has
+ * been found that teardown takes very few cpu cycles
+ * and does not affect functionality
+ */
+ dly--;
+ udelay(1);
+ if (dly == 0)
+ break;
+ cnt = readl(&adap_emac->TX0CP);
+ } while (cnt != 0xfffffffc);
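+ /* 0xfffffffc (EMAC_TEARDOWN_VALUE) marks teardown completion; ack it */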
+ writel(cnt, &adap_emac->TX0CP);
+ writel(0, &adap_emac->TX0HDP);
+ } else {
+ /* Init RX channel teardown */
+ writel(0, &adap_emac->RXTEARDOWN);
+ do {
+ /*
+ * Wait here for Rx teardown completion interrupt to
+ * occur. Note: A task delay can be called here to pend
+ * rather than occupying CPU cycles - anyway it has
+ * been found that teardown takes very few cpu cycles
+ * and does not affect functionality
+ */
+ dly--;
+ udelay(1);
+ if (dly == 0)
+ break;
+ cnt = readl(&adap_emac->RX0CP);
+ } while (cnt != 0xfffffffc);
+ writel(cnt, &adap_emac->RX0CP);
+ writel(0, &adap_emac->RX0HDP);
+ }
+
+ debug_emac("- emac_ch_teardown\n");
+}
+
+/* Eth device close */
+static void davinci_emac_stop(struct udevice *dev)
+{
+ debug_emac("+ emac_close\n");
+
+ davinci_eth_ch_teardown(EMAC_CH_TX); /* TX Channel teardown */
+ if (readl(&adap_emac->RXCONTROL) & 1)
+ davinci_eth_ch_teardown(EMAC_CH_RX); /* RX Channel teardown */
+
+ /* Reset EMAC module and disable interrupts in wrapper */
+ writel(1, &adap_emac->SOFTRESET);
+#if defined(DAVINCI_EMAC_VERSION2)
+ writel(1, &adap_ewrap->softrst);
+#else
+ writel(0, &adap_ewrap->EWCTL);
+#endif
+
+#if defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM)
+ adap_ewrap->c0rxen = adap_ewrap->c1rxen = adap_ewrap->c2rxen = 0;
+ adap_ewrap->c0txen = adap_ewrap->c1txen = adap_ewrap->c2txen = 0;
+ adap_ewrap->c0miscen = adap_ewrap->c1miscen = adap_ewrap->c2miscen = 0;
+#endif
+ debug_emac("- emac_close\n");
+}
+
+static int tx_send_loop = 0;
+
+/*
+ * This function sends a single packet on the network and returns
+ * positive number (number of bytes transmitted) or negative for error
+ */
+static int davinci_emac_send(struct udevice *dev,
+ void *packet, int length)
+{
+ int ret_status = -1;
+ int index;
+ tx_send_loop = 0;
+
+ index = get_active_phy();
+ if (index == -1) {
+ printf(" WARN: emac_send_packet: No link\n");
+ return (ret_status);
+ }
+
+ /* Check packet size and if < EMAC_MIN_ETHERNET_PKT_SIZE, pad it up */
+ if (length < EMAC_MIN_ETHERNET_PKT_SIZE) {
+ length = EMAC_MIN_ETHERNET_PKT_SIZE;
+ }
+
+ /* Populate the TX descriptor */
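+ /*
+ * The whole frame fits in one descriptor, so SOP and EOP are set
+ * together and ownership is handed to the EMAC.
+ */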
+ emac_tx_desc->next = 0;
+ emac_tx_desc->buffer = (u_int8_t *) packet;
+ emac_tx_desc->buff_off_len = (length & 0xffff);
+ emac_tx_desc->pkt_flag_len = ((length & 0xffff) |
+ EMAC_CPPI_SOP_BIT |
+ EMAC_CPPI_OWNERSHIP_BIT |
+ EMAC_CPPI_EOP_BIT);
+
+ flush_dcache_range((unsigned long)packet,
+ (unsigned long)packet + ALIGN(length, PKTALIGN));
+
+ /* Send the packet */
+ writel(BD_TO_HW((unsigned long)emac_tx_desc), &adap_emac->TX0HDP);
+
+ /* Wait for packet to complete or link down */
+ while (1) {
+ if (!phy[index].get_link_speed(active_phy_addr[index])) {
+ davinci_eth_ch_teardown (EMAC_CH_TX);
+ return (ret_status);
+ }
+
+ if (readl(&adap_emac->TXINTSTATRAW) & 0x01) {
+ ret_status = length;
+ break;
+ }
+ tx_send_loop++;
+ }
+
+ return (ret_status);
+}
+
+/*
+ * This function handles receipt of a packet from the network
+ */
+static int davinci_emac_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ volatile emac_desc *rx_curr_desc;
+ volatile emac_desc *curr_desc;
+ volatile emac_desc *tail_desc;
+ int status, ret = -1;
+
+ rx_curr_desc = emac_rx_active_head;
+ if (!rx_curr_desc)
+ return 0;
+ *packetp = rx_curr_desc->buffer;
+ status = rx_curr_desc->pkt_flag_len;
+ if ((status & EMAC_CPPI_OWNERSHIP_BIT) == 0) {
+ if (status & EMAC_CPPI_RX_ERROR_FRAME) {
+ /* Error in packet - discard it and requeue desc */
+ printf ("WARN: emac_rcv_pkt: Error in packet\n");
+ } else {
+ unsigned long tmp = (unsigned long)rx_curr_desc->buffer;
+ unsigned short len =
+ rx_curr_desc->buff_off_len & 0xffff;
+
+ invalidate_dcache_range(tmp, tmp + ALIGN(len, PKTALIGN));
+ ret = len;
+ }
+
+ /* Ack received packet descriptor */
+ writel(BD_TO_HW((ulong)rx_curr_desc), &adap_emac->RX0CP);
+ curr_desc = rx_curr_desc;
+ emac_rx_active_head =
+ (volatile emac_desc *) (HW_TO_BD(rx_curr_desc->next));
+
+ if (status & EMAC_CPPI_EOQ_BIT) {
+ if (emac_rx_active_head) {
+ writel(BD_TO_HW((ulong)emac_rx_active_head),
+ &adap_emac->RX0HDP);
+ } else {
+ emac_rx_queue_active = 0;
+ printf ("INFO:emac_rcv_packet: RX Queue not active\n");
+ }
+ }
+
+ /* Recycle RX descriptor */
+ rx_curr_desc->buff_off_len = EMAC_MAX_ETHERNET_PKT_SIZE;
+ rx_curr_desc->pkt_flag_len = EMAC_CPPI_OWNERSHIP_BIT;
+ rx_curr_desc->next = 0;
+
+ if (emac_rx_active_head == 0) {
+ printf ("INFO: emac_rcv_pkt: active queue head = 0\n");
+ emac_rx_active_head = curr_desc;
+ emac_rx_active_tail = curr_desc;
+ if (emac_rx_queue_active != 0) {
+ writel(BD_TO_HW((ulong)emac_rx_active_head),
+ &adap_emac->RX0HDP);
+ printf ("INFO: emac_rcv_pkt: active queue head = 0, HDP fired\n");
+ emac_rx_queue_active = 1;
+ }
+ } else {
+ tail_desc = emac_rx_active_tail;
+ emac_rx_active_tail = curr_desc;
+ tail_desc->next = BD_TO_HW((ulong) curr_desc);
+ status = tail_desc->pkt_flag_len;
+ if (status & EMAC_CPPI_EOQ_BIT) {
+ writel(BD_TO_HW((ulong)curr_desc),
+ &adap_emac->RX0HDP);
+ status &= ~EMAC_CPPI_EOQ_BIT;
+ tail_desc->pkt_flag_len = status;
+ }
+ }
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * This function initializes the EMAC hardware. It does NOT set up the EMAC
+ * module's power or pin multiplexing; that is done by board_init() much
+ * earlier in the boot process. Returns 0 on success or a negative error
+ * code from MDIO registration.
+ */
+static int davinci_emac_probe(struct udevice *dev)
+{
+ u_int32_t phy_id;
+ u_int16_t tmp;
+ int i;
+ int ret;
+
+ davinci_eth_mdio_enable();
+
+ /* let the EMAC detect the PHYs */
+ udelay(5000);
+
+ for (i = 0; i < 256; i++) {
+ if (readl(&adap_mdio->ALIVE))
+ break;
+ udelay(1000);
+ }
+
+ if (i >= 256) {
+ printf("No ETH PHY detected!!!\n");
+ return(0);
+ }
+
+ /* Find if PHY(s) is/are connected */
+ ret = davinci_eth_phy_detect();
+ if (!ret)
+ return(0);
+ else
+ debug_emac(" %d ETH PHY detected\n", ret);
+
+ /* Get PHY ID and initialize phy_ops for a detected PHY */
+ for (i = 0; i < num_phy; i++) {
+ if (!davinci_eth_phy_read(active_phy_addr[i], MII_PHYSID1,
+ &tmp)) {
+ active_phy_addr[i] = 0xff;
+ continue;
+ }
+
+ phy_id = (tmp << 16) & 0xffff0000;
+
+ if (!davinci_eth_phy_read(active_phy_addr[i], MII_PHYSID2,
+ &tmp)) {
+ active_phy_addr[i] = 0xff;
+ continue;
+ }
+
+ phy_id |= tmp & 0x0000ffff;
+
+ sprintf(phy[i].name, "GENERIC @ 0x%02x",
+ active_phy_addr[i]);
+ phy[i].init = gen_init_phy;
+ phy[i].is_phy_connected = gen_is_phy_connected;
+ phy[i].get_link_speed = gen_get_link_speed;
+ phy[i].auto_negotiate = gen_auto_negotiate;
+
+ debug("Ethernet PHY: %s\n", phy[i].name);
+
+ int retval;
+ struct mii_dev *mdiodev = mdio_alloc();
+ if (!mdiodev)
+ return -ENOMEM;
+ strncpy(mdiodev->name, phy[i].name, MDIO_NAME_LEN);
+ mdiodev->read = davinci_mii_phy_read;
+ mdiodev->write = davinci_mii_phy_write;
+
+ retval = mdio_register(mdiodev);
+ if (retval < 0)
+ return retval;
+#ifdef DAVINCI_EMAC_GIG_ENABLE
+#define PHY_CONF_REG 22
+ /* Enable PHY to clock out TX_CLK */
+ davinci_eth_phy_read(active_phy_addr[i], PHY_CONF_REG, &tmp);
+ tmp |= PHY_CONF_TXCLKEN;
+ davinci_eth_phy_write(active_phy_addr[i], PHY_CONF_REG, tmp);
+ davinci_eth_phy_read(active_phy_addr[i], PHY_CONF_REG, &tmp);
+#endif
+ }
+
+#if defined(CONFIG_TI816X) || (defined(CONFIG_DRIVER_TI_EMAC_USE_RMII) && \
+ defined(CONFIG_MACH_DAVINCI_DA850_EVM) && \
+ !defined(CONFIG_DRIVER_TI_EMAC_RMII_NO_NEGOTIATE))
+ for (i = 0; i < num_phy; i++) {
+ if (phy[i].is_phy_connected(i))
+ phy[i].auto_negotiate(i);
+ }
+#endif
+ return 0;
+}
+
+static const struct eth_ops davinci_emac_ops = {
+ .start = davinci_emac_start,
+ .send = davinci_emac_send,
+ .recv = davinci_emac_recv,
+ .stop = davinci_emac_stop,
+ .write_hwaddr = davinci_emac_write_hwaddr,
+};
+
+static const struct udevice_id davinci_emac_ids[] = {
+ { .compatible = "ti,davinci-dm6467-emac" },
+ { .compatible = "ti,am3517-emac", },
+ { .compatible = "ti,dm816-emac", },
+ { }
+};
+
+U_BOOT_DRIVER(davinci_emac) = {
+ .name = "davinci_emac",
+ .id = UCLASS_ETH,
+ .of_match = davinci_emac_ids,
+ .probe = davinci_emac_probe,
+ .ops = &davinci_emac_ops,
+ .plat_auto = sizeof(struct eth_pdata),
+};
diff --git a/roms/u-boot/drivers/net/ti/davinci_emac.h b/roms/u-boot/drivers/net/ti/davinci_emac.h
new file mode 100644
index 000000000..695855b4d
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/davinci_emac.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2011 Ilya Yanok, Emcraft Systems
+ *
+ * Based on: mach-davinci/emac_defs.h
+ * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net>
+ */
+
+#ifndef _DAVINCI_EMAC_H_
+#define _DAVINCI_EMAC_H_
+/* Ethernet Min/Max packet size */
+#define EMAC_MIN_ETHERNET_PKT_SIZE 60
+#define EMAC_MAX_ETHERNET_PKT_SIZE 1518
+/* Buffer size (should be aligned on 32 byte and cache line) */
+#define EMAC_RXBUF_SIZE ALIGN(ALIGN(EMAC_MAX_ETHERNET_PKT_SIZE, 32),\
+ ARCH_DMA_MINALIGN)
+
+/* Number of RX packet buffers */
+#define EMAC_MAX_RX_BUFFERS 10
+
+
+/***********************************************
+ ******** Internally used macros ***************
+ ***********************************************/
+
+#define EMAC_CH_TX 1
+#define EMAC_CH_RX 0
+
+/* Each descriptor occupies 4 words; RX descriptors start at 0 and
+ * space is reserved for up to 64 descriptors
+ */
+#define EMAC_RX_DESC_BASE 0x0
+#define EMAC_TX_DESC_BASE 0x1000
+
+/* EMAC Teardown value */
+#define EMAC_TEARDOWN_VALUE 0xfffffffc
+
+/* MII Status Register */
+#define MII_STATUS_REG 1
+/* PHY Configuration register */
+#define PHY_CONF_TXCLKEN (1 << 5)
+
+/* Number of statistics registers */
+#define EMAC_NUM_STATS 36
+
+
+/* EMAC Descriptor */
+typedef volatile struct _emac_desc
+{
+ u_int32_t next; /* Pointer to next descriptor
+ in chain */
+ u_int8_t *buffer; /* Pointer to data buffer */
+ u_int32_t buff_off_len; /* Buffer Offset(MSW) and Length(LSW) */
+ u_int32_t pkt_flag_len; /* Packet Flags(MSW) and Length(LSW) */
+} emac_desc;
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT (0x80000000)
+#define EMAC_CPPI_EOP_BIT (0x40000000)
+#define EMAC_CPPI_OWNERSHIP_BIT (0x20000000)
+#define EMAC_CPPI_EOQ_BIT (0x10000000)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT (0x08000000)
+#define EMAC_CPPI_PASS_CRC_BIT (0x04000000)
+
+#define EMAC_CPPI_RX_ERROR_FRAME (0x03fc0000)
+
+#define EMAC_MACCONTROL_MIIEN_ENABLE (0x20)
+#define EMAC_MACCONTROL_FULLDUPLEX_ENABLE (0x1)
+#define EMAC_MACCONTROL_GIGABIT_ENABLE (1 << 7)
+#define EMAC_MACCONTROL_GIGFORCE (1 << 17)
+#define EMAC_MACCONTROL_RMIISPEED_100 (1 << 15)
+
+#define EMAC_MAC_ADDR_MATCH (1 << 19)
+#define EMAC_MAC_ADDR_IS_VALID (1 << 20)
+
+#define EMAC_RXMBPENABLE_RXCAFEN_ENABLE (0x200000)
+#define EMAC_RXMBPENABLE_RXBROADEN (0x2000)
+
+
+#define MDIO_CONTROL_IDLE (0x80000000)
+#define MDIO_CONTROL_ENABLE (0x40000000)
+#define MDIO_CONTROL_FAULT_ENABLE (0x40000)
+#define MDIO_CONTROL_FAULT (0x80000)
+#define MDIO_USERACCESS0_GO (0x80000000)
+#define MDIO_USERACCESS0_WRITE_READ (0x0)
+#define MDIO_USERACCESS0_WRITE_WRITE (0x40000000)
+#define MDIO_USERACCESS0_ACK (0x20000000)
+
+/* Ethernet MAC Registers Structure */
+typedef struct {
+ dv_reg TXIDVER;
+ dv_reg TXCONTROL;
+ dv_reg TXTEARDOWN;
+ u_int8_t RSVD0[4];
+ dv_reg RXIDVER;
+ dv_reg RXCONTROL;
+ dv_reg RXTEARDOWN;
+ u_int8_t RSVD1[100];
+ dv_reg TXINTSTATRAW;
+ dv_reg TXINTSTATMASKED;
+ dv_reg TXINTMASKSET;
+ dv_reg TXINTMASKCLEAR;
+ dv_reg MACINVECTOR;
+ u_int8_t RSVD2[12];
+ dv_reg RXINTSTATRAW;
+ dv_reg RXINTSTATMASKED;
+ dv_reg RXINTMASKSET;
+ dv_reg RXINTMASKCLEAR;
+ dv_reg MACINTSTATRAW;
+ dv_reg MACINTSTATMASKED;
+ dv_reg MACINTMASKSET;
+ dv_reg MACINTMASKCLEAR;
+ u_int8_t RSVD3[64];
+ dv_reg RXMBPENABLE;
+ dv_reg RXUNICASTSET;
+ dv_reg RXUNICASTCLEAR;
+ dv_reg RXMAXLEN;
+ dv_reg RXBUFFEROFFSET;
+ dv_reg RXFILTERLOWTHRESH;
+ u_int8_t RSVD4[8];
+ dv_reg RX0FLOWTHRESH;
+ dv_reg RX1FLOWTHRESH;
+ dv_reg RX2FLOWTHRESH;
+ dv_reg RX3FLOWTHRESH;
+ dv_reg RX4FLOWTHRESH;
+ dv_reg RX5FLOWTHRESH;
+ dv_reg RX6FLOWTHRESH;
+ dv_reg RX7FLOWTHRESH;
+ dv_reg RX0FREEBUFFER;
+ dv_reg RX1FREEBUFFER;
+ dv_reg RX2FREEBUFFER;
+ dv_reg RX3FREEBUFFER;
+ dv_reg RX4FREEBUFFER;
+ dv_reg RX5FREEBUFFER;
+ dv_reg RX6FREEBUFFER;
+ dv_reg RX7FREEBUFFER;
+ dv_reg MACCONTROL;
+ dv_reg MACSTATUS;
+ dv_reg EMCONTROL;
+ dv_reg FIFOCONTROL;
+ dv_reg MACCONFIG;
+ dv_reg SOFTRESET;
+ u_int8_t RSVD5[88];
+ dv_reg MACSRCADDRLO;
+ dv_reg MACSRCADDRHI;
+ dv_reg MACHASH1;
+ dv_reg MACHASH2;
+ dv_reg BOFFTEST;
+ dv_reg TPACETEST;
+ dv_reg RXPAUSE;
+ dv_reg TXPAUSE;
+ u_int8_t RSVD6[16];
+ dv_reg RXGOODFRAMES;
+ dv_reg RXBCASTFRAMES;
+ dv_reg RXMCASTFRAMES;
+ dv_reg RXPAUSEFRAMES;
+ dv_reg RXCRCERRORS;
+ dv_reg RXALIGNCODEERRORS;
+ dv_reg RXOVERSIZED;
+ dv_reg RXJABBER;
+ dv_reg RXUNDERSIZED;
+ dv_reg RXFRAGMENTS;
+ dv_reg RXFILTERED;
+ dv_reg RXQOSFILTERED;
+ dv_reg RXOCTETS;
+ dv_reg TXGOODFRAMES;
+ dv_reg TXBCASTFRAMES;
+ dv_reg TXMCASTFRAMES;
+ dv_reg TXPAUSEFRAMES;
+ dv_reg TXDEFERRED;
+ dv_reg TXCOLLISION;
+ dv_reg TXSINGLECOLL;
+ dv_reg TXMULTICOLL;
+ dv_reg TXEXCESSIVECOLL;
+ dv_reg TXLATECOLL;
+ dv_reg TXUNDERRUN;
+ dv_reg TXCARRIERSENSE;
+ dv_reg TXOCTETS;
+ dv_reg FRAME64;
+ dv_reg FRAME65T127;
+ dv_reg FRAME128T255;
+ dv_reg FRAME256T511;
+ dv_reg FRAME512T1023;
+ dv_reg FRAME1024TUP;
+ dv_reg NETOCTETS;
+ dv_reg RXSOFOVERRUNS;
+ dv_reg RXMOFOVERRUNS;
+ dv_reg RXDMAOVERRUNS;
+ u_int8_t RSVD7[624];
+ dv_reg MACADDRLO;
+ dv_reg MACADDRHI;
+ dv_reg MACINDEX;
+ u_int8_t RSVD8[244];
+ dv_reg TX0HDP;
+ dv_reg TX1HDP;
+ dv_reg TX2HDP;
+ dv_reg TX3HDP;
+ dv_reg TX4HDP;
+ dv_reg TX5HDP;
+ dv_reg TX6HDP;
+ dv_reg TX7HDP;
+ dv_reg RX0HDP;
+ dv_reg RX1HDP;
+ dv_reg RX2HDP;
+ dv_reg RX3HDP;
+ dv_reg RX4HDP;
+ dv_reg RX5HDP;
+ dv_reg RX6HDP;
+ dv_reg RX7HDP;
+ dv_reg TX0CP;
+ dv_reg TX1CP;
+ dv_reg TX2CP;
+ dv_reg TX3CP;
+ dv_reg TX4CP;
+ dv_reg TX5CP;
+ dv_reg TX6CP;
+ dv_reg TX7CP;
+ dv_reg RX0CP;
+ dv_reg RX1CP;
+ dv_reg RX2CP;
+ dv_reg RX3CP;
+ dv_reg RX4CP;
+ dv_reg RX5CP;
+ dv_reg RX6CP;
+ dv_reg RX7CP;
+} emac_regs;
+
+/* EMAC Wrapper Registers Structure */
+typedef struct {
+#ifdef DAVINCI_EMAC_VERSION2
+ dv_reg idver;
+ dv_reg softrst;
+ dv_reg emctrl;
+ dv_reg c0rxthreshen;
+ dv_reg c0rxen;
+ dv_reg c0txen;
+ dv_reg c0miscen;
+ dv_reg c1rxthreshen;
+ dv_reg c1rxen;
+ dv_reg c1txen;
+ dv_reg c1miscen;
+ dv_reg c2rxthreshen;
+ dv_reg c2rxen;
+ dv_reg c2txen;
+ dv_reg c2miscen;
+ dv_reg c0rxthreshstat;
+ dv_reg c0rxstat;
+ dv_reg c0txstat;
+ dv_reg c0miscstat;
+ dv_reg c1rxthreshstat;
+ dv_reg c1rxstat;
+ dv_reg c1txstat;
+ dv_reg c1miscstat;
+ dv_reg c2rxthreshstat;
+ dv_reg c2rxstat;
+ dv_reg c2txstat;
+ dv_reg c2miscstat;
+ dv_reg c0rximax;
+ dv_reg c0tximax;
+ dv_reg c1rximax;
+ dv_reg c1tximax;
+ dv_reg c2rximax;
+ dv_reg c2tximax;
+#else
+ u_int8_t RSVD0[4100];
+ dv_reg EWCTL;
+ dv_reg EWINTTCNT;
+#endif
+} ewrap_regs;
+
+/* EMAC MDIO Registers Structure */
+typedef struct {
+ dv_reg VERSION;
+ dv_reg CONTROL;
+ dv_reg ALIVE;
+ dv_reg LINK;
+ dv_reg LINKINTRAW;
+ dv_reg LINKINTMASKED;
+ u_int8_t RSVD0[8];
+ dv_reg USERINTRAW;
+ dv_reg USERINTMASKED;
+ dv_reg USERINTMASKSET;
+ dv_reg USERINTMASKCLEAR;
+ u_int8_t RSVD1[80];
+ dv_reg USERACCESS0;
+ dv_reg USERPHYSEL0;
+ dv_reg USERACCESS1;
+ dv_reg USERPHYSEL1;
+} mdio_regs;
+
+int davinci_eth_phy_read(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t *data);
+int davinci_eth_phy_write(u_int8_t phy_addr, u_int8_t reg_num, u_int16_t data);
+
+typedef struct {
+ char name[64];
+ int (*init)(int phy_addr);
+ int (*is_phy_connected)(int phy_addr);
+ int (*get_link_speed)(int phy_addr);
+ int (*auto_negotiate)(int phy_addr);
+} phy_t;
+
+#endif /* _DAVINCI_EMAC_H_ */
diff --git a/roms/u-boot/drivers/net/ti/keystone_net.c b/roms/u-boot/drivers/net/ti/keystone_net.c
new file mode 100644
index 000000000..5e8f683c2
--- /dev/null
+++ b/roms/u-boot/drivers/net/ti/keystone_net.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ethernet driver for TI K2HK EVM.
+ *
+ * (C) Copyright 2012-2014
+ * Texas Instruments Incorporated, <www.ti.com>
+ */
+#include <common.h>
+#include <command.h>
+#include <console.h>
+#include <asm/global_data.h>
+#include <linux/delay.h>
+
+#include <dm.h>
+#include <dm/lists.h>
+
+#include <net.h>
+#include <phy.h>
+#include <errno.h>
+#include <miiphy.h>
+#include <malloc.h>
+#include <asm/ti-common/keystone_nav.h>
+#include <asm/ti-common/keystone_net.h>
+#include <asm/ti-common/keystone_serdes.h>
+#include <asm/arch/psc_defs.h>
+#include <linux/libfdt.h>
+
+#include "cpsw_mdio.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#ifdef KEYSTONE2_EMAC_GIG_ENABLE
+#define emac_gigabit_enable(x) keystone2_eth_gigabit_enable(x)
+#else
+#define emac_gigabit_enable(x) /* no gigabit to enable */
+#endif
+
+#define RX_BUFF_NUMS 24
+#define RX_BUFF_LEN 1520
+#define MAX_SIZE_STREAM_BUFFER RX_BUFF_LEN
+#define SGMII_ANEG_TIMEOUT 4000
+
+static u8 rx_buffs[RX_BUFF_NUMS * RX_BUFF_LEN] __aligned(16);
+
+enum link_type {
+ LINK_TYPE_SGMII_MAC_TO_MAC_AUTO = 0,
+ LINK_TYPE_SGMII_MAC_TO_PHY_MODE = 1,
+ LINK_TYPE_SGMII_MAC_TO_MAC_FORCED_MODE = 2,
+ LINK_TYPE_SGMII_MAC_TO_FIBRE_MODE = 3,
+ LINK_TYPE_SGMII_MAC_TO_PHY_NO_MDIO_MODE = 4,
+ LINK_TYPE_RGMII_LINK_MAC_PHY = 5,
+ LINK_TYPE_RGMII_LINK_MAC_MAC_FORCED = 6,
+ LINK_TYPE_RGMII_LINK_MAC_PHY_NO_MDIO = 7,
+ LINK_TYPE_10G_MAC_TO_PHY_MODE = 10,
+ LINK_TYPE_10G_MAC_TO_MAC_FORCED_MODE = 11,
+};
+
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+#ifdef CONFIG_KSNET_NETCP_V1_0
+
+#define EMAC_EMACSW_BASE_OFS 0x90800
+#define EMAC_EMACSW_PORT_BASE_OFS (EMAC_EMACSW_BASE_OFS + 0x60)
+
+/* CPSW Switch slave registers */
+#define CPGMACSL_REG_SA_LO 0x10
+#define CPGMACSL_REG_SA_HI 0x14
+
+#define DEVICE_EMACSW_BASE(base, x) ((base) + EMAC_EMACSW_PORT_BASE_OFS + \
+ (x) * 0x30)
+
+#elif defined(CONFIG_KSNET_NETCP_V1_5)
+
+#define EMAC_EMACSW_PORT_BASE_OFS 0x222000
+
+/* CPSW Switch slave registers */
+#define CPGMACSL_REG_SA_LO 0x308
+#define CPGMACSL_REG_SA_HI 0x30c
+
+#define DEVICE_EMACSW_BASE(base, x) ((base) + EMAC_EMACSW_PORT_BASE_OFS + \
+ (x) * 0x1000)
+
+#endif
+
+
+struct ks2_eth_priv {
+ struct udevice *dev;
+ struct phy_device *phydev;
+ struct mii_dev *mdio_bus;
+ int phy_addr;
+ phy_interface_t phy_if;
+ int phy_of_handle;
+ int sgmii_link_type;
+ void *mdio_base;
+ struct rx_buff_desc net_rx_buffs;
+ struct pktdma_cfg *netcp_pktdma;
+ void *hd;
+ int slave_port;
+ enum link_type link_type;
+ bool emac_open;
+ bool has_mdio;
+};
+
+static void __attribute__((unused))
+ keystone2_eth_gigabit_enable(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ /*
+ * Check whether the negotiated link is gigabit;
+ * if so, enable gigabit mode in the MAC.
+ */
+ if (priv->has_mdio) {
+ if (priv->phydev->speed != 1000)
+ return;
+ }
+
+ writel(readl(DEVICE_EMACSL_BASE(priv->slave_port - 1) +
+ CPGMACSL_REG_CTL) |
+ EMAC_MACCONTROL_GIGFORCE | EMAC_MACCONTROL_GIGABIT_ENABLE,
+ DEVICE_EMACSL_BASE(priv->slave_port - 1) + CPGMACSL_REG_CTL);
+}
+
+#ifdef CONFIG_SOC_K2G
+int keystone_rgmii_config(struct phy_device *phy_dev)
+{
+ unsigned int i, status;
+
+ i = 0;
+ do {
+ if (i > SGMII_ANEG_TIMEOUT) {
+ puts(" TIMEOUT !\n");
+ phy_dev->link = 0;
+ return 0;
+ }
+
+ if (ctrlc()) {
+ puts("user interrupt!\n");
+ phy_dev->link = 0;
+ return -EINTR;
+ }
+
+ if ((i++ % 500) == 0)
+ printf(".");
+
+ udelay(1000); /* 1 ms */
+ status = readl(RGMII_STATUS_REG);
+ } while (!(status & RGMII_REG_STATUS_LINK));
+
+ puts(" done\n");
+
+ return 0;
+}
+#else
+int keystone_sgmii_config(struct phy_device *phy_dev, int port, int interface)
+{
+ unsigned int i, status, mask;
+ unsigned int mr_adv_ability, control;
+
+ switch (interface) {
+ case SGMII_LINK_MAC_MAC_AUTONEG:
+ mr_adv_ability = (SGMII_REG_MR_ADV_ENABLE |
+ SGMII_REG_MR_ADV_LINK |
+ SGMII_REG_MR_ADV_FULL_DUPLEX |
+ SGMII_REG_MR_ADV_GIG_MODE);
+ control = (SGMII_REG_CONTROL_MASTER |
+ SGMII_REG_CONTROL_AUTONEG);
+
+ break;
+ case SGMII_LINK_MAC_PHY:
+ case SGMII_LINK_MAC_PHY_FORCED:
+ mr_adv_ability = SGMII_REG_MR_ADV_ENABLE;
+ control = SGMII_REG_CONTROL_AUTONEG;
+
+ break;
+ case SGMII_LINK_MAC_MAC_FORCED:
+ mr_adv_ability = (SGMII_REG_MR_ADV_ENABLE |
+ SGMII_REG_MR_ADV_LINK |
+ SGMII_REG_MR_ADV_FULL_DUPLEX |
+ SGMII_REG_MR_ADV_GIG_MODE);
+ control = SGMII_REG_CONTROL_MASTER;
+
+ break;
+ case SGMII_LINK_MAC_FIBER:
+ mr_adv_ability = 0x20;
+ control = SGMII_REG_CONTROL_AUTONEG;
+
+ break;
+ default:
+ mr_adv_ability = SGMII_REG_MR_ADV_ENABLE;
+ control = SGMII_REG_CONTROL_AUTONEG;
+ }
+
+ __raw_writel(0, SGMII_CTL_REG(port));
+
+ /*
+ * Wait for the SerDes PLL to lock,
+ * but don't hang if lock is never achieved
+ */
+ for (i = 0; i < 1000; i++) {
+ udelay(2000);
+ status = __raw_readl(SGMII_STATUS_REG(port));
+ if ((status & SGMII_REG_STATUS_LOCK) != 0)
+ break;
+ }
+
+ __raw_writel(mr_adv_ability, SGMII_MRADV_REG(port));
+ __raw_writel(control, SGMII_CTL_REG(port));
+
+
+ mask = SGMII_REG_STATUS_LINK;
+
+ if (control & SGMII_REG_CONTROL_AUTONEG)
+ mask |= SGMII_REG_STATUS_AUTONEG;
+
+ status = __raw_readl(SGMII_STATUS_REG(port));
+ if ((status & mask) == mask)
+ return 0;
+
+ printf("\n%s Waiting for SGMII auto negotiation to complete",
+ phy_dev->dev->name);
+ while ((status & mask) != mask) {
+ /*
+ * Timeout reached ?
+ */
+ if (i > SGMII_ANEG_TIMEOUT) {
+ puts(" TIMEOUT !\n");
+ phy_dev->link = 0;
+ return 0;
+ }
+
+ if (ctrlc()) {
+ puts("user interrupt!\n");
+ phy_dev->link = 0;
+ return -EINTR;
+ }
+
+ if ((i++ % 500) == 0)
+ printf(".");
+
+ udelay(1000); /* 1 ms */
+ status = __raw_readl(SGMII_STATUS_REG(port));
+ }
+ puts(" done\n");
+
+ return 0;
+}
+#endif
+
+int mac_sl_reset(u32 port)
+{
+ u32 i, v;
+
+ if (port >= DEVICE_N_GMACSL_PORTS)
+ return GMACSL_RET_INVALID_PORT;
+
+ /* Set the soft reset bit */
+ writel(CPGMAC_REG_RESET_VAL_RESET,
+ DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RESET);
+
+ /* Wait for the bit to clear */
+ for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+ v = readl(DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RESET);
+ if ((v & CPGMAC_REG_RESET_VAL_RESET_MASK) !=
+ CPGMAC_REG_RESET_VAL_RESET)
+ return GMACSL_RET_OK;
+ }
+
+ /* Timeout on the reset */
+ return GMACSL_RET_WARN_RESET_INCOMPLETE;
+}
+
+int mac_sl_config(u_int16_t port, struct mac_sl_cfg *cfg)
+{
+ u32 v, i;
+ int ret = GMACSL_RET_OK;
+
+ if (port >= DEVICE_N_GMACSL_PORTS)
+ return GMACSL_RET_INVALID_PORT;
+
+ if (cfg->max_rx_len > CPGMAC_REG_MAXLEN_LEN) {
+ cfg->max_rx_len = CPGMAC_REG_MAXLEN_LEN;
+ ret = GMACSL_RET_WARN_MAXLEN_TOO_BIG;
+ }
+
+ /* Must wait if the device is undergoing reset */
+ for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+ v = readl(DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RESET);
+ if ((v & CPGMAC_REG_RESET_VAL_RESET_MASK) !=
+ CPGMAC_REG_RESET_VAL_RESET)
+ break;
+ }
+
+ if (i == DEVICE_EMACSL_RESET_POLL_COUNT)
+ return GMACSL_RET_CONFIG_FAIL_RESET_ACTIVE;
+
+ writel(cfg->max_rx_len, DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_MAXLEN);
+ writel(cfg->ctl, DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_CTL);
+
+#ifndef CONFIG_SOC_K2HK
+ /* Map RX packet flow priority to 0 */
+ writel(0, DEVICE_EMACSL_BASE(port) + CPGMACSL_REG_RX_PRI_MAP);
+#endif
+
+ return ret;
+}
+
+int ethss_config(u32 ctl, u32 max_pkt_size)
+{
+ u32 i;
+
+ /* Max length register */
+ writel(max_pkt_size, DEVICE_CPSW_BASE + CPSW_REG_MAXLEN);
+
+ /* Control register */
+ writel(ctl, DEVICE_CPSW_BASE + CPSW_REG_CTL);
+
+ /* All statistics enabled by default */
+ writel(CPSW_REG_VAL_STAT_ENABLE_ALL,
+ DEVICE_CPSW_BASE + CPSW_REG_STAT_PORT_EN);
+
+ /* Reset and enable the ALE */
+ writel(CPSW_REG_VAL_ALE_CTL_RESET_AND_ENABLE |
+ CPSW_REG_VAL_ALE_CTL_BYPASS,
+ DEVICE_CPSW_BASE + CPSW_REG_ALE_CONTROL);
+
+ /* All ports put into forward mode */
+ for (i = 0; i < DEVICE_CPSW_NUM_PORTS; i++)
+ writel(CPSW_REG_VAL_PORTCTL_FORWARD_MODE,
+ DEVICE_CPSW_BASE + CPSW_REG_ALE_PORTCTL(i));
+
+ return 0;
+}
+
+int ethss_start(void)
+{
+ int i;
+ struct mac_sl_cfg cfg;
+
+ cfg.max_rx_len = MAX_SIZE_STREAM_BUFFER;
+ cfg.ctl = GMACSL_ENABLE | GMACSL_RX_ENABLE_EXT_CTL;
+
+ for (i = 0; i < DEVICE_N_GMACSL_PORTS; i++) {
+ mac_sl_reset(i);
+ mac_sl_config(i, &cfg);
+ }
+
+ return 0;
+}
+
+int ethss_stop(void)
+{
+ int i;
+
+ for (i = 0; i < DEVICE_N_GMACSL_PORTS; i++)
+ mac_sl_reset(i);
+
+ return 0;
+}
+
+struct ks2_serdes ks2_serdes_sgmii_156p25mhz = {
+ .clk = SERDES_CLOCK_156P25M,
+ .rate = SERDES_RATE_5G,
+ .rate_mode = SERDES_QUARTER_RATE,
+ .intf = SERDES_PHY_SGMII,
+ .loopback = 0,
+};
+
+#ifndef CONFIG_SOC_K2G
+static void keystone2_net_serdes_setup(void)
+{
+ ks2_serdes_init(CONFIG_KSNET_SERDES_SGMII_BASE,
+ &ks2_serdes_sgmii_156p25mhz,
+ CONFIG_KSNET_SERDES_LANES_PER_SGMII);
+
+#if defined(CONFIG_SOC_K2E) || defined(CONFIG_SOC_K2L)
+ ks2_serdes_init(CONFIG_KSNET_SERDES_SGMII2_BASE,
+ &ks2_serdes_sgmii_156p25mhz,
+ CONFIG_KSNET_SERDES_LANES_PER_SGMII);
+#endif
+
+ /* wait till setup */
+ udelay(5000);
+}
+#endif
+
+static int ks2_eth_start(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+#ifdef CONFIG_SOC_K2G
+ keystone_rgmii_config(priv->phydev);
+#else
+ keystone_sgmii_config(priv->phydev, priv->slave_port - 1,
+ priv->sgmii_link_type);
+#endif
+
+ udelay(10000);
+
+ /* On chip switch configuration */
+ ethss_config(target_get_switch_ctl(), SWITCH_MAX_PKT_SIZE);
+
+ qm_init();
+
+ if (ksnav_init(priv->netcp_pktdma, &priv->net_rx_buffs)) {
+ pr_err("ksnav_init failed\n");
+ goto err_knav_init;
+ }
+
+ /*
+ * Streaming switch configuration. If not present this
+ * statement is defined to void in target.h.
+ * If present this is usually defined to a series of register writes
+ */
+ hw_config_streaming_switch();
+
+ if (priv->has_mdio) {
+ phy_startup(priv->phydev);
+ if (priv->phydev->link == 0) {
+ pr_err("phy startup failed\n");
+ goto err_phy_start;
+ }
+ }
+
+ emac_gigabit_enable(dev);
+
+ ethss_start();
+
+ priv->emac_open = true;
+
+ return 0;
+
+err_phy_start:
+ ksnav_close(priv->netcp_pktdma);
+err_knav_init:
+ qm_close();
+
+ return -EFAULT;
+}
+
+static int ks2_eth_send(struct udevice *dev, void *packet, int length)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ genphy_update_link(priv->phydev);
+ if (priv->phydev->link == 0)
+ return -1;
+
+ if (length < EMAC_MIN_ETHERNET_PKT_SIZE)
+ length = EMAC_MIN_ETHERNET_PKT_SIZE;
+
+ return ksnav_send(priv->netcp_pktdma, (u32 *)packet,
+ length, (priv->slave_port) << 16);
+}
+
+static int ks2_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ int pkt_size;
+ u32 *pkt = NULL;
+
+ priv->hd = ksnav_recv(priv->netcp_pktdma, &pkt, &pkt_size);
+ if (priv->hd == NULL)
+ return -EAGAIN;
+
+ *packetp = (uchar *)pkt;
+
+ return pkt_size;
+}
+
+static int ks2_eth_free_pkt(struct udevice *dev, uchar *packet,
+ int length)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ ksnav_release_rxhd(priv->netcp_pktdma, priv->hd);
+
+ return 0;
+}
+
+static void ks2_eth_stop(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ if (!priv->emac_open)
+ return;
+ ethss_stop();
+
+ ksnav_close(priv->netcp_pktdma);
+ qm_close();
+ phy_shutdown(priv->phydev);
+ priv->emac_open = false;
+}
+
+int ks2_eth_read_rom_hwaddr(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ u32 maca = 0;
+ u32 macb = 0;
+
+ /* Read the e-fuse mac address */
+ if (priv->slave_port == 1) {
+ maca = __raw_readl(MAC_ID_BASE_ADDR);
+ macb = __raw_readl(MAC_ID_BASE_ADDR + 4);
+ }
+
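+ /*
+ * The e-fuse words pack the address MSB first:
+ * macb supplies bytes 0-1, maca bytes 2-5.
+ */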
+ pdata->enetaddr[0] = (macb >> 8) & 0xff;
+ pdata->enetaddr[1] = (macb >> 0) & 0xff;
+ pdata->enetaddr[2] = (maca >> 24) & 0xff;
+ pdata->enetaddr[3] = (maca >> 16) & 0xff;
+ pdata->enetaddr[4] = (maca >> 8) & 0xff;
+ pdata->enetaddr[5] = (maca >> 0) & 0xff;
+
+ return 0;
+}
+
+int ks2_eth_write_hwaddr(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+
+ writel(mac_hi(pdata->enetaddr),
+ DEVICE_EMACSW_BASE(pdata->iobase, priv->slave_port - 1) +
+ CPGMACSL_REG_SA_HI);
+ writel(mac_lo(pdata->enetaddr),
+ DEVICE_EMACSW_BASE(pdata->iobase, priv->slave_port - 1) +
+ CPGMACSL_REG_SA_LO);
+
+ return 0;
+}
+
+static int ks2_eth_probe(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct mii_dev *mdio_bus;
+
+ priv->dev = dev;
+ priv->emac_open = false;
+
+ /* These clock enables have to be moved to a common location */
+ if (cpu_is_k2g())
+ writel(KS2_ETHERNET_RGMII, KS2_ETHERNET_CFG);
+
+ /* By default, select PA PLL clock as PA clock source */
+#ifndef CONFIG_SOC_K2G
+ if (psc_enable_module(KS2_LPSC_PA))
+ return -EACCES;
+#endif
+ if (psc_enable_module(KS2_LPSC_CPGMAC))
+ return -EACCES;
+ if (psc_enable_module(KS2_LPSC_CRYPTO))
+ return -EACCES;
+
+ if (cpu_is_k2e() || cpu_is_k2l())
+ pll_pa_clk_sel();
+
+ priv->net_rx_buffs.buff_ptr = rx_buffs;
+ priv->net_rx_buffs.num_buffs = RX_BUFF_NUMS;
+ priv->net_rx_buffs.buff_len = RX_BUFF_LEN;
+
+ if (priv->slave_port == 1) {
+#ifndef CONFIG_SOC_K2G
+ keystone2_net_serdes_setup();
+#endif
+ /*
+ * Register the MDIO bus for slave 0 only; the other
+ * slaves re-use the same bus
+ */
+ mdio_bus = cpsw_mdio_init("ethernet-mdio",
+ (u32)priv->mdio_base,
+ EMAC_MDIO_CLOCK_FREQ,
+ EMAC_MDIO_BUS_FREQ);
+ if (!mdio_bus) {
+ pr_err("MDIO alloc failed\n");
+ return -ENOMEM;
+ }
+ priv->mdio_bus = mdio_bus;
+ } else {
+ /* Get the MDIO bus from slave 0 device */
+ struct ks2_eth_priv *parent_priv;
+
+ parent_priv = dev_get_priv(dev->parent);
+ priv->mdio_bus = parent_priv->mdio_bus;
+ priv->mdio_base = parent_priv->mdio_base;
+ }
+
+ priv->netcp_pktdma = &netcp_pktdma;
+
+ if (priv->has_mdio) {
+ priv->phydev = phy_connect(priv->mdio_bus, priv->phy_addr,
+ dev, priv->phy_if);
+#ifdef CONFIG_DM_ETH
+ if (priv->phy_of_handle)
+ priv->phydev->node = offset_to_ofnode(priv->phy_of_handle);
+#endif
+ phy_config(priv->phydev);
+ }
+
+ return 0;
+}
+
+int ks2_eth_remove(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+
+ cpsw_mdio_free(priv->mdio_bus);
+
+ return 0;
+}
+
+static const struct eth_ops ks2_eth_ops = {
+ .start = ks2_eth_start,
+ .send = ks2_eth_send,
+ .recv = ks2_eth_recv,
+ .free_pkt = ks2_eth_free_pkt,
+ .stop = ks2_eth_stop,
+ .read_rom_hwaddr = ks2_eth_read_rom_hwaddr,
+ .write_hwaddr = ks2_eth_write_hwaddr,
+};
+
+static int ks2_eth_bind_slaves(struct udevice *dev, int gbe, int *gbe_0)
+{
+ const void *fdt = gd->fdt_blob;
+ struct udevice *sl_dev;
+ int interfaces;
+ int sec_slave;
+ int slave;
+ int ret;
+ char *slave_name;
+
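+ /*
+ * Walk the "interfaces" (and "secondary-slave-ports") subnodes and
+ * bind each slave port as its own ethernet device; slave port 0 is
+ * the device currently being probed.
+ */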
+ interfaces = fdt_subnode_offset(fdt, gbe, "interfaces");
+ fdt_for_each_subnode(slave, fdt, interfaces) {
+ int slave_no;
+
+ slave_no = fdtdec_get_int(fdt, slave, "slave-port", -ENOENT);
+ if (slave_no == -ENOENT)
+ continue;
+
+ if (slave_no == 0) {
+ /* This is the current eth device */
+ *gbe_0 = slave;
+ } else {
+ /* Slave devices to be registered */
+ slave_name = malloc(20);
+ snprintf(slave_name, 20, "netcp@slave-%d", slave_no);
+ ret = device_bind_driver_to_node(dev, "eth_ks2_sl",
+ slave_name, offset_to_ofnode(slave),
+ &sl_dev);
+ if (ret) {
+ pr_err("ks2_net - not able to bind slave interfaces\n");
+ return ret;
+ }
+ }
+ }
+
+ sec_slave = fdt_subnode_offset(fdt, gbe, "secondary-slave-ports");
+ fdt_for_each_subnode(slave, fdt, sec_slave) {
+ int slave_no;
+
+ slave_no = fdtdec_get_int(fdt, slave, "slave-port", -ENOENT);
+ if (slave_no == -ENOENT)
+ continue;
+
+ /* Slave devices to be registered */
+ slave_name = malloc(20);
+ snprintf(slave_name, 20, "netcp@slave-%d", slave_no);
+ ret = device_bind_driver_to_node(dev, "eth_ks2_sl", slave_name,
+ offset_to_ofnode(slave), &sl_dev);
+ if (ret) {
+ pr_err("ks2_net - not able to bind slave interfaces\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ks2_eth_parse_slave_interface(int netcp, int slave,
+ struct ks2_eth_priv *priv,
+ struct eth_pdata *pdata)
+{
+ const void *fdt = gd->fdt_blob;
+ int mdio;
+ int phy;
+ int dma_count;
+ u32 dma_channel[8];
+ const char *phy_mode;
+
+ priv->slave_port = fdtdec_get_int(fdt, slave, "slave-port", -1);
+ priv->net_rx_buffs.rx_flow = priv->slave_port * 8;
+
+ /* U-Boot slave port number starts with 1 instead of 0 */
+ priv->slave_port += 1;
+
+ dma_count = fdtdec_get_int_array_count(fdt, netcp,
+ "ti,navigator-dmas",
+ dma_channel, 8);
+
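+ /*
+ * When the netcp node carries "ti,navigator-dmas", take the RX flow id
+ * for this slave from the matching DMA entry instead of the default
+ * slave_port * 8 value set above.
+ */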
+ if (dma_count > (2 * priv->slave_port)) {
+ int dma_idx;
+
+ dma_idx = priv->slave_port * 2 - 1;
+ priv->net_rx_buffs.rx_flow = dma_channel[dma_idx];
+ }
+
+ priv->link_type = fdtdec_get_int(fdt, slave, "link-interface", -1);
+
+ phy = fdtdec_lookup_phandle(fdt, slave, "phy-handle");
+
+ if (phy >= 0) {
+ priv->phy_of_handle = phy;
+ priv->phy_addr = fdtdec_get_int(fdt, phy, "reg", -1);
+
+ mdio = fdt_parent_offset(fdt, phy);
+ if (mdio < 0) {
+ pr_err("mdio dt not found\n");
+ return -ENODEV;
+ }
+ priv->mdio_base = (void *)fdtdec_get_addr(fdt, mdio, "reg");
+ }
+
+ if (priv->link_type == LINK_TYPE_SGMII_MAC_TO_PHY_MODE) {
+ priv->phy_if = PHY_INTERFACE_MODE_SGMII;
+ pdata->phy_interface = priv->phy_if;
+ priv->sgmii_link_type = SGMII_LINK_MAC_PHY;
+ priv->has_mdio = true;
+ } else if (priv->link_type == LINK_TYPE_RGMII_LINK_MAC_PHY) {
+ phy_mode = fdt_getprop(fdt, slave, "phy-mode", NULL);
+ if (phy_mode) {
+ priv->phy_if = phy_get_interface_by_name(phy_mode);
+ if (priv->phy_if != PHY_INTERFACE_MODE_RGMII &&
+ priv->phy_if != PHY_INTERFACE_MODE_RGMII_ID &&
+ priv->phy_if != PHY_INTERFACE_MODE_RGMII_RXID &&
+ priv->phy_if != PHY_INTERFACE_MODE_RGMII_TXID) {
+ pr_err("invalid phy-mode\n");
+ return -EINVAL;
+ }
+ } else {
+ priv->phy_if = PHY_INTERFACE_MODE_RGMII;
+ }
+ pdata->phy_interface = priv->phy_if;
+ priv->has_mdio = true;
+ }
+
+ return 0;
+}
+
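+/*
+ * A slave node sits four levels below the netcp node
+ * (netcp -> netcp-devices -> gbe -> interfaces/secondary-slave-ports ->
+ * slave), so walk up the parents to reach the netcp node before parsing.
+ */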
+static int ks2_sl_eth_of_to_plat(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ const void *fdt = gd->fdt_blob;
+ int slave = dev_of_offset(dev);
+ int interfaces;
+ int gbe;
+ int netcp_devices;
+ int netcp;
+
+ interfaces = fdt_parent_offset(fdt, slave);
+ gbe = fdt_parent_offset(fdt, interfaces);
+ netcp_devices = fdt_parent_offset(fdt, gbe);
+ netcp = fdt_parent_offset(fdt, netcp_devices);
+
+ ks2_eth_parse_slave_interface(netcp, slave, priv, pdata);
+
+ pdata->iobase = fdtdec_get_addr(fdt, netcp, "reg");
+
+ return 0;
+}
+
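+/*
+ * The top-level netcp device describes slave 0: locate the gbe node,
+ * bind the remaining slave ports as separate devices and parse the
+ * slave 0 interface for this device.
+ */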
+static int ks2_eth_of_to_plat(struct udevice *dev)
+{
+ struct ks2_eth_priv *priv = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ const void *fdt = gd->fdt_blob;
+ int gbe_0 = -ENODEV;
+ int netcp_devices;
+ int gbe;
+
+ netcp_devices = fdt_subnode_offset(fdt, dev_of_offset(dev),
+ "netcp-devices");
+ gbe = fdt_subnode_offset(fdt, netcp_devices, "gbe");
+
+ ks2_eth_bind_slaves(dev, gbe, &gbe_0);
+
+ ks2_eth_parse_slave_interface(dev_of_offset(dev), gbe_0, priv, pdata);
+
+ pdata->iobase = dev_read_addr(dev);
+
+ return 0;
+}
+
+static const struct udevice_id ks2_eth_ids[] = {
+ { .compatible = "ti,netcp-1.0" },
+ { }
+};
+
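+/*
+ * The slave driver has no of_match table; it is only bound explicitly by
+ * ks2_eth_bind_slaves() for the additional slave ports.
+ */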
+U_BOOT_DRIVER(eth_ks2_slave) = {
+ .name = "eth_ks2_sl",
+ .id = UCLASS_ETH,
+ .of_to_plat = ks2_sl_eth_of_to_plat,
+ .probe = ks2_eth_probe,
+ .remove = ks2_eth_remove,
+ .ops = &ks2_eth_ops,
+ .priv_auto = sizeof(struct ks2_eth_priv),
+ .plat_auto = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};
+
+U_BOOT_DRIVER(eth_ks2) = {
+ .name = "eth_ks2",
+ .id = UCLASS_ETH,
+ .of_match = ks2_eth_ids,
+ .of_to_plat = ks2_eth_of_to_plat,
+ .probe = ks2_eth_probe,
+ .remove = ks2_eth_remove,
+ .ops = &ks2_eth_ops,
+ .priv_auto = sizeof(struct ks2_eth_priv),
+ .plat_auto = sizeof(struct eth_pdata),
+ .flags = DM_FLAG_ALLOC_PRIV_DMA,
+};