Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/designware.c          |  10
-rw-r--r-- | drivers/net/dwc_eth_qos.c         | 110
-rw-r--r-- | drivers/net/ti/Kconfig            |  13
-rw-r--r-- | drivers/net/ti/Makefile           |   1
-rw-r--r-- | drivers/net/ti/am65-cpsw-nuss.c   |   4
-rw-r--r-- | drivers/net/ti/icss_mii_rt.h      | 192
-rw-r--r-- | drivers/net/ti/icssg_classifier.c | 376
-rw-r--r-- | drivers/net/ti/icssg_config.c     | 474
-rw-r--r-- | drivers/net/ti/icssg_config.h     | 195
-rw-r--r-- | drivers/net/ti/icssg_prueth.c     | 691
-rw-r--r-- | drivers/net/ti/icssg_prueth.h     |  97
-rw-r--r-- | drivers/net/ti/icssg_queues.c     |  51
-rw-r--r-- | drivers/net/ti/icssg_switch_map.h | 209
13 files changed, 2366 insertions, 57 deletions
diff --git a/drivers/net/designware.c b/drivers/net/designware.c index c222197b114..4c1642b29a8 100644 --- a/drivers/net/designware.c +++ b/drivers/net/designware.c @@ -352,6 +352,11 @@ static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p, (phydev->duplex) ? "full" : "half", (phydev->port == PORT_FIBRE) ? ", fiber mode" : ""); +#ifdef CONFIG_ARCH_NPCM8XX + /* Pass all Multicast Frames */ + setbits_le32(&mac_p->framefilt, BIT(4)); + +#endif return 0; } @@ -554,6 +559,11 @@ static int _dw_free_pkt(struct dw_eth_dev *priv) ulong desc_start = (ulong)desc_p; ulong desc_end = desc_start + roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN); + ulong data_start = desc_p->dmamac_addr; + ulong data_end = data_start + roundup(CFG_ETH_BUFSIZE, ARCH_DMA_MINALIGN); + + /* Invalidate the descriptor buffer data */ + invalidate_dcache_range(data_start, data_end); /* * Make the current descriptor valid again and go to diff --git a/drivers/net/dwc_eth_qos.c b/drivers/net/dwc_eth_qos.c index 9b3bce1dc87..67d80d987ff 100644 --- a/drivers/net/dwc_eth_qos.c +++ b/drivers/net/dwc_eth_qos.c @@ -159,7 +159,7 @@ static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, ret = eqos_mdio_wait_idle(eqos); if (ret) { - pr_err("MDIO not idle at entry"); + pr_err("MDIO not idle at entry\n"); return ret; } @@ -179,7 +179,7 @@ static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad, ret = eqos_mdio_wait_idle(eqos); if (ret) { - pr_err("MDIO read didn't complete"); + pr_err("MDIO read didn't complete\n"); return ret; } @@ -203,7 +203,7 @@ static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, ret = eqos_mdio_wait_idle(eqos); if (ret) { - pr_err("MDIO not idle at entry"); + pr_err("MDIO not idle at entry\n"); return ret; } @@ -225,7 +225,7 @@ static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad, ret = eqos_mdio_wait_idle(eqos); if (ret) { - pr_err("MDIO read didn't complete"); + pr_err("MDIO read didn't complete\n"); return ret; } @@ -242,37 +242,37 @@ static int eqos_start_clks_tegra186(struct udevice *dev) ret = clk_enable(&eqos->clk_slave_bus); if (ret < 0) { - pr_err("clk_enable(clk_slave_bus) failed: %d", ret); + pr_err("clk_enable(clk_slave_bus) failed: %d\n", ret); goto err; } ret = clk_enable(&eqos->clk_master_bus); if (ret < 0) { - pr_err("clk_enable(clk_master_bus) failed: %d", ret); + pr_err("clk_enable(clk_master_bus) failed: %d\n", ret); goto err_disable_clk_slave_bus; } ret = clk_enable(&eqos->clk_rx); if (ret < 0) { - pr_err("clk_enable(clk_rx) failed: %d", ret); + pr_err("clk_enable(clk_rx) failed: %d\n", ret); goto err_disable_clk_master_bus; } ret = clk_enable(&eqos->clk_ptp_ref); if (ret < 0) { - pr_err("clk_enable(clk_ptp_ref) failed: %d", ret); + pr_err("clk_enable(clk_ptp_ref) failed: %d\n", ret); goto err_disable_clk_rx; } ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000); if (ret < 0) { - pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret); + pr_err("clk_set_rate(clk_ptp_ref) failed: %d\n", ret); goto err_disable_clk_ptp_ref; } ret = clk_enable(&eqos->clk_tx); if (ret < 0) { - pr_err("clk_enable(clk_tx) failed: %d", ret); + pr_err("clk_enable(clk_tx) failed: %d\n", ret); goto err_disable_clk_ptp_ref; } #endif @@ -305,26 +305,26 @@ static int eqos_start_clks_stm32(struct udevice *dev) ret = clk_enable(&eqos->clk_master_bus); if (ret < 0) { - pr_err("clk_enable(clk_master_bus) failed: %d", ret); + pr_err("clk_enable(clk_master_bus) failed: %d\n", ret); goto err; } ret = 
clk_enable(&eqos->clk_rx); if (ret < 0) { - pr_err("clk_enable(clk_rx) failed: %d", ret); + pr_err("clk_enable(clk_rx) failed: %d\n", ret); goto err_disable_clk_master_bus; } ret = clk_enable(&eqos->clk_tx); if (ret < 0) { - pr_err("clk_enable(clk_tx) failed: %d", ret); + pr_err("clk_enable(clk_tx) failed: %d\n", ret); goto err_disable_clk_rx; } if (clk_valid(&eqos->clk_ck) && !eqos->clk_ck_enabled) { ret = clk_enable(&eqos->clk_ck); if (ret < 0) { - pr_err("clk_enable(clk_ck) failed: %d", ret); + pr_err("clk_enable(clk_ck) failed: %d\n", ret); goto err_disable_clk_tx; } eqos->clk_ck_enabled = true; @@ -390,7 +390,7 @@ static int eqos_start_resets_tegra186(struct udevice *dev) ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1); if (ret < 0) { - pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret); + pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d\n", ret); return ret; } @@ -398,13 +398,13 @@ static int eqos_start_resets_tegra186(struct udevice *dev) ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0); if (ret < 0) { - pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret); + pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d\n", ret); return ret; } ret = reset_assert(&eqos->reset_ctl); if (ret < 0) { - pr_err("reset_assert() failed: %d", ret); + pr_err("reset_assert() failed: %d\n", ret); return ret; } @@ -412,7 +412,7 @@ static int eqos_start_resets_tegra186(struct udevice *dev) ret = reset_deassert(&eqos->reset_ctl); if (ret < 0) { - pr_err("reset_deassert() failed: %d", ret); + pr_err("reset_deassert() failed: %d\n", ret); return ret; } @@ -448,14 +448,14 @@ static int eqos_calibrate_pads_tegra186(struct udevice *dev) ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false); if (ret) { - pr_err("calibrate didn't start"); + pr_err("calibrate didn't start\n"); goto failed; } ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status, EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false); if (ret) { - pr_err("calibrate didn't finish"); + pr_err("calibrate didn't finish\n"); goto failed; } @@ -586,13 +586,13 @@ static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev) rate = 2.5 * 1000 * 1000; break; default: - pr_err("invalid speed %d", eqos->phy->speed); + pr_err("invalid speed %d\n", eqos->phy->speed); return -EINVAL; } ret = clk_set_rate(&eqos->clk_tx, rate); if (ret < 0) { - pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret); + pr_err("clk_set_rate(tx_clk, %lu) failed: %d\n", rate, ret); return ret; } #endif @@ -613,7 +613,7 @@ static int eqos_adjust_link(struct udevice *dev) else ret = eqos_set_half_duplex(dev); if (ret < 0) { - pr_err("eqos_set_*_duplex() failed: %d", ret); + pr_err("eqos_set_*_duplex() failed: %d\n", ret); return ret; } @@ -631,32 +631,32 @@ static int eqos_adjust_link(struct udevice *dev) ret = eqos_set_mii_speed_10(dev); break; default: - pr_err("invalid speed %d", eqos->phy->speed); + pr_err("invalid speed %d\n", eqos->phy->speed); return -EINVAL; } if (ret < 0) { - pr_err("eqos_set_*mii_speed*() failed: %d", ret); + pr_err("eqos_set_*mii_speed*() failed: %d\n", ret); return ret; } if (en_calibration) { ret = eqos->config->ops->eqos_calibrate_pads(dev); if (ret < 0) { - pr_err("eqos_calibrate_pads() failed: %d", + pr_err("eqos_calibrate_pads() failed: %d\n", ret); return ret; } } else { ret = eqos->config->ops->eqos_disable_calibration(dev); if (ret < 0) { - pr_err("eqos_disable_calibration() failed: %d", + pr_err("eqos_disable_calibration() failed: %d\n", ret); return 
ret; } } ret = eqos->config->ops->eqos_set_tx_clk_speed(dev); if (ret < 0) { - pr_err("eqos_set_tx_clk_speed() failed: %d", ret); + pr_err("eqos_set_tx_clk_speed() failed: %d\n", ret); return ret; } @@ -755,7 +755,7 @@ static int eqos_start(struct udevice *dev) ret = eqos->config->ops->eqos_start_resets(dev); if (ret < 0) { - pr_err("eqos_start_resets() failed: %d", ret); + pr_err("eqos_start_resets() failed: %d\n", ret); goto err; } @@ -773,13 +773,13 @@ static int eqos_start(struct udevice *dev) EQOS_DMA_MODE_SWR, false, eqos->config->swr_wait, false); if (ret) { - pr_err("EQOS_DMA_MODE_SWR stuck"); + pr_err("EQOS_DMA_MODE_SWR stuck\n"); goto err_stop_resets; } ret = eqos->config->ops->eqos_calibrate_pads(dev); if (ret < 0) { - pr_err("eqos_calibrate_pads() failed: %d", ret); + pr_err("eqos_calibrate_pads() failed: %d\n", ret); goto err_stop_resets; } @@ -812,7 +812,7 @@ static int eqos_start(struct udevice *dev) } if (!eqos->phy) { - pr_err("phy_connect() failed"); + pr_err("phy_connect() failed\n"); ret = -ENODEV; goto err_stop_resets; } @@ -820,7 +820,7 @@ static int eqos_start(struct udevice *dev) if (eqos->max_speed) { ret = phy_set_supported(eqos->phy, eqos->max_speed); if (ret) { - pr_err("phy_set_supported() failed: %d", ret); + pr_err("phy_set_supported() failed: %d\n", ret); goto err_shutdown_phy; } } @@ -828,26 +828,26 @@ static int eqos_start(struct udevice *dev) eqos->phy->node = eqos->phy_of_node; ret = phy_config(eqos->phy); if (ret < 0) { - pr_err("phy_config() failed: %d", ret); + pr_err("phy_config() failed: %d\n", ret); goto err_shutdown_phy; } } ret = phy_startup(eqos->phy); if (ret < 0) { - pr_err("phy_startup() failed: %d", ret); + pr_err("phy_startup() failed: %d\n", ret); goto err_shutdown_phy; } if (!eqos->phy->link) { - pr_err("No link"); + pr_err("No link\n"); ret = -EAGAIN; goto err_shutdown_phy; } ret = eqos_adjust_link(dev); if (ret < 0) { - pr_err("eqos_adjust_link() failed: %d", ret); + pr_err("eqos_adjust_link() failed: %d\n", ret); goto err_shutdown_phy; } @@ -1090,7 +1090,7 @@ err_shutdown_phy: err_stop_resets: eqos->config->ops->eqos_stop_resets(dev); err: - pr_err("FAILED: %d", ret); + pr_err("FAILED: %d\n", ret); return ret; } @@ -1217,7 +1217,7 @@ static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length) struct eqos_priv *eqos = dev_get_priv(dev); u32 idx, idx_mask = eqos->desc_per_cacheline - 1; uchar *packet_expected; - struct eqos_desc *rx_desc; + struct eqos_desc *rx_desc = NULL; debug("%s(packet=%p, length=%d)\n", __func__, packet, length); @@ -1361,7 +1361,7 @@ static int eqos_probe_resources_tegra186(struct udevice *dev) ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl); if (ret) { - pr_err("reset_get_by_name(rst) failed: %d", ret); + pr_err("reset_get_by_name(rst) failed: %d\n", ret); return ret; } @@ -1369,37 +1369,37 @@ static int eqos_probe_resources_tegra186(struct udevice *dev) &eqos->phy_reset_gpio, GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE); if (ret) { - pr_err("gpio_request_by_name(phy reset) failed: %d", ret); + pr_err("gpio_request_by_name(phy reset) failed: %d\n", ret); goto err_free_reset_eqos; } ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus); if (ret) { - pr_err("clk_get_by_name(slave_bus) failed: %d", ret); + pr_err("clk_get_by_name(slave_bus) failed: %d\n", ret); goto err_free_gpio_phy_reset; } ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus); if (ret) { - pr_err("clk_get_by_name(master_bus) failed: %d", ret); + pr_err("clk_get_by_name(master_bus) failed: %d\n", ret); goto 
err_free_gpio_phy_reset; } ret = clk_get_by_name(dev, "rx", &eqos->clk_rx); if (ret) { - pr_err("clk_get_by_name(rx) failed: %d", ret); + pr_err("clk_get_by_name(rx) failed: %d\n", ret); goto err_free_gpio_phy_reset; } ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref); if (ret) { - pr_err("clk_get_by_name(ptp_ref) failed: %d", ret); + pr_err("clk_get_by_name(ptp_ref) failed: %d\n", ret); goto err_free_gpio_phy_reset; } ret = clk_get_by_name(dev, "tx", &eqos->clk_tx); if (ret) { - pr_err("clk_get_by_name(tx) failed: %d", ret); + pr_err("clk_get_by_name(tx) failed: %d\n", ret); goto err_free_gpio_phy_reset; } @@ -1436,19 +1436,19 @@ static int eqos_probe_resources_stm32(struct udevice *dev) ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus); if (ret) { - pr_err("clk_get_by_name(master_bus) failed: %d", ret); + pr_err("clk_get_by_name(master_bus) failed: %d\n", ret); goto err_probe; } ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx); if (ret) { - pr_err("clk_get_by_name(rx) failed: %d", ret); + pr_err("clk_get_by_name(rx) failed: %d\n", ret); goto err_probe; } ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx); if (ret) { - pr_err("clk_get_by_name(tx) failed: %d", ret); + pr_err("clk_get_by_name(tx) failed: %d\n", ret); goto err_probe; } @@ -1502,7 +1502,7 @@ static int eqos_probe(struct udevice *dev) eqos->regs = dev_read_addr(dev); if (eqos->regs == FDT_ADDR_T_NONE) { - pr_err("dev_read_addr() failed"); + pr_err("dev_read_addr() failed\n"); return -ENODEV; } eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE); @@ -1514,19 +1514,19 @@ static int eqos_probe(struct udevice *dev) ret = eqos_probe_resources_core(dev); if (ret < 0) { - pr_err("eqos_probe_resources_core() failed: %d", ret); + pr_err("eqos_probe_resources_core() failed: %d\n", ret); return ret; } ret = eqos->config->ops->eqos_probe_resources(dev); if (ret < 0) { - pr_err("eqos_probe_resources() failed: %d", ret); + pr_err("eqos_probe_resources() failed: %d\n", ret); goto err_remove_resources_core; } ret = eqos->config->ops->eqos_start_clks(dev); if (ret < 0) { - pr_err("eqos_start_clks() failed: %d", ret); + pr_err("eqos_start_clks() failed: %d\n", ret); goto err_remove_resources_tegra; } @@ -1536,7 +1536,7 @@ static int eqos_probe(struct udevice *dev) if (!eqos->mii) { eqos->mii = mdio_alloc(); if (!eqos->mii) { - pr_err("mdio_alloc() failed"); + pr_err("mdio_alloc() failed\n"); ret = -ENOMEM; goto err_stop_clks; } @@ -1547,7 +1547,7 @@ static int eqos_probe(struct udevice *dev) ret = mdio_register(eqos->mii); if (ret < 0) { - pr_err("mdio_register() failed: %d", ret); + pr_err("mdio_register() failed: %d\n", ret); goto err_free_mdio; } } diff --git a/drivers/net/ti/Kconfig b/drivers/net/ti/Kconfig index 72eccc99e5f..ddfa95a0b7e 100644 --- a/drivers/net/ti/Kconfig +++ b/drivers/net/ti/Kconfig @@ -57,3 +57,16 @@ config MDIO_TI_CPSW help This driver supports the TI CPSW MDIO interface found in various TI SoCs. + +config TI_ICSSG_PRUETH + bool "TI Gigabit PRU Ethernet driver" + depends on ARCH_K3 + imply DM_MDIO + imply MISC_INIT_R + imply MISC + imply MDIO_TI_CPSW + select PHYLIB + select FS_LOADER + help + Support Gigabit Ethernet ports over the ICSSG PRU Subsystem + This subsystem is available starting with the AM65 platform. 
diff --git a/drivers/net/ti/Makefile b/drivers/net/ti/Makefile index 30c4c4b6d5a..b2b3aa3b180 100644 --- a/drivers/net/ti/Makefile +++ b/drivers/net/ti/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_DRIVER_TI_EMAC) += davinci_emac.o obj-$(CONFIG_DRIVER_TI_KEYSTONE_NET) += keystone_net.o cpsw_mdio.o obj-$(CONFIG_TI_AM65_CPSW_NUSS) += am65-cpsw-nuss.o obj-$(CONFIG_MDIO_TI_CPSW) += cpsw_mdio.o +obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg_prueth.o icssg_classifier.o icssg_config.o icssg_queues.o diff --git a/drivers/net/ti/am65-cpsw-nuss.c b/drivers/net/ti/am65-cpsw-nuss.c index d68ed671836..b151e25d6a4 100644 --- a/drivers/net/ti/am65-cpsw-nuss.c +++ b/drivers/net/ti/am65-cpsw-nuss.c @@ -664,7 +664,7 @@ static int am65_cpsw_port_probe(struct udevice *dev) struct am65_cpsw_priv *priv = dev_get_priv(dev); struct eth_pdata *pdata = dev_get_plat(dev); struct am65_cpsw_common *cpsw_common; - char portname[15]; + char portname[32]; int ret; priv->dev = dev; @@ -672,7 +672,7 @@ static int am65_cpsw_port_probe(struct udevice *dev) cpsw_common = dev_get_priv(dev->parent); priv->cpsw_common = cpsw_common; - sprintf(portname, "%s%s", dev->parent->name, dev->name); + snprintf(portname, sizeof(portname), "%s%s", dev->parent->name, dev->name); device_set_name(dev, portname); ret = am65_cpsw_ofdata_parse_phy(dev); diff --git a/drivers/net/ti/icss_mii_rt.h b/drivers/net/ti/icss_mii_rt.h new file mode 100644 index 00000000000..fd95d4d7c1f --- /dev/null +++ b/drivers/net/ti/icss_mii_rt.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* PRU-ICSS MII_RT register definitions + * + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com + */ + +#ifndef __NET_PRUSS_MII_RT_H__ +#define __NET_PRUSS_MII_RT_H__ + +#include <regmap.h> + +/* PRUSS_MII_RT Registers */ +#define PRUSS_MII_RT_RXCFG0 0x0 +#define PRUSS_MII_RT_RXCFG1 0x4 +#define PRUSS_MII_RT_TXCFG0 0x10 +#define PRUSS_MII_RT_TXCFG1 0x14 +#define PRUSS_MII_RT_TX_CRC0 0x20 +#define PRUSS_MII_RT_TX_CRC1 0x24 +#define PRUSS_MII_RT_TX_IPG0 0x30 +#define PRUSS_MII_RT_TX_IPG1 0x34 +#define PRUSS_MII_RT_PRS0 0x38 +#define PRUSS_MII_RT_PRS1 0x3c +#define PRUSS_MII_RT_RX_FRMS0 0x40 +#define PRUSS_MII_RT_RX_FRMS1 0x44 +#define PRUSS_MII_RT_RX_PCNT0 0x48 +#define PRUSS_MII_RT_RX_PCNT1 0x4c +#define PRUSS_MII_RT_RX_ERR0 0x50 +#define PRUSS_MII_RT_RX_ERR1 0x54 + +/* PRUSS_MII_RT_RXCFG0/1 bits */ +#define PRUSS_MII_RT_RXCFG_RX_ENABLE BIT(0) +#define PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS BIT(1) +#define PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE BIT(2) +#define PRUSS_MII_RT_RXCFG_RX_MUX_SEL BIT(3) +#define PRUSS_MII_RT_RXCFG_RX_L2_EN BIT(4) +#define PRUSS_MII_RT_RXCFG_RX_BYTE_SWAP BIT(5) +#define PRUSS_MII_RT_RXCFG_RX_AUTO_FWD_PRE BIT(6) +#define PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS BIT(9) + +/* PRUSS_MII_RT_TXCFG0/1 bits */ +#define PRUSS_MII_RT_TXCFG_TX_ENABLE BIT(0) +#define PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE BIT(1) +#define PRUSS_MII_RT_TXCFG_TX_EN_MODE BIT(2) +#define PRUSS_MII_RT_TXCFG_TX_BYTE_SWAP BIT(3) +#define PRUSS_MII_RT_TXCFG_TX_MUX_SEL BIT(8) +#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_SEQUENCE BIT(9) +#define PRUSS_MII_RT_TXCFG_PRE_TX_AUTO_ESC_ERR BIT(10) +#define PRUSS_MII_RT_TXCFG_TX_32_MODE_EN BIT(11) +#define PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN BIT(12) /* SR2.0 onwards */ + +#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT 16 +#define PRUSS_MII_RT_TXCFG_TX_START_DELAY_MASK GENMASK(25, 16) + +#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT 28 +#define PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK GENMASK(30, 28) + +/* PRUSS_MII_RT_TX_IPG0/1 bits */ 
+#define PRUSS_MII_RT_TX_IPG_IPG_SHIFT 0 +#define PRUSS_MII_RT_TX_IPG_IPG_MASK GENMASK(9, 0) + +/* PRUSS_MII_RT_PRS0/1 bits */ +#define PRUSS_MII_RT_PRS_COL BIT(0) +#define PRUSS_MII_RT_PRS_CRS BIT(1) + +/* PRUSS_MII_RT_RX_FRMS0/1 bits */ +#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_SHIFT 0 +#define PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK GENMASK(15, 0) + +#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT 16 +#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK GENMASK(31, 16) + +/* Min/Max in MII_RT_RX_FRMS */ +/* For EMAC and Switch */ +#define PRUSS_MII_RT_RX_FRMS_MAX (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +#define PRUSS_MII_RT_RX_FRMS_MIN_FRM (64) + +/* for HSR and PRP */ +#define PRUSS_MII_RT_RX_FRMS_MAX_FRM_LRE (PRUSS_MII_RT_RX_FRMS_MAX + \ + ICSS_LRE_TAG_RCT_SIZE) +/* PRUSS_MII_RT_RX_PCNT0/1 bits */ +#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_SHIFT 0 +#define PRUSS_MII_RT_RX_PCNT_MIN_PCNT_MASK GENMASK(3, 0) + +#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_SHIFT 4 +#define PRUSS_MII_RT_RX_PCNT_MAX_PCNT_MASK GENMASK(7, 4) + +/* PRUSS_MII_RT_RX_ERR0/1 bits */ +#define PRUSS_MII_RT_RX_ERR_MIN_PCNT_ERR BIT(0) +#define PRUSS_MII_RT_RX_ERR_MAX_PCNT_ERR BIT(1) +#define PRUSS_MII_RT_RX_ERR_MIN_FRM_ERR BIT(2) +#define PRUSS_MII_RT_RX_ERR_MAX_FRM_ERR BIT(3) + +#define ICSSG_CFG_OFFSET 0 +#define RGMII_CFG_OFFSET 4 + +/* Constant to choose between MII0 and MII1 */ +#define ICSS_MII0 0 +#define ICSS_MII1 1 + +/* ICSSG_CFG Register bits */ +#define ICSSG_CFG_SGMII_MODE BIT(16) +#define ICSSG_CFG_TX_PRU_EN BIT(11) +#define ICSSG_CFG_RX_SFD_TX_SOF_EN BIT(10) +#define ICSSG_CFG_RTU_PRU_PSI_SHARE_EN BIT(9) +#define ICSSG_CFG_IEP1_TX_EN BIT(8) +#define ICSSG_CFG_MII1_MODE GENMASK(6, 5) +#define ICSSG_CFG_MII1_MODE_SHIFT 5 +#define ICSSG_CFG_MII0_MODE GENMASK(4, 3) +#define ICSSG_CFG_MII0_MODE_SHIFT 3 +#define ICSSG_CFG_RX_L2_G_EN BIT(2) +#define ICSSG_CFG_TX_L2_EN BIT(1) +#define ICSSG_CFG_TX_L1_EN BIT(0) + +enum mii_mode { MII_MODE_MII = 0, MII_MODE_RGMII, MII_MODE_SGMII }; + +/* RGMII CFG Register bits */ +#define RGMII_CFG_INBAND_EN_MII0 BIT(16) +#define RGMII_CFG_GIG_EN_MII0 BIT(17) +#define RGMII_CFG_INBAND_EN_MII1 BIT(20) +#define RGMII_CFG_GIG_EN_MII1 BIT(21) +#define RGMII_CFG_FULL_DUPLEX_MII0 BIT(18) +#define RGMII_CFG_FULL_DUPLEX_MII1 BIT(22) +#define RGMII_CFG_SPEED_MII0 GENMASK(2, 1) +#define RGMII_CFG_SPEED_MII1 GENMASK(6, 5) +#define RGMII_CFG_SPEED_MII0_SHIFT 1 +#define RGMII_CFG_SPEED_MII1_SHIFT 5 +#define RGMII_CFG_FULLDUPLEX_MII0 BIT(3) +#define RGMII_CFG_FULLDUPLEX_MII1 BIT(7) +#define RGMII_CFG_FULLDUPLEX_MII0_SHIFT 3 +#define RGMII_CFG_FULLDUPLEX_MII1_SHIFT 7 +#define RGMII_CFG_SPEED_10M 0 +#define RGMII_CFG_SPEED_100M 1 +#define RGMII_CFG_SPEED_1G 2 + +static inline void icssg_mii_update_ipg(struct regmap *mii_rt, int mii, u32 ipg) +{ + u32 val; + + if (mii == ICSS_MII0) { + regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, ipg); + } else { + /* Errata workaround: IEP1 is not read by h/w unless IEP0 is written */ + regmap_read(mii_rt, PRUSS_MII_RT_TX_IPG0, &val); + regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG1, ipg); + regmap_write(mii_rt, PRUSS_MII_RT_TX_IPG0, val); + } +} + +static inline void icssg_update_rgmii_cfg(struct regmap *miig_rt, int speed, + bool full_duplex, int slice, struct prueth_priv *priv) +{ + u32 gig_en_mask, gig_val = 0, full_duplex_mask, full_duplex_val = 0; + u32 inband_en_mask, inband_val = 0; + + gig_en_mask = (slice == ICSS_MII0) ? 
RGMII_CFG_GIG_EN_MII0 : + RGMII_CFG_GIG_EN_MII1; + if (speed == SPEED_1000) + gig_val = gig_en_mask; + regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, gig_en_mask, gig_val); + + inband_en_mask = (slice == ICSS_MII0) ? RGMII_CFG_INBAND_EN_MII0 : + RGMII_CFG_INBAND_EN_MII1; + if (speed == SPEED_10 && phy_interface_is_rgmii(priv->phydev)) + inband_val = inband_en_mask; + regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, inband_en_mask, inband_val); + + full_duplex_mask = (slice == ICSS_MII0) ? RGMII_CFG_FULL_DUPLEX_MII0 : + RGMII_CFG_FULL_DUPLEX_MII1; + if (full_duplex) + full_duplex_val = full_duplex_mask; + regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask, + full_duplex_val); +} + +static inline void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, int phy_if) +{ + u32 val, mask, shift; + + mask = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE : ICSSG_CFG_MII1_MODE; + shift = mii == ICSS_MII0 ? ICSSG_CFG_MII0_MODE_SHIFT : ICSSG_CFG_MII1_MODE_SHIFT; + + val = MII_MODE_RGMII; + if (phy_if == PHY_INTERFACE_MODE_MII) + val = MII_MODE_MII; + + val <<= shift; + regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, mask, val); + regmap_read(miig_rt, ICSSG_CFG_OFFSET, &val); +} + +#endif /* __NET_PRUSS_MII_RT_H__ */ diff --git a/drivers/net/ti/icssg_classifier.c b/drivers/net/ti/icssg_classifier.c new file mode 100644 index 00000000000..e510a1cd3e5 --- /dev/null +++ b/drivers/net/ti/icssg_classifier.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Texas Instruments ICSSG Ethernet Driver + * + * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <dm/ofnode.h> +#include <regmap.h> + +#define ICSSG_NUM_CLASSIFIERS 16 +#define ICSSG_NUM_FT1_SLOTS 8 +#define ICSSG_NUM_FT3_SLOTS 16 + +#define ICSSG_NUM_CLASSIFIERS_IN_USE 1 + +/* Filter 1 - FT1 */ +#define FT1_NUM_SLOTS 8 +#define FT1_SLOT_SIZE 0x10 /* bytes */ + +/* offsets from FT1 slot base i.e. 
slot 1 start */ +#define FT1_DA0 0x0 +#define FT1_DA1 0x4 +#define FT1_DA0_MASK 0x8 +#define FT1_DA1_MASK 0xc + +#define FT1_N_REG(slize, n, reg) (offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg)) + +#define FT1_LEN_MASK GENMASK(19, 16) +#define FT1_LEN_SHIFT 16 +#define FT1_LEN(len) (((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK) + +#define FT1_START_MASK GENMASK(14, 0) +#define FT1_START(start) ((start) & FT1_START_MASK) + +#define FT1_MATCH_SLOT(n) (GENMASK(23, 16) & (BIT(n) << 16)) + +enum ft1_cfg_type { + FT1_CFG_TYPE_DISABLED = 0, + FT1_CFG_TYPE_EQ, + FT1_CFG_TYPE_GT, + FT1_CFG_TYPE_LT, +}; + +#define FT1_CFG_SHIFT(n) (2 * (n)) +#define FT1_CFG_MASK(n) (0x3 << FT1_CFG_SHIFT((n))) + +/* Filter 3 - FT3 */ +#define FT3_NUM_SLOTS 16 +#define FT3_SLOT_SIZE 0x20 /* bytes */ + +/* offsets from FT3 slot n's base */ +#define FT3_START 0 +#define FT3_START_AUTO 0x4 +#define FT3_START_OFFSET 0x8 +#define FT3_JUMP_OFFSET 0xc +#define FT3_LEN 0x10 +#define FT3_CFG 0x14 +#define FT3_T 0x18 +#define FT3_T_MASK 0x1c + +#define FT3_N_REG(slize, n, reg) \ + (offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg)) + +/* offsets from rx_class n's base */ +#define RX_CLASS_AND_EN 0 +#define RX_CLASS_OR_EN 0x4 + +#define RX_CLASS_NUM_SLOTS 16 +#define RX_CLASS_EN_SIZE 0x8 /* bytes */ + +#define RX_CLASS_N_REG(slice, n, reg) \ + (offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg)) + +/* RX Class Gates */ +#define RX_CLASS_GATES_SIZE 0x4 /* bytes */ + +#define RX_CLASS_GATES_N_REG(slice, n) \ + (offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n)) + +#define RX_CLASS_GATES_ALLOW_MASK BIT(6) +#define RX_CLASS_GATES_RAW_MASK BIT(5) +#define RX_CLASS_GATES_PHASE_MASK BIT(4) + +/* RX Class traffic data matching bits */ +#define RX_CLASS_FT_UC BIT(31) +#define RX_CLASS_FT_MC BIT(30) +#define RX_CLASS_FT_BC BIT(29) +#define RX_CLASS_FT_FW BIT(28) +#define RX_CLASS_FT_RCV BIT(27) +#define RX_CLASS_FT_VLAN BIT(26) +#define RX_CLASS_FT_DA_P BIT(25) +#define RX_CLASS_FT_DA_I BIT(24) +#define RX_CLASS_FT_FT1_MATCH_MASK GENMASK(23, 16) +#define RX_CLASS_FT_FT1_MATCH_SHIFT 16 +#define RX_CLASS_FT_FT3_MATCH_MASK GENMASK(15, 0) +#define RX_CLASS_FT_FT3_MATCH_SHIFT 0 + +#define RX_CLASS_FT_FT1_MATCH(slot) \ + ((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & \ + RX_CLASS_FT_FT1_MATCH_MASK) + +enum rx_class_sel_type { + RX_CLASS_SEL_TYPE_OR = 0, + RX_CLASS_SEL_TYPE_AND = 1, + RX_CLASS_SEL_TYPE_OR_AND_AND = 2, + RX_CLASS_SEL_TYPE_OR_OR_AND = 3, +}; + +#define FT1_CFG_SHIFT(n) (2 * (n)) +#define FT1_CFG_MASK(n) (0x3 << FT1_CFG_SHIFT((n))) + +#define RX_CLASS_SEL_SHIFT(n) (2 * (n)) +#define RX_CLASS_SEL_MASK(n) (0x3 << RX_CLASS_SEL_SHIFT((n))) + +#define ICSSG_CFG_OFFSET 0 +#define MAC_INTERFACE_0 0x18 +#define MAC_INTERFACE_1 0x1c + +#define ICSSG_CFG_RX_L2_G_EN BIT(2) + +/* these are register offsets per PRU */ +struct miig_rt_offsets { + u32 mac0; + u32 mac1; + u32 ft1_start_len; + u32 ft1_cfg; + u32 ft1_slot_base; + u32 ft3_slot_base; + u32 ft3_p_base; + u32 ft_rx_ptr; + u32 rx_class_base; + u32 rx_class_cfg1; + u32 rx_class_cfg2; + u32 rx_class_gates_base; + u32 rx_green; + u32 rx_rate_cfg_base; + u32 rx_rate_src_sel0; + u32 rx_rate_src_sel1; + u32 tx_rate_cfg_base; + u32 stat_base; + u32 tx_hsr_tag; + u32 tx_hsr_seq; + u32 tx_vlan_type; + u32 tx_vlan_ins; +}; + +static struct miig_rt_offsets offs[] = { + /* PRU0 */ + { + 0x8, + 0xc, + 0x80, + 0x84, + 0x88, + 0x108, + 0x308, + 0x408, + 0x40c, + 0x48c, + 0x490, + 0x494, + 0x4d4, + 0x4e4, + 0x504, + 0x508, + 0x50c, + 0x54c, + 0x63c, + 0x640, + 0x644, + 0x648, 
+ }, + /* PRU1 */ + { + 0x10, + 0x14, + 0x64c, + 0x650, + 0x654, + 0x6d4, + 0x8d4, + 0x9d4, + 0x9d8, + 0xa58, + 0xa5c, + 0xa60, + 0xaa0, + 0xab0, + 0xad0, + 0xad4, + 0xad8, + 0xb18, + 0xc08, + 0xc0c, + 0xc10, + 0xc14, + }, +}; + +static inline u32 addr_to_da0(const u8 *addr) +{ + return (u32)(addr[0] | addr[1] << 8 | + addr[2] << 16 | addr[3] << 24); +}; + +static inline u32 addr_to_da1(const u8 *addr) +{ + return (u32)(addr[4] | addr[5] << 8); +}; + +static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice, + u16 start, u8 len) +{ + u32 offset, val; + + offset = offs[slice].ft1_start_len; + val = FT1_LEN(len) | FT1_START(start); + regmap_write(miig_rt, offset, val); +} + +static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice, + int n, const u8 *addr) +{ + u32 offset; + + offset = FT1_N_REG(slice, n, FT1_DA0); + regmap_write(miig_rt, offset, addr_to_da0(addr)); + offset = FT1_N_REG(slice, n, FT1_DA1); + regmap_write(miig_rt, offset, addr_to_da1(addr)); +} + +static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice, + int n, const u8 *addr) +{ + u32 offset; + + offset = FT1_N_REG(slice, n, FT1_DA0_MASK); + regmap_write(miig_rt, offset, addr_to_da0(addr)); + offset = FT1_N_REG(slice, n, FT1_DA1_MASK); + regmap_write(miig_rt, offset, addr_to_da1(addr)); +} + +static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n, + enum ft1_cfg_type type) +{ + u32 offset; + + offset = offs[slice].ft1_cfg; + regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n), + type << FT1_CFG_SHIFT(n)); +} + +static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n, + enum rx_class_sel_type type) +{ + u32 offset; + + offset = offs[slice].rx_class_cfg1; + regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n), + type << RX_CLASS_SEL_SHIFT(n)); +} + +static void rx_class_set_and(struct regmap *miig_rt, int slice, int n, + u32 data) +{ + u32 offset; + + offset = RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN); + regmap_write(miig_rt, offset, data); +} + +static void rx_class_set_or(struct regmap *miig_rt, int slice, int n, + u32 data) +{ + u32 offset; + + offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN); + regmap_write(miig_rt, offset, data); +} + +void icssg_class_set_host_mac_addr(struct regmap *miig_rt, u8 *mac) +{ + regmap_write(miig_rt, MAC_INTERFACE_0, addr_to_da0(mac)); + regmap_write(miig_rt, MAC_INTERFACE_1, addr_to_da1(mac)); +} + +void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac) +{ + regmap_write(miig_rt, offs[slice].mac0, addr_to_da0(mac)); + regmap_write(miig_rt, offs[slice].mac1, addr_to_da1(mac)); +} + +void icssg_class_disable_n(struct regmap *miig_rt, int slice, int n) +{ + u32 data, offset; + + /* AND_EN = 0 */ + rx_class_set_and(miig_rt, slice, n, 0); + /* OR_EN = 0 */ + rx_class_set_or(miig_rt, slice, n, 0); + + /* set CFG1 to OR */ + rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR); + + /* configure gate */ + offset = RX_CLASS_GATES_N_REG(slice, n); + regmap_read(miig_rt, offset, &data); + /* clear class_raw so we go through filters */ + data &= ~RX_CLASS_GATES_RAW_MASK; + /* set allow and phase mask */ + data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK; + regmap_write(miig_rt, offset, data); +} + +/* disable all RX traffic */ +void icssg_class_disable(struct regmap *miig_rt, int slice) +{ + int n; + + /* Enable RX_L2_G */ + regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN, + ICSSG_CFG_RX_L2_G_EN); + + for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) + 
icssg_class_disable_n(miig_rt, slice, n); + + /* FT1 Disabled */ + for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) { + u8 addr[] = { 0, 0, 0, 0, 0, 0, }; + + rx_class_ft1_cfg_set_type(miig_rt, slice, n, + FT1_CFG_TYPE_DISABLED); + rx_class_ft1_set_da(miig_rt, slice, n, addr); + rx_class_ft1_set_da_mask(miig_rt, slice, n, addr); + } + + /* clear CFG2 */ + regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); +} + +void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti) +{ + u32 data; + + /* defaults */ + icssg_class_disable(miig_rt, slice); + + /* Setup Classifier */ + /* match on Broadcast or MAC_PRU address */ + data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P; + + /* multicast? */ + if (allmulti) + data |= RX_CLASS_FT_MC; + + rx_class_set_or(miig_rt, slice, 0, data); + + /* set CFG1 for OR_OR_AND for classifier */ + rx_class_sel_set_type(miig_rt, slice, 0, + RX_CLASS_SEL_TYPE_OR_OR_AND); + + /* clear CFG2 */ + regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); +} + +/* required for SR2 for SAV check */ +void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) +{ + u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, }; + + rx_class_ft1_set_start_len(miig_rt, slice, 6, 6); + rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr); + rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr); + rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ); +} diff --git a/drivers/net/ti/icssg_config.c b/drivers/net/ti/icssg_config.c new file mode 100644 index 00000000000..5f132d0525c --- /dev/null +++ b/drivers/net/ti/icssg_config.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ICSSG Ethernet driver + * + * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <phy.h> +#include "icssg_prueth.h" +#include "icssg_switch_map.h" +#include "icss_mii_rt.h" +#include <dm/device_compat.h> +#include <linux/iopoll.h> + +/* TX IPG Values to be set for 100M and 1G link speeds. These values are + * in ocp_clk cycles. So need change if ocp_clk is changed for a specific + * h/w design. 
+ */ + +/* SR2.0 IPG is in rgmii_clk (125MHz) clock cycles + 1 */ +#define MII_RT_TX_IPG_100M 0x17 +#define MII_RT_TX_IPG_1G 0xb + +#define ICSSG_QUEUES_MAX 64 +#define ICSSG_QUEUE_OFFSET 0xd00 +#define ICSSG_QUEUE_PEEK_OFFSET 0xe00 +#define ICSSG_QUEUE_CNT_OFFSET 0xe40 +#define ICSSG_QUEUE_RESET_OFFSET 0xf40 + +#define ICSSG_NUM_TX_QUEUES 8 + +#define RECYCLE_Q_SLICE0 16 +#define RECYCLE_Q_SLICE1 17 + +#define ICSSG_NUM_OTHER_QUEUES 5 /* port, host and special queues */ + +#define PORT_HI_Q_SLICE0 32 +#define PORT_LO_Q_SLICE0 33 +#define HOST_HI_Q_SLICE0 34 +#define HOST_LO_Q_SLICE0 35 +#define HOST_SPL_Q_SLICE0 40 /* Special Queue */ + +#define PORT_HI_Q_SLICE1 36 +#define PORT_LO_Q_SLICE1 37 +#define HOST_HI_Q_SLICE1 38 +#define HOST_LO_Q_SLICE1 39 +#define HOST_SPL_Q_SLICE1 41 /* Special Queue */ + +#define MII_RXCFG_DEFAULT (PRUSS_MII_RT_RXCFG_RX_ENABLE | \ + PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \ + PRUSS_MII_RT_RXCFG_RX_L2_EN | \ + PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS) + +#define MII_TXCFG_DEFAULT (PRUSS_MII_RT_TXCFG_TX_ENABLE | \ + PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \ + PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \ + PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN) + +#define ICSSG_CFG_DEFAULT (ICSSG_CFG_TX_L1_EN | \ + ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \ + ICSSG_CFG_TX_PRU_EN | /* SR2.0 only */ \ + ICSSG_CFG_SGMII_MODE) + +#define FDB_GEN_CFG1 0x60 +#define SMEM_VLAN_OFFSET 8 +#define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8) + +#define FDB_GEN_CFG2 0x64 +#define FDB_VLAN_EN BIT(6) +#define FDB_HOST_EN BIT(2) +#define FDB_PRU1_EN BIT(1) +#define FDB_PRU0_EN BIT(0) +#define FDB_EN_ALL (FDB_PRU0_EN | FDB_PRU1_EN | \ + FDB_HOST_EN | FDB_VLAN_EN) + +struct map { + int queue; + u32 pd_addr_start; + u32 flags; + bool special; +}; + +struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = { + { + { PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 }, + { PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 }, + { HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 }, + { HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 }, + { HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 }, + }, + { + { PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 }, + { PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 }, + { HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 }, + { HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 }, + { HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 }, + }, +}; + +static void icssg_config_mii_init(struct prueth_priv *priv, int slice) +{ + struct prueth *prueth = priv->prueth; + struct regmap *mii_rt = prueth->mii_rt; + u32 txcfg_reg, pcnt_reg; + u32 txcfg; + + txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 : + PRUSS_MII_RT_TXCFG1; + pcnt_reg = (slice == ICSS_MII0) ? 
PRUSS_MII_RT_RX_PCNT0 : + PRUSS_MII_RT_RX_PCNT1; + + txcfg = MII_TXCFG_DEFAULT; + + if (prueth->phy_interface == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + else if (prueth->phy_interface != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + + regmap_write(mii_rt, txcfg_reg, txcfg); + regmap_write(mii_rt, pcnt_reg, 0x1); +} + +static void icssg_miig_queues_init(struct prueth_priv *priv, int slice) +{ + struct prueth *prueth = priv->prueth; + void __iomem *smem = (void __iomem *)prueth->shram.pa; + struct regmap *miig_rt = prueth->miig_rt; + int queue = 0, i, j; + u8 pd[ICSSG_SPECIAL_PD_SIZE]; + u32 *pdword; + + /* reset hwqueues */ + if (slice) + queue = ICSSG_NUM_TX_QUEUES; + + for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) { + regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue); + queue++; + } + + queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0; + regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue); + + for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) { + regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, + hwq_map[slice][i].queue); + } + + /* initialize packet descriptors in SMEM */ + /* push pakcet descriptors to hwqueues */ + + pdword = (u32 *)pd; + for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) { + struct map *mp; + int pd_size, num_pds; + u32 pdaddr; + + mp = &hwq_map[slice][j]; + if (mp->special) { + pd_size = ICSSG_SPECIAL_PD_SIZE; + num_pds = ICSSG_NUM_SPECIAL_PDS; + } else { + pd_size = ICSSG_NORMAL_PD_SIZE; + num_pds = ICSSG_NUM_NORMAL_PDS; + } + + for (i = 0; i < num_pds; i++) { + memset(pd, 0, pd_size); + + pdword[0] &= cpu_to_le32(ICSSG_FLAG_MASK); + pdword[0] |= cpu_to_le32(mp->flags); + pdaddr = mp->pd_addr_start + i * pd_size; + + memcpy_toio(smem + pdaddr, pd, pd_size); + queue = mp->queue; + regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, + pdaddr); + } + } +} + +void icssg_config_ipg(struct prueth_priv *priv, int speed, int mii) +{ + struct prueth *prueth = priv->prueth; + + switch (speed) { + case SPEED_1000: + icssg_mii_update_ipg(prueth->mii_rt, mii, MII_RT_TX_IPG_1G); + break; + case SPEED_100: + icssg_mii_update_ipg(prueth->mii_rt, mii, MII_RT_TX_IPG_100M); + break; + default: + /* Other links speeds not supported */ + pr_err("Unsupported link speed\n"); + return; + } +} + +static void emac_r30_cmd_init(struct prueth_priv *priv) +{ + struct prueth *prueth = priv->prueth; + struct icssg_r30_cmd *p; + int i; + + p = (struct icssg_r30_cmd *)(prueth->dram[priv->port_id].pa + MGR_R30_CMD_OFFSET); + + for (i = 0; i < 4; i++) + writel(EMAC_NONE, &p->cmd[i]); +} + +static int emac_r30_is_done(struct prueth_priv *priv) +{ + struct prueth *prueth = priv->prueth; + const struct icssg_r30_cmd *p; + int i; + u32 cmd; + + p = (const struct icssg_r30_cmd *)(prueth->dram[priv->port_id].pa + MGR_R30_CMD_OFFSET); + + for (i = 0; i < 4; i++) { + cmd = readl(&p->cmd[i]); + if (cmd != EMAC_NONE) + return 0; + } + + return 1; +} + +static int prueth_emac_buffer_setup(struct prueth_priv *priv) +{ + struct prueth *prueth = priv->prueth; + struct icssg_buffer_pool_cfg *bpool_cfg; + struct icssg_rxq_ctx *rxq_ctx; + int slice = priv->port_id; + u32 addr; + int i; + + /* Layout to have 64KB aligned buffer pool + * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1| + */ + + addr = lower_32_bits(prueth->sram_pa); + if (slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + + if (addr % SZ_64K) { + dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); + return -EINVAL; + } + + bpool_cfg = (struct 
icssg_buffer_pool_cfg *)(prueth->dram[priv->port_id].pa + BUFFER_POOL_0_ADDR_OFFSET); + /* workaround for f/w bug. bpool 0 needs to be initilalized */ + bpool_cfg[0].addr = cpu_to_le32(addr); + bpool_cfg[0].len = 0; + + for (i = PRUETH_EMAC_BUF_POOL_START; + i < (PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS); + i++) { + bpool_cfg[i].addr = cpu_to_le32(addr); + bpool_cfg[i].len = cpu_to_le32(PRUETH_EMAC_BUF_POOL_SIZE); + addr += PRUETH_EMAC_BUF_POOL_SIZE; + } + + if (!slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + else + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2; + + rxq_ctx = (struct icssg_rxq_ctx *)(prueth->dram[priv->port_id].pa + HOST_RX_Q_PRE_CONTEXT_OFFSET); + + for (i = 0; i < 3; i++) + rxq_ctx->start[i] = cpu_to_le32(addr); + + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + rxq_ctx->end = cpu_to_le32(addr); + + /* Express RX buffer queue */ + rxq_ctx = (struct icssg_rxq_ctx *)(prueth->dram[priv->port_id].pa + HOST_RX_Q_EXP_CONTEXT_OFFSET); + for (i = 0; i < 3; i++) + rxq_ctx->start[i] = cpu_to_le32(addr); + + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + rxq_ctx->end = cpu_to_le32(addr); + + return 0; +} + +static void icssg_init_emac_mode(struct prueth *prueth) +{ + u8 mac[6] = { 0 }; + + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, 0); + regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0); + /* Clear host MAC address */ + icssg_class_set_host_mac_addr(prueth->miig_rt, mac); +} + +int icssg_config(struct prueth_priv *priv) +{ + struct prueth *prueth = priv->prueth; + void *config = (void *)(prueth->dram[priv->port_id].pa + ICSSG_CONFIG_OFFSET); + u8 *cfg_byte_ptr = config; + struct icssg_flow_cfg *flow_cfg; + u32 mask; + int ret; + + int slice = priv->port_id; + + icssg_init_emac_mode(prueth); + + memset_io(config, 0, TAS_GATE_MASK_LIST0); + icssg_miig_queues_init(priv, slice); + + prueth->speed = SPEED_1000; + prueth->duplex = DUPLEX_FULL; + if (!phy_interface_is_rgmii(priv->phydev)) { + prueth->speed = SPEED_100; + prueth->duplex = DUPLEX_FULL; + } + + regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET, + ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT); + icssg_miig_set_interface_mode(prueth->miig_rt, ICSS_MII0, prueth->phy_interface); + icssg_miig_set_interface_mode(prueth->miig_rt, ICSS_MII1, prueth->phy_interface); + icssg_config_mii_init(priv, slice); + + icssg_config_ipg(priv, SPEED_1000, slice); + icssg_update_rgmii_cfg(prueth->miig_rt, SPEED_1000, true, slice, priv); + + /* set GPI mode */ + pruss_cfg_gpimode(prueth->pruss, slice, PRUSS_GPI_MODE_MII); + + /* enable XFR shift for PRU and RTU */ + mask = PRUSS_SPP_XFER_SHIFT_EN | PRUSS_SPP_RTU_XFR_SHIFT_EN; + pruss_cfg_update(prueth->pruss, PRUSS_CFG_SPP, mask, mask); + + flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET; + flow_cfg->rx_base_flow = prueth->dma_rx.id; + flow_cfg->mgm_base_flow = 0; + *(cfg_byte_ptr + SPL_PKT_DEFAULT_PRIORITY) = 0; + *(cfg_byte_ptr + QUEUE_NUM_UNTAGGED) = 0x0; + + ret = prueth_emac_buffer_setup(priv); + + if (ret) + return ret; + + emac_r30_cmd_init(priv); + return 0; +} + +/* commands to program ICSSG R30 registers */ +static struct icssg_r30_cmd emac_r32_bitmask[] = { + {{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}}, /* EMAC_PORT_DISABLE */ + {{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}}, /* EMAC_PORT_BLOCK */ + {{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */ + {{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}}, /* EMAC_PORT_FORWARD_WO_LEARNING */ + {{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT ALL */ + {{0xfffe0002, 
EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT TAGGED */ + {{0xfffc0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT UNTAGGED and PRIO */ + {{EMAC_NONE, 0xffff0020, EMAC_NONE, EMAC_NONE}}, /* TAS Trigger List change */ + {{EMAC_NONE, 0xdfff1000, EMAC_NONE, EMAC_NONE}}, /* TAS set state ENABLE*/ + {{EMAC_NONE, 0xefff2000, EMAC_NONE, EMAC_NONE}}, /* TAS set state RESET*/ + {{EMAC_NONE, 0xcfff0000, EMAC_NONE, EMAC_NONE}}, /* TAS set state DISABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xffff0400, EMAC_NONE}}, /* UC flooding ENABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xfbff0000, EMAC_NONE}}, /* UC flooding DISABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xffff0800, EMAC_NONE}}, /* MC flooding ENABLE*/ + {{EMAC_NONE, EMAC_NONE, 0xf7ff0000, EMAC_NONE}}, /* MC flooding DISABLE*/ + {{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}}, /* Preemption on Tx ENABLE*/ + {{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}} /* Preemption on Tx DISABLE*/ +}; + +int emac_set_port_state(struct prueth_priv *priv, + enum icssg_port_state_cmd cmd) +{ + struct prueth *prueth = priv->prueth; + struct icssg_r30_cmd *p; + int ret = -ETIMEDOUT; + int timeout = 10; + int i; + + p = (struct icssg_r30_cmd *)(prueth->dram[priv->port_id].pa + MGR_R30_CMD_OFFSET); + + if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) { + dev_err(prueth->dev, "invalid port command\n"); + return -EINVAL; + } + + for (i = 0; i < 4; i++) + writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]); + + /* wait for done */ + while (timeout) { + if (emac_r30_is_done(priv)) { + ret = 0; + break; + } + + udelay(2000); + timeout--; + } + + if (ret == -ETIMEDOUT) + dev_err(prueth->dev, "timeout waiting for command done\n"); + + return ret; +} + +int icssg_send_fdb_msg(struct prueth_priv *priv, struct mgmt_cmd *cmd, + struct mgmt_cmd_rsp *rsp) +{ + struct prueth *prueth = priv->prueth; + int slice = priv->port_id; + int ret, addr; + + addr = icssg_queue_pop(prueth, slice == 0 ? + ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1); + if (addr < 0) + return addr; + + /* First 4 bytes have FW owned buffer linking info which should + * not be touched + */ + memcpy_toio((void __iomem *)prueth->shram.pa + addr + 4, cmd, sizeof(*cmd)); + icssg_queue_push(prueth, slice == 0 ? + ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr); + ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0, + 2000, 20000000, prueth, slice == 0 ? + ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1); + + if (ret) { + dev_err(prueth->dev, "Timedout sending HWQ message\n"); + return ret; + } + + memcpy_fromio(rsp, (void __iomem *)prueth->shram.pa + addr, sizeof(*rsp)); + /* Return buffer back for to pool */ + icssg_queue_push(prueth, slice == 0 ? 
+ ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr); + + return 0; +} + +int emac_fdb_flow_id_updated(struct prueth_priv *priv) +{ + struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 }; + struct prueth *prueth = priv->prueth; + struct mgmt_cmd fdb_cmd = { 0 }; + int slice = priv->port_id; + int ret = 0; + + fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER; + fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW; + fdb_cmd.seqnum = ++(prueth->icssg_hwcmdseq); + fdb_cmd.param = 0; + + fdb_cmd.param |= (slice << 4); + fdb_cmd.cmd_args[0] = 0; + + ret = icssg_send_fdb_msg(priv, &fdb_cmd, &fdb_cmd_rsp); + if (ret) + return ret; + + if (fdb_cmd.seqnum != fdb_cmd_rsp.seqnum) { + dev_err(prueth->dev, "seqnum doesn't match, cmd.seqnum %d != rsp.seqnum %d\n", + fdb_cmd.seqnum, fdb_cmd_rsp.seqnum); + return -EINVAL; + } + + if (fdb_cmd_rsp.status == 1) + return 0; + + return -EINVAL; +} diff --git a/drivers/net/ti/icssg_config.h b/drivers/net/ti/icssg_config.h new file mode 100644 index 00000000000..d388484c035 --- /dev/null +++ b/drivers/net/ti/icssg_config.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_CONFIG_H +#define __NET_TI_ICSSG_CONFIG_H + +struct icssg_buffer_pool_cfg { + __le32 addr; + __le32 len; +} __packed; + +struct icssg_flow_cfg { + __le16 rx_base_flow; + __le16 mgm_base_flow; +} __packed; + +/* Config area lies in shared RAM */ +#define ICSSG_CONFIG_OFFSET_SLICE0 0 +#define ICSSG_CONFIG_OFFSET_SLICE1 0x8000 + +/* pstate speed/duplex command to set speed and duplex settings + * in firmware. + * Command format : 0x8102ssPN. ss - sequence number: currently not + * used by driver, P - port number: For switch, N - Speed/Duplex state + * - Possible values of N: + * 0x0 - 10Mbps/Half duplex ; + * 0x8 - 10Mbps/Full duplex ; + * 0x2 - 100Mbps/Half duplex; + * 0xa - 100Mbps/Full duplex; + * 0xc - 1Gbps/Full duplex; + * NOTE: The above are same as bits [3..1](slice 0) or bits [8..6](slice 1) of + * RGMII CFG register. So suggested to read the register to populate the command + * bits. + */ +#define ICSSG_PSTATE_SPEED_DUPLEX_CMD 0x81020000 +#define ICSSG_PSTATE_FULL_DUPLEX BIT(3) +#define ICSSG_PSTATE_SPEED_100 BIT(1) +#define ICSSG_PSTATE_SPEED_1000 BIT(2) + +/* Flow IDs used in config structure to firmware. Should match with + * flow_id in struct dma for rx channels. 
+ */ +#define ICSSG_RX_CHAN_FLOW_ID 0 /* flow id for host port */ +#define ICSSG_RX_MGM_CHAN_FLOW_ID 1 /* flow id for command response */ + +/* Used to notify the FW of the current link speed */ +#define PORT_LINK_SPEED_OFFSET 0x00A8 + +#define FW_LINK_SPEED_1G (0x00) +#define FW_LINK_SPEED_100M (0x01) +#define FW_LINK_SPEED_10M (0x02) +#define FW_LINK_SPEED_HD (0x80) + +#define PRUETH_PKT_TYPE_CMD 0x10 +#define PRUETH_NAV_PS_DATA_SIZE 16 /* Protocol specific data size */ +#define PRUETH_NAV_SW_DATA_SIZE 16 /* SW related data size */ +#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */ +#define PRUETH_RX_FLOW_DATA 0 /* FIXME: f/w bug to change to highest priority flow */ + +#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K +#define PRUETH_EMAC_POOLS_PER_SLICE 24 +#define PRUETH_EMAC_BUF_POOL_START 8 +#define PRUETH_NUM_BUF_POOLS 8 +#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */ +#define MSMC_RAM_SIZE (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ + PRUETH_EMAC_RX_CTX_BUF_SIZE)) + +struct icssg_rxq_ctx { + __le32 start[3]; + __le32 end; +} __packed; + +/* Load time Fiwmware Configuration */ + +#define ICSSG_FW_MGMT_CMD_HEADER 0x81 +#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03 +#define ICSSG_FW_MGMT_CMD_TYPE 0x04 +#define ICSSG_FW_MGMT_PKT 0x80000000 +#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05 + +struct icssg_r30_cmd { + u32 cmd[4]; +} __packed; + +enum icssg_port_state_cmd { + ICSSG_EMAC_PORT_DISABLE = 0, + ICSSG_EMAC_PORT_BLOCK, + ICSSG_EMAC_PORT_FORWARD, + ICSSG_EMAC_PORT_FORWARD_WO_LEARNING, + ICSSG_EMAC_PORT_ACCEPT_ALL, + ICSSG_EMAC_PORT_ACCEPT_TAGGED, + ICSSG_EMAC_PORT_ACCEPT_UNTAGGED_N_PRIO, + ICSSG_EMAC_PORT_TAS_TRIGGER, + ICSSG_EMAC_PORT_TAS_ENABLE, + ICSSG_EMAC_PORT_TAS_RESET, + ICSSG_EMAC_PORT_TAS_DISABLE, + ICSSG_EMAC_PORT_UC_FLOODING_ENABLE, + ICSSG_EMAC_PORT_UC_FLOODING_DISABLE, + ICSSG_EMAC_PORT_MC_FLOODING_ENABLE, + ICSSG_EMAC_PORT_MC_FLOODING_DISABLE, + ICSSG_EMAC_PORT_PREMPT_TX_ENABLE, + ICSSG_EMAC_PORT_PREMPT_TX_DISABLE, + ICSSG_EMAC_PORT_MAX_COMMANDS +}; + +#define EMAC_NONE 0xffff0000 +#define EMAC_PRU0_P_DI 0xffff0004 +#define EMAC_PRU1_P_DI 0xffff0040 +#define EMAC_TX_P_DI 0xffff0100 + +#define EMAC_PRU0_P_EN 0xfffb0000 +#define EMAC_PRU1_P_EN 0xffbf0000 +#define EMAC_TX_P_EN 0xfeff0000 + +#define EMAC_P_BLOCK 0xffff0040 +#define EMAC_TX_P_BLOCK 0xffff0200 +#define EMAC_P_UNBLOCK 0xffbf0000 +#define EMAC_TX_P_UNBLOCK 0xfdff0000 +#define EMAC_LEAN_EN 0xfff70000 +#define EMAC_LEAN_DI 0xffff0008 + +#define EMAC_ACCEPT_ALL 0xffff0001 +#define EMAC_ACCEPT_TAG 0xfffe0002 +#define EMAC_ACCEPT_PRIOR 0xfffc0000 + +/* Config area lies in DRAM */ +#define ICSSG_CONFIG_OFFSET 0x0 + +#define ICSSG_NUM_NORMAL_PDS 64 +#define ICSSG_NUM_SPECIAL_PDS 16 + +#define ICSSG_NORMAL_PD_SIZE 8 +#define ICSSG_SPECIAL_PD_SIZE 20 + +#define ICSSG_FLAG_MASK 0xff00ffff + +struct icssg_setclock_desc { + u8 request; + u8 restore; + u8 acknowledgment; + u8 cmp_status; + u32 margin; + u32 cyclecounter0_set; + u32 cyclecounter1_set; + u32 iepcount_set; + u32 rsvd1; + u32 rsvd2; + u32 CMP0_current; + u32 iepcount_current; + u32 difference; + u32 cyclecounter0_new; + u32 cyclecounter1_new; + u32 CMP0_new; +} __packed; + +struct mgmt_cmd { + u8 param; + u8 seqnum; + u8 type; + u8 header; + u32 cmd_args[3]; +} __packed; + +struct mgmt_cmd_rsp { + u32 reserved; + u8 status; + u8 seqnum; + u8 type; + u8 header; + u32 cmd_args[3]; +} __packed; + +#define ICSSG_CMD_POP_SLICE0 56 +#define ICSSG_CMD_POP_SLICE1 60 + +#define ICSSG_CMD_PUSH_SLICE0 57 +#define ICSSG_CMD_PUSH_SLICE1 61 + +#define 
ICSSG_RSP_POP_SLICE0 58 +#define ICSSG_RSP_POP_SLICE1 62 + +#define ICSSG_RSP_PUSH_SLICE0 56 +#define ICSSG_RSP_PUSH_SLICE1 60 + +#define ICSSG_TS_POP_SLICE0 59 +#define ICSSG_TS_POP_SLICE1 63 + +#define ICSSG_TS_PUSH_SLICE0 40 +#define ICSSG_TS_PUSH_SLICE1 41 + +#endif /* __NET_TI_ICSSG_CONFIG_H */ diff --git a/drivers/net/ti/icssg_prueth.c b/drivers/net/ti/icssg_prueth.c new file mode 100644 index 00000000000..2639f960631 --- /dev/null +++ b/drivers/net/ti/icssg_prueth.c @@ -0,0 +1,691 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Texas Instruments K3 AM65 PRU Ethernet Driver + * + * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <asm/io.h> +#include <asm/processor.h> +#include <clk.h> +#include <dm/lists.h> +#include <dm/device.h> +#include <dma-uclass.h> +#include <dm/of_access.h> +#include <dm/pinctrl.h> +#include <fs_loader.h> +#include <miiphy.h> +#include <net.h> +#include <phy.h> +#include <power-domain.h> +#include <linux/soc/ti/ti-udma.h> +#include <regmap.h> +#include <remoteproc.h> +#include <syscon.h> +#include <soc.h> +#include <linux/pruss_driver.h> +#include <dm/device_compat.h> + +#include "icssg_prueth.h" +#include "icss_mii_rt.h" + +#define ICSS_SLICE0 0 +#define ICSS_SLICE1 1 + +#ifdef PKTSIZE_ALIGN +#define UDMA_RX_BUF_SIZE PKTSIZE_ALIGN +#else +#define UDMA_RX_BUF_SIZE ALIGN(PKTSIZE, ARCH_DMA_MINALIGN) +#endif + +#ifdef PKTBUFSRX +#define UDMA_RX_DESC_NUM PKTBUFSRX +#else +#define UDMA_RX_DESC_NUM 4 +#endif + +/* Config region lies in shared RAM */ +#define ICSS_CONFIG_OFFSET_SLICE0 0 +#define ICSS_CONFIG_OFFSET_SLICE1 0x8000 + +/* Firmware flags */ +#define ICSS_SET_RUN_FLAG_VLAN_ENABLE BIT(0) /* switch only */ +#define ICSS_SET_RUN_FLAG_FLOOD_UNICAST BIT(1) /* switch only */ +#define ICSS_SET_RUN_FLAG_PROMISC BIT(2) /* MAC only */ +#define ICSS_SET_RUN_FLAG_MULTICAST_PROMISC BIT(3) /* MAC only */ + +/* CTRLMMR_ICSSG_RGMII_CTRL register bits */ +#define ICSSG_CTRL_RGMII_ID_MODE BIT(24) + +/* Management packet type */ +#define PRUETH_PKT_TYPE_CMD 0x10 + +/* Number of PRU Cores per Slice */ +#define ICSSG_NUM_PRU_CORES 3 + +static int icssg_gmii_select(struct prueth_priv *priv) +{ + struct phy_device *phydev = priv->phydev; + + if (phydev->interface != PHY_INTERFACE_MODE_MII && + phydev->interface < PHY_INTERFACE_MODE_RGMII && + phydev->interface > PHY_INTERFACE_MODE_RGMII_TXID) { + dev_err(priv->dev, "PHY mode unsupported %s\n", + phy_string_for_interface(phydev->interface)); + return -EINVAL; + } + + /* AM65 SR2.0 has TX Internal delay always enabled by hardware + * and it is not possible to disable TX Internal delay. The below + * switch case block describes how we handle different phy modes + * based on hardware restriction. 
+ */ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + phydev->interface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phydev->interface = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + dev_err(priv->dev, "RGMII mode without TX delay is not supported"); + return -EINVAL; + default: + break; + } + + return 0; +} + +static int icssg_phy_init(struct udevice *dev) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct phy_device *phydev; + u32 supported = PHY_GBIT_FEATURES; + int ret; + + phydev = dm_eth_phy_connect(dev); + if (!phydev) { + dev_err(dev, "phy_connect() failed\n"); + return -ENODEV; + } + + /* disable unsupported features */ + supported &= ~(PHY_10BT_FEATURES | + SUPPORTED_100baseT_Half | + SUPPORTED_1000baseT_Half | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + + phydev->supported &= supported; + phydev->advertising = phydev->supported; + priv->phydev = phydev; + + ret = icssg_gmii_select(priv); + if (ret) + goto out; + + ret = phy_config(phydev); + if (ret < 0) + dev_err(dev, "phy_config() failed: %d", ret); +out: + return ret; +} + +static void icssg_config_set_speed(struct prueth_priv *priv, int speed) +{ + struct prueth *prueth = priv->prueth; + u8 fw_speed; + + switch (speed) { + case SPEED_1000: + fw_speed = FW_LINK_SPEED_1G; + break; + case SPEED_100: + fw_speed = FW_LINK_SPEED_100M; + break; + case SPEED_10: + fw_speed = FW_LINK_SPEED_10M; + break; + default: + /* Other links speeds not supported */ + dev_err(priv->dev, "Unsupported link speed\n"); + return; + } + + writeb(fw_speed, prueth->dram[priv->port_id].pa + PORT_LINK_SPEED_OFFSET); +} + +static int icssg_update_link(struct prueth_priv *priv) +{ + struct phy_device *phy = priv->phydev; + struct prueth *prueth = priv->prueth; + bool gig_en = false, full_duplex = false; + + if (phy->link) { /* link up */ + if (phy->speed == SPEED_1000) + gig_en = true; + if (phy->duplex == DUPLEX_FULL) + full_duplex = true; + /* Set the RGMII cfg for gig en and full duplex */ + icssg_update_rgmii_cfg(prueth->miig_rt, phy->speed, full_duplex, + priv->port_id, priv); + /* update the Tx IPG based on 100M/1G speed */ + icssg_config_ipg(priv, phy->speed, priv->port_id); + + /* Send command to firmware to update Speed setting */ + icssg_config_set_speed(priv, phy->speed); + + /* Enable PORT FORWARDING */ + emac_set_port_state(priv, ICSSG_EMAC_PORT_FORWARD); + + printf("link up on port %d, speed %d, %s duplex\n", + priv->port_id, phy->speed, + (phy->duplex == DUPLEX_FULL) ? 
"full" : "half"); + } else { + emac_set_port_state(priv, ICSSG_EMAC_PORT_DISABLE); + printf("link down on port %d\n", priv->port_id); + } + + return phy->link; +} + +struct icssg_firmwares { + char *pru; + char *rtu; + char *txpru; +}; + +static struct icssg_firmwares icssg_emac_firmwares[] = { + { + .pru = "/lib/firmware/ti-pruss/am65x-sr2-pru0-prueth-fw.elf", + .rtu = "/lib/firmware/ti-pruss/am65x-sr2-rtu0-prueth-fw.elf", + .txpru = "/lib/firmware/ti-pruss/am65x-sr2-txpru0-prueth-fw.elf", + }, + { + .pru = "/lib/firmware/ti-pruss/am65x-sr2-pru1-prueth-fw.elf", + .rtu = "/lib/firmware/ti-pruss/am65x-sr2-rtu1-prueth-fw.elf", + .txpru = "/lib/firmware/ti-pruss/am65x-sr2-txpru1-prueth-fw.elf", + } +}; + +static int icssg_start_pru_cores(struct udevice *dev) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + struct icssg_firmwares *firmwares; + struct udevice *rproc_dev = NULL; + int ret, slice; + u32 phandle; + u8 index; + + slice = priv->port_id; + index = slice * ICSSG_NUM_PRU_CORES; + firmwares = icssg_emac_firmwares; + + ofnode_read_u32_index(dev_ofnode(prueth->dev), "ti,prus", index, &phandle); + ret = uclass_get_device_by_phandle_id(UCLASS_REMOTEPROC, phandle, &rproc_dev); + if (ret) { + dev_err(dev, "Unknown remote processor with phandle '0x%x' requested(%d)\n", + phandle, ret); + return ret; + } + + prueth->pru_core_id = dev_seq(rproc_dev); + ret = rproc_set_firmware(rproc_dev, firmwares[slice].pru); + if (ret) + return ret; + + ret = rproc_boot(rproc_dev); + if (ret) { + dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret); + return -EINVAL; + } + + ofnode_read_u32_index(dev_ofnode(prueth->dev), "ti,prus", index + 1, &phandle); + ret = uclass_get_device_by_phandle_id(UCLASS_REMOTEPROC, phandle, &rproc_dev); + if (ret) { + dev_err(dev, "Unknown remote processor with phandle '0x%x' requested(%d)\n", + phandle, ret); + goto halt_pru; + } + + prueth->rtu_core_id = dev_seq(rproc_dev); + ret = rproc_set_firmware(rproc_dev, firmwares[slice].rtu); + if (ret) + goto halt_pru; + + ret = rproc_boot(rproc_dev); + if (ret) { + dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret); + goto halt_pru; + } + + ofnode_read_u32_index(dev_ofnode(prueth->dev), "ti,prus", index + 2, &phandle); + ret = uclass_get_device_by_phandle_id(UCLASS_REMOTEPROC, phandle, &rproc_dev); + if (ret) { + dev_err(dev, "Unknown remote processor with phandle '0x%x' requested(%d)\n", + phandle, ret); + goto halt_rtu; + } + + prueth->txpru_core_id = dev_seq(rproc_dev); + ret = rproc_set_firmware(rproc_dev, firmwares[slice].txpru); + if (ret) + goto halt_rtu; + + ret = rproc_boot(rproc_dev); + if (ret) { + dev_err(dev, "failed to boot TXPRU%d: %d\n", slice, ret); + goto halt_rtu; + } + + return 0; + +halt_rtu: + rproc_stop(prueth->rtu_core_id); + +halt_pru: + rproc_stop(prueth->pru_core_id); + return ret; +} + +static int icssg_stop_pru_cores(struct udevice *dev) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + + rproc_stop(prueth->pru_core_id); + rproc_stop(prueth->rtu_core_id); + rproc_stop(prueth->txpru_core_id); + + return 0; +} + +static int prueth_start(struct udevice *dev) +{ + struct ti_udma_drv_chan_cfg_data *dma_rx_cfg_data; + struct eth_pdata *pdata = dev_get_plat(dev); + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + struct icssg_flow_cfg *flow_cfg; + u8 *hwaddr = pdata->enetaddr; + char chn_name[16]; + void *config; + int ret, i; + + icssg_class_set_mac_addr(prueth->miig_rt, 
priv->port_id, hwaddr); + icssg_ft1_set_mac_addr(prueth->miig_rt, priv->port_id, hwaddr); + icssg_class_default(prueth->miig_rt, priv->port_id, 0); + + /* Set Load time configuration */ + icssg_config(priv); + + ret = icssg_start_pru_cores(dev); + if (ret) + return ret; + + /* To differentiate channels for SLICE0 vs SLICE1 */ + snprintf(chn_name, sizeof(chn_name), "tx%d-0", priv->port_id); + + ret = dma_get_by_name(prueth->dev, chn_name, &prueth->dma_tx); + if (ret) + dev_err(dev, "TX dma get failed %d\n", ret); + + snprintf(chn_name, sizeof(chn_name), "rx%d", priv->port_id); + ret = dma_get_by_name(prueth->dev, chn_name, &prueth->dma_rx); + if (ret) + dev_err(dev, "RX dma get failed %d\n", ret); + + for (i = 0; i < UDMA_RX_DESC_NUM; i++) { + ret = dma_prepare_rcv_buf(&prueth->dma_rx, + net_rx_packets[i], + UDMA_RX_BUF_SIZE); + if (ret) + dev_err(dev, "RX dma add buf failed %d\n", ret); + } + + ret = dma_enable(&prueth->dma_tx); + if (ret) { + dev_err(dev, "TX dma_enable failed %d\n", ret); + goto tx_fail; + } + + ret = dma_enable(&prueth->dma_rx); + if (ret) { + dev_err(dev, "RX dma_enable failed %d\n", ret); + goto rx_fail; + } + + /* check if the rx_flow_id of dma_rx is as expected since + * driver hardcode that value in config struct to firmware + * in probe. Just add this sanity check to catch any change + * to rx channel assignment in the future. + */ + dma_get_cfg(&prueth->dma_rx, 0, (void **)&dma_rx_cfg_data); + config = (void *)(prueth->dram[priv->port_id].pa + ICSSG_CONFIG_OFFSET); + + flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET; + writew(dma_rx_cfg_data->flow_id_base, &flow_cfg->rx_base_flow); + writew(0, &flow_cfg->mgm_base_flow); + + dev_info(dev, "K3 ICSSG: rflow_id_base: %u, chn_name = %s\n", + dma_rx_cfg_data->flow_id_base, chn_name); + + ret = emac_fdb_flow_id_updated(priv); + if (ret) { + dev_err(dev, "Failed to update Rx Flow ID %d", ret); + goto phy_fail; + } + + ret = phy_startup(priv->phydev); + if (ret) { + dev_err(dev, "phy_startup failed\n"); + goto phy_fail; + } + + ret = icssg_update_link(priv); + if (!ret) { + ret = -ENODEV; + goto phy_shut; + } + + return 0; + +phy_shut: + phy_shutdown(priv->phydev); +phy_fail: + dma_disable(&prueth->dma_rx); + dma_free(&prueth->dma_rx); +rx_fail: + dma_disable(&prueth->dma_tx); + dma_free(&prueth->dma_tx); + +tx_fail: + icssg_class_disable(prueth->miig_rt, priv->port_id); + + return ret; +} + +static int prueth_send(struct udevice *dev, void *packet, int length) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + int ret; + + ret = dma_send(&prueth->dma_tx, packet, length, NULL); + + return ret; +} + +static int prueth_recv(struct udevice *dev, int flags, uchar **packetp) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + int ret; + + /* try to receive a new packet */ + ret = dma_receive(&prueth->dma_rx, (void **)packetp, NULL); + + return ret; +} + +static int prueth_free_pkt(struct udevice *dev, uchar *packet, int length) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + int ret = 0; + + if (length > 0) { + u32 pkt = prueth->rx_next % UDMA_RX_DESC_NUM; + + dev_dbg(dev, "%s length:%d pkt:%u\n", __func__, length, pkt); + + ret = dma_prepare_rcv_buf(&prueth->dma_rx, + net_rx_packets[pkt], + UDMA_RX_BUF_SIZE); + prueth->rx_next++; + } + + return ret; +} + +static void prueth_stop(struct udevice *dev) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth = priv->prueth; + + 
phy_shutdown(priv->phydev); + + dma_disable(&prueth->dma_tx); + dma_disable(&prueth->dma_rx); + + icssg_stop_pru_cores(dev); + + dma_free(&prueth->dma_tx); + dma_free(&prueth->dma_rx); +} + +static const struct eth_ops prueth_ops = { + .start = prueth_start, + .send = prueth_send, + .recv = prueth_recv, + .free_pkt = prueth_free_pkt, + .stop = prueth_stop, +}; + +static int icssg_ofdata_parse_phy(struct udevice *dev) +{ + struct prueth_priv *priv = dev_get_priv(dev); + + dev_read_u32(dev, "reg", &priv->port_id); + priv->phy_interface = dev_read_phy_mode(dev); + if (priv->phy_interface == PHY_INTERFACE_MODE_NA) { + dev_err(dev, "Invalid PHY mode '%s', port %u\n", + phy_string_for_interface(priv->phy_interface), + priv->port_id); + return -EINVAL; + } + + return 0; +} + +static int prueth_port_probe(struct udevice *dev) +{ + struct prueth_priv *priv = dev_get_priv(dev); + struct prueth *prueth; + char portname[15]; + int ret; + + priv->dev = dev; + prueth = dev_get_priv(dev->parent); + priv->prueth = prueth; + + sprintf(portname, "%s-%s", dev->parent->name, dev->name); + + device_set_name(dev, portname); + + ret = icssg_ofdata_parse_phy(dev); + if (ret) + goto out; + + ret = icssg_phy_init(dev); + if (ret) + goto out; + + ret = pruss_request_mem_region(prueth->pruss, + priv->port_id ? PRUSS_MEM_DRAM1 : PRUSS_MEM_DRAM0, + &prueth->dram[priv->port_id]); + if (ret) { + dev_err(dev, "could not request DRAM%d region\n", priv->port_id); + return ret; + } +out: + return ret; +} + +static int prueth_probe(struct udevice *dev) +{ + ofnode node, pruss_node, mdio_node, sram_node, curr_sram_node; + struct prueth *prueth = dev_get_priv(dev); + u32 phandle, err, sp, prev_end_addr; + struct udevice **prussdev = NULL; + ofnode eth_ports_node, eth_node; + struct udevice *port_dev; + int ret = 0; + + prueth->dev = dev; + + err = ofnode_read_u32(dev_ofnode(dev), "ti,prus", &phandle); + if (err) + return err; + + node = ofnode_get_by_phandle(phandle); + if (!ofnode_valid(node)) + return -EINVAL; + + pruss_node = ofnode_get_parent(node); + ret = device_get_global_by_ofnode(pruss_node, prussdev); + if (ret) + dev_err(dev, "error getting the pruss dev\n"); + prueth->pruss = *prussdev; + + ret = pruss_request_mem_region(*prussdev, PRUSS_MEM_SHRD_RAM2, + &prueth->shram); + if (ret) + return ret; + + ret = pruss_request_tm_region(*prussdev, &prueth->tmaddr); + if (ret) + return ret; + + prueth->miig_rt = syscon_regmap_lookup_by_phandle(dev, "ti,mii-g-rt"); + if (!prueth->miig_rt) { + dev_err(dev, "couldn't get mii-g-rt syscon regmap\n"); + return -ENODEV; + } + + prueth->mii_rt = syscon_regmap_lookup_by_phandle(dev, "ti,mii-rt"); + if (!prueth->mii_rt) { + dev_err(dev, "couldn't get mii-rt syscon regmap\n"); + return -ENODEV; + } + + ret = ofnode_read_u32(dev_ofnode(dev), "sram", &sp); + if (ret) { + dev_err(dev, "sram node fetch failed %d\n", ret); + return ret; + } + + sram_node = ofnode_get_by_phandle(sp); + if (!ofnode_valid(sram_node)) + return -EINVAL; + + prev_end_addr = ofnode_get_addr(sram_node); + + ofnode_for_each_subnode(curr_sram_node, sram_node) { + u32 start_addr, size, end_addr, avail; + const char *name; + + name = ofnode_get_name(curr_sram_node); + start_addr = ofnode_get_addr(curr_sram_node); + size = ofnode_get_size(curr_sram_node); + end_addr = start_addr + size; + avail = start_addr - prev_end_addr; + + if (avail > MSMC_RAM_SIZE) + break; + + prev_end_addr = end_addr; + } + + prueth->sram_pa = prev_end_addr; + if (prueth->sram_pa % SZ_64K != 0) { + /* This is constraint for SR2.0 firmware */ + 
dev_err(dev, "sram address needs to be 64KB aligned\n"); + return -EINVAL; + } + dev_dbg(dev, "sram: addr %x size %x\n", prueth->sram_pa, MSMC_RAM_SIZE); + + mdio_node = ofnode_find_subnode(pruss_node, "mdio"); + prueth->mdio_base = ofnode_get_addr(mdio_node); + ofnode_read_u32(mdio_node, "bus_freq", &prueth->mdio_freq); + + ret = clk_get_by_name_nodev(mdio_node, "fck", &prueth->mdiofck); + if (ret) { + dev_err(dev, "failed to get clock %d\n", ret); + return ret; + } + + ret = clk_enable(&prueth->mdiofck); + if (ret) { + dev_err(dev, "clk_enable failed %d\n", ret); + return ret; + } + + eth_ports_node = dev_read_subnode(dev, "ethernet-ports"); + if (!ofnode_valid(eth_ports_node)) + return -ENOENT; + + ofnode_for_each_subnode(eth_node, eth_ports_node) { + const char *node_name; + u32 port_id; + bool disabled; + + node_name = ofnode_get_name(eth_node); + disabled = !ofnode_is_enabled(eth_node); + ret = ofnode_read_u32(eth_node, "reg", &port_id); + if (ret) + dev_err(dev, "%s: error reading port_id (%d)\n", node_name, ret); + + if (port_id >= PRUETH_NUM_MACS) { + dev_err(dev, "%s: invalid port_id (%d)\n", node_name, port_id); + return -EINVAL; + } + + if (port_id < 0) + continue; + if (disabled) + continue; + + ret = device_bind_driver_to_node(dev, "prueth_port", + ofnode_get_name(eth_node), + eth_node, &port_dev); + if (ret) { + dev_err(dev, "Failed to bind to %s node\n", ofnode_get_name(eth_node)); + goto out; + } + } + + return 0; +out: + clk_disable(&prueth->mdiofck); + + return ret; +} + +static const struct udevice_id prueth_ids[] = { + { .compatible = "ti,am654-icssg-prueth" }, + { .compatible = "ti,am642-icssg-prueth" }, + { } +}; + +U_BOOT_DRIVER(prueth) = { + .name = "prueth", + .id = UCLASS_MISC, + .of_match = prueth_ids, + .probe = prueth_probe, + .priv_auto = sizeof(struct prueth), +}; + +U_BOOT_DRIVER(prueth_port) = { + .name = "prueth_port", + .id = UCLASS_ETH, + .probe = prueth_port_probe, + .ops = &prueth_ops, + .priv_auto = sizeof(struct prueth_priv), + .plat_auto = sizeof(struct eth_pdata), + .flags = DM_FLAG_ALLOC_PRIV_DMA, +}; diff --git a/drivers/net/ti/icssg_prueth.h b/drivers/net/ti/icssg_prueth.h new file mode 100644 index 00000000000..c69cfd4f162 --- /dev/null +++ b/drivers/net/ti/icssg_prueth.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver + * + * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_PRUETH_H +#define __NET_TI_ICSSG_PRUETH_H + +#include <asm/io.h> +#include <clk.h> +#include <dm/lists.h> +#include <dm/ofnode.h> +#include <dm/device.h> +#include <dma-uclass.h> +#include <regmap.h> +#include <linux/sizes.h> +#include <linux/pruss_driver.h> +#include "icssg_config.h" +#include "icssg_switch_map.h" + +void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac); +void icssg_class_set_host_mac_addr(struct regmap *miig_rt, u8 *mac); +void icssg_class_disable(struct regmap *miig_rt, int slice); +void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti); +void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr); + +enum prueth_mac { + PRUETH_MAC0 = 0, + PRUETH_MAC1, + PRUETH_NUM_MACS, +}; + +enum prueth_port { + PRUETH_PORT_HOST = 0, /* host side port */ + PRUETH_PORT_MII0, /* physical port MII 0 */ + PRUETH_PORT_MII1, /* physical port MII 1 */ +}; + +struct prueth { + struct udevice *dev; + struct udevice *pruss; + struct regmap *miig_rt; + struct regmap *mii_rt; + fdt_addr_t 
mdio_base; + struct pruss_mem_region shram; + struct pruss_mem_region dram[PRUETH_NUM_MACS]; + phys_addr_t tmaddr; + struct mii_dev *bus; + u32 sram_pa; + ofnode eth_node[PRUETH_NUM_MACS]; + u32 mdio_freq; + int phy_interface; + struct clk mdiofck; + struct dma dma_tx; + struct dma dma_rx; + struct dma dma_rx_mgm; + u32 rx_next; + u32 rx_pend; + int slice; + bool mdio_manual_mode; + int speed; + int duplex; + u8 pru_core_id; + u8 rtu_core_id; + u8 txpru_core_id; + u8 icssg_hwcmdseq; +}; + +struct prueth_priv { + struct udevice *dev; + struct prueth *prueth; + u32 port_id; + struct phy_device *phydev; + bool has_phy; + ofnode phy_node; + u32 phy_addr; + int phy_interface; +}; + +/* config helpers */ +void icssg_config_ipg(struct prueth_priv *priv, int speed, int mii); +int icssg_config(struct prueth_priv *priv); +int emac_set_port_state(struct prueth_priv *priv, enum icssg_port_state_cmd cmd); + +/* Buffer queue helpers */ +int icssg_queue_pop(struct prueth *prueth, u8 queue); +void icssg_queue_push(struct prueth *prueth, int queue, u16 addr); +u32 icssg_queue_level(struct prueth *prueth, int queue); + +/* FDB helpers */ +int icssg_send_fdb_msg(struct prueth_priv *priv, struct mgmt_cmd *cmd, + struct mgmt_cmd_rsp *rsp); +int emac_fdb_flow_id_updated(struct prueth_priv *priv); + +#endif /* __NET_TI_ICSSG_PRUETH_H */ diff --git a/drivers/net/ti/icssg_queues.c b/drivers/net/ti/icssg_queues.c new file mode 100644 index 00000000000..fc4d33dbb25 --- /dev/null +++ b/drivers/net/ti/icssg_queues.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ICSSG Buffer queue helpers + * + * Copyright (C) 2018-2024 Texas Instruments Incorporated - https://www.ti.com + */ + +#include <dm/ofnode.h> +#include <regmap.h> +#include "icssg_prueth.h" + +#define ICSSG_QUEUES_MAX 64 +#define ICSSG_QUEUE_OFFSET 0xd00 +#define ICSSG_QUEUE_PEEK_OFFSET 0xe00 +#define ICSSG_QUEUE_CNT_OFFSET 0xe40 +#define ICSSG_QUEUE_RESET_OFFSET 0xf40 + +int icssg_queue_pop(struct prueth *prueth, u8 queue) +{ + u32 val, cnt; + + if (queue >= ICSSG_QUEUES_MAX) + return -EINVAL; + + regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, &cnt); + if (!cnt) + return -EINVAL; + + regmap_read(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, &val); + + return val; +} + +void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) +{ + if (queue >= ICSSG_QUEUES_MAX) + return; + + regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr); +} + +u32 icssg_queue_level(struct prueth *prueth, int queue) +{ + u32 reg; + + if (queue >= ICSSG_QUEUES_MAX) + return 0; + + regmap_read(prueth->miig_rt, ICSSG_QUEUE_CNT_OFFSET + 4 * queue, ®); + + return reg; +} diff --git a/drivers/net/ti/icssg_switch_map.h b/drivers/net/ti/icssg_switch_map.h new file mode 100644 index 00000000000..b62c51407b8 --- /dev/null +++ b/drivers/net/ti/icssg_switch_map.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Texas Instruments ICSSG Ethernet driver + * + * Copyright (C) 2020-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#ifndef __NET_TI_ICSSG_SWITCH_MAP_H +#define __NET_TI_ICSSG_SWITCH_MAP_H + +/*Time after which FDB entries are checked for aged out values. 
Value in nanoseconds*/ +#define FDB_AGEING_TIMEOUT_OFFSET 0x0014 + +/*default VLAN tag for Host Port*/ +#define HOST_PORT_DF_VLAN_OFFSET 0x001C + +/*Same as HOST_PORT_DF_VLAN_OFFSET*/ +#define EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET HOST_PORT_DF_VLAN_OFFSET + +/*default VLAN tag for P1 Port*/ +#define P1_PORT_DF_VLAN_OFFSET 0x0020 + +/*Same as P1_PORT_DF_VLAN_OFFSET*/ +#define EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET P1_PORT_DF_VLAN_OFFSET + +/*default VLAN tag for P2 Port*/ +#define P2_PORT_DF_VLAN_OFFSET 0x0024 + +/*Same as P2_PORT_DF_VLAN_OFFSET*/ +#define EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET P2_PORT_DF_VLAN_OFFSET + +/*VLAN-FID Table offset. 4096 VIDs. 2B per VID = 8KB = 0x2000*/ +#define VLAN_STATIC_REG_TABLE_OFFSET 0x0100 + +/*VLAN-FID Table offset for EMAC*/ +#define EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET VLAN_STATIC_REG_TABLE_OFFSET + +/*packet descriptor Q reserved memory*/ +#define PORT_DESC0_HI 0x2104 + +/*packet descriptor Q reserved memory*/ +#define PORT_DESC0_LO 0x2F6C + +/*packet descriptor Q reserved memory*/ +#define PORT_DESC1_HI 0x3DD4 + +/*packet descriptor Q reserved memory*/ +#define PORT_DESC1_LO 0x4C3C + +/*packet descriptor Q reserved memory*/ +#define HOST_DESC0_HI 0x5AA4 + +/*packet descriptor Q reserved memory*/ +#define HOST_DESC0_LO 0x5F0C + +/*packet descriptor Q reserved memory*/ +#define HOST_DESC1_HI 0x6374 + +/*packet descriptor Q reserved memory*/ +#define HOST_DESC1_LO 0x67DC + +/*special packet descriptor Q reserved memory*/ +#define HOST_SPPD0 0x7AAC + +/*special packet descriptor Q reserved memory*/ +#define HOST_SPPD1 0x7EAC + +/*_Small_Description_*/ +#define TIMESYNC_FW_WC_CYCLECOUNT_OFFSET 0x83EC + +/*IEP count hi roll over count*/ +#define TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET 0x83F4 + +/*_Small_Description_*/ +#define TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET 0x83F8 + +/*Set clock descriptor*/ +#define TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET 0x83FC + +/*_Small_Description_*/ +#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET 0x843C + +/*_Small_Description_*/ +#define TIMESYNC_FW_WC_SYNCOUT_REDUCTION_COUNT_OFFSET 0x8440 + +/*_Small_Description_*/ +#define TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET 0x8444 + +/*Control variable to generate SYNC1*/ +#define TIMESYNC_FW_WC_ISOM_PIN_SIGNAL_EN_OFFSET 0x844C + +/*SystemTime Sync0 periodicity*/ +#define TIMESYNC_FW_ST_SYNCOUT_PERIOD_OFFSET 0x8450 + +/*pktTxDelay for P1 = link speed dependent p1 mac delay + p1 phy delay*/ +#define TIMESYNC_FW_WC_PKTTXDELAY_P1_OFFSET 0x8454 + +/*pktTxDelay for P2 = link speed dependent p2 mac delay + p2 phy delay*/ +#define TIMESYNC_FW_WC_PKTTXDELAY_P2_OFFSET 0x8458 + +/*Set clock operation done signal for next task*/ +#define TIMESYNC_FW_SIG_PNFW_OFFSET 0x845C + +/*Set clock operation done signal for next task*/ +#define TIMESYNC_FW_SIG_TIMESYNCFW_OFFSET 0x8460 + +/*New list is copied at this time*/ +#define TAS_CONFIG_CHANGE_TIME 0x000C + +/*config change error counter*/ +#define TAS_CONFIG_CHANGE_ERROR_COUNTER 0x0014 + +/*TAS List update pending flag*/ +#define TAS_CONFIG_PENDING 0x0018 + +/*TAS list update trigger flag*/ +#define TAS_CONFIG_CHANGE 0x0019 + +/*List length for new TAS schedule*/ +#define TAS_ADMIN_LIST_LENGTH 0x001A + +/*Currently active TAS list index*/ +#define TAS_ACTIVE_LIST_INDEX 0x001B + +/*Cycle time for the new TAS schedule*/ +#define TAS_ADMIN_CYCLE_TIME 0x001C + +/*Cycle counts remaining till the TAS list update*/ +#define TAS_CONFIG_CHANGE_CYCLE_COUNT 0x0020 + +/*Base Flow ID for sending packets to Host for 
Slice0*/ +#define PSI_L_REGULAR_FLOW_ID_BASE_OFFSET 0x0024 + +/*Same as PSI_L_REGULAR_FLOW_ID_BASE_OFFSET*/ +#define EMAC_ICSSG_SWITCH_PSI_L_REGULAR_FLOW_ID_BASE_OFFSET PSI_L_REGULAR_FLOW_ID_BASE_OFFSET + +/*Base Flow ID for sending mgmt and Tx TS to Host for Slice0*/ +#define PSI_L_MGMT_FLOW_ID_OFFSET 0x0026 + +/*Same as PSI_L_MGMT_FLOW_ID_OFFSET*/ +#define EMAC_ICSSG_SWITCH_PSI_L_MGMT_FLOW_ID_BASE_OFFSET PSI_L_MGMT_FLOW_ID_OFFSET + +/*Queue number for Special packets written here*/ +#define SPL_PKT_DEFAULT_PRIORITY 0x0028 + +/*Express Preemptible Queue Mask*/ +#define EXPRESS_PRE_EMPTIVE_Q_MASK 0x0029 + +/*Port1/Port2 Default Queue number for untagged packets, only 1B is used*/ +#define QUEUE_NUM_UNTAGGED 0x002A + +/*Stores the table used for priority regeneration. 1B per PCP/Queue*/ +#define PORT_Q_PRIORITY_REGEN_OFFSET 0x002C + +/* For marking Packet as priority/express (this feature is disabled) or + * cut-through/S&F. + */ +#define EXPRESS_PRE_EMPTIVE_Q_MAP 0x0034 + +/*Stores the table used for priority mapping. 1B per PCP/Queue*/ +#define PORT_Q_PRIORITY_MAPPING_OFFSET 0x003C + +/*TAS gate mask for windows list0*/ +#define TAS_GATE_MASK_LIST0 0x0100 + +/*TAS gate mask for windows list1*/ +#define TAS_GATE_MASK_LIST1 0x0350 + +/*Memory to Enable/Disable Preemption on TX side*/ +#define PRE_EMPTION_ENABLE_TX 0x05A0 + +/*Active State of Preemption on TX side*/ +#define PRE_EMPTION_ACTIVE_TX 0x05A1 + +/*Memory to Enable/Disable Verify State Machine Preemption*/ +#define PRE_EMPTION_ENABLE_VERIFY 0x05A2 + +/*Verify Status of State Machine*/ +#define PRE_EMPTION_VERIFY_STATUS 0x05A3 + +/*Non Final Fragment Size supported by Link Partner*/ +#define PRE_EMPTION_ADD_FRAG_SIZE_REMOTE 0x05A4 + +/*Non Final Fragment Size supported by Firmware*/ +#define PRE_EMPTION_ADD_FRAG_SIZE_LOCAL 0x05A6 + +/*Time in ms the State machine waits for respond packet*/ +#define PRE_EMPTION_VERIFY_TIME 0x05A8 + +/*Memory used for R30 related management commands*/ +#define MGR_R30_CMD_OFFSET 0x05AC + +/*HW Buffer Pool0 base address*/ +#define BUFFER_POOL_0_ADDR_OFFSET 0x05BC + +/*16B for Host Egress MSMC Q (Pre-emptible) context*/ +#define HOST_RX_Q_PRE_CONTEXT_OFFSET 0x0684 + +/*Buffer for 8 FDB entries to be added by 'Add Multiple FDB entries IOCTL*/ +#define FDB_CMD_BUFFER 0x0894 + +/*16B for Host Egress MSMC Q (Express) context*/ +#define HOST_RX_Q_EXP_CONTEXT_OFFSET 0x0940 + +/*Start of 32 bits PA_STAT counters*/ +#define PA_STAT_32b_START_OFFSET 0x0080 + +#endif +/* __NET_TI_ICSSG_SWITCH_MAP_H */ |
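A minimal usage sketch for the buffer queue helpers introduced in icssg_queues.c above (icssg_queue_pop(), icssg_queue_push(), icssg_queue_level()). This example is illustrative only and is not part of the patch: the queue indices and the function name example_move_queue() are made-up assumptions, and it presumes the surrounding U-Boot driver context from icssg_prueth.h.

	#include "icssg_prueth.h"	/* declares the queue helpers used below */

	/* Example queue indices; arbitrary illustrative values, not constants defined by this patch */
	#define EXAMPLE_SRC_QUEUE	16
	#define EXAMPLE_DST_QUEUE	17

	/*
	 * Drain every buffer descriptor address from one ICSSG hardware queue
	 * into another. icssg_queue_pop() returns a negative errno (-EINVAL)
	 * when the queue is empty or out of range, which terminates the loop;
	 * icssg_queue_level() then reports how many entries are now queued.
	 */
	static void example_move_queue(struct prueth *prueth)
	{
		int addr;

		while ((addr = icssg_queue_pop(prueth, EXAMPLE_SRC_QUEUE)) >= 0)
			icssg_queue_push(prueth, EXAMPLE_DST_QUEUE, (u16)addr);

		printf("destination queue level: %u\n",
		       icssg_queue_level(prueth, EXAMPLE_DST_QUEUE));
	}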