Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig        |   8
-rw-r--r--  drivers/net/Makefile       |   1
-rw-r--r--  drivers/net/airoha_eth.c   |  82
-rw-r--r--  drivers/net/dwmac_thead.c  | 288
4 files changed, 362 insertions, 17 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 950ed0f25a9..d942fa4e202 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -411,6 +411,14 @@ config ETH_DESIGNWARE_S700
 	  This provides glue layer to use Synopsys Designware Ethernet MAC
 	  present on Actions S700 SoC.
 
+config ETH_DESIGNWARE_THEAD
+	bool "T-Head glue driver for Synopsys Designware Ethernet MAC"
+	depends on ETH_DESIGNWARE
+	select DW_ALTDESCRIPTOR
+	help
+	  This provides glue layer to use Synopsys Designware Ethernet MAC
+	  present on T-Head SoCs.
+
 config DW_ALTDESCRIPTOR
 	bool "Designware Ethernet MAC uses alternate (enhanced) descriptors"
 	depends on ETH_DESIGNWARE
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 67bba3a8536..79cc8b422b0 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_ETH_DESIGNWARE) += designware.o
 obj-$(CONFIG_ETH_DESIGNWARE_MESON8B) += dwmac_meson8b.o
 obj-$(CONFIG_ETH_DESIGNWARE_S700) += dwmac_s700.o
 obj-$(CONFIG_ETH_DESIGNWARE_SOCFPGA) += dwmac_socfpga.o
+obj-$(CONFIG_ETH_DESIGNWARE_THEAD) += dwmac_thead.o
 obj-$(CONFIG_ETH_SANDBOX) += sandbox.o
 obj-$(CONFIG_ETH_SANDBOX_RAW) += sandbox-raw-bus.o
 obj-$(CONFIG_ETH_SANDBOX_RAW) += sandbox-raw.o
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index 7e35e1fd41d..6588eb3a806 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -97,6 +97,7 @@
 				  (_n) == 2 ? GDM2_BASE : GDM1_BASE)
 
 #define REG_GDM_FWD_CFG(_n)	GDM_BASE(_n)
+#define GDM_PAD_EN		BIT(28)
 #define GDM_DROP_CRC_ERR	BIT(23)
 #define GDM_IP4_CKSUM		BIT(22)
 #define GDM_TCP_CKSUM		BIT(21)
@@ -354,13 +355,37 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
 #define airoha_switch_wr(eth, offset, val) \
 	airoha_wr((eth)->switch_regs, (offset), (val))
 
+static inline dma_addr_t dma_map_unaligned(void *vaddr, size_t len,
+					   enum dma_data_direction dir)
+{
+	uintptr_t start, end;
+
+	start = ALIGN_DOWN((uintptr_t)vaddr, ARCH_DMA_MINALIGN);
+	end = ALIGN((uintptr_t)(vaddr + len), ARCH_DMA_MINALIGN);
+
+	return dma_map_single((void *)start, end - start, dir);
+}
+
+static inline void dma_unmap_unaligned(dma_addr_t addr, size_t len,
+				       enum dma_data_direction dir)
+{
+	uintptr_t start, end;
+
+	start = ALIGN_DOWN((uintptr_t)addr, ARCH_DMA_MINALIGN);
+	end = ALIGN((uintptr_t)(addr + len), ARCH_DMA_MINALIGN);
+	dma_unmap_single(start, end - start, dir);
+}
+
 static void airoha_fe_maccr_init(struct airoha_eth *eth)
 {
 	int p;
 
 	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
-		/* Disable any kind of CRC drop or offload */
-		airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), 0);
+		/*
+		 * Disable any kind of CRC drop or offload.
+		 * Enable padding of short TX packets to 60 bytes.
+		 */
+		airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), GDM_PAD_EN);
 	}
 }
@@ -371,13 +396,14 @@ static int airoha_fe_init(struct airoha_eth *eth)
 	return 0;
 }
 
-static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
-				       uchar *rx_packet)
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index)
 {
 	struct airoha_qdma_desc *desc;
+	uchar *rx_packet;
 	u32 val;
 
 	desc = &q->desc[index];
+	rx_packet = net_rx_packets[index];
 	index = (index + 1) % q->ndesc;
 
 	dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
@@ -391,7 +417,7 @@ static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
 	val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
 	WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
 
-	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
 }
 
 static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
@@ -399,7 +425,7 @@ static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
 	int i;
 
 	for (i = 0; i < q->ndesc; i++)
-		airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+		airoha_qdma_reset_rx_desc(q, i);
 }
 
 static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
@@ -423,10 +449,14 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
 			RX_RING_SIZE_MASK,
 			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
 
+	/*
+	 * See arht_eth_free_pkt() for the reasoning behind the value
+	 * written to the REG_RX_CPU_IDX(qid) register.
+	 */
 	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
 			FIELD_PREP(RX_RING_THR_MASK, 0));
 	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
+			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 3));
 	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
 			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
@@ -804,6 +834,11 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
 	u32 val;
 	int i;
 
+	/*
+	 * There is no need to pad short TX packets to 60 bytes since the
+	 * GDM_PAD_EN bit is set in the corresponding REG_GDM_FWD_CFG(n) register.
+	 */
+
 	dma_addr = dma_map_single(packet, length, DMA_TO_DEVICE);
 
 	qid = 0;
@@ -826,14 +861,14 @@ static int airoha_eth_send(struct udevice *dev, void *packet, int length)
 	WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
 	WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
 
-	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
 
 	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
 			FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
 
 	for (i = 0; i < 100; i++) {
-		dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-				 DMA_FROM_DEVICE);
+		dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+				    DMA_FROM_DEVICE);
 		if (desc->ctrl & QDMA_DESC_DONE_MASK)
 			break;
@@ -864,8 +899,8 @@ static int airoha_eth_recv(struct udevice *dev, int flags, uchar **packetp)
 	q = &qdma->q_rx[qid];
 	desc = &q->desc[q->head];
 
-	dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-			 DMA_FROM_DEVICE);
+	dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+			    DMA_FROM_DEVICE);
 
 	if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
 		return -EAGAIN;
@@ -885,6 +920,7 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
 	struct airoha_qdma *qdma = &eth->qdma[0];
 	struct airoha_queue *q;
 	int qid;
+	u16 prev, pprev;
 
 	if (!packet)
 		return 0;
@@ -892,13 +928,24 @@
 	qid = 0;
 	q = &qdma->q_rx[qid];
 
-	dma_map_single(packet, length, DMA_TO_DEVICE);
-
-	airoha_qdma_reset_rx_desc(q, q->head, packet);
+	/*
+	 * Due to a CPU cache issue, the airoha_qdma_reset_rx_desc() function
+	 * will always touch 2 descriptors:
+	 * - if the current descriptor is even, the previous descriptor and
+	 *   the one before it will be touched (previous cacheline)
+	 * - if the current descriptor is odd, only the current and previous
+	 *   descriptors will be touched (current cacheline)
+	 *
+	 * Thus, to avoid corrupting the RX queue, only (q->ndesc - 2)
+	 * descriptors may be used for packet reception.
+	 */
+	prev = (q->head + q->ndesc - 1) % q->ndesc;
+	pprev = (q->head + q->ndesc - 2) % q->ndesc;
+	q->head = (q->head + 1) % q->ndesc;
+	airoha_qdma_reset_rx_desc(q, prev);
 	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
-	q->head = (q->head + 1) % q->ndesc;
+			FIELD_PREP(RX_RING_CPU_IDX_MASK, pprev));
 
 	return 0;
 }
@@ -926,6 +973,7 @@ static int arht_eth_write_hwaddr(struct udevice *dev)
 
 static const struct udevice_id airoha_eth_ids[] = {
 	{ .compatible = "airoha,en7581-eth" },
+	{ }
 };
 
 static const struct eth_ops airoha_eth_ops = {
diff --git a/drivers/net/dwmac_thead.c b/drivers/net/dwmac_thead.c
new file mode 100644
index 00000000000..138d71a6202
--- /dev/null
+++ b/drivers/net/dwmac_thead.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * T-HEAD DWMAC platform driver
+ *
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org>
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ *
+ */
+
+#include <asm/io.h>
+#include <clk.h>
+#include <dm.h>
+#include <linux/bitfield.h>
+#include <phy.h>
+
+#include "designware.h"
+
+#define GMAC_CLK_EN		0x00
+#define GMAC_TX_CLK_EN		BIT(1)
+#define GMAC_TX_CLK_N_EN	BIT(2)
+#define GMAC_TX_CLK_OUT_EN	BIT(3)
+#define GMAC_RX_CLK_EN		BIT(4)
+#define GMAC_RX_CLK_N_EN	BIT(5)
+#define GMAC_EPHY_REF_CLK_EN	BIT(6)
+#define GMAC_RXCLK_DELAY_CTRL	0x04
+#define GMAC_RXCLK_BYPASS	BIT(15)
+#define GMAC_RXCLK_INVERT	BIT(14)
+#define GMAC_RXCLK_DELAY	GENMASK(4, 0)
+#define GMAC_TXCLK_DELAY_CTRL	0x08
+#define GMAC_TXCLK_BYPASS	BIT(15)
+#define GMAC_TXCLK_INVERT	BIT(14)
+#define GMAC_TXCLK_DELAY	GENMASK(4, 0)
+#define GMAC_PLLCLK_DIV		0x0c
+#define GMAC_PLLCLK_DIV_EN	BIT(31)
+#define GMAC_PLLCLK_DIV_NUM	GENMASK(7, 0)
+#define GMAC_GTXCLK_SEL		0x18
+#define GMAC_GTXCLK_SEL_PLL	BIT(0)
+#define GMAC_INTF_CTRL		0x1c
+#define PHY_INTF_MASK		BIT(0)
+#define PHY_INTF_RGMII		FIELD_PREP(PHY_INTF_MASK, 1)
+#define PHY_INTF_MII_GMII	FIELD_PREP(PHY_INTF_MASK, 0)
+#define GMAC_TXCLK_OEN		0x20
+#define TXCLK_DIR_MASK		BIT(0)
+#define TXCLK_DIR_OUTPUT	FIELD_PREP(TXCLK_DIR_MASK, 0)
+#define TXCLK_DIR_INPUT		FIELD_PREP(TXCLK_DIR_MASK, 1)
+
+#define GMAC_RGMII_CLK_RATE	125000000
+
+struct dwmac_thead_plat {
+	struct dw_eth_pdata dw_eth_pdata;
+	void __iomem *apb_base;
+};
+
+static int dwmac_thead_set_phy_if(struct dwmac_thead_plat *plat)
+{
+	u32 phyif;
+
+	switch (plat->dw_eth_pdata.eth_pdata.phy_interface) {
+	case PHY_INTERFACE_MODE_MII:
+		phyif = PHY_INTF_MII_GMII;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+		phyif = PHY_INTF_RGMII;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel(phyif, plat->apb_base + GMAC_INTF_CTRL);
+	return 0;
+}
+
+static int dwmac_thead_set_txclk_dir(struct dwmac_thead_plat *plat)
+{
+	u32 txclk_dir;
+
+	switch (plat->dw_eth_pdata.eth_pdata.phy_interface) {
+	case PHY_INTERFACE_MODE_MII:
+		txclk_dir = TXCLK_DIR_INPUT;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+		txclk_dir = TXCLK_DIR_OUTPUT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel(txclk_dir, plat->apb_base + GMAC_TXCLK_OEN);
+	return 0;
+}
+
+static unsigned long dwmac_thead_rgmii_tx_rate(int speed)
+{
+	switch (speed) {
+	case 10:
+		return 2500000;
+	case 100:
+		return 25000000;
+	case 1000:
+		return 125000000;
+	}
+
+	return -EINVAL;
+}
+
+static int dwmac_thead_set_clk_tx_rate(struct dwmac_thead_plat *plat,
+				       struct dw_eth_dev *edev,
+				       unsigned long tx_rate)
+{
+	unsigned long rate;
+	u32 div, reg;
+
+	rate = clk_get_rate(&edev->clocks[0]);
+
+	writel(0, plat->apb_base + GMAC_PLLCLK_DIV);
+
+	div = rate / tx_rate;
+	if (rate != tx_rate * div) {
+		pr_err("invalid gmac rate %lu\n", rate);
+		return -EINVAL;
+	}
+
+	reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+	      FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+	writel(reg, plat->apb_base + GMAC_PLLCLK_DIV);
+
+	return 0;
+}
+
+static int dwmac_thead_enable_clk(struct dwmac_thead_plat *plat)
+{
+	u32 reg;
+
+	switch (plat->dw_eth_pdata.eth_pdata.phy_interface) {
+	case PHY_INTERFACE_MODE_MII:
+		reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN;
+		break;
+
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		/* use pll */
+		writel(GMAC_GTXCLK_SEL_PLL, plat->apb_base + GMAC_GTXCLK_SEL);
+		reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
+		      GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	writel(reg, plat->apb_base + GMAC_CLK_EN);
+	return 0;
+}
+
+static int dwmac_thead_eth_start(struct udevice *dev)
+{
+	struct dwmac_thead_plat *plat = dev_get_plat(dev);
+	struct dw_eth_dev *edev = dev_get_priv(dev);
+	phy_interface_t interface;
+	bool is_rgmii;
+	long tx_rate;
+	int ret;
+
+	interface = plat->dw_eth_pdata.eth_pdata.phy_interface;
+	is_rgmii = (interface == PHY_INTERFACE_MODE_RGMII) |
+		   (interface == PHY_INTERFACE_MODE_RGMII_ID) |
+		   (interface == PHY_INTERFACE_MODE_RGMII_RXID) |
+		   (interface == PHY_INTERFACE_MODE_RGMII_TXID);
+
+	/*
+	 * When operating in RGMII mode, the TX clock is generated by an
+	 * internal divider and fed to the MAC. Configure and enable it before
+	 * initializing the MAC.
+	 */
+	if (is_rgmii) {
+		ret = dwmac_thead_set_clk_tx_rate(plat, edev,
+						  GMAC_RGMII_CLK_RATE);
+		if (ret)
+			return ret;
+	}
+
+	ret = designware_eth_init(edev, plat->dw_eth_pdata.eth_pdata.enetaddr);
+	if (ret)
+		return ret;
+
+	if (is_rgmii) {
+		tx_rate = dwmac_thead_rgmii_tx_rate(edev->phydev->speed);
+		if (tx_rate < 0)
+			return tx_rate;
+
+		ret = dwmac_thead_set_clk_tx_rate(plat, edev, tx_rate);
+		if (ret)
+			return ret;
+	}
+
+	ret = designware_eth_enable(edev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int dwmac_thead_probe(struct udevice *dev)
+{
+	struct dwmac_thead_plat *plat = dev_get_plat(dev);
+	unsigned int reg;
+	int ret;
+
+	ret = designware_eth_probe(dev);
+	if (ret)
+		return ret;
+
+	ret = dwmac_thead_set_phy_if(plat);
+	if (ret) {
+		pr_err("failed to set phy interface: %d\n", ret);
+		return ret;
+	}
+
+	ret = dwmac_thead_set_txclk_dir(plat);
+	if (ret) {
+		pr_err("failed to set TX clock direction: %d\n", ret);
+		return ret;
+	}
+
+	reg = readl(plat->apb_base + GMAC_RXCLK_DELAY_CTRL);
+	reg &= ~(GMAC_RXCLK_DELAY);
+	reg |= FIELD_PREP(GMAC_RXCLK_DELAY, 0);
+	writel(reg, plat->apb_base + GMAC_RXCLK_DELAY_CTRL);
+
+	reg = readl(plat->apb_base + GMAC_TXCLK_DELAY_CTRL);
+	reg &= ~(GMAC_TXCLK_DELAY);
+	reg |= FIELD_PREP(GMAC_TXCLK_DELAY, 0);
+	writel(reg, plat->apb_base + GMAC_TXCLK_DELAY_CTRL);
+
+	ret = dwmac_thead_enable_clk(plat);
+	if (ret)
+		pr_err("failed to enable clock: %d\n", ret);
+
+	return ret;
+}
+
+static int dwmac_thead_of_to_plat(struct udevice *dev)
+{
+	struct dwmac_thead_plat *pdata = dev_get_plat(dev);
+
+	pdata->apb_base = dev_read_addr_index_ptr(dev, 1);
+	if (!pdata->apb_base) {
+		pr_err("failed to get apb registers\n");
+		return -ENOENT;
+	}
+
+	return designware_eth_of_to_plat(dev);
+}
+
+static const struct eth_ops dwmac_thead_eth_ops = {
+	.start = dwmac_thead_eth_start,
+	.send = designware_eth_send,
+	.recv = designware_eth_recv,
+	.free_pkt = designware_eth_free_pkt,
+	.stop = designware_eth_stop,
+	.write_hwaddr = designware_eth_write_hwaddr,
+};
+
+static const struct udevice_id dwmac_thead_match[] = {
+	{ .compatible = "thead,th1520-gmac" },
+	{ /* sentinel */ }
+};
+
+U_BOOT_DRIVER(dwmac_thead) = {
+	.name = "dwmac_thead",
+	.id = UCLASS_ETH,
+	.of_match = dwmac_thead_match,
+	.of_to_plat = dwmac_thead_of_to_plat,
+	.probe = dwmac_thead_probe,
+	.ops = &dwmac_thead_eth_ops,
+	.priv_auto = sizeof(struct dw_eth_dev),
+	.plat_auto = sizeof(struct dwmac_thead_plat),
+	.flags = DM_FLAG_ALLOC_PRIV_DMA,
+};
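
The GDM_PAD_EN change in airoha_eth.c relies on the usual Ethernet minimum-frame arithmetic, which the patch itself does not spell out: the shortest valid frame on the wire is 64 bytes including the 4-byte FCS that the MAC appends, so software only has to hand over 64 - 4 = 60 bytes of data and the hardware pads anything shorter up to that size.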
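
The dma_map_unaligned()/dma_unmap_unaligned() helpers added to airoha_eth.c exist because a QDMA descriptor is smaller than a cacheline, so cache maintenance has to cover the whole cacheline span that the descriptor overlaps. Below is a standalone sketch of that span arithmetic, not part of the patch; the 64-byte stand-in for ARCH_DMA_MINALIGN and the example descriptor address are assumptions for the demonstration only.

/* Standalone illustration of the span arithmetic used by
 * dma_map_unaligned()/dma_unmap_unaligned(); the 64-byte cacheline and the
 * example descriptor address are made-up demo values. */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE		64	/* stand-in for ARCH_DMA_MINALIGN */
#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t desc = 0x80001020;	/* hypothetical 32-byte descriptor */
	size_t len = 32;
	uintptr_t start = ALIGN_DOWN(desc, CACHELINE);
	uintptr_t end = ALIGN(desc + len, CACHELINE);

	/* The flush/invalidate span covers every cacheline the buffer
	 * overlaps, so it can be larger than the buffer itself. */
	printf("map [%#lx, %#lx) -> cache ops on [%#lx, %#lx), %zu bytes\n",
	       (unsigned long)desc, (unsigned long)(desc + len),
	       (unsigned long)start, (unsigned long)end,
	       (size_t)(end - start));
	return 0;
}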
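
The initial REG_RX_CPU_IDX value of (q->ndesc - 3) and the prev/pprev bookkeeping in arht_eth_free_pkt() both follow from the cacheline-sharing problem described in the patch comment: writing back one descriptor also writes back its neighbour in the same cacheline, so two slots next to the hardware's DMA index have to stay out of circulation. The sketch below only reproduces the index arithmetic; the ring size, starting head and the assumption of two descriptors per cacheline are demo values consistent with that comment, not stated elsewhere in the patch.

/* Illustration of the RX ring-index bookkeeping described in the patch
 * comment; NDESC and the number of iterations are arbitrary demo values. */
#include <stdio.h>

#define NDESC	16

int main(void)
{
	unsigned int head = 0;

	/* Initial CPU index: ndesc - 3, i.e. ndesc - 2 descriptors usable. */
	printf("initial REG_RX_CPU_IDX = %u\n", NDESC - 3);

	for (int i = 0; i < 4; i++) {
		unsigned int prev = (head + NDESC - 1) % NDESC;
		unsigned int pprev = (head + NDESC - 2) % NDESC;

		/* prev is the descriptor handed back to hardware, pprev is
		 * the value written to REG_RX_CPU_IDX, so the CPU index
		 * always trails the recycled descriptor by one slot. */
		printf("head=%2u  reset desc %2u, cpu_idx <- %2u\n",
		       head, prev, pprev);
		head = (head + 1) % NDESC;
	}
	return 0;
}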
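
dwmac_thead_set_clk_tx_rate() programs GMAC_PLLCLK_DIV with an integer divider from the GMAC parent clock down to the RGMII TX clock (125, 25 or 2.5 MHz depending on link speed) and rejects parent rates that do not divide evenly. The sketch below mirrors that selection; the 500 MHz parent rate is only an example value, not something stated by the patch.

/* Divider selection as done by dwmac_thead_set_clk_tx_rate(); the 500 MHz
 * parent rate is an assumed example, not taken from the patch. */
#include <stdio.h>

static long rgmii_tx_rate(int speed)
{
	switch (speed) {
	case 10:	return 2500000;
	case 100:	return 25000000;
	case 1000:	return 125000000;
	}
	return -1;
}

int main(void)
{
	unsigned long parent = 500000000;	/* assumed PLL rate */
	int speeds[] = { 10, 100, 1000 };

	for (int i = 0; i < 3; i++) {
		long tx = rgmii_tx_rate(speeds[i]);
		unsigned long div = parent / tx;

		/* Same divisibility check as the driver: the divider must be
		 * exact, otherwise the rate is rejected with -EINVAL. */
		if (parent != tx * div) {
			printf("%4d Mb/s: %lu Hz parent not divisible\n",
			       speeds[i], parent);
			continue;
		}
		printf("%4d Mb/s: tx clk %9ld Hz, GMAC_PLLCLK_DIV_NUM = %lu\n",
		       speeds[i], tx, div);
	}
	return 0;
}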