Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                |  22
-rw-r--r--  drivers/net/Makefile               |   2
-rw-r--r--  drivers/net/airoha_eth.c           | 948
-rw-r--r--  drivers/net/dwc_eth_qos.c          |  10
-rw-r--r--  drivers/net/dwc_eth_qos_rockchip.c | 292
-rw-r--r--  drivers/net/sandbox-lwip.c         |  85
-rw-r--r--  drivers/net/sandbox.c              | 250
-rw-r--r--  drivers/net/ti/am65-cpsw-nuss.c    |   9
8 files changed, 1428 insertions(+), 190 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3db784faedd..4434d364777 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -48,7 +48,6 @@ config DM_DSA
bool "Enable Driver Model for DSA switches"
depends on DM_MDIO
depends on PHY_FIXED
- depends on !NET_LWIP
help
Enable driver model for DSA switches
@@ -122,6 +121,14 @@ config AG7XXX
This driver supports the Atheros AG7xxx Ethernet MAC. This MAC is
present in the Atheros AR7xxx, AR9xxx and QCA9xxx MIPS chips.
+config AIROHA_ETH
+ bool "Airoha Ethernet QDMA Driver"
+ depends on ARCH_AIROHA
+ select PHYLIB
+ select DM_RESET
+ help
+	  This driver supports the Ethernet QDMA controller found on
+	  Airoha SoCs. Say Y to enable it.
config ALTERA_TSE
bool "Altera Triple-Speed Ethernet MAC support"
@@ -350,7 +357,7 @@ config ESSEDMA
config ETH_SANDBOX
depends on SANDBOX
- depends on NET
+ depends on NET || NET_LWIP
default y
bool "Sandbox: Mocked Ethernet driver"
help
@@ -359,17 +366,6 @@ config ETH_SANDBOX
This driver is particularly useful in the test/dm/eth.c tests
-config ETH_SANDBOX_LWIP
- depends on SANDBOX
- depends on NET_LWIP
- default y
- bool "Sandbox: Mocked Ethernet driver (for NET_LWIP)"
- help
- This driver is meant as a replacement for ETH_SANDBOX when
- the network stack is NET_LWIP rather than NET. It currently
- does nothing, i.e. it drops the sent packets and never receives
- data.
-
config ETH_SANDBOX_RAW
depends on SANDBOX
depends on NET
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d919d437c08..67bba3a8536 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_AG7XXX) += ag7xxx.o
+obj-$(CONFIG_AIROHA_ETH) += airoha_eth.o
obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
obj-$(CONFIG_ASPEED_MDIO) += aspeed_mdio.o
obj-$(CONFIG_BCM6348_ETH) += bcm6348-eth.o
@@ -40,7 +41,6 @@ obj-$(CONFIG_ETH_DESIGNWARE_SOCFPGA) += dwmac_socfpga.o
obj-$(CONFIG_ETH_SANDBOX) += sandbox.o
obj-$(CONFIG_ETH_SANDBOX_RAW) += sandbox-raw-bus.o
obj-$(CONFIG_ETH_SANDBOX_RAW) += sandbox-raw.o
-obj-$(CONFIG_ETH_SANDBOX_LWIP) += sandbox-lwip.o
obj-$(CONFIG_FEC_MXC) += fec_mxc.o
obj-$(CONFIG_FMAN_ENET) += fm/
obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
new file mode 100644
index 00000000000..7e35e1fd41d
--- /dev/null
+++ b/drivers/net/airoha_eth.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Based on the Linux airoha_eth.c driver, largely rewritten
+ * and simplified for U-Boot use with a single TX/RX ring.
+ *
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ *	   Christian Marangi <ansuelsmth@gmail.com>
+ */
+
+#include <dm.h>
+#include <dm/devres.h>
+#include <mapmem.h>
+#include <net.h>
+#include <regmap.h>
+#include <reset.h>
+#include <syscon.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/time.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_QDMA 1
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 4
+
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_TX_RING 1
+#define AIROHA_NUM_RX_RING 1
+#define AIROHA_NUM_TX_IRQ 1
+#define HW_DSCP_NUM 32
+#define IRQ_QUEUE_LEN 1
+#define TX_DSCP_NUM 16
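+/* One RX descriptor per U-Boot RX packet buffer (net_rx_packets) */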
+#define RX_DSCP_NUM PKTBUFSRX
+
+/* SCU */
+#define SCU_SHARE_FEMEM_SEL 0x958
+
+/* SWITCH */
+#define SWITCH_MFC 0x10
+#define SWITCH_BC_FFP GENMASK(31, 24)
+#define SWITCH_UNM_FFP GENMASK(23, 16)
+#define SWITCH_UNU_FFP GENMASK(15, 8)
+#define SWITCH_PMCR(_n)			(0x3000 + ((_n) * 0x100))
+#define SWITCH_IPG_CFG GENMASK(19, 18)
+#define SWITCH_IPG_CFG_NORMAL FIELD_PREP(SWITCH_IPG_CFG, 0x0)
+#define SWITCH_IPG_CFG_SHORT FIELD_PREP(SWITCH_IPG_CFG, 0x1)
+#define SWITCH_IPG_CFG_SHRINK FIELD_PREP(SWITCH_IPG_CFG, 0x2)
+#define SWITCH_MAC_MODE BIT(16)
+#define SWITCH_FORCE_MODE BIT(15)
+#define SWITCH_MAC_TX_EN BIT(14)
+#define SWITCH_MAC_RX_EN BIT(13)
+#define SWITCH_BKOFF_EN BIT(9)
+#define SWITCH_BKPR_EN BIT(8)
+#define SWITCH_FORCE_RX_FC BIT(5)
+#define SWITCH_FORCE_TX_FC BIT(4)
+#define SWITCH_FORCE_SPD GENMASK(3, 2)
+#define SWITCH_FORCE_SPD_10 FIELD_PREP(SWITCH_FORCE_SPD, 0x0)
+#define SWITCH_FORCE_SPD_100 FIELD_PREP(SWITCH_FORCE_SPD, 0x1)
+#define SWITCH_FORCE_SPD_1000 FIELD_PREP(SWITCH_FORCE_SPD, 0x2)
+#define SWITCH_FORCE_DPX BIT(1)
+#define SWITCH_FORCE_LNK BIT(0)
+#define SWITCH_SMACCR0 0x30e4
+#define SMACCR0_MAC2 GENMASK(31, 24)
+#define SMACCR0_MAC3 GENMASK(23, 16)
+#define SMACCR0_MAC4 GENMASK(15, 8)
+#define SMACCR0_MAC5 GENMASK(7, 0)
+#define SWITCH_SMACCR1 0x30e8
+#define SMACCR1_MAC0 GENMASK(15, 8)
+#define SMACCR1_MAC1 GENMASK(7, 0)
+#define SWITCH_PHY_POLL 0x7018
+#define SWITCH_PHY_AP_EN GENMASK(30, 24)
+#define SWITCH_EEE_POLL_EN GENMASK(22, 16)
+#define SWITCH_PHY_PRE_EN BIT(15)
+#define SWITCH_PHY_END_ADDR GENMASK(12, 8)
+#define SWITCH_PHY_ST_ADDR GENMASK(4, 0)
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+struct airoha_queue {
+ struct airoha_qdma_desc *desc;
+ u16 head;
+
+ int ndesc;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ int id;
+};
+
+struct airoha_eth {
+ void __iomem *fe_regs;
+ void __iomem *switch_regs;
+
+ struct reset_ctl_bulk rsts;
+ struct reset_ctl_bulk xsi_rsts;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+static u32 airoha_rr(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= (airoha_rr(base, offset) & ~mask);
+ airoha_wr(base, offset, val);
+
+ return val;
+}
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+#define airoha_switch_wr(eth, offset, val) \
+ airoha_wr((eth)->switch_regs, (offset), (val))
+
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+ int p;
+
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ /* Disable any kind of CRC drop or offload */
+ airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), 0);
+ }
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
+ airoha_fe_maccr_init(eth);
+
+ return 0;
+}
+
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
+ uchar *rx_packet)
+{
+ struct airoha_qdma_desc *desc;
+ u32 val;
+
+ desc = &q->desc[index];
+ index = (index + 1) % q->ndesc;
+
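+	/* Flush the packet buffer so no dirty cache line is written back over DMA data */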
+ dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
+
+ WRITE_ONCE(desc->msg0, cpu_to_le32(0));
+ WRITE_ONCE(desc->msg1, cpu_to_le32(0));
+ WRITE_ONCE(desc->msg2, cpu_to_le32(0));
+ WRITE_ONCE(desc->msg3, cpu_to_le32(0));
+ WRITE_ONCE(desc->addr, cpu_to_le32(virt_to_phys(rx_packet)));
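+	/* The data field links to the index of the next descriptor in the ring */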
+ WRITE_ONCE(desc->data, cpu_to_le32(index));
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+
+ dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+}
+
+static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
+{
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+}
+
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int ndesc)
+{
+ int qid = q - &qdma->q_rx[0];
+ unsigned long dma_addr;
+
+ q->ndesc = ndesc;
+ q->head = 0;
+
+ q->desc = dma_alloc_coherent(q->ndesc * sizeof(*q->desc), &dma_addr);
+ if (!q->desc)
+ return -ENOMEM;
+
+ memset(q->desc, 0, q->ndesc * sizeof(*q->desc));
+ dma_map_single(q->desc, q->ndesc * sizeof(*q->desc), DMA_TO_DEVICE);
+
+ airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+ RX_RING_SIZE_MASK,
+ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
+ airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ FIELD_PREP(RX_RING_THR_MASK, 0));
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+
+ return 0;
+}
+
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ int err;
+
+ err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
+ RX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
+ struct airoha_qdma *qdma, int size)
+{
+ int qid = q - &qdma->q_tx[0];
+ unsigned long dma_addr;
+
+ q->ndesc = size;
+ q->head = 0;
+
+ q->desc = dma_alloc_coherent(q->ndesc * sizeof(*q->desc), &dma_addr);
+ if (!q->desc)
+ return -ENOMEM;
+
+ memset(q->desc, 0, q->ndesc * sizeof(*q->desc));
+ dma_map_single(q->desc, q->ndesc * sizeof(*q->desc), DMA_TO_DEVICE);
+
+ airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+
+ return 0;
+}
+
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
+ struct airoha_qdma *qdma, int size)
+{
+ int id = irq_q - &qdma->q_tx_irq[0];
+ unsigned long dma_addr;
+
+ irq_q->q = dma_alloc_coherent(size * sizeof(u32), &dma_addr);
+ if (!irq_q->q)
+ return -ENOMEM;
+
+	memset(irq_q->q, 0xff, size * sizeof(u32));
+ irq_q->size = size;
+ irq_q->qdma = qdma;
+
+ dma_map_single(irq_q->q, size * sizeof(u32), DMA_TO_DEVICE);
+
+ airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
+
+ return 0;
+}
+
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
+ IRQ_QUEUE_LEN);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
+ TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
+{
+ unsigned long dma_addr;
+ u32 status;
+ int size;
+
+ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+ qdma->hfwd.desc = dma_alloc_coherent(size, &dma_addr);
+ if (!qdma->hfwd.desc)
+ return -ENOMEM;
+
+ memset(qdma->hfwd.desc, 0, size);
+ dma_map_single(qdma->hfwd.desc, size, DMA_TO_DEVICE);
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+ qdma->hfwd.q = dma_alloc_coherent(size, &dma_addr);
+ if (!qdma->hfwd.q)
+ return -ENOMEM;
+
+ memset(qdma->hfwd.q, 0, size);
+ dma_map_single(qdma->hfwd.q, size, DMA_TO_DEVICE);
+
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK |
+ HW_FWD_DSCP_MIN_SCATTER_LEN_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0) |
+ FIELD_PREP(HW_FWD_DSCP_MIN_SCATTER_LEN_MASK, 1));
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+ LMGR_INIT_START);
+
+ udelay(1000);
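+	/* Wait for the HW to clear LMGR_INIT_START, i.e. forward descriptor init done */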
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+ 30 * USEC_PER_MSEC, qdma,
+ REG_LMGR_INIT_CFG);
+}
+
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
+{
+ int i;
+
+ /* clear pending irqs */
+ for (i = 0; i < 2; i++)
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
+ GLOBAL_CFG_IRQ0_EN_MASK |
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 3));
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+ if (!qdma->q_rx[i].ndesc)
+ continue;
+
+ airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
+ RX_DELAY_INT_MASK);
+ }
+
+ return 0;
+}
+
+static int airoha_qdma_init(struct udevice *dev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
+{
+ int err;
+
+ qdma->eth = eth;
+ qdma->regs = dev_remap_addr_name(dev, "qdma0");
+ if (IS_ERR(qdma->regs))
+ return PTR_ERR(qdma->regs);
+
+ err = airoha_qdma_init_rx(qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_tx(qdma);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_hfwd_queues(qdma);
+ if (err)
+ return err;
+
+ return airoha_qdma_hw_init(qdma);
+}
+
+static int airoha_hw_init(struct udevice *dev,
+ struct airoha_eth *eth)
+{
+ int ret, i;
+
+ /* disable xsi */
+ ret = reset_assert_bulk(&eth->xsi_rsts);
+ if (ret)
+ return ret;
+
+ ret = reset_assert_bulk(&eth->rsts);
+ if (ret)
+ return ret;
+
+ mdelay(20);
+
+ ret = reset_deassert_bulk(&eth->rsts);
+ if (ret)
+ return ret;
+
+ mdelay(20);
+
+ ret = airoha_fe_init(eth);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+ ret = airoha_qdma_init(dev, eth, &eth->qdma[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int airoha_switch_init(struct udevice *dev, struct airoha_eth *eth)
+{
+ ofnode switch_node;
+ fdt_addr_t addr;
+
+ switch_node = ofnode_by_compatible(ofnode_null(), "airoha,en7581-switch");
+ if (!ofnode_valid(switch_node))
+ return -EINVAL;
+
+ addr = ofnode_get_addr(switch_node);
+ if (addr == FDT_ADDR_T_NONE)
+ return -ENOMEM;
+
+	/* The switch has no DM device; get its address and set up flooding and the CPU port by hand */
+ eth->switch_regs = map_sysmem(addr, 0);
+
+	/* Flood broadcast/unknown frames to all ports (there is no CPU port register to set) */
+ airoha_switch_wr(eth, SWITCH_MFC, SWITCH_BC_FFP | SWITCH_UNM_FFP |
+ SWITCH_UNU_FFP);
+
+	/* Configure the PMCR of port 6, the CPU port */
+ airoha_switch_wr(eth, SWITCH_PMCR(6),
+ SWITCH_IPG_CFG_SHORT | SWITCH_MAC_MODE |
+ SWITCH_FORCE_MODE | SWITCH_MAC_TX_EN |
+ SWITCH_MAC_RX_EN | SWITCH_BKOFF_EN | SWITCH_BKPR_EN |
+ SWITCH_FORCE_RX_FC | SWITCH_FORCE_TX_FC |
+ SWITCH_FORCE_SPD_1000 | SWITCH_FORCE_DPX |
+ SWITCH_FORCE_LNK);
+
+	/* Sideband signals are broken for Port 3, hence PHY auto polling is needed */
+ airoha_switch_wr(eth, SWITCH_PHY_POLL,
+ FIELD_PREP(SWITCH_PHY_AP_EN, 0x7f) |
+ FIELD_PREP(SWITCH_EEE_POLL_EN, 0x7f) |
+ SWITCH_PHY_PRE_EN |
+ FIELD_PREP(SWITCH_PHY_END_ADDR, 0xc) |
+ FIELD_PREP(SWITCH_PHY_ST_ADDR, 0x8));
+
+ return 0;
+}
+
+static int airoha_eth_probe(struct udevice *dev)
+{
+ struct airoha_eth *eth = dev_get_priv(dev);
+ struct regmap *scu_regmap;
+ ofnode scu_node;
+ int ret;
+
+ scu_node = ofnode_by_compatible(ofnode_null(), "airoha,en7581-scu");
+ if (!ofnode_valid(scu_node))
+ return -EINVAL;
+
+ scu_regmap = syscon_node_to_regmap(scu_node);
+ if (IS_ERR(scu_regmap))
+ return PTR_ERR(scu_regmap);
+
+	/* It seems that by default FEMEM_SEL is set to Memory (0x1),
+	 * preventing any access to the QDMA and Frame Engine registers,
+	 * which then all read back as 0xdeadbeef
+ */
+ regmap_write(scu_regmap, SCU_SHARE_FEMEM_SEL, 0x0);
+
+ eth->fe_regs = dev_remap_addr_name(dev, "fe");
+ if (!eth->fe_regs)
+ return -ENOMEM;
+
+ eth->rsts.resets = devm_kcalloc(dev, AIROHA_MAX_NUM_RSTS,
+ sizeof(struct reset_ctl), GFP_KERNEL);
+ if (!eth->rsts.resets)
+ return -ENOMEM;
+ eth->rsts.count = AIROHA_MAX_NUM_RSTS;
+
+ eth->xsi_rsts.resets = devm_kcalloc(dev, AIROHA_MAX_NUM_XSI_RSTS,
+ sizeof(struct reset_ctl), GFP_KERNEL);
+ if (!eth->xsi_rsts.resets)
+ return -ENOMEM;
+ eth->xsi_rsts.count = AIROHA_MAX_NUM_XSI_RSTS;
+
+ ret = reset_get_by_name(dev, "fe", &eth->rsts.resets[0]);
+ if (ret)
+ return ret;
+
+ ret = reset_get_by_name(dev, "pdma", &eth->rsts.resets[1]);
+ if (ret)
+ return ret;
+
+ ret = reset_get_by_name(dev, "qdma", &eth->rsts.resets[2]);
+ if (ret)
+ return ret;
+
+ ret = reset_get_by_name(dev, "hsi0-mac", &eth->xsi_rsts.resets[0]);
+ if (ret)
+ return ret;
+
+ ret = reset_get_by_name(dev, "hsi1-mac", &eth->xsi_rsts.resets[1]);
+ if (ret)
+ return ret;
+
+ ret = reset_get_by_name(dev, "hsi-mac", &eth->xsi_rsts.resets[2]);
+ if (ret)
+ return ret;
+
+ ret = reset_get_by_name(dev, "xfp-mac", &eth->xsi_rsts.resets[3]);
+ if (ret)
+ return ret;
+
+ ret = airoha_hw_init(dev, eth);
+ if (ret)
+ return ret;
+
+ return airoha_switch_init(dev, eth);
+}
+
+static int airoha_eth_init(struct udevice *dev)
+{
+ struct airoha_eth *eth = dev_get_priv(dev);
+ struct airoha_qdma *qdma = &eth->qdma[0];
+ struct airoha_queue *q;
+ int qid;
+
+ qid = 0;
+ q = &qdma->q_rx[qid];
+
+ airoha_qdma_init_rx_desc(q);
+
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+}
+
+static void airoha_eth_stop(struct udevice *dev)
+{
+ struct airoha_eth *eth = dev_get_priv(dev);
+ struct airoha_qdma *qdma = &eth->qdma[0];
+
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
+}
+
+static int airoha_eth_send(struct udevice *dev, void *packet, int length)
+{
+ struct airoha_eth *eth = dev_get_priv(dev);
+ struct airoha_qdma *qdma = &eth->qdma[0];
+ struct airoha_qdma_desc *desc;
+ struct airoha_queue *q;
+ dma_addr_t dma_addr;
+ u32 msg0, msg1;
+ int qid, index;
+ u8 fport;
+ u32 val;
+ int i;
+
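+	/* Flush the frame to memory so the DMA engine reads up-to-date data */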
+ dma_addr = dma_map_single(packet, length, DMA_TO_DEVICE);
+
+ qid = 0;
+ q = &qdma->q_tx[qid];
+ desc = &q->desc[q->head];
+ index = (q->head + 1) % q->ndesc;
+
+ fport = 1;
+
+ msg0 = 0;
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, length);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(dma_addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
+ WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
+ WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
+
+ dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
+
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+
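+	/* Poll the DONE bit for up to 100us, invalidating the descriptor cache line each pass */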
+ for (i = 0; i < 100; i++) {
+ dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
+ DMA_FROM_DEVICE);
+ if (desc->ctrl & QDMA_DESC_DONE_MASK)
+ break;
+
+ udelay(1);
+ }
+
+	/* Return an error if the descriptor was never ACKed for some reason */
+ if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
+ return -EAGAIN;
+
+ q->head = index;
+ airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(0),
+ IRQ_CLEAR_LEN_MASK, 1);
+
+ return 0;
+}
+
+static int airoha_eth_recv(struct udevice *dev, int flags, uchar **packetp)
+{
+ struct airoha_eth *eth = dev_get_priv(dev);
+ struct airoha_qdma *qdma = &eth->qdma[0];
+ struct airoha_qdma_desc *desc;
+ struct airoha_queue *q;
+ u16 length;
+ int qid;
+
+ qid = 0;
+ q = &qdma->q_rx[qid];
+ desc = &q->desc[q->head];
+
+ dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
+ DMA_FROM_DEVICE);
+
+ if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
+ return -EAGAIN;
+
+ length = FIELD_GET(QDMA_DESC_LEN_MASK, desc->ctrl);
+ dma_unmap_single(desc->addr, length,
+ DMA_FROM_DEVICE);
+
+ *packetp = phys_to_virt(desc->addr);
+
+ return length;
+}
+
+static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
+{
+ struct airoha_eth *eth = dev_get_priv(dev);
+ struct airoha_qdma *qdma = &eth->qdma[0];
+ struct airoha_queue *q;
+ int qid;
+
+ if (!packet)
+ return 0;
+
+ qid = 0;
+ q = &qdma->q_rx[qid];
+
+ dma_map_single(packet, length, DMA_TO_DEVICE);
+
+ airoha_qdma_reset_rx_desc(q, q->head, packet);
+
+ airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ q->head = (q->head + 1) % q->ndesc;
+
+ return 0;
+}
+
+static int arht_eth_write_hwaddr(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct airoha_eth *eth = dev_get_priv(dev);
+ unsigned char *mac = pdata->enetaddr;
+ u32 macaddr_lsb, macaddr_msb;
+
+ macaddr_lsb = FIELD_PREP(SMACCR0_MAC2, mac[2]) |
+ FIELD_PREP(SMACCR0_MAC3, mac[3]) |
+ FIELD_PREP(SMACCR0_MAC4, mac[4]) |
+ FIELD_PREP(SMACCR0_MAC5, mac[5]);
+ macaddr_msb = FIELD_PREP(SMACCR1_MAC1, mac[1]) |
+ FIELD_PREP(SMACCR1_MAC0, mac[0]);
+
+ /* Set MAC for Switch */
+ airoha_switch_wr(eth, SWITCH_SMACCR0, macaddr_lsb);
+ airoha_switch_wr(eth, SWITCH_SMACCR1, macaddr_msb);
+
+ return 0;
+}
+
+static const struct udevice_id airoha_eth_ids[] = {
+	{ .compatible = "airoha,en7581-eth" },
+	{ /* sentinel */ }
+};
+
+static const struct eth_ops airoha_eth_ops = {
+ .start = airoha_eth_init,
+ .stop = airoha_eth_stop,
+ .send = airoha_eth_send,
+ .recv = airoha_eth_recv,
+ .free_pkt = arht_eth_free_pkt,
+ .write_hwaddr = arht_eth_write_hwaddr,
+};
+
+U_BOOT_DRIVER(airoha_eth) = {
+ .name = "airoha-eth",
+ .id = UCLASS_ETH,
+ .of_match = airoha_eth_ids,
+ .probe = airoha_eth_probe,
+ .ops = &airoha_eth_ops,
+ .priv_auto = sizeof(struct airoha_eth),
+ .plat_auto = sizeof(struct eth_pdata),
+};
diff --git a/drivers/net/dwc_eth_qos.c b/drivers/net/dwc_eth_qos.c
index b4ec3614696..0cfe09333f7 100644
--- a/drivers/net/dwc_eth_qos.c
+++ b/drivers/net/dwc_eth_qos.c
@@ -1173,7 +1173,7 @@ static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
eqos->config->ops->eqos_inval_buffer(packet, length);
- if ((eqos->rx_desc_idx & idx_mask) == idx_mask) {
+ if (eqos->started && (eqos->rx_desc_idx & idx_mask) == idx_mask) {
for (idx = eqos->rx_desc_idx - idx_mask;
idx <= eqos->rx_desc_idx;
idx++) {
@@ -1612,10 +1612,18 @@ static const struct udevice_id eqos_ids[] = {
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_ROCKCHIP)
{
+ .compatible = "rockchip,rk3528-gmac",
+ .data = (ulong)&eqos_rockchip_config
+ },
+ {
.compatible = "rockchip,rk3568-gmac",
.data = (ulong)&eqos_rockchip_config
},
{
+ .compatible = "rockchip,rk3576-gmac",
+ .data = (ulong)&eqos_rockchip_config
+ },
+ {
.compatible = "rockchip,rk3588-gmac",
.data = (ulong)&eqos_rockchip_config
},
diff --git a/drivers/net/dwc_eth_qos_rockchip.c b/drivers/net/dwc_eth_qos_rockchip.c
index f3a0f63003e..d646d3ebac8 100644
--- a/drivers/net/dwc_eth_qos_rockchip.c
+++ b/drivers/net/dwc_eth_qos_rockchip.c
@@ -50,6 +50,132 @@ struct rockchip_platform_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
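+/*
+ * The GRF fields above use Rockchip's high-word mask scheme: GRF_BIT(n)
+ * sets bit n together with its write-enable bit (n + 16), GRF_CLR_BIT(n)
+ * clears bit n, so each regmap_write() only updates the named bits.
+ */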
+static int rk3528_set_to_rgmii(struct udevice *dev,
+ int tx_delay, int rx_delay)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+
+ regmap_write(data->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(data->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(data->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+
+ return 0;
+}
+
+static int rk3528_set_to_rmii(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+
+ if (data->id == 1)
+ regmap_write(data->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
+ else
+ regmap_write(data->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
+
+ return 0;
+}
+
+static int rk3528_set_gmac_speed(struct udevice *dev)
+{
+ struct eqos_priv *eqos = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+ u32 val, reg;
+
+ switch (eqos->phy->speed) {
+ case SPEED_10:
+ if (pdata->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val = data->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
+ RK3528_GMAC0_CLK_RMII_DIV20;
+ else
+ val = RK3528_GMAC1_CLK_RGMII_DIV50;
+ break;
+ case SPEED_100:
+ if (pdata->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val = data->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
+ RK3528_GMAC0_CLK_RMII_DIV2;
+ else
+ val = RK3528_GMAC1_CLK_RGMII_DIV5;
+ break;
+ case SPEED_1000:
+ if (pdata->phy_interface != PHY_INTERFACE_MODE_RMII)
+ val = RK3528_GMAC1_CLK_RGMII_DIV1;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = data->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
+ RK3528_VO_GRF_GMAC_CON;
+ regmap_write(data->grf, reg, val);
+
+ return 0;
+}
+
+static void rk3528_set_clock_selection(struct udevice *dev, bool enable)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+ u32 val;
+
+ if (data->id == 1) {
+ val = data->clock_input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(data->grf, RK3528_VPU_GRF_GMAC_CON5, val);
+ } else {
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(data->grf, RK3528_VO_GRF_GMAC_CON, val);
+ }
+}
+
#define RK3568_GRF_GMAC0_CON0 0x0380
#define RK3568_GRF_GMAC0_CON1 0x0384
#define RK3568_GRF_GMAC1_CON0 0x0388
@@ -134,6 +260,145 @@ static int rk3568_set_gmac_speed(struct udevice *dev)
return 0;
}
+/* VCCIO0_1_3_IOC */
+#define RK3576_VCCIO0_1_3_IOC_CON2 0x6408
+#define RK3576_VCCIO0_1_3_IOC_CON3 0x640c
+#define RK3576_VCCIO0_1_3_IOC_CON4 0x6410
+#define RK3576_VCCIO0_1_3_IOC_CON5 0x6414
+
+#define RK3576_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3576_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* SDGMAC_GRF */
+#define RK3576_GRF_GMAC_CON0 0x0020
+#define RK3576_GRF_GMAC_CON1 0x0024
+
+#define RK3576_GMAC_RMII_MODE GRF_BIT(3)
+#define RK3576_GMAC_RGMII_MODE GRF_CLR_BIT(3)
+
+#define RK3576_GMAC_CLK_SELECT_IO GRF_BIT(7)
+#define RK3576_GMAC_CLK_SELECT_CRU GRF_CLR_BIT(7)
+
+#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
+#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
+
+#define RK3576_GMAC_CLK_RGMII_DIV1 \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV5 \
+ (GRF_BIT(6) | GRF_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV50 \
+ (GRF_BIT(6) | GRF_CLR_BIT(5))
+
+#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
+#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
+
+static int rk3576_set_to_rgmii(struct udevice *dev,
+ int tx_delay, int rx_delay)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+ u32 offset_con;
+
+ offset_con = data->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(data->grf, offset_con, RK3576_GMAC_RGMII_MODE);
+
+ offset_con = data->id == 1 ? RK3576_VCCIO0_1_3_IOC_CON4 :
+ RK3576_VCCIO0_1_3_IOC_CON2;
+
+ /* m0 && m1 delay enabled */
+ regmap_write(data->php_grf, offset_con,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+ regmap_write(data->php_grf, offset_con + 0x4,
+ DELAY_ENABLE(RK3576, tx_delay, rx_delay));
+
+ /* m0 && m1 delay value */
+ regmap_write(data->php_grf, offset_con,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+ regmap_write(data->php_grf, offset_con + 0x4,
+ RK3576_GMAC_CLK_TX_DL_CFG(tx_delay) |
+ RK3576_GMAC_CLK_RX_DL_CFG(rx_delay));
+
+ return 0;
+}
+
+static int rk3576_set_to_rmii(struct udevice *dev)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+ u32 offset_con;
+
+ offset_con = data->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(data->grf, offset_con, RK3576_GMAC_RMII_MODE);
+
+ return 0;
+}
+
+static int rk3576_set_gmac_speed(struct udevice *dev)
+{
+ struct eqos_priv *eqos = dev_get_priv(dev);
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+ u32 val = 0, offset_con;
+
+ switch (eqos->phy->speed) {
+ case SPEED_10:
+ if (pdata->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV20;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV50;
+ break;
+ case SPEED_100:
+ if (pdata->phy_interface == PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RMII_DIV2;
+ else
+ val = RK3576_GMAC_CLK_RGMII_DIV5;
+ break;
+ case SPEED_1000:
+ if (pdata->phy_interface != PHY_INTERFACE_MODE_RMII)
+ val = RK3576_GMAC_CLK_RGMII_DIV1;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ offset_con = data->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(data->grf, offset_con, val);
+
+ return 0;
+}
+
+static void rk3576_set_clock_selection(struct udevice *dev, bool enable)
+{
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct rockchip_platform_data *data = pdata->priv_pdata;
+
+ u32 val = data->clock_input ? RK3576_GMAC_CLK_SELECT_IO :
+ RK3576_GMAC_CLK_SELECT_CRU;
+ u32 offset_con;
+
+ val |= enable ? RK3576_GMAC_CLK_RMII_NOGATE :
+ RK3576_GMAC_CLK_RMII_GATE;
+
+ offset_con = data->id == 1 ? RK3576_GRF_GMAC_CON1 :
+ RK3576_GRF_GMAC_CON0;
+
+ regmap_write(data->grf, offset_con, val);
+}
+
#define RK3588_DELAY_ENABLE(id, tx, rx) \
(((tx) ? RK3588_GMAC_TXCLK_DLY_ENABLE(id) : RK3588_GMAC_TXCLK_DLY_DISABLE(id)) | \
((rx) ? RK3588_GMAC_RXCLK_DLY_ENABLE(id) : RK3588_GMAC_RXCLK_DLY_DISABLE(id)))
@@ -270,6 +535,18 @@ static void rk3588_set_clock_selection(struct udevice *dev, bool enable)
static const struct rk_gmac_ops rk_gmac_ops[] = {
{
+ .compatible = "rockchip,rk3528-gmac",
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_gmac_speed = rk3528_set_gmac_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+ },
+ {
.compatible = "rockchip,rk3568-gmac",
.set_to_rgmii = rk3568_set_to_rgmii,
.set_to_rmii = rk3568_set_to_rmii,
@@ -281,6 +558,18 @@ static const struct rk_gmac_ops rk_gmac_ops[] = {
},
},
{
+ .compatible = "rockchip,rk3576-gmac",
+ .set_to_rgmii = rk3576_set_to_rgmii,
+ .set_to_rmii = rk3576_set_to_rmii,
+ .set_gmac_speed = rk3576_set_gmac_speed,
+ .set_clock_selection = rk3576_set_clock_selection,
+ .regs = {
+ 0x2a220000, /* gmac0 */
+ 0x2a230000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+ },
+ {
.compatible = "rockchip,rk3588-gmac",
.set_to_rgmii = rk3588_set_to_rgmii,
.set_to_rmii = rk3588_set_to_rmii,
@@ -357,7 +646,8 @@ static int eqos_probe_resources_rk(struct udevice *dev)
goto err_free;
}
- if (device_is_compatible(dev, "rockchip,rk3588-gmac")) {
+ if (device_is_compatible(dev, "rockchip,rk3588-gmac") ||
+ device_is_compatible(dev, "rockchip,rk3576-gmac")) {
data->php_grf =
syscon_regmap_lookup_by_phandle(dev, "rockchip,php-grf");
if (IS_ERR(data->php_grf)) {
diff --git a/drivers/net/sandbox-lwip.c b/drivers/net/sandbox-lwip.c
deleted file mode 100644
index 3721033c310..00000000000
--- a/drivers/net/sandbox-lwip.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2015 National Instruments
- *
- * (C) Copyright 2015
- * Joe Hershberger <joe.hershberger@ni.com>
- */
-
-#include <dm.h>
-#include <log.h>
-#include <malloc.h>
-#include <net.h>
-#include <asm/eth.h>
-#include <asm/global_data.h>
-#include <asm/test.h>
-
-DECLARE_GLOBAL_DATA_PTR;
-
-static int sb_lwip_eth_start(struct udevice *dev)
-{
- debug("eth_sandbox_lwip: Start\n");
-
- return 0;
-}
-
-static int sb_lwip_eth_send(struct udevice *dev, void *packet, int length)
-{
- debug("eth_sandbox_lwip: Send packet %d\n", length);
-
- return -ENOTSUPP;
-}
-
-static int sb_lwip_eth_recv(struct udevice *dev, int flags, uchar **packetp)
-{
- return -EAGAIN;
-}
-
-static int sb_lwip_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
-{
- return 0;
-}
-
-static void sb_lwip_eth_stop(struct udevice *dev)
-{
-}
-
-static int sb_lwip_eth_write_hwaddr(struct udevice *dev)
-{
- return 0;
-}
-
-static const struct eth_ops sb_eth_ops = {
- .start = sb_lwip_eth_start,
- .send = sb_lwip_eth_send,
- .recv = sb_lwip_eth_recv,
- .free_pkt = sb_lwip_eth_free_pkt,
- .stop = sb_lwip_eth_stop,
- .write_hwaddr = sb_lwip_eth_write_hwaddr,
-};
-
-static int sb_lwip_eth_remove(struct udevice *dev)
-{
- return 0;
-}
-
-static int sb_lwip_eth_of_to_plat(struct udevice *dev)
-{
- return 0;
-}
-
-static const struct udevice_id sb_eth_ids[] = {
- { .compatible = "sandbox,eth" },
- { }
-};
-
-U_BOOT_DRIVER(eth_sandbox) = {
- .name = "eth_lwip_sandbox",
- .id = UCLASS_ETH,
- .of_match = sb_eth_ids,
- .of_to_plat = sb_lwip_eth_of_to_plat,
- .remove = sb_lwip_eth_remove,
- .ops = &sb_eth_ops,
- .priv_auto = 0,
- .plat_auto = sizeof(struct eth_pdata),
-};
diff --git a/drivers/net/sandbox.c b/drivers/net/sandbox.c
index fe3627db6e3..2011fd31f41 100644
--- a/drivers/net/sandbox.c
+++ b/drivers/net/sandbox.c
@@ -9,13 +9,84 @@
#include <dm.h>
#include <log.h>
#include <malloc.h>
-#include <net.h>
#include <asm/eth.h>
#include <asm/global_data.h>
#include <asm/test.h>
+#include <asm/types.h>
+
+/*
+ * Structure definitions for network protocols. Since this file is used for
+ * both NET and NET_LWIP, and since the two network stacks have conflicting
+ * types (for instance struct icmp_hdr), the structures are deliberately
+ * defined locally with minimal dependencies -- <asm/types.h> is included
+ * for the bit types and that's it.
+ */
+
+#define ETHADDR_LEN 6
+#define IP4_LEN 4
+
+struct ethhdr {
+ u8 dst[ETHADDR_LEN];
+ u8 src[ETHADDR_LEN];
+ u16 protlen;
+} __attribute__((packed));
+
+#define ETHHDR_SIZE (sizeof(struct ethhdr))
+
+struct arphdr {
+ u16 htype;
+ u16 ptype;
+ u8 hlen;
+ u8 plen;
+ u16 op;
+} __attribute__((packed));
+
+#define ARPHDR_SIZE (sizeof(struct arphdr))
+
+#define ARP_REQUEST 1
+#define ARP_REPLY 2
+
+struct arpdata {
+ u8 sha[ETHADDR_LEN];
+ u32 spa;
+ u8 tha[ETHADDR_LEN];
+ u32 tpa;
+} __attribute__((packed));
+
+#define ARPDATA_SIZE (sizeof(struct arpdata))
+
+struct iphdr {
+ u8 hl_v;
+ u8 tos;
+ u16 len;
+ u16 id;
+ u16 off;
+ u8 ttl;
+ u8 prot;
+ u16 sum;
+ u32 src;
+ u32 dst;
+} __attribute__((packed));
+
+#define IPHDR_SIZE (sizeof(struct iphdr))
+
+struct icmphdr {
+ u8 type;
+ u8 code;
+ u16 checksum;
+ u16 id;
+ u16 sequence;
+} __attribute__((packed));
+
+#define ICMPHDR_SIZE (sizeof(struct icmphdr))
+
+#define ICMP_ECHO_REQUEST 8
+#define ICMP_ECHO_REPLY 0
+#define IPPROTO_ICMP 1
DECLARE_GLOBAL_DATA_PTR;
+static const u8 null_ethaddr[ETHADDR_LEN];
static bool skip_timeout;
/*
@@ -59,17 +130,19 @@ int sandbox_eth_arp_req_to_reply(struct udevice *dev, void *packet,
unsigned int len)
{
struct eth_sandbox_priv *priv = dev_get_priv(dev);
- struct ethernet_hdr *eth = packet;
- struct arp_hdr *arp;
- struct ethernet_hdr *eth_recv;
- struct arp_hdr *arp_recv;
-
- if (ntohs(eth->et_protlen) != PROT_ARP)
+ struct ethhdr *eth = packet;
+ struct arphdr *arp;
+ struct arpdata *arpd;
+ struct ethhdr *eth_recv;
+ struct arphdr *arp_recv;
+ struct arpdata *arp_recvd;
+
+ if (ntohs(eth->protlen) != PROT_ARP)
return -EAGAIN;
- arp = packet + ETHER_HDR_SIZE;
+ arp = packet + ETHHDR_SIZE;
- if (ntohs(arp->ar_op) != ARPOP_REQUEST)
+ if (ntohs(arp->op) != ARP_REQUEST)
return -EAGAIN;
/* Don't allow the buffer to overrun */
@@ -77,27 +150,29 @@ int sandbox_eth_arp_req_to_reply(struct udevice *dev, void *packet,
return 0;
/* store this as the assumed IP of the fake host */
- priv->fake_host_ipaddr = net_read_ip(&arp->ar_tpa);
+ arpd = (struct arpdata *)(arp + 1);
+ priv->fake_host_ipaddr.s_addr = arpd->tpa;
/* Formulate a fake response */
eth_recv = (void *)priv->recv_packet_buffer[priv->recv_packets];
- memcpy(eth_recv->et_dest, eth->et_src, ARP_HLEN);
- memcpy(eth_recv->et_src, priv->fake_host_hwaddr, ARP_HLEN);
- eth_recv->et_protlen = htons(PROT_ARP);
-
- arp_recv = (void *)eth_recv + ETHER_HDR_SIZE;
- arp_recv->ar_hrd = htons(ARP_ETHER);
- arp_recv->ar_pro = htons(PROT_IP);
- arp_recv->ar_hln = ARP_HLEN;
- arp_recv->ar_pln = ARP_PLEN;
- arp_recv->ar_op = htons(ARPOP_REPLY);
- memcpy(&arp_recv->ar_sha, priv->fake_host_hwaddr, ARP_HLEN);
- net_write_ip(&arp_recv->ar_spa, priv->fake_host_ipaddr);
- memcpy(&arp_recv->ar_tha, &arp->ar_sha, ARP_HLEN);
- net_copy_ip(&arp_recv->ar_tpa, &arp->ar_spa);
-
- priv->recv_packet_length[priv->recv_packets] =
- ETHER_HDR_SIZE + ARP_HDR_SIZE;
+ memcpy(eth_recv->dst, eth->src, ETHADDR_LEN);
+ memcpy(eth_recv->src, priv->fake_host_hwaddr, ETHADDR_LEN);
+ eth_recv->protlen = htons(PROT_ARP);
+
+ arp_recv = (void *)eth_recv + ETHHDR_SIZE;
+ arp_recv->htype = htons(ARP_ETHER);
+ arp_recv->ptype = htons(PROT_IP);
+ arp_recv->hlen = ETHADDR_LEN;
+ arp_recv->plen = IP4_LEN;
+ arp_recv->op = htons(ARP_REPLY);
+ arp_recvd = (struct arpdata *)(arp_recv + 1);
+ memcpy(&arp_recvd->sha, priv->fake_host_hwaddr, ETHADDR_LEN);
+ arp_recvd->spa = priv->fake_host_ipaddr.s_addr;
+ memcpy(&arp_recvd->tha, &arpd->sha, ETHADDR_LEN);
+ arp_recvd->tpa = arpd->spa;
+
+ priv->recv_packet_length[priv->recv_packets] = ETHHDR_SIZE +
+ ARPHDR_SIZE + ARPDATA_SIZE;
++priv->recv_packets;
return 0;
@@ -114,22 +189,22 @@ int sandbox_eth_ping_req_to_reply(struct udevice *dev, void *packet,
unsigned int len)
{
struct eth_sandbox_priv *priv = dev_get_priv(dev);
- struct ethernet_hdr *eth = packet;
- struct ip_udp_hdr *ip;
- struct icmp_hdr *icmp;
- struct ethernet_hdr *eth_recv;
- struct ip_udp_hdr *ipr;
- struct icmp_hdr *icmpr;
-
- if (ntohs(eth->et_protlen) != PROT_IP)
+ struct ethhdr *eth = packet;
+ struct iphdr *ip;
+ struct icmphdr *icmp;
+ struct ethhdr *eth_recv;
+ struct iphdr *ipr;
+ struct icmphdr *icmpr;
+
+ if (ntohs(eth->protlen) != PROT_IP)
return -EAGAIN;
- ip = packet + ETHER_HDR_SIZE;
+ ip = packet + ETHHDR_SIZE;
- if (ip->ip_p != IPPROTO_ICMP)
+ if (ip->prot != IPPROTO_ICMP)
return -EAGAIN;
- icmp = (struct icmp_hdr *)&ip->udp_src;
+ icmp = (struct icmphdr *)(ip + 1);
if (icmp->type != ICMP_ECHO_REQUEST)
return -EAGAIN;
@@ -141,19 +216,19 @@ int sandbox_eth_ping_req_to_reply(struct udevice *dev, void *packet,
/* reply to the ping */
eth_recv = (void *)priv->recv_packet_buffer[priv->recv_packets];
memcpy(eth_recv, packet, len);
- ipr = (void *)eth_recv + ETHER_HDR_SIZE;
- icmpr = (struct icmp_hdr *)&ipr->udp_src;
- memcpy(eth_recv->et_dest, eth->et_src, ARP_HLEN);
- memcpy(eth_recv->et_src, priv->fake_host_hwaddr, ARP_HLEN);
- ipr->ip_sum = 0;
- ipr->ip_off = 0;
- net_copy_ip((void *)&ipr->ip_dst, &ip->ip_src);
- net_write_ip((void *)&ipr->ip_src, priv->fake_host_ipaddr);
- ipr->ip_sum = compute_ip_checksum(ipr, IP_HDR_SIZE);
+ ipr = (void *)eth_recv + ETHHDR_SIZE;
+ icmpr = (struct icmphdr *)(ipr + 1);
+ memcpy(eth_recv->dst, eth->src, ETHADDR_LEN);
+ memcpy(eth_recv->src, priv->fake_host_hwaddr, ETHADDR_LEN);
+ ipr->sum = 0;
+ ipr->off = 0;
+ ipr->dst = ip->src;
+ ipr->src = priv->fake_host_ipaddr.s_addr;
+ ipr->sum = compute_ip_checksum(ipr, IPHDR_SIZE);
icmpr->type = ICMP_ECHO_REPLY;
icmpr->checksum = 0;
- icmpr->checksum = compute_ip_checksum(icmpr, ICMP_HDR_SIZE);
+ icmpr->checksum = compute_ip_checksum(icmpr, ICMPHDR_SIZE);
priv->recv_packet_length[priv->recv_packets] = len;
++priv->recv_packets;
@@ -171,8 +246,9 @@ int sandbox_eth_ping_req_to_reply(struct udevice *dev, void *packet,
int sandbox_eth_recv_arp_req(struct udevice *dev)
{
struct eth_sandbox_priv *priv = dev_get_priv(dev);
- struct ethernet_hdr *eth_recv;
- struct arp_hdr *arp_recv;
+ struct ethhdr *eth_recv;
+ struct arphdr *arp_recv;
+ struct arpdata *arp_recvd;
/* Don't allow the buffer to overrun */
if (priv->recv_packets >= PKTBUFSRX)
@@ -180,23 +256,24 @@ int sandbox_eth_recv_arp_req(struct udevice *dev)
/* Formulate a fake request */
eth_recv = (void *)priv->recv_packet_buffer[priv->recv_packets];
- memcpy(eth_recv->et_dest, net_bcast_ethaddr, ARP_HLEN);
- memcpy(eth_recv->et_src, priv->fake_host_hwaddr, ARP_HLEN);
- eth_recv->et_protlen = htons(PROT_ARP);
-
- arp_recv = (void *)eth_recv + ETHER_HDR_SIZE;
- arp_recv->ar_hrd = htons(ARP_ETHER);
- arp_recv->ar_pro = htons(PROT_IP);
- arp_recv->ar_hln = ARP_HLEN;
- arp_recv->ar_pln = ARP_PLEN;
- arp_recv->ar_op = htons(ARPOP_REQUEST);
- memcpy(&arp_recv->ar_sha, priv->fake_host_hwaddr, ARP_HLEN);
- net_write_ip(&arp_recv->ar_spa, priv->fake_host_ipaddr);
- memcpy(&arp_recv->ar_tha, net_null_ethaddr, ARP_HLEN);
- net_write_ip(&arp_recv->ar_tpa, net_ip);
+ memcpy(eth_recv->dst, net_bcast_ethaddr, ETHADDR_LEN);
+ memcpy(eth_recv->src, priv->fake_host_hwaddr, ETHADDR_LEN);
+ eth_recv->protlen = htons(PROT_ARP);
+
+ arp_recv = (void *)eth_recv + ETHHDR_SIZE;
+ arp_recv->htype = htons(ARP_ETHER);
+ arp_recv->ptype = htons(PROT_IP);
+ arp_recv->hlen = ETHADDR_LEN;
+ arp_recv->plen = IP4_LEN;
+ arp_recv->op = htons(ARP_REQUEST);
+ arp_recvd = (struct arpdata *)(arp_recv + 1);
+ memcpy(&arp_recvd->sha, priv->fake_host_hwaddr, ETHADDR_LEN);
+ arp_recvd->spa = priv->fake_host_ipaddr.s_addr;
+ memcpy(&arp_recvd->tha, null_ethaddr, ETHADDR_LEN);
+ arp_recvd->tpa = net_ip.s_addr;
priv->recv_packet_length[priv->recv_packets] =
- ETHER_HDR_SIZE + ARP_HDR_SIZE;
+ ETHHDR_SIZE + ARPHDR_SIZE + ARPDATA_SIZE;
++priv->recv_packets;
return 0;
@@ -212,9 +289,10 @@ int sandbox_eth_recv_arp_req(struct udevice *dev)
int sandbox_eth_recv_ping_req(struct udevice *dev)
{
struct eth_sandbox_priv *priv = dev_get_priv(dev);
- struct ethernet_hdr *eth_recv;
- struct ip_udp_hdr *ipr;
- struct icmp_hdr *icmpr;
+ struct eth_pdata *pdata = dev_get_plat(dev);
+ struct ethhdr *eth_recv;
+ struct iphdr *ipr;
+ struct icmphdr *icmpr;
/* Don't allow the buffer to overrun */
if (priv->recv_packets >= PKTBUFSRX)
@@ -223,31 +301,31 @@ int sandbox_eth_recv_ping_req(struct udevice *dev)
/* Formulate a fake ping */
eth_recv = (void *)priv->recv_packet_buffer[priv->recv_packets];
- memcpy(eth_recv->et_dest, net_ethaddr, ARP_HLEN);
- memcpy(eth_recv->et_src, priv->fake_host_hwaddr, ARP_HLEN);
- eth_recv->et_protlen = htons(PROT_IP);
+ memcpy(eth_recv->dst, pdata->enetaddr, ETHADDR_LEN);
+ memcpy(eth_recv->src, priv->fake_host_hwaddr, ETHADDR_LEN);
+ eth_recv->protlen = htons(PROT_IP);
- ipr = (void *)eth_recv + ETHER_HDR_SIZE;
- ipr->ip_hl_v = 0x45;
- ipr->ip_len = htons(IP_ICMP_HDR_SIZE);
- ipr->ip_off = htons(IP_FLAGS_DFRAG);
- ipr->ip_p = IPPROTO_ICMP;
- ipr->ip_sum = 0;
- net_write_ip(&ipr->ip_src, priv->fake_host_ipaddr);
- net_write_ip(&ipr->ip_dst, net_ip);
- ipr->ip_sum = compute_ip_checksum(ipr, IP_HDR_SIZE);
+ ipr = (void *)eth_recv + ETHHDR_SIZE;
+ ipr->hl_v = 0x45;
+ ipr->len = htons(IPHDR_SIZE + ICMPHDR_SIZE);
+ ipr->off = htons(IP_FLAGS_DFRAG);
+ ipr->prot = IPPROTO_ICMP;
+ ipr->sum = 0;
+ ipr->src = priv->fake_host_ipaddr.s_addr;
+ ipr->dst = net_ip.s_addr;
+ ipr->sum = compute_ip_checksum(ipr, IPHDR_SIZE);
- icmpr = (struct icmp_hdr *)&ipr->udp_src;
+ icmpr = (struct icmphdr *)(ipr + 1);
icmpr->type = ICMP_ECHO_REQUEST;
icmpr->code = 0;
icmpr->checksum = 0;
- icmpr->un.echo.id = 0;
- icmpr->un.echo.sequence = htons(1);
- icmpr->checksum = compute_ip_checksum(icmpr, ICMP_HDR_SIZE);
+ icmpr->id = 0;
+ icmpr->sequence = htons(1);
+ icmpr->checksum = compute_ip_checksum(icmpr, ICMPHDR_SIZE);
priv->recv_packet_length[priv->recv_packets] =
- ETHER_HDR_SIZE + IP_ICMP_HDR_SIZE;
+ ETHHDR_SIZE + IPHDR_SIZE + ICMPHDR_SIZE;
++priv->recv_packets;
return 0;
@@ -398,7 +476,7 @@ static int sb_eth_write_hwaddr(struct udevice *dev)
debug("eth_sandbox %s: Write HW ADDR - %pM\n", dev->name,
pdata->enetaddr);
- memcpy(priv->fake_host_hwaddr, pdata->enetaddr, ARP_HLEN);
+ memcpy(priv->fake_host_hwaddr, pdata->enetaddr, ETHADDR_LEN);
return 0;
}
diff --git a/drivers/net/ti/am65-cpsw-nuss.c b/drivers/net/ti/am65-cpsw-nuss.c
index 3c62fc0b428..9b69f36d04d 100644
--- a/drivers/net/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ti/am65-cpsw-nuss.c
@@ -438,6 +438,12 @@ static int am65_cpsw_start(struct udevice *dev)
port->port_sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
}
+ ret = phy_config(priv->phydev);
+ if (ret < 0) {
+		dev_err(dev, "phy_config failed: %d\n", ret);
+ goto err_dis_rx;
+ }
+
ret = phy_startup(priv->phydev);
if (ret) {
dev_err(dev, "phy_startup failed\n");
@@ -639,9 +645,6 @@ static int am65_cpsw_phy_init(struct udevice *dev)
phydev->advertising = phydev->supported;
priv->phydev = phydev;
- ret = phy_config(phydev);
- if (ret < 0)
- dev_err(dev, "phy_config() failed: %d", ret);
return ret;
}