Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 22
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 18
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 11
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 5
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 23
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 84
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 30
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 136
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_clsf.c | 12
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 24
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 42
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 78
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 3
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 61
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 52
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 7
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 62
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 48
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 30
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 2
55 files changed, 803 insertions, 269 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 29554992215a..2349ea970255 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int rxcsum, rxvlan, rxvlan_filter;
+ netdev_features_t rxcsum, rxvlan, rxvlan_filter;
rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
@@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
unsigned int len, put_len, max_len;
- int received = 0;
+ unsigned int received = 0;
+ int packet_count = 0;
DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
@@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
- while (received < budget) {
+ while (packet_count < budget) {
DBGPR(" cur = %d\n", ring->cur);
/* First time in loop see if we need to restore state */
@@ -1662,7 +1663,7 @@ read_again:
if (packet->errors)
DBGPR("Error in received packet\n");
dev_kfree_skb(skb);
- continue;
+ goto next_packet;
}
if (!context) {
@@ -1677,7 +1678,7 @@ read_again:
}
dev_kfree_skb(skb);
- continue;
+ goto next_packet;
}
memcpy(skb_tail_pointer(skb), rdata->skb->data,
put_len);
@@ -1694,7 +1695,7 @@ read_again:
/* Stray Context Descriptor? */
if (!skb)
- continue;
+ goto next_packet;
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
@@ -1705,7 +1706,7 @@ read_again:
if (skb->len > max_len) {
DBGPR("packet length exceeds configured MTU\n");
dev_kfree_skb(skb);
- continue;
+ goto next_packet;
}
#ifdef XGMAC_ENABLE_RX_PKT_DUMP
@@ -1739,6 +1740,9 @@ read_again:
netdev->last_rx = jiffies;
napi_gro_receive(&pdata->napi, skb);
+
+next_packet:
+ packet_count++;
}
/* Check if we need to save state before leaving */
@@ -1752,9 +1756,9 @@ read_again:
rdata->state.error = error;
}
- DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+ DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
- return received;
+ return packet_count;
}
static int xgbe_poll(struct napi_struct *napi, int budget)
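The fix above separates two counters: received (packets actually handed to the stack) from packet_count (work charged against the NAPI budget). Previously an errored or dropped packet hit continue without bumping the loop counter, so a stream of bad packets could keep the poll loop spinning well past its budget. A minimal sketch of the corrected accounting pattern, with fetch_next_packet() and packet_has_errors() as hypothetical stand-ins for the driver's descriptor handling:

static int rx_poll_sketch(struct xgbe_channel *channel, int budget)
{
	int packet_count = 0;

	while (packet_count < budget) {
		/* hypothetical helper: pulls one completed packet */
		struct sk_buff *skb = fetch_next_packet(channel);

		if (!skb)
			break;

		if (packet_has_errors(skb)) {	/* hypothetical helper */
			dev_kfree_skb(skb);
			goto next_packet;	/* dropped, but still counted */
		}

		napi_gro_receive(&channel->pdata->napi, skb);
next_packet:
		packet_count++;	/* every processed descriptor chain counts */
	}

	return packet_count;
}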
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea1941e973..7ba83ffb08ac 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+ if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+ return false;
+
+ if (ioread32(p->ring_csr_addr + SRST_ADDR))
+ return false;
+
+ return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
u32 val;
+ if (!xgene_ring_mgr_init(pdata))
+ return -ENODEV;
+
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
val |= SCAN_AUTO_INCR;
MGMT_CLOCK_SEL_SET(&val, 1);
xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+ return 0;
}
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 38558584080e..ec45f3256f0e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
+#define CLKEN_ADDR 0xc208
+#define SRST_ADDR 0xc200
+
#define MAC_ADDR_REG_OFFSET 0x00
#define MAC_COMMAND_REG_OFFSET 0x04
#define MAC_WRITE_REG_OFFSET 0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc6f6bb..123669696184 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
- u8 cpu_bufnum = 0, eth_bufnum = 0;
- u8 bp_bufnum = 0x20;
- u16 ring_id, ring_num = 0;
+ u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+ u8 bp_bufnum = START_BP_BUFNUM;
+ u16 ring_id, ring_num = START_RING_NUM;
int ret;
/* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
u16 dst_ring_num;
int ret;
- pdata->port_ops->reset(pdata);
+ ret = pdata->port_ops->reset(pdata);
+ if (ret)
+ return ret;
ret = xgene_enet_create_desc_rings(ndev);
if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
return ret;
err:
+ unregister_netdev(ndev);
free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a01161f..f9958fae6ffd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
+#define START_ETH_BUFNUM 2
+#define START_BP_BUFNUM 0x22
+#define START_RING_NUM 8
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
};
struct xgene_port_ops {
- void (*reset)(struct xgene_enet_pdata *pdata);
+ int (*reset)(struct xgene_enet_pdata *pdata);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
u32 dst_ring_num, u16 bufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index e6d24c210198..f5d4f68c288c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
struct net_device *ndev = p->ndev;
u32 data;
- int i;
+ int i = 0;
xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
- for (i = 0; i < 10 && data != ~0U ; i++) {
+ do {
usleep_range(100, 110);
data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
- }
-
- if (data != ~0U) {
- netdev_err(ndev, "Failed to release memory from shutdown\n");
- return -ENODEV;
- }
+ if (data == ~0U)
+ return 0;
+ } while (++i < 10);
- return 0;
+ netdev_err(ndev, "Failed to release memory from shutdown\n");
+ return -ENODEV;
}
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
@@ -313,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
xgene_sgmac_rxtx(p, TX_EN, false);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
+ if (!xgene_ring_mgr_init(p))
+ return -ENODEV;
+
clk_prepare_enable(p->clk);
clk_disable_unprepare(p->clk);
clk_prepare_enable(p->clk);
xgene_enet_ecc_init(p);
xgene_enet_config_ring_if_assoc(p);
+
+ return 0;
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d07206b3c7..a18a9d1f1143 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
+ if (!xgene_ring_mgr_init(pdata))
+ return -ENODEV;
+
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
xgene_enet_ecc_init(pdata);
xgene_enet_config_ring_if_assoc(pdata);
+
+ return 0;
}
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee..531bb7c57531 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* We just need one DMA descriptor which is DMA-able, since writing to
* the port will allocate a new descriptor in its internal linked-list
*/
- p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
if (!p) {
netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
if (!(reg & TDMA_DISABLED))
netdev_warn(priv->netdev, "TDMA not stopped!\n");
+ /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+ * fail, so by checking this pointer we know whether the TX ring was
+ * fully initialized or not.
+ */
+ if (!ring->cbs)
+ return;
+
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = NULL;
if (ring->desc_dma) {
- dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
ring->desc_dma = 0;
}
ring->size = 0;
@@ -1397,6 +1406,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
/* Enable NAPI */
napi_enable(&priv->napi);
+ /* Enable RX interrupt and TX ring full interrupt */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
phy_start(priv->phydev);
/* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1511,6 @@ static int bcm_sysport_open(struct net_device *dev)
if (ret)
goto out_free_rx_ring;
- /* Enable RX interrupt and TX ring full interrupt */
- intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
/* Turn on TDMA */
ret = tdma_enable_set(priv, 1);
if (ret)
@@ -1858,6 +1867,8 @@ static int bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
+ umac_reset(priv);
+
/* We may have been suspended and never received a WOL event that
* would turn off MPD detection, take care of that now
*/
@@ -1885,9 +1896,6 @@ static int bcm_sysport_resume(struct device *d)
netif_device_attach(dev);
- /* Enable RX interrupt and TX ring full interrupt */
- intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
-
/* RX pipe enable */
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
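Three independent fixes land in this file: the coherent DMA allocation and free now use matching sizes (sizeof(struct dma_desc) rather than a single byte), the RX and TX-ring-full interrupt unmasking moves into bcm_sysport_netif_start() so the open and resume paths share it, and the teardown path learns to detect a half-initialized TX ring. For the last one, ring->cbs doubles as an "init completed" flag because it is the final member set up in bcm_sysport_init_tx_ring(); a sketch of the guard:

static void fini_tx_ring_sketch(struct bcm_sysport_tx_ring *ring)
{
	/* cbs is assigned last during init, so NULL means init never
	 * completed and there is no NAPI context or buffer array to
	 * tear down.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);
	kfree(ring->cbs);
	ring->cbs = NULL;
}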
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 23f23c97c2ad..f05fab65d78a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
if (l5_cid >= MAX_CM_SK_TBL_SZ)
break;
- rcu_read_lock();
if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
rc = -ENODEV;
- rcu_read_unlock();
break;
}
csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
}
}
csk_put(csk);
- rcu_read_unlock();
rc = 0;
}
}
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
mutex_lock(&cnic_lock);
- if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+ if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
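Both cnic hunks are RCU API hygiene. rcu_access_pointer() only fetches the pointer value, so it may be used outside a read-side critical section when all the caller needs is a NULL test; rcu_dereference() is for actually following the pointer and must run under rcu_read_lock(). A self-contained sketch of the distinction (foo_ops and its handler are hypothetical):

struct foo_ops {
	void (*handler)(void);
};

static struct foo_ops __rcu *ops_ptr;

static void check_and_use(void)
{
	struct foo_ops *ops;

	/* NULL test only: no read-side critical section needed */
	if (!rcu_access_pointer(ops_ptr))
		return;

	/* Following the pointer does need the read lock */
	rcu_read_lock();
	ops = rcu_dereference(ops_ptr);
	if (ops)
		ops->handler();
	rcu_read_unlock();
}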
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec09e453..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
+ /* Re-configure the port multiplexer towards the PHY device */
+ bcmgenet_mii_config(priv->dev, false);
+
+ phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+
bcmgenet_netif_start(dev);
return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
bcmgenet_netif_stop(dev);
+ /* Really kill the PHY state machine and disconnect from it */
+ phy_disconnect(priv->phydev);
+
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev);
+ bcmgenet_mii_config(priv->dev, false);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524ea3b19..31b2da5f9b82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
/* Wake-on-LAN routines */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a9f801..933cd7e7cd33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
/* setup netdev link state when PHY link status change and
* update UMAC and RGMII block when link up
*/
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
}
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
return -EINVAL;
}
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev);
+ ret = bcmgenet_mii_config(dev, true);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 8edf0f5bd679..cca604994003 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -60,6 +60,43 @@ void cxgb4_dcb_version_init(struct net_device *dev)
dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
}
+static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = pi->adapter;
+ struct port_dcb_info *dcb = &pi->dcb;
+ struct dcb_app app;
+ int i, err;
+
+ /* zero priority implies remove */
+ app.priority = 0;
+
+ for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+ /* Check if app list is exhausted */
+ if (!dcb->app_priority[i].protocolid)
+ break;
+
+ app.protocol = dcb->app_priority[i].protocolid;
+
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ app.priority = dcb->app_priority[i].user_prio_map;
+ app.selector = dcb->app_priority[i].sel_field + 1;
+ err = dcb_ieee_delapp(dev, &app);
+ } else {
+ app.selector = !!(dcb->app_priority[i].sel_field);
+ err = dcb_setapp(dev, &app);
+ }
+
+ if (err) {
+ dev_err(adap->pdev_dev,
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ dcb_ver_array[dcb->dcb_version], app.selector,
+ app.protocol, -err);
+ break;
+ }
+ }
+}
+
/* Finite State machine for Data Center Bridging.
*/
void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,14 +117,17 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
/* we're going to use Host DCB */
dcb->state = CXGB4_DCB_STATE_HOST;
dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
- dcb->enabled = 1;
break;
}
case CXGB4_DCB_INPUT_FW_ENABLED: {
/* we're going to use Firmware DCB */
dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
- dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+ dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+ dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+ else
+ dcb->supported |= DCB_CAP_DCBX_VER_CEE;
break;
}
@@ -145,6 +185,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
* state. We need to reset back to a ground state
* of incomplete.
*/
+ cxgb4_dcb_cleanup_apps(dev);
cxgb4_dcb_state_init(dev);
dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +390,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
{
struct port_info *pi = netdev2pinfo(dev);
+ /* If DCBx is host-managed, dcb is enabled by outside lldp agents */
+ if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
+ pi->dcb.enabled = enabled;
+ return 0;
+ }
+
/* Firmware doesn't provide any mechanism to control the DCB state.
*/
if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -394,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
*up_tc_map = (1 << tc);
/* prio_type is link strict */
- *prio_type = 0x2;
+ if (*pgid != 0xF)
+ *prio_type = 0x2;
}
static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
u8 *prio_type, u8 *pgid, u8 *bw_per,
u8 *up_tc_map)
{
- return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+ /* tc 0 is written at MSB position */
+ return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+ up_tc_map, 1);
}
@@ -409,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
u8 *prio_type, u8 *pgid, u8 *bw_per,
u8 *up_tc_map)
{
- return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+ /* tc 0 is written at MSB position */
+ return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+ up_tc_map, 0);
}
static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -419,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
struct fw_port_cmd pcmd;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = pi->adapter;
+ int fw_tc = 7 - tc;
u32 _pgid;
int err;
@@ -437,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
}
_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
- _pgid &= ~(0xF << (tc * 4));
- _pgid |= pgid << (tc * 4);
+ _pgid &= ~(0xF << (fw_tc * 4));
+ _pgid |= pgid << (fw_tc * 4);
pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -551,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
priority >= CXGB4_MAX_PRIORITY)
*pfccfg = 0;
else
- *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+ *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
}
/* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -576,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
if (pfccfg)
- pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+ pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
else
- pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+ pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -833,11 +886,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
/* Return whether IEEE Data Center Bridging has been negotiated.
*/
-static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+static inline int
+cxgb4_ieee_negotiation_complete(struct net_device *dev,
+ enum cxgb4_dcb_fw_msgs dcb_subtype)
{
struct port_info *pi = netdev2pinfo(dev);
struct port_dcb_info *dcb = &pi->dcb;
+ if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+ return 0;
+
return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}
@@ -850,7 +908,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
{
int prio;
- if (!cxgb4_ieee_negotiation_complete(dev))
+ if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
return -EINVAL;
if (!(app->selector && app->protocol))
return -EINVAL;
@@ -872,7 +930,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
int ret;
- if (!cxgb4_ieee_negotiation_complete(dev))
+ if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
return -EINVAL;
if (!(app->selector && app->protocol))
return -EINVAL;
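Several of the hunks above fix the same indexing confusion: the firmware packs its eight per-traffic-class fields MSB-first, so host traffic class tc lives at firmware position 7 - tc, both for the 4-bit Priority Group IDs in the pgid word and for the per-priority bits in pfcen. A standalone arithmetic check of the nibble placement (values hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pgid_reg = 0;
	int tc = 0, pgid = 0x3;
	int fw_tc = 7 - tc;	/* tc 0 is written at the MSB position */

	pgid_reg &= ~(0xFu << (fw_tc * 4));
	pgid_reg |= (uint32_t)pgid << (fw_tc * 4);

	/* tc 0 lands in bits 31:28, not bits 3:0 */
	printf("pgid register: 0x%08x\n", pgid_reg);	/* 0x30000000 */
	return 0;
}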
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519..8520d5529df8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
#ifdef CONFIG_CHELSIO_T4_DCB
struct port_info *pi = netdev_priv(dev);
- return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+ if (!pi->dcb.enabled)
+ return 0;
+
+ return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+ (pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
return 0;
#endif
@@ -6610,6 +6614,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
+ spin_lock_init(&adapter->win0_lock);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314e11af..39f2b13e66c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_conm_ctrl;
+ u32 sge_control, sge_control2, sge_conm_ctrl;
+ unsigned int ingpadboundary, ingpackboundary;
int ret, egress_threshold;
/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
sge_control = t4_read_reg(adap, SGE_CONTROL);
s->pktshift = PKTSHIFT_GET(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
- s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ if (is_t4(adap->params.chip)) {
+ s->fl_align = ingpadboundary;
+ } else {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
if (adap->flags & USING_SOFT_PARAMS)
ret = t4_sge_init_soft(adap);
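The comment in the hunk explains the policy; the arithmetic is worth seeing with concrete numbers. On T5 the Free List alignment is the larger of the Padding Boundary (field value plus a fixed shift of 5) and the Packing Boundary, where a packing field of 0 is a special case meaning 16 bytes. A standalone check with hypothetical field values:

#include <stdio.h>

#define X_INGPADBOUNDARY_SHIFT	5
#define INGPACKBOUNDARY_SHIFT_X	5
#define INGPACKBOUNDARY_16B_X	0

int main(void)
{
	unsigned int pad_field = 0, pack_field = 1;	/* hypothetical */
	unsigned int ingpadboundary, ingpackboundary, fl_align;

	ingpadboundary = 1u << (pad_field + X_INGPADBOUNDARY_SHIFT); /* 32 */

	if (pack_field == INGPACKBOUNDARY_16B_X)
		ingpackboundary = 16;	/* T5's special meaning of "0" */
	else
		ingpackboundary = 1u <<
			(pack_field + INGPACKBOUNDARY_SHIFT_X);	/* 64 */

	fl_align = ingpadboundary > ingpackboundary ? ingpadboundary
						    : ingpackboundary;
	printf("fl_align = %u\n", fl_align);	/* max(32, 64) = 64 */
	return 0;
}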
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74e4f09..163a2a14948c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
HOSTPAGESIZEPF6(sge_hps) |
HOSTPAGESIZEPF7(sge_hps));
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(fl_align_log - 5) |
- EGRSTATUSPAGESIZE(stat_len != 64));
-
+ if (is_t4(adap->params.chip)) {
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(fl_align_log - 5) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+ } else {
+ /* T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good. For T5 the smallest
+ * Padding Boundary which we can select is 32 bytes which is
+ * larger than any known Memory Controller Line Size so we'll
+ * use that.
+ *
+ * T5 has a different interpretation of the "0" value for the
+ * Packing Boundary. This corresponds to 16 bytes instead of
+ * the expected 32 bytes. We never have a Packing Boundary
+ * less than 32 bytes so we can't use that special value but
+ * on the other hand, if we wanted 32 bytes, the best we can
+ * really do is 64 bytes.
+ */
+ if (fl_align <= 32) {
+ fl_align = 64;
+ fl_align_log = 6;
+ }
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL2_A,
+ INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+ INGPACKBOUNDARY_V(fl_align_log -
+ INGPACKBOUNDARY_SHIFT_X));
+ }
/*
* Adjust various SGE Free List Host Buffer Sizes.
*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db5dc13..8d2de1006b08 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
#define X_INGPADBOUNDARY_SHIFT 5
#define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A 0x1124
#define DCASYSTYPE 0x00080000U
#define RXPKTCPLMODE_MASK 0x00040000U
#define RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
#define PKTSHIFT_SHIFT 10
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_32B_X 0
#define INGPCIEBOUNDARY_MASK 0x00000380U
#define INGPCIEBOUNDARY_SHIFT 7
#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
>> INGPADBOUNDARY_SHIFT)
+#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_SHIFT_X 5
+
+#define INGPACKBOUNDARY_S 16
+#define INGPACKBOUNDARY_M 0x7U
+#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
+#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
+ & INGPACKBOUNDARY_M)
#define EGRPCIEBOUNDARY_MASK 0x0000000eU
#define EGRPCIEBOUNDARY_SHIFT 1
#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
+ /* Decoded Adapter Parameters.
+ */
+ u32 fl_pg_order; /* large page allocation size */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+
/*
* Reverse maps from Absolute Queue IDs to associated queue pointers.
* The absolute Queue IDs are in a compact range which start at a
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d91826..0b42bddaf284 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x480d), /* T480-cr */
CH_DEVICE(0x480e), /* T440-lp-cr */
CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
- CH_DEVICE(0x4880),
+ CH_DEVICE(0x4881),
+ CH_DEVICE(0x4882),
+ CH_DEVICE(0x4883),
+ CH_DEVICE(0x4884),
+ CH_DEVICE(0x4885),
+ CH_DEVICE(0x4886),
+ CH_DEVICE(0x4887),
+ CH_DEVICE(0x4888),
CH_DEVICE(0x5801), /* T520-cr */
CH_DEVICE(0x5802), /* T522-cr */
CH_DEVICE(0x5803), /* T540-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..fdd078d7d82c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
#include "../cxgb4/t4_msg.h"
/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER; /* large page allocation size */
-static u32 STAT_LEN; /* length of status page at ring end */
-static u32 PKTSHIFT; /* padding between CPL and packet data */
-static u32 FL_ALIGN; /* response queue message alignment */
-
-/*
* Constants ...
*/
enum {
@@ -102,12 +94,6 @@ enum {
MAX_TIMER_TX_RECLAIM = 100,
/*
- * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
- * timer will attempt to refill it.
- */
- FL_STARVE_THRES = 4,
-
- /*
* Suspend an Ethernet TX queue with fewer available descriptors than
* this. We always want to have room for a maximum sized packet:
* inline immediate data + MAX_SKB_FRAGS. This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
/**
* fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
* @fl: the Free List
*
* Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
* threshold.
*/
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
{
- return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
/**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
/**
* get_buf_size - return the size of an RX Free List buffer.
+ * @adapter: pointer to the associated adapter
* @sdesc: pointer to the software buffer descriptor
*/
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+ const struct rx_sw_desc *sdesc)
{
- return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
- ? (PAGE_SIZE << FL_PG_ORDER)
- : PAGE_SIZE;
+ const struct sge *s = &adapter->sge;
+
+ return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+ ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}
/**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ get_buf_size(adapter, sdesc),
+ PCI_DMA_FROMDEVICE);
put_page(sdesc->page);
sdesc->page = NULL;
if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ get_buf_size(adapter, sdesc),
+ PCI_DMA_FROMDEVICE);
sdesc->page = NULL;
if (++fl->cidx == fl->size)
fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
int n, gfp_t gfp)
{
+ struct sge *s = &adapter->sge;
struct page *page;
dma_addr_t dma_addr;
unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
* If we don't support large pages, drop directly into the small page
* allocation code.
*/
- if (FL_PG_ORDER == 0)
+ if (s->fl_pg_order == 0)
goto alloc_small_pages;
while (n) {
page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- FL_PG_ORDER);
+ s->fl_pg_order);
if (unlikely(!page)) {
/*
 * We've failed in our attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
fl->large_alloc_failed++;
break;
}
- poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+ poison_buf(page, PAGE_SIZE << s->fl_pg_order);
dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
- PAGE_SIZE << FL_PG_ORDER,
+ PAGE_SIZE << s->fl_pg_order,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
/*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
* because DMA mapping resources are typically
 * critical resources once they become scarce.
*/
- __free_pages(page, FL_PG_ORDER);
+ __free_pages(page, s->fl_pg_order);
goto out;
}
dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
fl->pend_cred += cred;
ring_fl_db(adapter, fl);
- if (unlikely(fl_starving(fl))) {
+ if (unlikely(fl_starving(adapter, fl))) {
smp_wmb();
set_bit(fl->cntxt_id, adapter->sge.starving_fl);
}
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
+ struct adapter *adapter = rxq->rspq.adapter;
+ struct sge *s = &adapter->sge;
int ret;
struct sk_buff *skb;
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
return;
}
- copy_frags(skb, gl, PKTSHIFT);
- skb->len = gl->tot_len - PKTSHIFT;
+ copy_frags(skb, gl, s->pktshift);
+ skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
(rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = rspq->adapter;
+ struct sge *s = &adapter->sge;
/*
* If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
rxq->stats.rx_drops++;
return 0;
}
- __skb_pull(skb, PKTSHIFT);
+ __skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
static int process_responses(struct sge_rspq *rspq, int budget)
{
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = rspq->adapter;
+ struct sge *s = &adapter->sge;
int budget_left = budget;
while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
BUG_ON(frag >= MAX_SKB_FRAGS);
BUG_ON(rxq->fl.avail == 0);
sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = get_buf_size(sdesc);
+ bufsz = get_buf_size(adapter, sdesc);
fp->page = sdesc->page;
fp->offset = rspq->offset;
fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
*/
ret = rspq->handler(rspq, rspq->cur_desc, &gl);
if (likely(ret == 0))
- rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
* schedule napi but the FL is no longer starving.
* No biggie.
*/
- if (fl_starving(fl)) {
+ if (fl_starving(adapter, fl)) {
struct sge_eth_rxq *rxq;
rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
int intr_dest,
struct sge_fl *fl, rspq_handler_t hnd)
{
+ struct sge *s = &adapter->sge;
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
- &fl->addr, &fl->sdesc, STAT_LEN);
+ &fl->addr, &fl->sdesc, s->stat_len);
if (!fl->desc) {
ret = -ENOMEM;
goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* free list ring) in Egress Queue Units.
*/
flsz = (fl->size / FL_PER_EQ_UNIT +
- STAT_LEN / EQ_UNIT);
+ s->stat_len / EQ_UNIT);
/*
* Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *devq,
unsigned int iqid)
{
+ struct sge *s = &adapter->sge;
int ret, nentries;
struct fw_eq_eth_cmd cmd, rpl;
struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
* Calculate the size of the hardware TX Queue (including the Status
* Page on the end of the TX Queue) in units of TX Descriptors.
*/
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
/*
* Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
sizeof(struct tx_desc),
sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
if (!txq->q.desc)
return -ENOMEM;
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
*/
static void free_txq(struct adapter *adapter, struct sge_txq *tq)
{
+ struct sge *s = &adapter->sge;
+
dma_free_coherent(adapter->pdev_dev,
- tq->size * sizeof(*tq->desc) + STAT_LEN,
+ tq->size * sizeof(*tq->desc) + s->stat_len,
tq->desc, tq->phys_addr);
tq->cntxt_id = 0;
tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
struct sge_fl *fl)
{
+ struct sge *s = &adapter->sge;
unsigned int flid = fl ? fl->cntxt_id : 0xffff;
t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
if (fl) {
free_rx_bufs(adapter, fl, fl->avail);
dma_free_coherent(adapter->pdev_dev,
- fl->size * sizeof(*fl->desc) + STAT_LEN,
+ fl->size * sizeof(*fl->desc) + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
+ unsigned int ingpadboundary, ingpackboundary;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
* Now translate the adapter parameters into our internal forms.
*/
if (fl1)
- FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
- STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
- ? 128 : 64);
- PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
- FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
- SGE_INGPADBOUNDARY_SHIFT);
+ s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+ ? 128 : 64);
+ s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
+ */
+ ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ if (is_t4(adapter->params.chip)) {
+ s->fl_align = ingpadboundary;
+ } else {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
+
+ /* A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ s->fl_starve_thres
+ = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
/*
* Set up tasklet timers.
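Two things happen in this file: the old file-scope globals (FL_PG_ORDER, STAT_LEN, PKTSHIFT, FL_ALIGN) become per-adapter fields in struct sge, which matters once two VFs with different SGE parameters share the module, and the Free List starvation threshold is derived from the SGE's Egress Congestion Threshold instead of a fixed 4. Since EGRTHRESHOLD is in units of 2 Free List pointers, the driver needs strictly more than twice that value; a worked check with a hypothetical field value:

#include <stdio.h>

int main(void)
{
	unsigned int egrthreshold = 64;	/* hypothetical EGRTHRESHOLD field */

	/* SGE congestion threshold in FL pointers, then our margin */
	unsigned int sge_thres = egrthreshold * 2;		/* 128 */
	unsigned int fl_starve_thres = egrthreshold * 2 + 1;	/* 129 */

	printf("refill at <= %u buffers, SGE stalls at %u\n",
	       fl_starve_thres, sge_thres);
	return 0;
}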
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 95df61dcb4ce..4b6a6d14d86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
*/
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
+ u32 sge_control2; /* T5: more of the same */
u32 sge_host_page_size; /* RDMA page sizes */
u32 sge_queues_per_page; /* RDMA queues/page */
u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
+ u32 sge_congestion_control; /* congestion thresholds, etc. */
u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
u32 sge_timer_value_2_and_3;
u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index e984fdc48ba2..1e896b923234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
sge_params->sge_timer_value_2_and_3 = vals[5];
sge_params->sge_timer_value_4_and_5 = vals[6];
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately with the Padding Boundary in SGE_CONTROL and the Packing
+ * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
+ * SGE_CONTROL2 in order to determine how ingress packet data will be
+ * laid out in Packed Buffer Mode. Unfortunately, older versions of
+ * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+ * failure grabbing it we throw an error since we can't figure out the
+ * right value.
+ */
+ if (!is_t4(adapter->params.chip)) {
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adapter->pdev_dev,
+ "Unable to get SGE Control2; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_params->sge_control2 = vals[0];
+ }
+
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
- v = t4vf_query_params(adapter, 1, params, vals);
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+ v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
sge_params->sge_ingress_rx_threshold = vals[0];
+ sge_params->sge_congestion_control = vals[1];
return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 69dfd3c9e529..0be6850be8a2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
int i;
enic_rfs_timer_stop(enic);
- spin_lock(&enic->rfs_h.lock);
+ spin_lock_bh(&enic->rfs_h.lock);
enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
struct hlist_head *hhead;
@@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
kfree(n);
}
}
- spin_unlock(&enic->rfs_h.lock);
+ spin_unlock_bh(&enic->rfs_h.lock);
}
struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
@@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data)
bool res;
int j;
- spin_lock(&enic->rfs_h.lock);
+ spin_lock_bh(&enic->rfs_h.lock);
for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
@@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data)
}
}
}
- spin_unlock(&enic->rfs_h.lock);
+ spin_unlock_bh(&enic->rfs_h.lock);
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
@@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
- spin_lock(&enic->rfs_h.lock);
+ spin_lock_bh(&enic->rfs_h.lock);
n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
if (n) { /* entry already present */
@@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
ret_unlock:
- spin_unlock(&enic->rfs_h.lock);
+ spin_unlock_bh(&enic->rfs_h.lock);
return res;
}
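The _bh variants are needed because enic_flow_may_expire() runs from a timer, i.e. in softirq context. If process context took rfs_h.lock with plain spin_lock() and the timer fired on the same CPU, the softirq would spin forever on a lock its own CPU already holds. A sketch of the pattern (using this era's unsigned long timer callback signature):

static DEFINE_SPINLOCK(tbl_lock);

static void expire_timer_fn(unsigned long data)	/* softirq context */
{
	spin_lock(&tbl_lock);		/* plain lock is fine here */
	/* ... walk the table and expire stale entries ... */
	spin_unlock(&tbl_lock);
}

static void table_free(void)			/* process context */
{
	spin_lock_bh(&tbl_lock);	/* keep the timer from nesting */
	/* ... tear the table down ... */
	spin_unlock_bh(&tbl_lock);
}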
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 929bfe70080a..73cf1653a4a3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
struct vnic_rq_buf *buf = rq->to_use;
if (buf->os_buf) {
- buf = buf->next;
- rq->to_use = buf;
- rq->ring.desc_avail--;
- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
- /* Adding write memory barrier prevents compiler and/or
- * CPU reordering, thus avoiding descriptor posting
- * before descriptor is initialized. Otherwise, hardware
- * can read stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &rq->ctrl->posted_index);
- }
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
return 0;
}
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
enic->rq_truncated_pkts++;
}
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
+ buf->os_buf = NULL;
return;
}
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
+ buf->os_buf = NULL;
}
}
@@ -1674,13 +1670,13 @@ static int enic_stop(struct net_device *netdev)
enic_dev_disable(enic);
- local_bh_disable();
for (i = 0; i < enic->rq_count; i++) {
napi_disable(&enic->napi[i]);
+ local_bh_disable();
while (!enic_poll_lock_napi(&enic->rq[i]))
mdelay(1);
+ local_bh_enable();
}
- local_bh_enable();
netif_carrier_off(netdev);
netif_tx_disable(netdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 81b96cf87574..3dca494797bd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
return bufaddr;
}
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+ int i;
+ unsigned int *src = src_buf;
+ unsigned int *dst = dst_buf;
+
+ for (i = 0; i < len; i += 4, src++, dst++)
+ *dst = swab32p(src);
+}
+
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
}
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length)
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, length);
+ if (!swap)
+ memcpy(new_skb->data, (*skb)->data, length);
+ else
+ swap_buffer2(new_skb->data, (*skb)->data, length);
*skb = new_skb;
return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
u16 vlan_tag;
int index = 0;
bool is_copybreak;
+ bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+ is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+ need_swap);
if (!is_copybreak) {
skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, pkt_len - 4);
data = skb->data;
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (!is_copybreak && need_swap)
swap_buffer(data, pkt_len);
/* Extract the enhanced buffer descriptor */
@@ -1581,7 +1596,8 @@ fec_enet_interrupt(int irq, void *dev_id)
complete(&fep->mdio_done);
}
- fec_ptp_check_pps_event(fep);
+ if (fep->ptp_clock)
+ fec_ptp_check_pps_event(fep);
return ret;
}
@@ -3342,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
}
rtnl_unlock();
- fec_enet_clk_enable(ndev, false);
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -3366,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
return ret;
}
- pinctrl_pm_select_default_state(&fep->pdev->dev);
- ret = fec_enet_clk_enable(ndev, true);
- if (ret)
- goto failed_clk;
-
rtnl_lock();
if (netif_running(ndev)) {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret) {
+ rtnl_unlock();
+ goto failed_clk;
+ }
fec_restart(ndev);
netif_tx_lock_bh(ndev);
netif_device_attach(ndev);
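With the FEC_QUIRK_SWAP_FRAME quirk the received frame must be byte-swapped exactly once: the copybreak path now folds the swap into its copy via swap_buffer2(), so the in-place swap_buffer() call is gated on !is_copybreak to avoid undoing it. The single-pass copy-and-swap is easy to sanity-check in userspace (swab32_demo() is a stand-in for the kernel's swab32p()):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32_demo(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	uint32_t src[2] = { 0x11223344, 0xaabbccdd };
	uint32_t dst[2];
	int i;

	/* copy and swap in one pass, as swap_buffer2() does */
	for (i = 0; i < 2; i++)
		dst[i] = swab32_demo(src[i]);

	printf("0x%08x 0x%08x\n", dst[0], dst[1]);
	/* prints 0x44332211 0xddccbbaa */
	return 0;
}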
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 3d4e08be1709..b34214e2df5f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -341,6 +341,9 @@ static void restart(struct net_device *dev)
FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
}
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
/*
* Enable interrupts we wish to service.
*/
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index f30411f0701f..7a184e8816a4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -355,6 +355,9 @@ static void restart(struct net_device *dev)
if (fep->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5f6aded512f5..24f3986cfae2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM |
NETIF_F_SG);
- netdev->priv_flags |= IFF_UNICAST_FLT;
+ /* Do not set IFF_UNICAST_FLT for VMware's 82545EM */
+ if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
+ hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
+ netdev->priv_flags |= IFF_UNICAST_FLT;
adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ed5f1c15fb0f..c3a7f4a4b775 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_TX_PF_NUM_SHIFT;
u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
I40E_GL_MDET_TX_VF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
I40E_GL_MDET_TX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
I40E_GL_MDET_TX_QUEUE_SHIFT;
@@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
I40E_GL_MDET_RX_EVENT_SHIFT;
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
I40E_GL_MDET_RX_QUEUE_SHIFT;
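
Both i40e hunks fix the same slip: the event field was masked with its _SHIFT constant instead of its _MASK, so the extracted value was garbage whenever the set bits didn't happen to coincide. The general mask-then-shift shape, with illustrative names rather than the real i40e register definitions:

    #define FIELD_SHIFT	4
    #define FIELD_MASK	(0xffU << FIELD_SHIFT)

    static inline unsigned char field_get(unsigned int reg)
    {
            /* isolate the bits with the MASK, then shift them down;
             * (reg & FIELD_SHIFT) >> FIELD_SHIFT was the bug above */
            return (reg & FIELD_MASK) >> FIELD_SHIFT;
    }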
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a21b14495ebd..a2d72a87cbde 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6537,6 +6537,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
if (unlikely(page_to_nid(page) != numa_node_id()))
return false;
+ if (unlikely(page->pfmemalloc))
+ return false;
+
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely(page_count(page) != 1))
@@ -6603,7 +6606,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* we can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(page) == numa_node_id()))
+ if (likely((page_to_nid(page) == numa_node_id()) &&
+ !page->pfmemalloc))
return true;
/* this page cannot be reused so discard it */
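
The two igb hunks add the same test in both RX reuse paths: a page that came from the pfmemalloc emergency reserves must be handed back to the allocator, never recycled into the RX ring. A sketch of the combined reuse check as kernels of this era spell it (later kernels wrap the flag in page_is_pfmemalloc()):

    #include <linux/mm.h>
    #include <linux/topology.h>

    static bool rx_page_reusable(struct page *page)
    {
            if (page_to_nid(page) != numa_node_id())
                    return false;	/* avoid cross-node recycling */
            if (page->pfmemalloc)
                    return false;	/* reserve pages go back to the VM */
            return true;
    }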
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3ce4a258f945..0ae038b9af90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
if (old == advertised)
return err;
/* this sets the link speed and restarts auto-neg */
+ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ usleep_range(1000, 2000);
+
hw->mac.autotry_restart = true;
err = hw->mac.ops.setup_link(hw, advertised, true);
if (err) {
e_info(probe, "setup link failed with code %d\n", err);
hw->mac.ops.setup_link(hw, old, true);
}
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
} else {
/* in this case we currently only support 10Gb/FULL */
u32 speed = ethtool_cmd_speed(ecmd);
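
The ethtool path above now serializes with SFP initialization the way the rest of the driver does: spin on an adapter state bit, sleeping between attempts, and clear the bit when done. The bare pattern, reduced to a sketch (the bit name is illustrative; the driver uses __IXGBE_IN_SFP_INIT):

    /* take the pseudo-lock; 1-2 ms naps keep the wait scheduler-friendly */
    while (test_and_set_bit(__STATE_IN_SFP_INIT, &adapter->state))
            usleep_range(1000, 2000);

    /* ... change advertised speeds and restart autonegotiation ... */

    clear_bit(__STATE_IN_SFP_INIT, &adapter->state);	/* release */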
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fec5212d4337..d2df4e3d1032 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
IXGBE_CB(skb)->page_released = false;
}
dev_kfree_skb(skb);
+ rx_buffer->skb = NULL;
}
- rx_buffer->skb = NULL;
if (rx_buffer->dma)
dma_unmap_page(dev, rx_buffer->dma,
ixgbe_rx_pg_size(rx_ring),
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f27c35..28b81ae09b5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
- s32 status;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
hw->phy.ops.write_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, autoneg_reg);
-
- return status;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b151a949f352..d44560d1d268 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
- struct sk_buff *skb;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- skb = NULL;
- if (cmd_sts & TX_LAST_DESC)
- skb = __skb_dequeue(&txq->tx_skb);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+ desc->byte_cnt, DMA_TO_DEVICE);
+
+ if (cmd_sts & TX_ENABLE_INTERRUPT) {
+ struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+ if (!WARN_ON(!skb))
+ dev_kfree_skb(skb);
+ }
if (cmd_sts & ERROR_SUMMARY) {
netdev_info(mp->dev, "tx error\n");
mp->dev->stats.tx_errors++;
}
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
- dev_kfree_skb(skb);
}
__netif_tx_unlock_bh(nq);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ece83f101526..fdf3e382e464 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
{
struct mvpp2_prs_entry *pe;
int tid_aux, tid;
+ int ret = 0;
pe = mvpp2_prs_vlan_find(priv, tpid, ai);
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
break;
}
- if (tid <= tid_aux)
- return -EINVAL;
+ if (tid <= tid_aux) {
+ ret = -EINVAL;
+ goto error;
+ }
memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_hw_write(priv, pe);
+error:
kfree(pe);
- return 0;
+ return ret;
}
/* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
unsigned int port_map)
{
struct mvpp2_prs_entry *pe;
- int tid_aux, tid, ai;
+ int tid_aux, tid, ai, ret = 0;
pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
/* Set ai value for new double vlan entry */
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
- if (ai < 0)
- return ai;
+ if (ai < 0) {
+ ret = ai;
+ goto error;
+ }
/* Get first single/triple vlan tid */
for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
break;
}
- if (tid >= tid_aux)
- return -ERANGE;
+ if (tid >= tid_aux) {
+ ret = -ERANGE;
+ goto error;
+ }
memset(pe, 0, sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
+error:
kfree(pe);
- return 0;
+ return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
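
All four mvpp2 hunks are one fix repeated: the parser entry 'pe' is heap-allocated, so every early return between the allocation and the final kfree() leaked it; routing failures through a single label frees it on all paths. The pattern in miniature (names and the failure condition are illustrative):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct prs_entry { u32 word[8]; };	/* stand-in for the real entry */

    static int prs_add_entry(bool invalid)
    {
            struct prs_entry *pe;
            int ret = 0;

            pe = kzalloc(sizeof(*pe), GFP_KERNEL);
            if (!pe)
                    return -ENOMEM;

            if (invalid) {
                    ret = -EINVAL;
                    goto error;	/* a bare return here would leak pe */
            }

            /* ... fill *pe and program the hardware ... */

    error:
            kfree(pe);	/* single exit frees on success and failure */
            return ret;
    }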
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fec8fce..02266e3de514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 1);
out:
- if (ret)
+ if (ret) {
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+ return;
+ }
+
+ /* set offloads */
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
int ret;
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
+ /* unset offloads */
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+ priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
- if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL;
- }
-
mdev->pndev[port] = dev;
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 34c137878545..454d9fea640e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
- MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ if (!skb->encapsulation)
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ else
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
ring->tx_csum++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a49c9d11d8a5..49290a405903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
pr_cont("\n");
}
}
+ synchronize_irq(eq->irq);
mlx4_mtt_cleanup(dev, &eq->mtt);
for (i = 0; i < npages; ++i)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ca0f98c95105..872843179f44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
cur->ib.dst_gid_msk);
break;
+ case MLX4_NET_TRANS_RULE_ID_VXLAN:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+ break;
case MLX4_NET_TRANS_RULE_ID_IPV6:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ed53291468f3..ad2c96a02a53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
+ eq->irqn = vecidx;
+ eq->dev = dev;
+ eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
eq->name, eq);
if (err)
goto err_eq;
- eq->irqn = vecidx;
- eq->dev = dev;
- eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-
err = mlx5_debug_eq_add(dev, eq);
if (err)
goto err_irq;
@@ -420,6 +419,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
+ synchronize_irq(table->msix_arr[eq->irqn].vector);
mlx5_buf_free(dev, &eq->buf);
return err;
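
The mlx4 and mlx5 hunks above apply one rule in two drivers: after an event queue is destroyed, wait out any handler still running on its vector before freeing the queue memory, otherwise a late interrupt can dereference freed pages. The ordering, reduced to a sketch (the struct and teardown helper are illustrative, not the mlx4/mlx5 API):

    #include <linux/interrupt.h>
    #include <linux/slab.h>

    struct my_eq { unsigned int irq; void *buf; };

    static void my_eq_teardown(struct my_eq *eq)
    {
            /* 1. destroy the EQ in hardware: no new events can fire */
            synchronize_irq(eq->irq);	/* 2. drain a handler in flight */
            kfree(eq->buf);			/* 3. only now free the memory */
    }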
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d8e8e489b2d..71b10b210792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
dev->profile = &profile[prof_sel];
dev->event = mlx5_core_event;
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
err = mlx5_dev_init(dev, pdev);
if (err) {
dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
goto out;
}
- INIT_LIST_HEAD(&priv->ctx_list);
- spin_lock_init(&priv->ctx_lock);
err = mlx5_register_device(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 0b2a1ccd276d..613037584d08 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
if (test_bit(__NX_RESETTING, &adapter->state))
goto reschedule;
- if (test_bit(__NX_DEV_UP, &adapter->state)) {
+ if (test_bit(__NX_DEV_UP, &adapter->state) &&
+ !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
if (!adapter->has_link_events) {
netxen_nic_handle_phy_intr(adapter);
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index f3a47147937d..9a49f42ac2ba 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -5,7 +5,6 @@
config NET_VENDOR_QUALCOMM
bool "Qualcomm devices"
default y
- depends on SPI_MASTER && OF_GPIO
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
config QCA7000
tristate "Qualcomm Atheros QCA7000 support"
- depends on SPI_MASTER && OF_GPIO
+ depends on SPI_MASTER && OF
---help---
This SPI protocol driver supports the Qualcomm Atheros QCA7000.
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cdc319f..a77f05ce8325 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
EFX_MAX_CHANNELS,
resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
- BUG_ON(efx->max_channels == 0);
+ if (WARN_ON(efx->max_channels == 0))
+ return -EIO;
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ee84a90e371c..aaf2987512b5 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
unsigned short dma_flags;
int i = 0;
- EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
if (skb_shinfo(skb)->gso_size)
return efx_enqueue_skb_tso(tx_queue, skb);
@@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Find the packet protocol and sanity-check it */
state.protocol = efx_tso_check_protocol(skb);
- EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
-
rc = tso_start(&state, efx, skb);
if (rc)
goto mem_err;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5e94d00b96b3..6cc3cf6f17c8 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, smc91x_match);
+
+/**
+ * try_toggle_control_gpio - configure a GPIO if it exists
+ */
+static int try_toggle_control_gpio(struct device *dev,
+ struct gpio_desc **desc,
+ const char *name, int index,
+ int value, unsigned int nsdelay)
+{
+ struct gpio_desc *gpio = *desc;
+ int res;
+
+ gpio = devm_gpiod_get_index(dev, name, index);
+ if (IS_ERR(gpio)) {
+ if (PTR_ERR(gpio) == -ENOENT) {
+ *desc = NULL;
+ return 0;
+ }
+
+ return PTR_ERR(gpio);
+ }
+ res = gpiod_direction_output(gpio, !value);
+ if (res) {
+ dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
+ devm_gpiod_put(dev, gpio);
+ gpio = NULL;
+ return res;
+ }
+ if (nsdelay)
+ usleep_range(nsdelay, 2 * nsdelay);
+ gpiod_set_value_cansleep(gpio, value);
+ *desc = gpio;
+
+ return 0;
+}
#endif
/*
@@ -2207,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
- struct resource *res, *ires;
+ struct resource *res;
unsigned int __iomem *addr;
unsigned long irq_flags = SMC_IRQ_FLAGS;
+ unsigned long irq_resflags;
int ret;
ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2237,6 +2274,28 @@ static int smc_drv_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
u32 val;
+ /* Optional power-down GPIO configured? */
+ ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
+ "power", 0, 0, 100);
+ if (ret)
+ return ret;
+
+ /*
+ * Optional reset GPIO configured? Minimum 100 ns reset needed
+ * according to LAN91C96 datasheet page 14.
+ */
+ ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
+ "reset", 0, 0, 100);
+ if (ret)
+ return ret;
+
+ /*
+ * Need to wait for optional EEPROM to load, max 750 us according
+ * to LAN91C96 datasheet page 55.
+ */
+ if (lp->reset_gpio)
+ usleep_range(750, 1000);
+
/* Combination of IO widths supported, default to 16-bit */
if (!of_property_read_u32(np, "reg-io-width", &val)) {
if (val & 1)
@@ -2279,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!ires) {
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
ret = -ENODEV;
goto out_release_io;
}
-
- ndev->irq = ires->start;
-
- if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
- irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+ /*
+ * If this platform does not specify any special irqflags, or if
+ * the resource supplies a trigger, override the irqflags with
+ * the trigger flags from the resource.
+ */
+ irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+ irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
ret = smc_request_attrib(pdev, ndev);
if (ret)
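
try_toggle_control_gpio() above encodes "optional pin": -ENOENT from devm_gpiod_get_index() means the board simply doesn't wire that GPIO and probing continues, while any other error (for example -EPROBE_DEFER) is propagated. The acquisition half in isolation, as a sketch; newer kernels would reach for devm_gpiod_get_index_optional() instead:

    gpio = devm_gpiod_get_index(dev, "reset", 0);
    if (IS_ERR(gpio)) {
            if (PTR_ERR(gpio) == -ENOENT)
                    gpio = NULL;		/* optional: carry on without it */
            else
                    return PTR_ERR(gpio);	/* real failure, abort probe */
    }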
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 47dce918eb0f..2a38dacbbd27 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -298,6 +298,9 @@ struct smc_local {
struct sk_buff *pending_tx_skb;
struct tasklet_struct tx_task;
+ struct gpio_desc *power_gpio;
+ struct gpio_desc *reset_gpio;
+
/* version/revision of the SMC91x chip */
int version;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index affb29da353e..77ed74561e5f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
spin_unlock(&pdata->mac_lock);
}
+static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ /* If the internal PHY is in General Power-Down mode, everything except
+ * the management interface is powered down and stays that way as long
+ * as PHY register bit 0.11 is HIGH.
+ *
+ * In that case, clear bit 0.11 so the PHY powers up and we can access
+ * the PHY registers.
+ */
+ rc = phy_read(pdata->phy_dev, MII_BMCR);
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /* If the PHY general power-down bit is not set, it is not necessary
+ * to disable the general power-down mode.
+ */
+ if (rc & BMCR_PDOWN) {
+ rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ usleep_range(1000, 1500);
+ }
+
+ return 0;
+}
+
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
{
int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
return rc;
}
- /*
- * If energy is detected the PHY is already awake so is not necessary
- * to disable the energy detect power-down mode.
- */
- if ((rc & MII_LAN83C185_EDPWRDOWN) &&
- !(rc & MII_LAN83C185_ENERGYON)) {
+ /* Only disable if energy detect mode is already enabled */
+ if (rc & MII_LAN83C185_EDPWRDOWN) {
/* Disable energy detect mode for this SMSC Transceivers */
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
}
-
- mdelay(1);
+ /* Allow the PHY to wake up */
+ mdelay(2);
}
return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
/* Only enable if energy detect mode is already disabled */
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
- mdelay(100);
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
}
-
- mdelay(1);
}
return 0;
}
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
int ret;
/*
+ * Make sure to power up the PHY chip before doing a reset, otherwise
+ * the reset fails.
+ */
+ ret = smsc911x_phy_general_power_up(pdata);
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
+ return ret;
+ }
+
+ /*
* LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that is
* initialized in an Energy Detect Power-Down mode that prevents the
* MAC chip from being software reset. So we have to wake up the PHY
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46c7e2c..18c46bb0f3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
char *phy_bus_name = priv->plat->phy_bus_name;
+ unsigned long flags;
bool ret = false;
/* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
* changed).
* In that case the driver disable own timers.
*/
+ spin_lock_irqsave(&priv->lock, flags);
if (priv->eee_active) {
pr_debug("stmmac: disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
tx_lpi_timer);
}
priv->eee_active = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
goto out;
}
/* Activate the EEE and start timers */
+ spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
- pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
ret = true;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
}
out:
return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
/* At this stage, it could be needed to setup the EEE or adjust some
* MAC related HW registers.
*/
priv->eee_enabled = stmmac_eee_init(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
/**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
}
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- int i)
+ int i, gfp_t flags)
{
struct sk_buff *skb;
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
- GFP_KERNEL);
+ flags);
if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i);
+ ret = stmmac_init_rx_buffers(priv, p, i, flags);
if (ret)
goto err_init_rx_buffers;
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_desc_rings(dev);
- if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
- return ret;
- }
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
}
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
- priv->eee_enabled = stmmac_eee_init(priv);
-
- stmmac_init_tx_coalesce(priv);
-
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
priv->rx_riwt = MAX_DMA_RIWT;
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
goto dma_desc_error;
}
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ goto init_error;
+ }
+
ret = stmmac_hw_setup(dev);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_init_tx_coalesce(priv);
+
if (priv->phydev)
phy_start(priv->phydev);
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
unsigned int enh_desc = priv->plat->enh_desc;
+ spin_lock(&priv->tx_lock);
+
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ spin_unlock(&priv->tx_lock);
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- spin_lock(&priv->tx_lock);
-
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
dma_map_err:
+ spin_unlock(&priv->tx_lock);
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- spin_lock(&priv->lock);
priv->hw->mac->set_filter(priv->hw, dev);
- spin_unlock(&priv->lock);
}
/**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- clk_disable_unprepare(priv->stmmac_clk);
+ clk_disable(priv->stmmac_clk);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk previously disabled */
- clk_prepare_enable(priv->stmmac_clk);
+ clk_enable(priv->stmmac_clk);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
+ init_dma_desc_rings(ndev, GFP_ATOMIC);
stmmac_hw_setup(ndev);
+ stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
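
Much of the stmmac reshuffle above exists to avoid a single deadlock: stmmac_eee_init() now takes priv->lock itself, and stmmac_adjust_link() used to call it with that same non-recursive spinlock held. Hence the call moves past the unlock; the shape in isolation:

    spin_lock_irqsave(&priv->lock, flags);
    /* ... update MAC/PHY link state ... */
    spin_unlock_irqrestore(&priv->lock, flags);

    /* safe only here: stmmac_eee_init() acquires priv->lock internally */
    priv->eee_enabled = stmmac_eee_init(priv);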
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 655a23bbc451..e17a970eaf2b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
static void stmmac_default_data(void)
{
memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+
plat_dat.bus_id = 1;
plat_dat.phy_addr = 0;
plat_dat.interface = PHY_INTERFACE_MODE_GMII;
@@ -47,6 +48,12 @@ static void stmmac_default_data(void)
dma_cfg.pbl = 32;
dma_cfg.burst_len = DMA_AXI_BLEN_256;
plat_dat.dma_cfg = &dma_cfg;
+
+ /* Set default value for multicast hash bins */
+ plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat_dat.unicast_filter_entries = 1;
}
/**
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
HMD(("init rxring, "));
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ u32 mapping;
skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hp->dma_dev, mapping)) {
+ dev_kfree_skb_any(skb);
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+ continue;
+ }
hme_write_rxd(hp, &hb->happy_meal_rxd[i],
(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
- dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(skb, RX_OFFSET);
}
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb = hp->rx_skbs[elem];
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
+ u32 mapping;
/* Now refill the entry, if we can. */
new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
drops++;
goto drop_it;
}
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, new_skb->data,
+ RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ dev_kfree_skb_any(new_skb);
+ drops++;
+ goto drop_it;
+ }
+
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
- dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(new_skb, RX_OFFSET);
/* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+ u32 first_len, u32 first_entry, u32 entry)
+{
+ struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+ dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+ first_entry = NEXT_TX(first_entry);
+ while (first_entry != entry) {
+ struct happy_meal_txd *this = &txbase[first_entry];
+ u32 addr, len;
+
+ addr = hme_read_desc32(hp, &this->tx_addr);
+ len = hme_read_desc32(hp, &this->tx_flags);
+ len &= TXFLAG_SIZE;
+ dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+
+ first_entry = NEXT_TX(first_entry);
+ }
+}
+
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+ goto out_dma_error;
tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
(tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+ goto out_dma_error;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ unmap_partial_tx_skb(hp, first_mapping, first_len,
+ first_entry, entry);
+ goto out_dma_error;
+ }
this_txflags = tx_flags;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
+
+out_dma_error:
+ hp->tx_skbs[hp->tx_new] = NULL;
+ spin_unlock_irq(&hp->happy_lock);
+
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
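
Every sunhme hunk above adds the same missing step: the result of dma_map_single()/skb_frag_dma_map() must be checked with dma_mapping_error() before the address is written into a descriptor, and on failure the packet is dropped (with any earlier fragment mappings undone) rather than requeued. The head-of-packet TX shape in miniature:

    mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
            dev_kfree_skb_any(skb);	/* never post a bad bus address */
            dev->stats.tx_dropped++;
            return NETDEV_TX_OK;	/* skb consumed, do not requeue */
    }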
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 952e1e4764b7..d8794488f80a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
if (enable) {
unsigned long timeout = jiffies + HZ;
- /* Disable Learn for all ports */
- for (i = 0; i < priv->data.slaves; i++) {
+ /* Disable Learn for all ports (host is port 0, slaves are ports 1 and up) */
+ for (i = 0; i <= priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
@@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
dev_dbg(&ndev->dev, "promiscuity enabled\n");
} else {
- /* Flood All Unicast Packets to Host port */
+ /* Don't Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
- /* Enable Learn for all ports */
- for (i = 0; i < priv->data.slaves; i++) {
+ /* Enable Learn for all ports (host is port 0, slaves are ports 1 and up) */
+ for (i = 0; i <= priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
@@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
+ cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
return;
} else {
/* Disable promiscuous mode */
cpsw_set_promiscious(ndev, false);
}
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
+
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
@@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
const int port = priv->host_port;
u32 reg;
int i;
+ int unreg_mcast_mask;
reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
CPSW2_PORT_VLAN;
@@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
for (i = 0; i < priv->data.slaves; i++)
slave_write(priv->slaves + i, vlan, reg);
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+
cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
- (ALE_PORT_1 | ALE_PORT_2) << port);
+ unreg_mcast_mask << port);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
int ret;
+ int unreg_mcast_mask;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
ret = cpsw_ale_add_vlan(priv->ale, vid,
ALE_ALL_PORTS << priv->host_port,
0, ALE_ALL_PORTS << priv->host_port,
- (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+ unreg_mcast_mask << priv->host_port);
if (ret != 0)
return ret;
@@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
- return -EINVAL;
+ goto no_phy_slave;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
@@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
+ slave_data->phy_if = of_get_phy_mode(slave_node);
+ if (slave_data->phy_if < 0) {
+ dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+ i);
+ return slave_data->phy_if;
+ }
+
+no_phy_slave:
mac_addr = of_get_mac_address(slave_node);
if (mac_addr) {
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return ret;
}
}
-
- slave_data->phy_if = of_get_phy_mode(slave_node);
- if (slave_data->phy_if < 0) {
- dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
- i);
- return slave_data->phy_if;
- }
-
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0579b2243bb6..097ebe7077ac 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
return 0;
}
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+ int unreg_mcast = 0;
+
+ /* Only bother doing the work if the setting is actually changing */
+ if (ale->allmulti == allmulti)
+ return;
+
+ /* Remember the new setting to check against next time */
+ ale->allmulti = allmulti;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+
+ unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
+ if (allmulti)
+ unreg_mcast |= 1;
+ else
+ unreg_mcast &= ~1;
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+}
+
struct ale_control_info {
const char *name;
int offset, port_offset;
@@ -756,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
{
if (!ale)
return -EINVAL;
- cpsw_ale_stop(ale);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
kfree(ale);
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 31cf43cab42e..c0d4127aa549 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -27,6 +27,7 @@ struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
unsigned long ageout;
+ int allmulti;
};
enum cpsw_ale_control {
@@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67da035..4a4388b813ac 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
switch (ptp_class & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
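
For context on the cpts one-liner, assuming the usual ptp_classify.h definitions: IPV4_HLEN() dereferences the IHL field a fixed ETH_HLEN past its argument, so once 'offset' carries a VLAN adjustment the macro must be handed data + offset, or it reads inside the 802.1Q tag instead of the real IP header:

    /* from ptp_classify.h, paraphrased */
    #define OFF_IHL		14	/* == ETH_HLEN */
    #define IPV4_HLEN(data)	(((struct iphdr *)(data + OFF_IHL))->ihl << 2)

    /* with PTP_CLASS_VLAN, offset == VLAN_HLEN (4) before the switch,
     * so data + offset lands the IHL read at byte 18, past the tag */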