author     Linus Torvalds <torvalds@linux-foundation.org>  2026-02-19 10:39:08 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2026-02-19 10:39:08 -0800
commit     8bf22c33e7a172fbc72464f4cc484d23a6b412ba (patch)
tree       7034d84f09ee8c239574adec764ddae7594775f0 /drivers/net/ethernet
parent     4f13d0dabc87fb585b96d90cc4b29f67a2995405 (diff)
parent     571dcbeb8e635182bb825ae758399831805693c2 (diff)
Merge tag 'net-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Including fixes from Netfilter.

  Current release - new code bugs:

   - net: fix backlog_unlock_irq_restore() vs CONFIG_PREEMPT_RT

   - eth: mlx5e: XSK, Fix unintended ICOSQ change

   - phy_port: correctly recompute the port's linkmodes

   - vsock: prevent child netns mode switch from local to global

   - couple of kconfig fixes for new symbols

  Previous releases - regressions:

   - nfc: nci: fix false-positive parameter validation for packet data

   - net: do not delay zero-copy skbs in skb_attempt_defer_free()

  Previous releases - always broken:

   - mctp: ensure our nlmsg responses to user space are zero-initialised

   - ipv6: ioam: fix heap buffer overflow in __ioam6_fill_trace_data()

   - fixes for ICMP rate limiting

  Misc:

   - intel: fix PCI device ID conflict between i40e and ipw2200"

* tag 'net-7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (85 commits)
  net: nfc: nci: Fix parameter validation for packet data
  net/mlx5e: Use unsigned for mlx5e_get_max_num_channels
  net/mlx5e: Fix deadlocks between devlink and netdev instance locks
  net/mlx5e: MACsec, add ASO poll loop in macsec_aso_set_arm_event
  net/mlx5: Fix misidentification of write combining CQE during poll loop
  net/mlx5e: Fix misidentification of ASO CQE during poll loop
  net/mlx5: Fix multiport device check over light SFs
  bonding: alb: fix UAF in rlb_arp_recv during bond up/down
  bnge: fix reserving resources from FW
  eth: fbnic: Advertise supported XDP features.
  rds: tcp: fix uninit-value in __inet_bind
  net/rds: Fix NULL pointer dereference in rds_tcp_accept_one
  octeontx2-af: Fix default entries mcam entry action
  net/mlx5e: XSK, Fix unintended ICOSQ change
  ipv6: icmp: icmpv6_xrlim_allow() optimization if net.ipv6.icmp.ratelimit is zero
  ipv4: icmp: icmpv4_xrlim_allow() optimization if net.ipv4.icmp_ratelimit is zero
  ipv6: icmp: remove obsolete code in icmpv6_xrlim_allow()
  inet: move icmp_global_{credit,stamp} to a separate cache line
  icmp: prevent possible overflow in icmp_global_allow()
  selftests/net: packetdrill: add ipv4-mapped-ipv6 tests
  ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c              |  2
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_netdev.c                |  1
-rw-r--r--  drivers/net/ethernet/ec_bhf.c                                   |  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c                     |  8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c             | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                    |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c                | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c        | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c        | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c           | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c           |  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c             |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c       | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c               | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wc.c                    | 14
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c                 |  3
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c                  |  3
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c                  | 20
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c                     | 19
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_rpc.c                     |  5
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.c                    | 25
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.h                    |  2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c              |  2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_qos.h              |  2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c                          | 75
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c                | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c               | 20
27 files changed, 261 insertions, 171 deletions
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 84c90a957719..91a4ef9e3150 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -442,7 +442,7 @@ __bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr)
struct hwrm_func_cfg_input *req;
u32 enables = 0;
- if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG))
+ if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG))
return NULL;
req->fid = cpu_to_le16(0xffff);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index 6ab317f1c16e..b8e258842c61 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -16,7 +16,6 @@
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
-#include <net/netdev_queues.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 67275aa4f65b..0c86cbb0313c 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -423,7 +423,7 @@ static int ec_bhf_open(struct net_device *net_dev)
error_rx_free:
dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
- priv->rx_buf.alloc_len);
+ priv->rx_buf.alloc_phys);
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d3bc3207054f..02de186dcc8f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -75,7 +75,13 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
+ /*
+ * This ID conflicts with ipw2200, but the devices can be differentiated
+ * because i40e devices use PCI_CLASS_NETWORK_ETHERNET and ipw2200
+ * devices use PCI_CLASS_NETWORK_OTHER.
+ */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_B),
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
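The new i40e table entry relies on class-based matching to disambiguate the shared device ID. A minimal sketch of the class comparison the PCI core applies to each table entry (modelled on pci_match_one_device(); the helper name below is illustrative, not part of the patch):

static bool i40e_id_class_matches(const struct pci_device_id *id,
				  const struct pci_dev *dev)
{
	/* class_mask = 0xffff00 keeps the base-class and subclass bytes and
	 * ignores the programming-interface byte, so an Ethernet-class i40e
	 * device (PCI_CLASS_NETWORK_ETHERNET) matches while an ipw2200
	 * device (PCI_CLASS_NETWORK_OTHER) does not.
	 */
	return !((id->class ^ dev->class) & id->class_mask);
}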
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c7c70429eb6c..8658cb2143df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -1042,32 +1042,35 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
- /* update the VF flow rule action with the VF default entry action */
- if (mcam_index < 0)
- npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
- *(u64 *)&action);
-
/* update the action change in default rule */
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (pfvf->def_ucast_rule)
pfvf->def_ucast_rule->rx_action = action;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_PROMISC_ENTRY);
+ if (mcam_index < 0) {
+ /* update the VF flow rule action with the VF default
+ * entry action
+ */
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
- /* If PF's promiscuous entry is enabled,
- * Set RSS action for that entry as well
- */
- npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
- alg_idx);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_ALLMULTI_ENTRY);
- /* If PF's allmulti entry is enabled,
- * Set RSS action for that entry as well
- */
- npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
- alg_idx);
+ /* If PF's promiscuous entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index,
+ blkaddr, alg_idx);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+ /* If PF's allmulti entry is enabled,
+ * Set RSS action for that entry as well
+ */
+ npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index,
+ blkaddr, alg_idx);
+ }
}
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a7de3a3efc49..ea2cd1f5d1d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -180,7 +180,8 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
}
/* Use this function to get max num channels (rxqs/txqs) only to create netdev */
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+static inline unsigned int
+mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
@@ -1103,6 +1104,7 @@ int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
+void mlx5e_trigger_napi_async_icosq(struct mlx5e_channel *c);
void mlx5e_trigger_napi_sched(struct napi_struct *napi);
int mlx5e_open_channels(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 424f8a2728a3..74660e7fe674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -457,22 +457,8 @@ static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
struct mlx5e_ptpsq *ptpsq =
container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);
- struct mlx5e_txqsq *sq = &ptpsq->txqsq;
-
- /* Recovering the PTP SQ means re-enabling NAPI, which requires the
- * netdev instance lock. However, SQ closing has to wait for this work
- * task to finish while also holding the same lock. So either get the
- * lock or find that the SQ is no longer enabled and thus this work is
- * not relevant anymore.
- */
- while (!netdev_trylock(sq->netdev)) {
- if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
- return;
- msleep(20);
- }
mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
- netdev_unlock(sq->netdev);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 0686fbdd5a05..6efb626b5506 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Mellanox Technologies.
+#include <net/netdev_lock.h>
+
#include "health.h"
#include "params.h"
#include "txrx.h"
@@ -177,6 +179,16 @@ static int mlx5e_rx_reporter_timeout_recover(void *ctx)
rq = ctx;
priv = rq->priv;
+ /* Acquire netdev instance lock to synchronize with channel close and
+ * reopen flows. Either successfully obtain the lock, or detect that
+ * channels are closing for another reason, making this work no longer
+ * necessary.
+ */
+ while (!netdev_trylock(rq->netdev)) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
+ return 0;
+ msleep(20);
+ }
mutex_lock(&priv->state_lock);
eq = rq->cq.mcq.eq;
@@ -186,6 +198,7 @@ static int mlx5e_rx_reporter_timeout_recover(void *ctx)
clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(rq->netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 4adc1adf9897..60ba840e00fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Mellanox Technologies. */
+#include <net/netdev_lock.h>
+
#include "health.h"
#include "en/ptp.h"
#include "en/devlink.h"
@@ -79,6 +81,18 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
return 0;
+ /* Recovering queues means re-enabling NAPI, which requires the netdev
+ * instance lock. However, SQ closing flows have to wait for work tasks
+ * to finish while also holding the netdev instance lock. So either get
+ * the lock or find that the SQ is no longer enabled and thus this work
+ * is not relevant anymore.
+ */
+ while (!netdev_trylock(dev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
+ return 0;
+ msleep(20);
+ }
+
err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
if (err) {
netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
@@ -114,9 +128,11 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
else
mlx5e_trigger_napi_sched(sq->cq.napi);
+ netdev_unlock(dev);
return 0;
out:
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ netdev_unlock(dev);
return err;
}
@@ -137,10 +153,24 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
sq = to_ctx->sq;
eq = sq->cq.mcq.eq;
priv = sq->priv;
+
+ /* Recovering the TX queues implies re-enabling NAPI, which requires
+ * the netdev instance lock.
+ * However, channel closing flows have to wait for this work to finish
+ * while holding the same lock. So either get the lock or find that
+ * channels are being closed for other reason and this work is not
+ * relevant anymore.
+ */
+ while (!netdev_trylock(sq->netdev)) {
+ if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
+ return 0;
+ msleep(20);
+ }
+
err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats);
if (!err) {
to_ctx->status = 0; /* this sq recovered */
- return err;
+ goto out;
}
mutex_lock(&priv->state_lock);
@@ -148,7 +178,7 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
mutex_unlock(&priv->state_lock);
if (!err) {
to_ctx->status = 1; /* all channels recovered */
- return err;
+ goto out;
}
to_ctx->status = err;
@@ -156,7 +186,8 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
netdev_err(priv->netdev,
"mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
err);
-
+out:
+ netdev_unlock(sq->netdev);
return err;
}
@@ -173,10 +204,22 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
return 0;
priv = ptpsq->txqsq.priv;
+ netdev = priv->netdev;
+
+ /* Recovering the PTP SQ means re-enabling NAPI, which requires the
+ * netdev instance lock. However, SQ closing has to wait for this work
+ * task to finish while also holding the same lock. So either get the
+ * lock or find that the SQ is no longer enabled and thus this work is
+ * not relevant anymore.
+ */
+ while (!netdev_trylock(netdev)) {
+ if (!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state))
+ return 0;
+ msleep(20);
+ }
mutex_lock(&priv->state_lock);
chs = &priv->channels;
- netdev = priv->netdev;
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
@@ -193,6 +236,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
netif_carrier_on(netdev);
mutex_unlock(&priv->state_lock);
+ netdev_unlock(netdev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 7819fb297280..d5d9146efca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#include <linux/iopoll.h>
#include <linux/math64.h>
#include "lib/aso.h"
#include "en/tc/post_act.h"
@@ -115,7 +116,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
struct mlx5e_flow_meters *flow_meters;
u8 cir_man, cir_exp, cbs_man, cbs_exp;
struct mlx5_aso_wqe *aso_wqe;
- unsigned long expires;
struct mlx5_aso *aso;
u64 rate, burst;
u8 ds_cnt;
@@ -187,12 +187,8 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
/* With newer FW, the wait for the first ASO WQE is more than 2us, put the wait 10ms. */
- expires = jiffies + msecs_to_jiffies(10);
- do {
- err = mlx5_aso_poll_cq(aso, true);
- if (err)
- usleep_range(2, 10);
- } while (err && time_is_after_jiffies(expires));
+ read_poll_timeout(mlx5_aso_poll_cq, err, !err, 10, 10 * USEC_PER_MSEC,
+ false, aso, true);
mutex_unlock(&flow_meters->aso_lock);
return err;
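For reference, a simplified sketch of what the read_poll_timeout() call above amounts to (condensed from include/linux/iopoll.h; the real macro also re-polls once on timeout and yields -ETIMEDOUT, a value this call site discards because it only consumes err):

	ktime_t timeout = ktime_add_us(ktime_get(), 10 * USEC_PER_MSEC);

	for (;;) {
		err = mlx5_aso_poll_cq(aso, true);	/* op(args...) -> val */
		if (!err)				/* cond satisfied     */
			break;
		if (ktime_compare(ktime_get(), timeout) > 0)
			break;				/* ~10 ms budget used */
		usleep_range(3, 10);			/* sleep_us = 10      */
	}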
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index db776e515b6a..5c5360a25c64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -127,7 +127,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
goto err_remove_pool;
mlx5e_activate_xsk(c);
- mlx5e_trigger_napi_icosq(c);
+ mlx5e_trigger_napi_async_icosq(c);
/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
* any Fill Ring entries at the setup stage.
@@ -179,7 +179,7 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
c = priv->channels.c[ix];
mlx5e_activate_rq(&c->rq);
- mlx5e_trigger_napi_icosq(c);
+ mlx5e_trigger_napi_async_icosq(c);
mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);
mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 9e33156fac8a..8aeab4b21035 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -34,7 +34,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
&c->async_icosq->state))
return 0;
- mlx5e_trigger_napi_icosq(c);
+ mlx5e_trigger_napi_async_icosq(c);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 528b04d4de41..90b3bc5f9166 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -5,6 +5,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/xarray.h>
#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
#include "en.h"
#include "lib/aso.h"
@@ -1385,7 +1386,8 @@ static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_mac
MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- err = mlx5_aso_poll_cq(maso, false);
+ read_poll_timeout(mlx5_aso_poll_cq, err, !err, 10, 10 * USEC_PER_MSEC,
+ false, maso, false);
mutex_unlock(&aso->aso_lock);
return err;
@@ -1397,7 +1399,6 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
struct mlx5e_macsec_aso *aso;
struct mlx5_aso_wqe *aso_wqe;
struct mlx5_aso *maso;
- unsigned long expires;
int err;
aso = &macsec->aso;
@@ -1411,12 +1412,8 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
- expires = jiffies + msecs_to_jiffies(10);
- do {
- err = mlx5_aso_poll_cq(maso, false);
- if (err)
- usleep_range(2, 10);
- } while (err && time_is_after_jiffies(expires));
+ read_poll_timeout(mlx5_aso_poll_cq, err, !err, 10, 10 * USEC_PER_MSEC,
+ false, maso, false);
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4b8084420816..7eb691c2a1bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -631,19 +631,7 @@ static void mlx5e_rq_timeout_work(struct work_struct *timeout_work)
struct mlx5e_rq,
rx_timeout_work);
- /* Acquire netdev instance lock to synchronize with channel close and
- * reopen flows. Either successfully obtain the lock, or detect that
- * channels are closing for another reason, making this work no longer
- * necessary.
- */
- while (!netdev_trylock(rq->netdev)) {
- if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
- return;
- msleep(20);
- }
-
mlx5e_reporter_rx_timeout(rq);
- netdev_unlock(rq->netdev);
}
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
@@ -1952,20 +1940,7 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
- /* Recovering queues means re-enabling NAPI, which requires the netdev
- * instance lock. However, SQ closing flows have to wait for work tasks
- * to finish while also holding the netdev instance lock. So either get
- * the lock or find that the SQ is no longer enabled and thus this work
- * is not relevant anymore.
- */
- while (!netdev_trylock(sq->netdev)) {
- if (!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))
- return;
- msleep(20);
- }
-
mlx5e_reporter_tx_err_cqe(sq);
- netdev_unlock(sq->netdev);
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
@@ -2744,16 +2719,26 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
{
+ struct mlx5e_icosq *sq = &c->icosq;
bool locked;
- if (!test_and_set_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &c->icosq.state))
- synchronize_net();
+ set_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &sq->state);
+ synchronize_net();
+
+ locked = mlx5e_icosq_sync_lock(sq);
+ mlx5e_trigger_irq(sq);
+ mlx5e_icosq_sync_unlock(sq, locked);
- locked = mlx5e_icosq_sync_lock(&c->icosq);
- mlx5e_trigger_irq(&c->icosq);
- mlx5e_icosq_sync_unlock(&c->icosq, locked);
+ clear_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &sq->state);
+}
+
+void mlx5e_trigger_napi_async_icosq(struct mlx5e_channel *c)
+{
+ struct mlx5e_icosq *sq = c->async_icosq;
- clear_bit(MLX5E_SQ_STATE_LOCK_NEEDED, &c->icosq.state);
+ spin_lock_bh(&sq->lock);
+ mlx5e_trigger_irq(sq);
+ spin_unlock_bh(&sq->lock);
}
void mlx5e_trigger_napi_sched(struct napi_struct *napi)
@@ -2836,7 +2821,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq_locked(&c->napi, irq);
- async_icosq_needed = !!xsk_pool || priv->ktls_rx_was_enabled;
+ async_icosq_needed = !!params->xdp_prog || priv->ktls_rx_was_enabled;
err = mlx5e_open_queues(c, params, cparam, async_icosq_needed);
if (unlikely(err))
goto err_napi_del;
@@ -5105,19 +5090,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
struct net_device *netdev = priv->netdev;
int i;
- /* Recovering the TX queues implies re-enabling NAPI, which requires
- * the netdev instance lock.
- * However, channel closing flows have to wait for this work to finish
- * while holding the same lock. So either get the lock or find that
- * channels are being closed for other reason and this work is not
- * relevant anymore.
- */
- while (!netdev_trylock(netdev)) {
- if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state))
- return;
- msleep(20);
- }
-
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
@@ -5130,8 +5102,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
/* break if tried to reopened channels */
break;
}
-
- netdev_unlock(netdev);
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 815a7c97d6b0..04d03be1bb77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -2,6 +2,7 @@
// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mlx5/transobj.h>
#include "lib/clock.h"
#include "mlx5_core.h"
@@ -15,7 +16,7 @@
#define TEST_WC_NUM_WQES 255
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
-#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+#define TEST_WC_POLLING_MAX_TIME_USEC (100 * USEC_PER_MSEC)
struct mlx5_wc_cq {
/* data path - accessed per cqe */
@@ -359,7 +360,6 @@ static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
unsigned int offset = 0;
- unsigned long expires;
struct mlx5_wc_sq *sq;
int i, err;
@@ -389,13 +389,9 @@ static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
mlx5_wc_post_nop(sq, &offset, true);
- expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
- do {
- err = mlx5_wc_poll_cq(sq);
- if (err)
- usleep_range(2, 10);
- } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
- time_is_after_jiffies(expires));
+ poll_timeout_us(mlx5_wc_poll_cq(sq),
+ mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED, 10,
+ TEST_WC_POLLING_MAX_TIME_USEC, false);
mlx5_wc_destroy_sq(sq);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 11745a2d8a44..401c2196b9ff 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -1145,6 +1145,9 @@ ipv6_flow:
return -EINVAL;
}
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+
/* Write action table values */
act_tcam->dest = dest;
act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
index 85a883dba385..d8a9a7d7c237 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
@@ -51,8 +51,6 @@ int fbnic_fw_log_init(struct fbnic_dev *fbd)
log->data_start = data;
log->data_end = data + FBNIC_FW_LOG_SIZE;
- fbnic_fw_log_enable(fbd, true);
-
return 0;
}
@@ -63,7 +61,6 @@ void fbnic_fw_log_free(struct fbnic_dev *fbd)
if (!fbnic_fw_log_ready(fbd))
return;
- fbnic_fw_log_disable(fbd);
INIT_LIST_HEAD(&log->entries);
log->size = 0;
vfree(log->data_start);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 81c9d5c9a4b2..b4b396ca9bce 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -262,6 +262,23 @@ static int fbnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int fbnic_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+
+ if (fbnic_check_split_frames(fbn->xdp_prog, new_mtu, fbn->hds_thresh)) {
+ dev_err(&dev->dev,
+ "MTU %d is larger than HDS threshold %d in XDP mode\n",
+ new_mtu, fbn->hds_thresh);
+
+ return -EINVAL;
+ }
+
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ return 0;
+}
+
void fbnic_clear_rx_mode(struct fbnic_dev *fbd)
{
struct net_device *netdev = fbd->netdev;
@@ -533,6 +550,7 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_start_xmit = fbnic_xmit_frame,
.ndo_features_check = fbnic_features_check,
.ndo_set_mac_address = fbnic_set_mac,
+ .ndo_change_mtu = fbnic_change_mtu,
.ndo_set_rx_mode = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
.ndo_bpf = fbnic_bpf,
@@ -787,6 +805,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netdev->hw_enc_features |= netdev->features;
netdev->features |= NETIF_F_NTUPLE;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_RX_SG;
+
netdev->min_mtu = IPV6_MIN_MTU;
netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 6f9389748a7d..3fa9d1910daa 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -311,11 +311,17 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto free_irqs;
}
+ err = fbnic_fw_log_init(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Unable to initialize firmware log buffer: %d\n",
+ err);
+
err = fbnic_fw_request_mbx(fbd);
if (err) {
dev_err(&pdev->dev,
"Firmware mailbox initialization failure\n");
- goto free_irqs;
+ goto free_fw_log;
}
/* Send the request to enable the FW logging to host. Note if this
@@ -323,11 +329,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* possible the FW is just too old to support the logging and needs
* to be updated.
*/
- err = fbnic_fw_log_init(fbd);
- if (err)
- dev_warn(fbd->dev,
- "Unable to initialize firmware log buffer: %d\n",
- err);
+ fbnic_fw_log_enable(fbd, true);
fbnic_devlink_register(fbd);
fbnic_devlink_otp_check(fbd, "error detected during probe");
@@ -374,6 +376,8 @@ init_failure_mode:
* firmware updates for fixes.
*/
return 0;
+free_fw_log:
+ fbnic_fw_log_free(fbd);
free_irqs:
fbnic_free_irqs(fbd);
err_destroy_health:
@@ -408,8 +412,9 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_hwmon_unregister(fbd);
fbnic_dbg_fbd_exit(fbd);
fbnic_devlink_unregister(fbd);
- fbnic_fw_log_free(fbd);
+ fbnic_fw_log_disable(fbd);
fbnic_fw_free_mbx(fbd);
+ fbnic_fw_log_free(fbd);
fbnic_free_irqs(fbd);
fbnic_devlink_health_destroy(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 7f31e890031c..42a186db43ea 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -338,9 +338,8 @@ void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
else if (tstamp_mask & (1u << flow_type))
dest |= FBNIC_RPC_ACT_TBL0_TS_ENA;
- if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
- dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
- FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index e29959241ff3..4aaa928bf8ab 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -2591,7 +2591,8 @@ write_ctl:
}
static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
- struct fbnic_ring *rcq, bool tx_pause)
+ struct fbnic_ring *rcq, bool tx_pause,
+ bool hdr_split)
{
struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 drop_mode, rcq_ctl;
@@ -2604,22 +2605,26 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
/* Specify packet layout */
rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
- FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT, hdr_split);
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
-void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause)
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool txp)
{
+ bool hds;
int i, t;
+ hds = fbn->hds_thresh < FBNIC_HDR_BYTES_MIN;
+
for (i = 0; i < fbn->num_napi; i++) {
struct fbnic_napi_vector *nv = fbn->napi[i];
for (t = 0; t < nv->rxt_count; t++) {
struct fbnic_q_triad *qt = &nv->qt[nv->txt_count + t];
- fbnic_config_drop_mode_rcq(nv, &qt->cmpl, tx_pause);
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl, txp, hds);
}
}
}
@@ -2670,20 +2675,18 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
{
struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
- u32 hds_thresh = fbn->hds_thresh;
u32 rcq_ctl = 0;
-
- fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause);
+ bool hdr_split;
+ u32 hds_thresh;
/* Force lower bound on MAX_HEADER_BYTES. Below this, all frames should
* be split at L4. It would also result in the frames being split at
* L2/L3 depending on the frame size.
*/
- if (fbn->hds_thresh < FBNIC_HDR_BYTES_MIN) {
- rcq_ctl = FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT;
- hds_thresh = FBNIC_HDR_BYTES_MIN;
- }
+ hdr_split = fbn->hds_thresh < FBNIC_HDR_BYTES_MIN;
+ fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause, hdr_split);
+ hds_thresh = max(fbn->hds_thresh, FBNIC_HDR_BYTES_MIN);
rcq_ctl |= FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, hds_thresh) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index b9560103ab86..980965274079 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -66,7 +66,7 @@ struct fbnic_net;
(4096 - FBNIC_RX_HROOM - FBNIC_RX_TROOM - FBNIC_RX_PAD)
#define FBNIC_HDS_THRESH_DEFAULT \
(1536 - FBNIC_RX_PAD)
-#define FBNIC_HDR_BYTES_MIN 128
+#define FBNIC_HDR_BYTES_MIN 256
struct fbnic_pkt_buff {
struct xdp_buff buff;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 2f168700f63c..8b2e07821a95 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -576,7 +576,7 @@ static int sparx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static struct ptp_clock_info sparx5_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "sparx5 ptp",
- .max_adj = 200000,
+ .max_adj = 10000000,
.gettime64 = sparx5_ptp_gettime64,
.settime64 = sparx5_ptp_settime64,
.adjtime = sparx5_ptp_adjtime,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
index 1231a80335d7..04f76f1e23f6 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -35,7 +35,7 @@
#define SPX5_SE_BURST_UNIT 4096
/* Dwrr */
-#define SPX5_DWRR_COST_MAX 63
+#define SPX5_DWRR_COST_MAX 31
struct sparx5_shaper {
u32 mode;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 469784d3a1a6..1b8269320464 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -551,44 +551,81 @@ static int ocelot_port_stop(struct net_device *dev)
return 0;
}
-static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+static bool ocelot_xmit_timestamp(struct ocelot *ocelot, int port,
+ struct sk_buff *skb, u32 *rew_op)
{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot_port *ocelot_port = &priv->port;
- struct ocelot *ocelot = ocelot_port->ocelot;
- int port = priv->port.index;
- u32 rew_op = 0;
-
- if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
- !ocelot_can_inject(ocelot, 0))
- return NETDEV_TX_BUSY;
-
- /* Check if timestamping is needed */
if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct sk_buff *clone = NULL;
if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return false;
}
if (clone)
OCELOT_SKB_CB(skb)->clone = clone;
- rew_op = ocelot_ptp_rew_op(skb);
+ *rew_op = ocelot_ptp_rew_op(skb);
}
- if (static_branch_unlikely(&ocelot_fdma_enabled)) {
- ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
- } else {
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ return true;
+}
+
+static netdev_tx_t ocelot_port_xmit_fdma(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->port.index;
+ u32 rew_op = 0;
+
+ if (!ocelot_xmit_timestamp(ocelot, port, skb, &rew_op))
+ return NETDEV_TX_OK;
+
+ ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
+
+ return NETDEV_TX_OK;
+}
- consume_skb(skb);
+static netdev_tx_t ocelot_port_xmit_inj(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->port.index;
+ u32 rew_op = 0;
+
+ ocelot_lock_inj_grp(ocelot, 0);
+
+ if (!ocelot_can_inject(ocelot, 0)) {
+ ocelot_unlock_inj_grp(ocelot, 0);
+ return NETDEV_TX_BUSY;
}
+ if (!ocelot_xmit_timestamp(ocelot, port, skb, &rew_op)) {
+ ocelot_unlock_inj_grp(ocelot, 0);
+ return NETDEV_TX_OK;
+ }
+
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+
+ ocelot_unlock_inj_grp(ocelot, 0);
+
+ consume_skb(skb);
+
return NETDEV_TX_OK;
}
+static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ if (static_branch_unlikely(&ocelot_fdma_enabled))
+ return ocelot_port_xmit_fdma(skb, dev);
+
+ return ocelot_port_xmit_inj(skb, dev);
+}
+
enum ocelot_action_type {
OCELOT_MACT_LEARN,
OCELOT_MACT_FORGET,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 2f0cdbd4e2ac..d9b5d7999370 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -688,9 +688,9 @@ static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
/* probe for IPv6 TSO support */
mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
- cmd.data0 = 0,
- cmd.data1 = 0,
- cmd.data2 = 0,
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
&cmd, 0);
if (status == 0) {
@@ -821,9 +821,9 @@ static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
int status, ctl;
ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
- cmd.data0 = 0,
- cmd.data1 = 0,
- cmd.data2 = 0,
+ cmd.data0 = 0;
+ cmd.data1 = 0;
+ cmd.data2 = 0;
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
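The myri10ge hunks above are purely stylistic: the trailing commas chained the assignments into a single comma-operator expression, which still evaluates left to right and stores the same values, so behaviour is unchanged; switching to semicolons simply makes each assignment its own statement. A tiny illustration with hypothetical variables:

	int a, b, status;

	a = 0, b = 0, status = -1;	/* one comma-operator expression statement */
	a = 0; b = 0; status = -1;	/* three statements, same effect, clearer  */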
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c63099a77cc0..82375d34ad57 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5040,13 +5040,27 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
if (!priv->sph_active)
return 0;
- /* Not last descriptor */
- if (status & rx_not_ls)
+ /* For GMAC4, when split header is enabled, in some rare cases, the
+ * hardware does not fill buf2 of the first descriptor with payload.
+ * Thus we cannot assume buf2 is always fully filled if it is not
+ * the last descriptor. Otherwise, the length of buf2 of the second
+ * descriptor will be calculated wrong and cause an oops.
+ *
+ * If this is the last descriptor, 'plen' is the length of the
+ * received packet that was transferred to system memory.
+ * Otherwise, it is the accumulated number of bytes that have been
+ * transferred for the current packet.
+ *
+ * Thus 'plen - len' always gives the correct length of buf2.
+ */
+
+ /* Not GMAC4 and not last descriptor */
+ if (priv->plat->core_type != DWMAC_CORE_GMAC4 && (status & rx_not_ls))
return priv->dma_conf.dma_buf_sz;
+ /* GMAC4 or last descriptor */
plen = stmmac_get_rx_frame_len(priv, p, coe);
- /* Last descriptor */
return plen - len;
}