Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 39
-rw-r--r--  drivers/net/bonding/bond_options.c | 8
-rw-r--r--  drivers/net/can/Kconfig | 7
-rw-r--r--  drivers/net/can/Makefile | 2
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/ctucanfd/ctucanfd_base.c | 2
-rw-r--r--  drivers/net/can/dev/Makefile | 5
-rw-r--r--  drivers/net/can/dev/dev.c | 28
-rw-r--r--  drivers/net/can/dev/netlink.c | 1
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 8
-rw-r--r--  drivers/net/can/usb/esd_usb.c | 9
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 11
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 9
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 8
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 8
-rw-r--r--  drivers/net/can/vcan.c | 15
-rw-r--r--  drivers/net/can/vxcan.c | 15
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 23
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 46
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 5
-rw-r--r--  drivers/net/dsa/yt921x.c | 15
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/airoha/airoha_eth.c | 39
-rw-r--r--  drivers/net/ethernet/airoha/airoha_ppe.c | 9
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 5
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnge/bnge_core.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 4
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 5
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 14
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 69
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic3/hinic3_irq.c | 22
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 32
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 7
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 92
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_idc.c | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 276
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 62
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 6
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 43
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 86
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 1
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 10
-rw-r--r--  drivers/net/ethernet/spacemit/k1_emac.c | 34
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 29
-rw-r--r--  drivers/net/ethernet/wangxun/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c | 4
-rw-r--r--  drivers/net/fjes/fjes_hw.c | 12
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 16
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 49
-rw-r--r--  drivers/net/macvlan.c | 20
-rw-r--r--  drivers/net/mdio/mdio-aspeed.c | 7
-rw-r--r--  drivers/net/mdio/mdio-realtek-rtl9300.c | 6
-rw-r--r--  drivers/net/netdevsim/bpf.c | 6
-rw-r--r--  drivers/net/netdevsim/bus.c | 8
-rw-r--r--  drivers/net/netdevsim/dev.c | 2
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r--  drivers/net/pcs/pcs-mtk-lynxi.c | 4
-rw-r--r--  drivers/net/phy/intel-xway.c | 7
-rw-r--r--  drivers/net/phy/mediatek/mtk-ge-soc.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 17
-rw-r--r--  drivers/net/phy/motorcomm.c | 4
-rw-r--r--  drivers/net/phy/mxl-86110.c | 3
-rw-r--r--  drivers/net/phy/sfp.c | 4
-rw-r--r--  drivers/net/team/team_core.c | 2
-rw-r--r--  drivers/net/usb/asix_common.c | 5
-rw-r--r--  drivers/net/usb/ax88172a.c | 6
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/rtl8150.c | 2
-rw-r--r--  drivers/net/usb/sr9700.c | 9
-rw-r--r--  drivers/net/usb/usbnet.c | 13
-rw-r--r--  drivers/net/veth.c | 8
-rw-r--r--  drivers/net/virtio_net.c | 181
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath12k/ce.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/ptp.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ptp.c | 7
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 6
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 12
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/sdio.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtw88/usb.c | 3
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 1
-rw-r--r--  drivers/net/wireless/ti/wlcore/tx.c | 5
-rw-r--r--  drivers/net/wireless/virtual/mac80211_hwsim.c | 6
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux.c | 6
-rw-r--r--  drivers/net/wwan/mhi_wwan_mbim.c | 17
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 9
166 files changed, 1364 insertions, 931 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3d56339a8a10..45bd2bb102ff 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1862,6 +1862,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
+ if (slave_dev->type != ARPHRD_ETHER &&
+ BOND_MODE(bond) == BOND_MODE_8023AD) {
+ SLAVE_NL_ERR(bond_dev, slave_dev, extack,
+ "8023AD mode requires Ethernet devices");
+ return -EINVAL;
+ }
slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
bond_dev->type, slave_dev->type);
@@ -2196,11 +2202,6 @@ skip_mac_set:
unblock_netpoll_tx();
}
- /* broadcast mode uses the all_slaves to loop through slaves. */
- if (bond_mode_can_use_xmit_hash(bond) ||
- BOND_MODE(bond) == BOND_MODE_BROADCAST)
- bond_update_slave_arr(bond, NULL);
-
if (!slave_dev->netdev_ops->ndo_bpf ||
!slave_dev->netdev_ops->ndo_xdp_xmit) {
if (bond->xdp_prog) {
@@ -2234,6 +2235,11 @@ skip_mac_set:
bpf_prog_inc(bond->xdp_prog);
}
+ /* broadcast mode uses the all_slaves to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
+ bond_update_slave_arr(bond, NULL);
+
bond_xdp_set_features(bond_dev);
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
@@ -3041,8 +3047,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
__func__, &sip);
return;
}
- slave->last_rx = jiffies;
- slave->target_last_arp_rx[i] = jiffies;
+ WRITE_ONCE(slave->last_rx, jiffies);
+ WRITE_ONCE(slave->target_last_arp_rx[i], jiffies);
}
static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
@@ -3261,8 +3267,8 @@ static void bond_validate_na(struct bonding *bond, struct slave *slave,
__func__, saddr);
return;
}
- slave->last_rx = jiffies;
- slave->target_last_arp_rx[i] = jiffies;
+ WRITE_ONCE(slave->last_rx, jiffies);
+ WRITE_ONCE(slave->target_last_arp_rx[i], jiffies);
}
static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
@@ -3332,7 +3338,7 @@ int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
(slave_do_arp_validate_only(bond) && is_ipv6) ||
#endif
!slave_do_arp_validate_only(bond))
- slave->last_rx = jiffies;
+ WRITE_ONCE(slave->last_rx, jiffies);
return RX_HANDLER_ANOTHER;
} else if (is_arp) {
return bond_arp_rcv(skb, bond, slave);
@@ -3400,7 +3406,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, last_tx, 1) &&
- bond_time_in_interval(bond, slave->last_rx, 1)) {
+ bond_time_in_interval(bond, READ_ONCE(slave->last_rx), 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
slave_state_changed = 1;
@@ -3424,8 +3430,10 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
- !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
+ if (!bond_time_in_interval(bond, last_tx,
+ bond->params.missed_max) ||
+ !bond_time_in_interval(bond, READ_ONCE(slave->last_rx),
+ bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
@@ -4090,8 +4098,9 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
case BOND_XMIT_POLICY_ENCAP23:
case BOND_XMIT_POLICY_ENCAP34:
memset(fk, 0, sizeof(*fk));
- return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
- fk, data, l2_proto, nhoff, hlen, 0);
+ return __skb_flow_dissect(dev_net(bond->dev), skb,
+ &flow_keys_bonding, fk, data,
+ l2_proto, nhoff, hlen, 0);
default:
break;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 384499c869b8..f1c6e9d8f616 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1152,7 +1152,7 @@ static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot,
if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) {
bond_for_each_slave(bond, slave, iter)
- slave->target_last_arp_rx[slot] = last_rx;
+ WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx);
targets[slot] = target;
}
}
@@ -1221,8 +1221,8 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
bond_for_each_slave(bond, slave, iter) {
targets_rx = slave->target_last_arp_rx;
for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
- targets_rx[i] = targets_rx[i+1];
- targets_rx[i] = 0;
+ WRITE_ONCE(targets_rx[i], READ_ONCE(targets_rx[i+1]));
+ WRITE_ONCE(targets_rx[i], 0);
}
for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++)
targets[i] = targets[i+1];
@@ -1377,7 +1377,7 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
bond_for_each_slave(bond, slave, iter) {
- slave->target_last_arp_rx[slot] = last_rx;
+ WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx);
slave_set_ns_maddr(bond, slave, target, &targets[slot]);
}
targets[slot] = *target;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index cfaea6178a71..e15e320db476 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_DEV
- bool "CAN Device Drivers"
+ tristate "CAN Device Drivers"
default y
depends on CAN
help
@@ -17,7 +17,10 @@ menuconfig CAN_DEV
virtual ones. If you own such devices or plan to use the virtual CAN
interfaces to develop applications, say Y here.
-if CAN_DEV && CAN
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 37e2f1a2faec..d7bc10a6b8ea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
-obj-$(CONFIG_CAN_DEV) += dev/
+obj-y += dev/
obj-y += esd/
obj-y += rcar/
obj-y += rockchip/
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index c2a3a4eef5b2..58da323f14d7 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1099,7 +1099,7 @@ static int at91_can_probe(struct platform_device *pdev)
if (IS_ERR(transceiver)) {
err = PTR_ERR(transceiver);
dev_err_probe(&pdev->dev, err, "failed to get phy\n");
- goto exit_iounmap;
+ goto exit_free;
}
dev->netdev_ops = &at91_netdev_ops;
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 1e6b9e3dc2fe..0ea1ff28dfce 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -310,7 +310,7 @@ static int ctucan_set_secondary_sample_point(struct net_device *ndev)
}
ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
- ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x0);
}
ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 64226acf0f3d..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN) += can-dev.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
+
+can-dev-y += skb.o
-can-dev-$(CONFIG_CAN_DEV) += skb.o
can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
can-dev-$(CONFIG_CAN_NETLINK) += dev.o
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 091f30e94c61..769745e22a3c 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -332,6 +332,7 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
+ can_set_cap(dev, CAN_CAP_CC);
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;
@@ -375,6 +376,32 @@ void can_set_default_mtu(struct net_device *dev)
}
}
+void can_set_cap_info(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ u32 can_cap;
+
+ if (can_dev_in_xl_only_mode(priv)) {
+ /* XL only mode => no CC/FD capability */
+ can_cap = CAN_CAP_XL;
+ } else {
+ /* mixed mode => CC + FD/XL capability */
+ can_cap = CAN_CAP_CC;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_FD)
+ can_cap |= CAN_CAP_FD;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_XL)
+ can_cap |= CAN_CAP_XL;
+ }
+
+ if (priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_RESTRICTED))
+ can_cap |= CAN_CAP_RO;
+
+ can_set_cap(dev, can_cap);
+}
+
/* helper to define static CAN controller features at device creation time */
int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
{
@@ -390,6 +417,7 @@ int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
/* override MTU which was set by default in can_setup()? */
can_set_default_mtu(dev);
+ can_set_cap_info(dev);
return 0;
}
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index d6b0e686fb11..0498198a4696 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -377,6 +377,7 @@ static int can_ctrlmode_changelink(struct net_device *dev,
}
can_set_default_mtu(dev);
+ can_set_cap_info(dev);
return 0;
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index de8e212a1366..4c219a5b139b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -486,11 +486,17 @@ resubmit_urb:
urb->transfer_buffer, RX_BUFFER_SIZE,
ems_usb_read_bulk_callback, dev);
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!retval)
+ return;
+
+ usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
- else if (retval)
+ else
netdev_err(netdev,
"failed resubmitting read bulk urb: %d\n", retval);
}
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 08da507faef4..8cc924c47042 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -541,13 +541,20 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ return;
+
+ usb_unanchor_urb(urb);
+
if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
- } else if (err) {
+ } else {
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index f799233c2b72..2d248deb69dc 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1736,7 +1736,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a0233e550a5a..d8b2dd74b3a1 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -610,7 +610,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;
struct gs_can *dev;
- struct net_device *netdev;
+ struct net_device *netdev = NULL;
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
@@ -751,7 +751,13 @@ resubmit_urb:
hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
+ usb_anchor_urb(urb, &parent->rx_submitted);
+
rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!rc)
+ return;
+
+ usb_unanchor_urb(urb);
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
@@ -760,6 +766,9 @@ device_detach:
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
+ } else if (rc != -ESHUTDOWN && net_ratelimit()) {
+ netdev_info(netdev, "failed to re-submit IN URB: %pe\n",
+ ERR_PTR(rc));
}
}
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 62701ec34272..d0a2a2a33c1c 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -361,7 +361,14 @@ resubmit_urb:
urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE,
kvaser_usb_read_bulk_callback, dev);
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!err)
+ return;
+
+ usb_unanchor_urb(urb);
+
if (err == -ENODEV) {
for (i = 0; i < dev->nchannels; i++) {
struct kvaser_usb_net_priv *priv;
@@ -372,7 +379,7 @@ resubmit_urb:
netif_device_detach(priv->netdev);
}
- } else if (err) {
+ } else {
dev_err(&dev->intf->dev,
"Failed resubmitting read bulk urb: %d\n", err);
}
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 41c0a1c399bf..04170326dc7e 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -608,11 +608,17 @@ resubmit_urb:
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!retval)
+ return;
+
+ usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
- else if (retval)
+ else
netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
retval);
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 7449328f7cd7..3125cf59d002 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -541,11 +541,17 @@ resubmit_urb:
urb->transfer_buffer, RX_BUFFER_SIZE,
usb_8dev_read_bulk_callback, priv);
+ usb_anchor_urb(urb, &priv->rx_submitted);
+
retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (!retval)
+ return;
+
+ usb_unanchor_urb(urb);
if (retval == -ENODEV)
netif_device_detach(netdev);
- else if (retval)
+ else
netdev_err(netdev,
"failed resubmitting read bulk urb: %d\n", retval);
}
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index fdc662aea279..76e6b7b5c6a1 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -130,6 +130,19 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void vcan_set_cap_info(struct net_device *dev)
+{
+ u32 can_cap = CAN_CAP_CC;
+
+ if (dev->mtu > CAN_MTU)
+ can_cap |= CAN_CAP_FD;
+
+ if (dev->mtu >= CANXL_MIN_MTU)
+ can_cap |= CAN_CAP_XL;
+
+ can_set_cap(dev, can_cap);
+}
+
static int vcan_change_mtu(struct net_device *dev, int new_mtu)
{
/* Do not allow changing the MTU while running */
@@ -141,6 +154,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
WRITE_ONCE(dev->mtu, new_mtu);
+ vcan_set_cap_info(dev);
return 0;
}
@@ -162,6 +176,7 @@ static void vcan_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags = IFF_NOARP;
can_set_ml_priv(dev, netdev_priv(dev));
+ vcan_set_cap_info(dev);
/* set flags according to driver capabilities */
if (echo)
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index b2c19f8c5f8e..f14c6f02b662 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -125,6 +125,19 @@ static int vxcan_get_iflink(const struct net_device *dev)
return iflink;
}
+static void vxcan_set_cap_info(struct net_device *dev)
+{
+ u32 can_cap = CAN_CAP_CC;
+
+ if (dev->mtu > CAN_MTU)
+ can_cap |= CAN_CAP_FD;
+
+ if (dev->mtu >= CANXL_MIN_MTU)
+ can_cap |= CAN_CAP_XL;
+
+ can_set_cap(dev, can_cap);
+}
+
static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
/* Do not allow changing the MTU while running */
@@ -136,6 +149,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
WRITE_ONCE(dev->mtu, new_mtu);
+ vxcan_set_cap_info(dev);
return 0;
}
@@ -167,6 +181,7 @@ static void vxcan_setup(struct net_device *dev)
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
can_set_ml_priv(dev, can_ml);
+ vxcan_set_cap_info(dev);
}
/* forward declaration for rtnl_create_link() */
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index a1a177713d99..2c4131ed7e30 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2169,6 +2169,9 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
if (!ent->is_valid)
return 0;
+ if (is_multicast_ether_addr(ent->mac))
+ return 0;
+
if (port != ent->port)
return 0;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index b4d48997bf46..09002c853b78 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3364,13 +3364,10 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
- struct device_node *phy_handle = NULL;
struct fwnode_handle *ports_fwnode;
struct fwnode_handle *port_fwnode;
struct dsa_switch *ds = chip->ds;
struct mv88e6xxx_port *p;
- struct dsa_port *dp;
- int tx_amp;
int err;
u16 reg;
u32 val;
@@ -3582,23 +3579,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- if (chip->info->ops->serdes_set_tx_amplitude) {
- dp = dsa_to_port(ds, port);
- if (dp)
- phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
-
- if (phy_handle && !of_property_read_u32(phy_handle,
- "tx-p2p-microvolt",
- &tx_amp))
- err = chip->info->ops->serdes_set_tx_amplitude(chip,
- port, tx_amp);
- if (phy_handle) {
- of_node_put(phy_handle);
- if (err)
- return err;
- }
- }
-
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -4768,7 +4748,6 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
@@ -5044,7 +5023,6 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5481,7 +5459,6 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.phylink_get_caps = mv88e6352_phylink_get_caps,
.pcs_ops = &mv88e6352_pcs_ops,
};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 2f211e55cb47..e073446ee7d0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -642,10 +642,6 @@ struct mv88e6xxx_ops {
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
- /* SERDES SGMII/Fiber Output Amplitude */
- int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
- int val);
-
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index b3330211edbc..a936ee80ce00 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -25,14 +25,6 @@ static int mv88e6352_serdes_read(struct mv88e6xxx_chip *chip, int reg,
reg, val);
}
-static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
- u16 val)
-{
- return mv88e6xxx_phy_page_write(chip, MV88E6352_ADDR_SERDES,
- MV88E6352_SERDES_PAGE_FIBER,
- reg, val);
-}
-
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
@@ -506,41 +498,3 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
p[i] = reg;
}
}
-
-static const int mv88e6352_serdes_p2p_to_reg[] = {
- /* Index of value in microvolts corresponds to the register value */
- 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
-};
-
-int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
- int val)
-{
- bool found = false;
- u16 ctrl, reg;
- int err;
- int i;
-
- err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
- if (err <= 0)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
- if (mv88e6352_serdes_p2p_to_reg[i] == val) {
- reg = i;
- found = true;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
- if (err)
- return err;
-
- ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
- ctrl |= reg;
-
- return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
-}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index ad887d8601bc..17a3e85fabaa 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -29,8 +29,6 @@ struct phylink_link_state;
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
-#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
-#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -140,9 +138,6 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
-int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
- int val);
-
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c
index 1c511f5dc6ab..7b8c1549a0fb 100644
--- a/drivers/net/dsa/yt921x.c
+++ b/drivers/net/dsa/yt921x.c
@@ -682,21 +682,22 @@ static int yt921x_read_mib(struct yt921x_priv *priv, int port)
const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i];
u32 reg = YT921X_MIBn_DATA0(port) + desc->offset;
u64 *valp = &((u64 *)mib)[i];
- u64 val = *valp;
u32 val0;
- u32 val1;
+ u64 val;
res = yt921x_reg_read(priv, reg, &val0);
if (res)
break;
if (desc->size <= 1) {
- if (val < (u32)val)
- /* overflow */
- val += (u64)U32_MAX + 1;
- val &= ~U32_MAX;
- val |= val0;
+ u64 old_val = *valp;
+
+ val = (old_val & ~(u64)U32_MAX) | val0;
+ if (val < old_val)
+ val += 1ull << 32;
} else {
+ u32 val1;
+
res = yt921x_reg_read(priv, reg + 4, &val1);
if (res)
break;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 8c9cc97efd4e..4fe4efdb3737 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1473,7 +1473,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
return 0;
free_ring:
- dma_free_coherent(&pdev->dev,
+ dma_free_coherent(gendev,
sizeof(struct boom_rx_desc) * RX_RING_SIZE +
sizeof(struct boom_tx_desc) * TX_RING_SIZE,
vp->rx_ring, vp->rx_ring_dma);
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 75893c90a0a1..315d97036ac1 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -2924,19 +2924,26 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
port->id = id;
eth->ports[p] = port;
- err = airoha_metadata_dst_alloc(port);
- if (err)
- return err;
+ return airoha_metadata_dst_alloc(port);
+}
- err = register_netdev(dev);
- if (err)
- goto free_metadata_dst;
+static int airoha_register_gdm_devices(struct airoha_eth *eth)
+{
+ int i;
- return 0;
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+ int err;
-free_metadata_dst:
- airoha_metadata_dst_free(port);
- return err;
+ if (!port)
+ continue;
+
+ err = register_netdev(port->dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int airoha_probe(struct platform_device *pdev)
@@ -3027,6 +3034,10 @@ static int airoha_probe(struct platform_device *pdev)
}
}
+ err = airoha_register_gdm_devices(eth);
+ if (err)
+ goto error_napi_stop;
+
return 0;
error_napi_stop:
@@ -3040,10 +3051,12 @@ error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED) {
+ if (!port)
+ continue;
+
+ if (port->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(port->dev);
- airoha_metadata_dst_free(port);
- }
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 0caabb0c3aa0..2221bafaf7c9 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -1547,13 +1547,16 @@ void airoha_ppe_deinit(struct airoha_eth *eth)
{
struct airoha_npu *npu;
- rcu_read_lock();
- npu = rcu_dereference(eth->npu);
+ mutex_lock(&flow_offload_mutex);
+
+ npu = rcu_replace_pointer(eth->npu, NULL,
+ lockdep_is_held(&flow_offload_mutex));
if (npu) {
npu->ops.ppe_deinit(npu);
airoha_npu_put(npu);
}
- rcu_read_unlock();
+
+ mutex_unlock(&flow_offload_mutex);
rhashtable_destroy(&eth->ppe->l2_flows);
rhashtable_destroy(&eth->flow_table);
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
index ac81c24016dd..4772185e669d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_devlink.c
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -53,10 +53,12 @@ void ena_devlink_disable_phc_param(struct devlink *devlink)
{
union devlink_param_value value;
+ devl_lock(devlink);
value.vbool = false;
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
value);
+ devl_unlock(devlink);
}
static void ena_devlink_port_register(struct devlink *devlink)
@@ -145,10 +147,12 @@ static int ena_devlink_configure_params(struct devlink *devlink)
return rc;
}
+ devl_lock(devlink);
value.vbool = ena_phc_is_enabled(adapter);
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
value);
+ devl_unlock(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3ddd896d6987..b5a60a048896 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1837,7 +1837,7 @@ static void xgbe_get_stats64(struct net_device *netdev,
s->multicast = pstats->rxmulticastframes_g;
s->rx_length_errors = pstats->rxlengtherror;
s->rx_crc_errors = pstats->rxcrcerror;
- s->rx_fifo_errors = pstats->rxfifooverflow;
+ s->rx_over_errors = pstats->rxfifooverflow;
s->tx_packets = pstats->txframecount_gb;
s->tx_bytes = pstats->txoctetcount_gb;
@@ -2292,9 +2292,6 @@ read_again:
goto read_again;
if (error || packet->errors) {
- if (packet->errors)
- netif_err(pdata, rx_err, netdev,
- "error in received packet\n");
dev_kfree_skb(skb);
goto next_packet;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index a68757e8fd22..c63ddb12237e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1928,6 +1928,7 @@ static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
{
if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
pdata->rx_adapt_retries = 0;
+ pdata->mode_set = false;
return;
}
@@ -1974,6 +1975,7 @@ static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
*/
netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
pdata->rx_adapt_done = true;
+ pdata->rx_adapt_retries = 0;
pdata->mode_set = false;
return;
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 666522d64775..cd7dddeb91dd 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -255,14 +255,15 @@ config BNXT_HWMON
devices, via the hwmon sysfs interface.
config BNGE
- tristate "Broadcom Ethernet device support"
+ tristate "Broadcom ThorUltra Ethernet device support"
depends on PCI
select NET_DEVLINK
select PAGE_POOL
+ select AUXILIARY_BUS
help
- This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
- The module will be called bng_en. To compile this driver as a module,
- choose M here.
+ This driver supports Broadcom ThorUltra 50/100/200/400/800 gigabit
+ Ethernet cards. The module will be called bng_en. To compile this
+ driver as a module, choose M here.
config BCMASP
tristate "Broadcom ASP 2.0 Ethernet support"
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index fd35f4b4dc50..014340f33345 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -156,7 +156,7 @@ static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
ASP_RX_FILTER_NET_OFFSET_L4(32),
ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1));
- rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->ch) |
ASP_RX_FILTER_NET_CFG_EN |
ASP_RX_FILTER_NET_CFG_L2_EN |
ASP_RX_FILTER_NET_CFG_L3_EN |
@@ -166,7 +166,7 @@ static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv,
ASP_RX_FILTER_NET_CFG_UMC(nfilt->port),
ASP_RX_FILTER_NET_CFG(nfilt->hw_index));
- rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) |
+ rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->ch) |
ASP_RX_FILTER_NET_CFG_EN |
ASP_RX_FILTER_NET_CFG_L2_EN |
ASP_RX_FILTER_NET_CFG_L3_EN |
@@ -714,6 +714,7 @@ struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf,
nfilter = &priv->net_filters[open_index];
nfilter->claimed = true;
nfilter->port = intf->port;
+ nfilter->ch = intf->channel + priv->tx_chan_offset;
nfilter->hw_index = open_index;
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 74adfdb50e11..e238507be40a 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -348,6 +348,7 @@ struct bcmasp_net_filter {
bool wake_filter;
int port;
+ int ch;
unsigned int hw_index;
};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index b9973956c480..ceb6c11431dd 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -1261,7 +1261,7 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
phy_modes(intf->phy_interface), intf->port);
ret = -EINVAL;
- goto err_free_netdev;
+ goto err_deregister_fixed_link;
}
ret = of_get_ethdev_address(ndev_dn, ndev);
@@ -1286,6 +1286,9 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
return intf;
+err_deregister_fixed_link:
+ if (of_phy_is_fixed_link(ndev_dn))
+ of_phy_deregister_fixed_link(ndev_dn);
err_free_netdev:
free_netdev(ndev);
err:
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 411744894349..32fc16a37d02 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -5,7 +5,7 @@
#define _BNGE_H_
#define DRV_NAME "bng_en"
-#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver"
+#define DRV_SUMMARY "Broadcom ThorUltra NIC Ethernet Driver"
#include <linux/etherdevice.h>
#include <linux/bnxt/hsi.h>
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
index c94e132bebc8..b4090283df0f 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_core.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -19,7 +19,7 @@ char bnge_driver_name[] = DRV_NAME;
static const struct {
char *name;
} board_info[] = {
- [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
+ [BCM57708] = { "Broadcom BCM57708 ThorUltra 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" },
};
static const struct pci_device_id bnge_pci_tbl[] = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d17d0ea89c36..8419d1eb4035 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1482,9 +1482,11 @@ static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
u16 idx = agg_id & MAX_TPA_P5_MASK;
- if (test_bit(idx, map->agg_idx_bmap))
- idx = find_first_zero_bit(map->agg_idx_bmap,
- BNXT_AGG_IDX_BMAP_SIZE);
+ if (test_bit(idx, map->agg_idx_bmap)) {
+ idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA_P5);
+ if (idx >= MAX_TPA_P5)
+ return INVALID_HW_RING_ID;
+ }
__set_bit(idx, map->agg_idx_bmap);
map->agg_id_tbl[agg_id] = idx;
return idx;
@@ -1548,6 +1550,13 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+ if (unlikely(agg_id == INVALID_HW_RING_ID)) {
+ netdev_warn(bp->dev, "Unable to allocate agg ID for ring %d, agg 0x%x\n",
+ rxr->bnapi->index,
+ TPA_START_AGG_ID_P5(tpa_start));
+ bnxt_sched_reset_rxr(bp, rxr);
+ return;
+ }
} else {
agg_id = TPA_START_AGG_ID(tpa_start);
}
@@ -16882,12 +16891,12 @@ init_err_dl:
init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
- bnxt_free_hwrm_resources(bp);
- bnxt_hwmon_uninit(bp);
- bnxt_ethtool_free(bp);
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
+ bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
+ bnxt_ethtool_free(bp);
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f5f07a7e6b29..f88e7769a838 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1080,11 +1080,9 @@ struct bnxt_tpa_info {
struct rx_agg_cmp *agg_arr;
};
-#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
-
struct bnxt_tpa_idx_map {
u16 agg_id_tbl[1024];
- unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
+ DECLARE_BITMAP(agg_idx_bmap, MAX_TPA_P5);
};
struct bnxt_rx_ring_info {
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e461f5072884..6511ecd5856b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -708,7 +708,6 @@ static void macb_mac_link_up(struct phylink_config *config,
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
- bp->macbgem_ops.mog_init_rings(bp);
macb_init_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
@@ -2954,6 +2953,8 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
+ bp->macbgem_ops.mog_init_rings(bp);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_enable(&queue->napi_rx);
napi_enable(&queue->napi_tx);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index bb5d2fa15736..8ed45bceb537 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3801,6 +3801,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
{
int status;
bool pmac_valid = false;
+ u32 pmac_id;
eth_zero_addr(mac);
@@ -3813,7 +3814,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
adapter->if_handle, 0);
} else {
status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
- NULL, adapter->if_handle, 0);
+ &pmac_id, adapter->if_handle, 0);
}
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 5bb31c8fab39..995c159003d7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2141,7 +2141,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
struct be_aic_obj *aic;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- u64 rx_pkts = 0, tx_pkts = 0;
+ u64 rx_pkts = 0, tx_pkts = 0, pkts;
ulong now;
u32 pps, delta;
int i;
@@ -2157,15 +2157,17 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
start = u64_stats_fetch_begin(&rxo->stats.sync);
- rx_pkts += rxo->stats.rx_pkts;
+ pkts = rxo->stats.rx_pkts;
} while (u64_stats_fetch_retry(&rxo->stats.sync, start));
+ rx_pkts += pkts;
}
for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
start = u64_stats_fetch_begin(&txo->stats.sync);
- tx_pkts += txo->stats.tx_reqs;
+ pkts = txo->stats.tx_reqs;
} while (u64_stats_fetch_retry(&txo->stats.sync, start));
+ tx_pkts += pkts;
}
/* Skip, if wrapped around or first calculation */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dce27bd67a7d..aecd40aeef9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -79,9 +79,9 @@ struct enetc_lso_t {
#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
- (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+ min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD, 0xffff)
#define ENETC_RXB_DMA_SIZE_XDP \
- (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)
+ min(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM, 0xffff)
struct enetc_rx_swbd {
dma_addr_t dma;
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
index 443983fdecd9..7fd39f895290 100644
--- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -577,11 +577,17 @@ static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
}
addr = netc_get_phy_addr(np);
- if (addr <= 0) {
+ if (addr < 0) {
dev_err(dev, "Failed to get PHY address\n");
return addr;
}
+ /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
+ * so no need to set the register.
+ */
+ if (!addr)
+ return 0;
+
if (phy_mask & BIT(addr)) {
dev_err(dev,
"Find same PHY address in EMDIO and ENETC node\n");
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a753265961af..797ef6899657 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1150,7 +1150,7 @@ fec_restart(struct net_device *ndev)
u32 rcntl = FEC_RCR_MII;
if (OPT_ARCH_HAS_MAX_FL)
- rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
+ rcntl |= (fep->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN) << 16;
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
@@ -1285,12 +1285,13 @@ fec_restart(struct net_device *ndev)
/* When Jumbo Frame is enabled, the FIFO may not be large enough
* to hold an entire frame. In such cases, if the MTU exceeds
- * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
- * to operate in cut-through mode, triggered by the FIFO threshold.
+ * (PKT_MAXBUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN), configure
+ * the interface to operate in cut-through mode, triggered by
+ * the FIFO threshold.
* Otherwise, enable the ENET store-and-forward mode.
*/
if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
- (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ (ndev->mtu > (PKT_MAXBUF_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN)))
writel(0xF, fep->hwp + FEC_X_WMRK);
else
writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
@@ -4037,7 +4038,7 @@ static int fec_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ order = get_order(new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN
+ FEC_DRV_RESERVE_SPACE);
fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
fep->pagepool_order = order;
@@ -4588,7 +4589,7 @@ fec_probe(struct platform_device *pdev)
else
fep->max_buf_size = PKT_MAXBUF_SIZE;
- ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
+ ndev->max_mtu = fep->max_buf_size - VLAN_ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index affd5a6c44e7..131d1210dc4a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1602,8 +1602,10 @@ static void ugeth_mac_config(struct phylink_config *config, unsigned int mode,
pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
tbiphy = of_phy_find_device(ug_info->tbi_node);
- if (!tbiphy)
+ if (!tbiphy) {
pr_warn("Could not get TBI device\n");
+ return;
+ }
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 970d5ca8cdde..cbdf3a842cfe 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1206,6 +1206,11 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
}
}
+static inline bool gve_is_clock_enabled(struct gve_priv *priv)
+{
+ return priv->nic_ts_report;
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 52500ae8348e..311b106160b2 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -938,7 +938,7 @@ static int gve_get_ts_info(struct net_device *netdev,
ethtool_op_get_ts_info(netdev, info);
- if (priv->nic_timestamp_supported) {
+ if (gve_is_clock_enabled(priv)) {
info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index a7a088a77f37..52c5e4942cd4 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -558,7 +558,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
- 0, block->name, block);
+ IRQF_NO_AUTOEN, block->name, block);
if (err) {
dev_err(&priv->pdev->dev,
"Failed to receive msix vector %d\n", i);
@@ -680,10 +680,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
- err = gve_init_clock(priv);
- if (err) {
- dev_err(&priv->pdev->dev, "Failed to init clock");
- goto abort_with_ptype_lut;
+ if (priv->nic_timestamp_supported) {
+ err = gve_init_clock(priv);
+ if (err) {
+ dev_warn(&priv->pdev->dev, "Failed to init clock, continuing without PTP support");
+ err = 0;
+ }
}
err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
@@ -2183,7 +2185,7 @@ static int gve_set_ts_config(struct net_device *dev,
}
if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
- if (!priv->nic_ts_report) {
+ if (!gve_is_clock_enabled(priv)) {
NL_SET_ERR_MSG_MOD(extack,
"RX timestamping is not supported");
kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
index 073677d82ee8..de42fc2c19a1 100644
--- a/drivers/net/ethernet/google/gve/gve_ptp.c
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -70,11 +70,6 @@ static int gve_ptp_init(struct gve_priv *priv)
struct gve_ptp *ptp;
int err;
- if (!priv->nic_timestamp_supported) {
- dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
- return -EOPNOTSUPP;
- }
-
priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
if (!priv->ptp)
return -ENOMEM;
@@ -116,9 +111,6 @@ int gve_init_clock(struct gve_priv *priv)
{
int err;
- if (!priv->nic_timestamp_supported)
- return 0;
-
err = gve_ptp_init(priv);
if (err)
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f1bd8f5d5732..63a96106a693 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -484,7 +484,7 @@ int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
const struct gve_xdp_buff *ctx = (void *)_ctx;
- if (!ctx->gve->nic_ts_report)
+ if (!gve_is_clock_enabled(ctx->gve))
return -ENODATA;
if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index ace9b8698021..b53b7fcdcdaf 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -112,11 +112,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
netif_napi_set_irq_locked(&block->napi, block->irq);
+ enable_irq(block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ disable_irq(block->irq);
netif_napi_del_locked(&block->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 7a0654e2d3dd..7a9573dcab74 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2529,44 +2529,47 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
struct hns3_enet_ring *ring, bool is_tx)
{
+ struct ring_stats ring_stats;
unsigned int start;
do {
start = u64_stats_fetch_begin(&ring->syncp);
- if (is_tx) {
- stats->tx_bytes += ring->stats.tx_bytes;
- stats->tx_packets += ring->stats.tx_pkts;
- stats->tx_dropped += ring->stats.sw_err_cnt;
- stats->tx_dropped += ring->stats.tx_vlan_err;
- stats->tx_dropped += ring->stats.tx_l4_proto_err;
- stats->tx_dropped += ring->stats.tx_l2l3l4_err;
- stats->tx_dropped += ring->stats.tx_tso_err;
- stats->tx_dropped += ring->stats.over_max_recursion;
- stats->tx_dropped += ring->stats.hw_limitation;
- stats->tx_dropped += ring->stats.copy_bits_err;
- stats->tx_dropped += ring->stats.skb2sgl_err;
- stats->tx_dropped += ring->stats.map_sg_err;
- stats->tx_errors += ring->stats.sw_err_cnt;
- stats->tx_errors += ring->stats.tx_vlan_err;
- stats->tx_errors += ring->stats.tx_l4_proto_err;
- stats->tx_errors += ring->stats.tx_l2l3l4_err;
- stats->tx_errors += ring->stats.tx_tso_err;
- stats->tx_errors += ring->stats.over_max_recursion;
- stats->tx_errors += ring->stats.hw_limitation;
- stats->tx_errors += ring->stats.copy_bits_err;
- stats->tx_errors += ring->stats.skb2sgl_err;
- stats->tx_errors += ring->stats.map_sg_err;
- } else {
- stats->rx_bytes += ring->stats.rx_bytes;
- stats->rx_packets += ring->stats.rx_pkts;
- stats->rx_dropped += ring->stats.l2_err;
- stats->rx_errors += ring->stats.l2_err;
- stats->rx_errors += ring->stats.l3l4_csum_err;
- stats->rx_crc_errors += ring->stats.l2_err;
- stats->multicast += ring->stats.rx_multicast;
- stats->rx_length_errors += ring->stats.err_pkt_len;
- }
+ ring_stats = ring->stats;
} while (u64_stats_fetch_retry(&ring->syncp, start));
+
+ if (is_tx) {
+ stats->tx_bytes += ring_stats.tx_bytes;
+ stats->tx_packets += ring_stats.tx_pkts;
+ stats->tx_dropped += ring_stats.sw_err_cnt;
+ stats->tx_dropped += ring_stats.tx_vlan_err;
+ stats->tx_dropped += ring_stats.tx_l4_proto_err;
+ stats->tx_dropped += ring_stats.tx_l2l3l4_err;
+ stats->tx_dropped += ring_stats.tx_tso_err;
+ stats->tx_dropped += ring_stats.over_max_recursion;
+ stats->tx_dropped += ring_stats.hw_limitation;
+ stats->tx_dropped += ring_stats.copy_bits_err;
+ stats->tx_dropped += ring_stats.skb2sgl_err;
+ stats->tx_dropped += ring_stats.map_sg_err;
+ stats->tx_errors += ring_stats.sw_err_cnt;
+ stats->tx_errors += ring_stats.tx_vlan_err;
+ stats->tx_errors += ring_stats.tx_l4_proto_err;
+ stats->tx_errors += ring_stats.tx_l2l3l4_err;
+ stats->tx_errors += ring_stats.tx_tso_err;
+ stats->tx_errors += ring_stats.over_max_recursion;
+ stats->tx_errors += ring_stats.hw_limitation;
+ stats->tx_errors += ring_stats.copy_bits_err;
+ stats->tx_errors += ring_stats.skb2sgl_err;
+ stats->tx_errors += ring_stats.map_sg_err;
+ } else {
+ stats->rx_bytes += ring_stats.rx_bytes;
+ stats->rx_packets += ring_stats.rx_pkts;
+ stats->rx_dropped += ring_stats.l2_err;
+ stats->rx_errors += ring_stats.l2_err;
+ stats->rx_errors += ring_stats.l3l4_csum_err;
+ stats->rx_crc_errors += ring_stats.l2_err;
+ stats->multicast += ring_stats.rx_multicast;
+ stats->rx_length_errors += ring_stats.err_pkt_len;
+ }
}
static void hns3_nic_get_stats64(struct net_device *netdev,
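The hns3_fetch_stats() rework above shrinks the u64_stats retry section to a single struct copy and moves all per-counter accumulation outside it, so a concurrent writer can only force the cheap snapshot to be retried, never the long summation. A minimal sketch of that reader-side pattern, with names loosely following the hunk above (not a drop-in replacement):

	/* Sketch: reader side of the u64_stats snapshot pattern (assumed types) */
	static void fetch_tx_stats(struct rtnl_link_stats64 *stats,
				   struct hns3_enet_ring *ring)
	{
		struct ring_stats snap;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			snap = ring->stats;	/* only this cheap copy can be retried */
		} while (u64_stats_fetch_retry(&ring->syncp, start));

		/* the heavy accumulation runs on the private snapshot */
		stats->tx_bytes += snap.tx_bytes;
		stats->tx_packets += snap.tx_pkts;
	}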
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 9bb708fa42f2..416e02e7b995 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -731,7 +731,7 @@ struct hclge_fd_tcam_config_3_cmd {
#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
#define HCLGE_FD_AD_USE_COUNTER_B 12
#define HCLGE_FD_AD_COUNTER_NUM_S 13
-#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(19, 13)
#define HCLGE_FD_AD_NXT_STEP_B 20
#define HCLGE_FD_AD_NXT_KEY_S 21
#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
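For context on the mask fix above: GENMASK(h, l) covers bits l..h inclusive, so the counter-number field is seven bits wide and must stop at bit 19; the old GENMASK(20, 13) also claimed bit 20, which is HCLGE_FD_AD_NXT_STEP_B. A small sketch of packing that field with the corrected mask (macro and helper names here are illustrative, not the driver's):

	#define FD_AD_COUNTER_NUM_M	GENMASK(19, 13)	/* 0x000FE000, 7 bits wide */
	#define FD_AD_NXT_STEP_B	BIT(20)		/* no longer overlapped    */

	/* Sketch: set the counter number without disturbing the next-step bit */
	static inline u64 fd_ad_pack(u64 ad_data, u8 counter_id, bool next_stage)
	{
		ad_data |= FIELD_PREP(FD_AD_COUNTER_NUM_M, counter_id);
		if (next_stage)
			ad_data |= FD_AD_NXT_STEP_B;
		return ad_data;
	}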
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c589baea7c77..b8e2aa19f9e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5690,7 +5690,7 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
- action->counter_id);
+ action->next_input_key);
req->ad_data = cpu_to_le64(ad_data);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
index a69b361225e9..84bee5d6e638 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -43,21 +43,12 @@ static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
napi_enable(&irq_cfg->napi);
}
static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
napi_disable(&irq_cfg->napi);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_RX, NULL);
- netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
- NETDEV_QUEUE_TYPE_TX, NULL);
- netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
netif_napi_del(&irq_cfg->napi);
}
@@ -150,6 +141,11 @@ int hinic3_qps_irq_init(struct net_device *netdev)
goto err_release_irqs;
}
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
+
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
irq_cfg->msix_entry_idx,
HINIC3_SET_MSIX_AUTO_MASK);
@@ -164,6 +160,10 @@ err_release_irqs:
q_id--;
irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
qp_del_napi(irq_cfg);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
@@ -184,6 +184,10 @@ void hinic3_qps_irq_uninit(struct net_device *netdev)
for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
qp_del_napi(irq_cfg);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ netif_queue_set_napi(irq_cfg->netdev, q_id,
+ NETDEV_QUEUE_TYPE_TX, NULL);
hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
HINIC3_MSIX_DISABLE);
hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 292389aceb2d..7f078ec9c14c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4094,7 +4094,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
u32 length, const u8 *data)
{
struct e1000_hw *hw = &adapter->hw;
- u8 last_byte = *(data + length - 1);
+ u8 last_byte;
+
+ /* Guard against OOB on data[length - 1] */
+ if (unlikely(!length))
+ return false;
+ /* Upper bound: length must not exceed rx_buffer_len */
+ if (unlikely(length > adapter->rx_buffer_len))
+ return false;
+ last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
unsigned long irq_flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d2d03db2acec..dcb50c2e1aa2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1422,4 +1422,15 @@ static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
}
+static inline u32 i40e_get_max_num_descriptors(const struct i40e_pf *pf)
+{
+ const struct i40e_hw *hw = &pf->hw;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ return I40E_MAX_NUM_DESCRIPTORS_XL710;
+ default:
+ return I40E_MAX_NUM_DESCRIPTORS;
+ }
+}
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index f2c2646ea298..6a47ea0927e9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,18 +2013,6 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
-static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
-
- switch (hw->mac.type) {
- case I40E_MAC_XL710:
- return I40E_MAX_NUM_DESCRIPTORS_XL710;
- default:
- return I40E_MAX_NUM_DESCRIPTORS;
- }
-}
-
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d8192aa23254..0b1cc0481027 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2234,6 +2234,7 @@ static void i40e_set_rx_mode(struct net_device *netdev)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
+ i40e_service_event_schedule(vsi->back);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8b30a3accd31..1fa877b52f61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -656,7 +656,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 8 */
if (!IS_ALIGNED(info->ring_len, 8) ||
- info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_context;
}
@@ -726,7 +726,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 32 */
if (!IS_ALIGNED(info->ring_len, 32) ||
- info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c2fbe443ef85..4b0fc8f354bc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1726,11 +1726,11 @@ static int iavf_config_rss_reg(struct iavf_adapter *adapter)
u16 i;
dw = (u32 *)adapter->rss_key;
- for (i = 0; i <= adapter->rss_key_size / 4; i++)
+ for (i = 0; i < adapter->rss_key_size / 4; i++)
wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut;
- for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+ for (i = 0; i < adapter->rss_lut_size / 4; i++)
wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
iavf_flush(hw);
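The iavf loops above were off by one: with `<=` the final iteration writes one 32-bit register past the end of the key and LUT buffers. Assuming the usual 52-byte VF RSS key, the key spans exactly 13 u32 writes, HKEY(0)..HKEY(12); a sketch of the corrected bound:

	/* Sketch: write exactly rss_key_size bytes worth of HKEY registers */
	u32 *dw = (u32 *)adapter->rss_key;
	u16 i, n = adapter->rss_key_size / 4;	/* e.g. 52 bytes -> 13 registers */

	for (i = 0; i < n; i++)			/* i = 0..n-1; i == n would be OOB */
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);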
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index d88b7f3fd1f9..2ef39cc70c21 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -460,6 +460,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf)
ice_vsi_decfg(ice_get_main_vsi(pf));
rtnl_unlock();
ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
ice_deinit_dev(pf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 147aaee192a7..00f75d87c73f 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -979,6 +979,7 @@ void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 046bc9c65c51..785bf5cc1b25 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2251,7 +2251,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
- timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
+ timeout = jiffies + 10 * usecs_to_jiffies(ICE_CTL_Q_SQ_CMD_TIMEOUT);
do {
status = ice_aq_release_res(hw, res, 0, NULL);
if (status != -EIO)
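The ice_release_res change above matters because ICE_CTL_Q_SQ_CMD_TIMEOUT is a microsecond count while the deadline is compared against jiffies; adding the raw value to jiffies made the retry window essentially arbitrary. A generic sketch of building a jiffies deadline from a time-based constant (the constant and the release call are placeholders, not driver names):

	/* Sketch: deadlines compared against jiffies must be expressed in jiffies */
	unsigned long deadline = jiffies + 10 * usecs_to_jiffies(CMD_TIMEOUT_US);
	int status;

	do {
		status = release_resource();	/* placeholder for the AQ release */
		if (status != -EIO)		/* done, or a non-retryable error */
			break;
		usleep_range(1000, 2000);	/* back off before retrying */
	} while (time_before(jiffies, deadline));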
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 969d4f8f9c02..3565a5d96c6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3626,11 +3626,7 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
if (!lut)
return -ENOMEM;
- err = ice_get_rss_key(vsi, rxfh->key);
- if (err)
- goto out;
-
- err = ice_get_rss_lut(vsi, lut, vsi->rss_table_size);
+ err = ice_get_rss(vsi, rxfh->key, lut, vsi->rss_table_size);
if (err)
goto out;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 15621707fbf8..d47af94f31a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -398,6 +398,8 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
+ u64_stats_init(&ring_stats->syncp);
+
WRITE_ONCE(tx_ring_stats[i], ring_stats);
}
@@ -417,6 +419,8 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
+ u64_stats_init(&ring_stats->syncp);
+
WRITE_ONCE(rx_ring_stats[i], ring_stats);
}
@@ -2779,12 +2783,14 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
ASSERT_RTNL();
ice_for_each_rxq(vsi, q_idx)
- netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
- &vsi->rx_rings[q_idx]->q_vector->napi);
+ if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
+ &vsi->rx_rings[q_idx]->q_vector->napi);
ice_for_each_txq(vsi, q_idx)
- netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
- &vsi->tx_rings[q_idx]->q_vector->napi);
+ if (vsi->tx_rings[q_idx] && vsi->tx_rings[q_idx]->q_vector)
+ netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX,
+ &vsi->tx_rings[q_idx]->q_vector->napi);
/* Also set the interrupt number for the NAPI */
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
@@ -3805,22 +3811,31 @@ int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_pf *pf = vsi->back;
struct ice_vlan vlan;
int err;
- vlan = ICE_VLAN(0, 0, 0);
- err = vlan_ops->del_vlan(vsi, &vlan);
- if (err && err != -EEXIST)
- return err;
+ if (pf->lag && pf->lag->primary) {
+ dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting prune list\n");
+ } else {
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+ }
/* in SVM both VLAN 0 filters are identical */
if (!ice_is_dvm_ena(&vsi->back->hw))
return 0;
- vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
- err = vlan_ops->del_vlan(vsi, &vlan);
- if (err && err != -EEXIST)
- return err;
+ if (pf->lag && pf->lag->primary) {
+ dev_dbg(ice_pf_to_dev(pf), "Interface is primary in aggregate - not deleting QinQ prune list\n");
+ } else {
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+ }
/* when deleting the last VLAN filter, make sure to disable the VLAN
* promisc mode so the filter isn't left by accident
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4bb68e7a00f5..71c6d53b461e 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4836,6 +4836,7 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_dpll_deinit(pf);
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
xa_destroy(&pf->eswitch.reprs);
+ ice_hwmon_exit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5437,8 +5438,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
- ice_hwmon_exit(pf);
-
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
@@ -6983,7 +6982,6 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes +
pf->stats.rx_undersize +
- pf->hw_csum_rx_error +
pf->stats.rx_jabber +
pf->stats.rx_fragments +
pf->stats.rx_oversize;
@@ -7989,6 +7987,34 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
}
/**
+ * ice_get_rss - Get RSS LUT and/or key
+ * @vsi: Pointer to VSI structure
+ * @seed: Buffer to store the key in
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return: 0 on success, negative on failure
+ */
+int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
+{
+ int err;
+
+ if (seed) {
+ err = ice_get_rss_key(vsi, seed);
+ if (err)
+ return err;
+ }
+
+ if (lut) {
+ err = ice_get_rss_lut(vsi, lut, lut_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_set_rss_hfunc - Set RSS HASH function
* @vsi: Pointer to VSI structure
* @hfunc: hash function (ICE_AQ_VSI_Q_OPT_RSS_*)
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 8cfc68cbfa06..1bf7934d4e28 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -284,8 +284,7 @@ struct idpf_port_stats {
struct idpf_fsteer_fltr {
struct list_head list;
- u32 loc;
- u32 q_index;
+ struct ethtool_rx_flow_spec fs;
};
/**
@@ -424,14 +423,12 @@ enum idpf_user_flags {
* @rss_key: RSS hash key
* @rss_lut_size: Size of RSS lookup table
* @rss_lut: RSS lookup table
- * @cached_lut: Used to restore previously init RSS lut
*/
struct idpf_rss_data {
u16 rss_key_size;
u8 *rss_key;
u16 rss_lut_size;
u32 *rss_lut;
- u32 *cached_lut;
};
/**
@@ -558,6 +555,7 @@ struct idpf_vector_lifo {
* @max_q: Maximum possible queues
* @req_qs_chunks: Queue chunk data for requested queues
* @mac_filter_list_lock: Lock to protect mac filters
+ * @flow_steer_list_lock: Lock to protect fsteer filters
* @flags: See enum idpf_vport_config_flags
*/
struct idpf_vport_config {
@@ -565,6 +563,7 @@ struct idpf_vport_config {
struct idpf_vport_max_q max_q;
struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
+ spinlock_t flow_steer_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 2589e124e41c..2efa3c08aba5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -37,6 +37,7 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
+ struct idpf_vport_config *vport_config;
struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
unsigned int cnt = 0;
@@ -44,7 +45,8 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
switch (cmd->cmd) {
case ETHTOOL_GRXCLSRLCNT:
@@ -52,26 +54,34 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = idpf_fsteer_max_rules(vport);
break;
case ETHTOOL_GRXCLSRULE:
- err = -EINVAL;
+ err = -ENOENT;
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry(f, &user_config->flow_steer_list, list)
- if (f->loc == cmd->fs.location) {
- cmd->fs.ring_cookie = f->q_index;
+ if (f->fs.location == cmd->fs.location) {
+ /* Avoid infoleak from padding: zero first,
+ * then assign fields
+ */
+ memset(&cmd->fs, 0, sizeof(cmd->fs));
+ cmd->fs = f->fs;
err = 0;
break;
}
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
break;
case ETHTOOL_GRXCLSRLALL:
cmd->data = idpf_fsteer_max_rules(vport);
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry(f, &user_config->flow_steer_list, list) {
if (cnt == cmd->rule_cnt) {
err = -EMSGSIZE;
break;
}
- rule_locs[cnt] = f->loc;
+ rule_locs[cnt] = f->fs.location;
cnt++;
}
if (!err)
cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
break;
default:
break;
@@ -168,7 +178,7 @@ static int idpf_add_flow_steer(struct net_device *netdev,
struct idpf_vport *vport;
u32 flow_type, q_index;
u16 num_rxq;
- int err;
+ int err = 0;
vport = idpf_netdev_to_vport(netdev);
vport_config = vport->adapter->vport_config[np->vport_idx];
@@ -194,6 +204,29 @@ static int idpf_add_flow_steer(struct net_device *netdev,
if (!rule)
return -ENOMEM;
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out_free_rule;
+ }
+
+ /* detect duplicate entry and reject before adding rules */
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->fs.location == fsp->location) {
+ err = -EEXIST;
+ break;
+ }
+
+ if (f->fs.location > fsp->location)
+ break;
+ parent = f;
+ }
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+
+ if (err)
+ goto out;
+
rule->vport_id = cpu_to_le32(vport->vport_id);
rule->count = cpu_to_le32(1);
info = &rule->rule_info[0];
@@ -232,26 +265,20 @@ static int idpf_add_flow_steer(struct net_device *netdev,
goto out;
}
- fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
- if (!fltr) {
- err = -ENOMEM;
- goto out;
- }
-
- fltr->loc = fsp->location;
- fltr->q_index = q_index;
- list_for_each_entry(f, &user_config->flow_steer_list, list) {
- if (f->loc >= fltr->loc)
- break;
- parent = f;
- }
+ /* Save a copy of the user's flow spec so ethtool can later retrieve it */
+ fltr->fs = *fsp;
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
parent ? list_add(&fltr->list, &parent->list) :
list_add(&fltr->list, &user_config->flow_steer_list);
user_config->num_fsteer_fltrs++;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+ goto out_free_rule;
out:
+ kfree(fltr);
+out_free_rule:
kfree(rule);
return err;
}
@@ -302,17 +329,20 @@ static int idpf_del_flow_steer(struct net_device *netdev,
goto out;
}
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
list_for_each_entry_safe(f, iter,
&user_config->flow_steer_list, list) {
- if (f->loc == fsp->location) {
+ if (f->fs.location == fsp->location) {
list_del(&f->list);
kfree(f);
user_config->num_fsteer_fltrs--;
- goto out;
+ goto out_unlock;
}
}
- err = -EINVAL;
+ err = -ENOENT;
+out_unlock:
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
out:
kfree(rule);
return err;
@@ -381,7 +411,10 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
* @netdev: network interface device structure
* @rxfh: pointer to param struct (indir, key, hfunc)
*
- * Reads the indirection table directly from the hardware. Always returns 0.
+ * RSS LUT and Key information are read from the driver's cached
+ * copy. When rxhash is off, the RSS LUT will be displayed as zeros.
+ *
+ * Return: 0 on success, -errno otherwise.
*/
static int idpf_get_rxfh(struct net_device *netdev,
struct ethtool_rxfh_param *rxfh)
@@ -389,10 +422,13 @@ static int idpf_get_rxfh(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ bool rxhash_ena;
int err = 0;
u16 i;
idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
adapter = np->adapter;
@@ -402,9 +438,8 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (!test_bit(IDPF_VPORT_UP, np->state))
- goto unlock_mutex;
+ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (rxfh->key)
@@ -412,7 +447,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
if (rxfh->indir) {
for (i = 0; i < rss_data->rss_lut_size; i++)
- rxfh->indir[i] = rss_data->rss_lut[i];
+ rxfh->indir[i] = rxhash_ena ? rss_data->rss_lut[i] : 0;
}
unlock_mutex:
@@ -452,8 +487,6 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (!test_bit(IDPF_VPORT_UP, np->state))
- goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -469,7 +502,8 @@ static int idpf_set_rxfh(struct net_device *netdev,
rss_data->rss_lut[lut] = rxfh->indir[lut];
}
- err = idpf_config_rss(vport);
+ if (test_bit(IDPF_VPORT_UP, np->state))
+ err = idpf_config_rss(vport);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
index 7e20a07e98e5..6dad0593f7f2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_idc.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -322,7 +322,7 @@ static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
for (i = 0; i < adapter->num_alloc_vports; i++) {
struct idpf_vport *vport = adapter->vports[i];
- if (!vport)
+ if (!vport || !vport->vdev_info)
continue;
idpf_unplug_aux_dev(vport->vdev_info->adev);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 7a7e101afeb6..131a8121839b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -443,6 +443,29 @@ send_dealloc_vecs:
}
/**
+ * idpf_del_all_flow_steer_filters - Delete all flow steer filters in list
+ * @vport: main vport struct
+ *
+ * Takes the flow_steer_list_lock spinlock and deletes all filters in the list.
+ */
+static void idpf_del_all_flow_steer_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_fsteer_fltr *f, *ftmp;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+
+ spin_lock_bh(&vport_config->flow_steer_list_lock);
+ list_for_each_entry_safe(f, ftmp, &vport_config->user_config.flow_steer_list,
+ list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ vport_config->user_config.num_fsteer_fltrs = 0;
+ spin_unlock_bh(&vport_config->flow_steer_list_lock);
+}
+
+/**
* idpf_find_mac_filter - Search filter list for specific mac filter
* @vconfig: Vport config structure
* @macaddr: The MAC address
@@ -729,6 +752,65 @@ static int idpf_init_mac_addr(struct idpf_vport *vport,
return 0;
}
+static void idpf_detach_and_close(struct idpf_adapter *adapter)
+{
+ int max_vports = adapter->max_vports;
+
+ for (int i = 0; i < max_vports; i++) {
+ struct net_device *netdev = adapter->netdevs[i];
+
+ /* If the interface is in detached state, that means the
+ * previous reset was not handled successfully for this
+ * vport.
+ */
+ if (!netif_device_present(netdev))
+ continue;
+
+ /* Hold RTNL to protect racing with callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ if (netif_running(netdev)) {
+ set_bit(IDPF_VPORT_UP_REQUESTED,
+ adapter->vport_config[i]->flags);
+ dev_close(netdev);
+ }
+ rtnl_unlock();
+ }
+}
+
+static void idpf_attach_and_open(struct idpf_adapter *adapter)
+{
+ int max_vports = adapter->max_vports;
+
+ for (int i = 0; i < max_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+ struct idpf_vport_config *vport_config;
+ struct net_device *netdev;
+
+ /* In case of a critical error in the init task, the vport
+ * will be freed. Only continue to restore the netdevs
+ * if the vport is allocated.
+ */
+ if (!vport)
+ continue;
+
+ /* No need for RTNL on attach as this function is called
+ * following detach and dev_close(). We do take RTNL for
+ * dev_open() below as it can race with external callbacks
+ * following the call to netif_device_attach().
+ */
+ netdev = adapter->netdevs[i];
+ netif_device_attach(netdev);
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
+ vport_config->flags)) {
+ rtnl_lock();
+ dev_open(netdev, NULL);
+ rtnl_unlock();
+ }
+ }
+}
+
/**
* idpf_cfg_netdev - Allocate, configure and register a netdev
* @vport: main vport structure
@@ -991,7 +1073,7 @@ static void idpf_vport_rel(struct idpf_vport *vport)
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
- idpf_deinit_rss(vport);
+ idpf_deinit_rss_lut(vport);
rss_data = &vport_config->user_config.rss_data;
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
@@ -1023,6 +1105,8 @@ static void idpf_vport_rel(struct idpf_vport *vport)
kfree(adapter->vport_config[idx]->req_qs_chunks);
adapter->vport_config[idx]->req_qs_chunks = NULL;
}
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
kfree(vport);
adapter->num_alloc_vports--;
}
@@ -1041,12 +1125,15 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
idpf_idc_deinit_vport_aux_device(vport->vdev_info);
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport, true);
- if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
+ idpf_vport_stop(vport, true);
idpf_decfg_netdev(vport);
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ }
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
idpf_del_all_mac_filters(vport);
+ idpf_del_all_flow_steer_filters(vport);
+ }
if (adapter->netdevs[i]) {
struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
@@ -1139,6 +1226,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
u16 idx = adapter->next_vport;
struct idpf_vport *vport;
u16 num_max_q;
+ int err;
if (idx == IDPF_NO_FREE_SLOT)
return NULL;
@@ -1189,10 +1277,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
idpf_vport_init(vport, max_q);
- /* This alloc is done separate from the LUT because it's not strictly
- * dependent on how many queues we have. If we change number of queues
- * and soft reset we'll need a new LUT but the key can remain the same
- * for as long as the vport exists.
+ /* LUT and key are both initialized here. Key is not strictly dependent
+ * on how many queues we have. If we change number of queues and soft
+ * reset is initiated, LUT will be freed and a new LUT will be allocated
+ * as per the updated number of queues during vport bringup. However,
+ * the key remains the same for as long as the vport exists.
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
@@ -1202,6 +1291,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+ /* Initialize default rss LUT */
+ err = idpf_init_rss_lut(vport);
+ if (err)
+ goto free_rss_key;
+
/* fill vport slot in the adapter struct */
adapter->vports[idx] = vport;
adapter->vport_ids[idx] = idpf_get_vport_id(vport);
@@ -1212,6 +1306,8 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return vport;
+free_rss_key:
+ kfree(rss_data->rss_key);
free_vector_idxs:
kfree(vport->q_vector_idxs);
free_vport:
@@ -1271,7 +1367,7 @@ void idpf_mbx_task(struct work_struct *work)
idpf_mb_irq_enable(adapter);
else
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
- msecs_to_jiffies(300));
+ usecs_to_jiffies(300));
idpf_recv_mb_msg(adapter);
}
@@ -1388,7 +1484,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_vport_config *vport_config;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1429,14 +1524,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto intr_deinit;
}
err = idpf_rx_bufs_init_all(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ goto intr_deinit;
}
idpf_rx_init_buf_tail(vport);
@@ -1482,13 +1577,9 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_restore_features(vport);
- vport_config = adapter->vport_config[vport->idx];
- if (vport_config->user_config.rss_data.rss_lut)
- err = idpf_config_rss(vport);
- else
- err = idpf_init_rss(vport);
+ err = idpf_config_rss(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
goto disable_vport;
}
@@ -1497,7 +1588,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
vport->vport_id, err);
- goto deinit_rss;
+ goto disable_vport;
}
if (rtnl)
@@ -1505,8 +1596,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
return 0;
-deinit_rss:
- idpf_deinit_rss(vport);
disable_vport:
idpf_send_disable_vport_msg(vport);
disable_queues:
@@ -1544,7 +1633,6 @@ void idpf_init_task(struct work_struct *work)
struct idpf_vport_config *vport_config;
struct idpf_vport_max_q max_q;
struct idpf_adapter *adapter;
- struct idpf_netdev_priv *np;
struct idpf_vport *vport;
u16 num_default_vports;
struct pci_dev *pdev;
@@ -1579,10 +1667,15 @@ void idpf_init_task(struct work_struct *work)
goto unwind_vports;
}
+ err = idpf_send_get_rx_ptype_msg(vport);
+ if (err)
+ goto unwind_vports;
+
index = vport->idx;
vport_config = adapter->vport_config[index];
spin_lock_init(&vport_config->mac_filter_list_lock);
+ spin_lock_init(&vport_config->flow_steer_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
@@ -1590,21 +1683,11 @@ void idpf_init_task(struct work_struct *work)
err = idpf_check_supported_desc_ids(vport);
if (err) {
dev_err(&pdev->dev, "failed to get required descriptor ids\n");
- goto cfg_netdev_err;
+ goto unwind_vports;
}
if (idpf_cfg_netdev(vport))
- goto cfg_netdev_err;
-
- err = idpf_send_get_rx_ptype_msg(vport);
- if (err)
- goto handle_err;
-
- /* Once state is put into DOWN, driver is ready for dev_open */
- np = netdev_priv(vport->netdev);
- clear_bit(IDPF_VPORT_UP, np->state);
- if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport, true);
+ goto unwind_vports;
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1635,21 +1718,15 @@ void idpf_init_task(struct work_struct *work)
set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
- /* As all the required vports are created, clear the reset flag
- * unconditionally here in case we were in reset and the link was down.
- */
+ /* Clear the reset and load bits as all vports are created */
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ clear_bit(IDPF_HR_DRV_LOAD, adapter->flags);
/* Start the statistics task now */
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
return;
-handle_err:
- idpf_decfg_netdev(vport);
-cfg_netdev_err:
- idpf_vport_rel(vport);
- adapter->vports[index] = NULL;
unwind_vports:
if (default_vport) {
for (index = 0; index < adapter->max_vports; index++) {
@@ -1657,6 +1734,15 @@ unwind_vports:
idpf_vport_dealloc(adapter->vports[index]);
}
}
+ /* Cleanup after vc_core_init, which has no way of knowing the
+ * init task failed on driver load.
+ */
+ if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
+ }
+ idpf_ptp_release(adapter);
+
clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
}
@@ -1787,27 +1873,6 @@ static int idpf_check_reset_complete(struct idpf_hw *hw,
}
/**
- * idpf_set_vport_state - Set the vport state to be after the reset
- * @adapter: Driver specific private structure
- */
-static void idpf_set_vport_state(struct idpf_adapter *adapter)
-{
- u16 i;
-
- for (i = 0; i < adapter->max_vports; i++) {
- struct idpf_netdev_priv *np;
-
- if (!adapter->netdevs[i])
- continue;
-
- np = netdev_priv(adapter->netdevs[i]);
- if (test_bit(IDPF_VPORT_UP, np->state))
- set_bit(IDPF_VPORT_UP_REQUESTED,
- adapter->vport_config[i]->flags);
- }
-}
-
-/**
* idpf_init_hard_reset - Initiate a hardware reset
* @adapter: Driver specific private structure
*
@@ -1815,37 +1880,25 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
* reallocate. Also reinitialize the mailbox. Return 0 on success,
* negative on failure.
*/
-static int idpf_init_hard_reset(struct idpf_adapter *adapter)
+static void idpf_init_hard_reset(struct idpf_adapter *adapter)
{
struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
struct device *dev = &adapter->pdev->dev;
- struct net_device *netdev;
int err;
- u16 i;
+ idpf_detach_and_close(adapter);
mutex_lock(&adapter->vport_ctrl_lock);
dev_info(dev, "Device HW Reset initiated\n");
- /* Avoid TX hangs on reset */
- for (i = 0; i < adapter->max_vports; i++) {
- netdev = adapter->netdevs[i];
- if (!netdev)
- continue;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- }
-
/* Prepare for reset */
- if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
bool is_reset = idpf_is_reset_detected(adapter);
idpf_idc_issue_reset_event(adapter->cdev_info);
- idpf_set_vport_state(adapter);
idpf_vc_core_deinit(adapter);
if (!is_reset)
reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
@@ -1892,11 +1945,14 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
unlock_mutex:
mutex_unlock(&adapter->vport_ctrl_lock);
- /* Wait until all vports are created to init RDMA CORE AUX */
- if (!err)
- err = idpf_idc_init(adapter);
-
- return err;
+ /* Attempt to restore netdevs and initialize RDMA CORE AUX device,
+ * provided vc_core_init succeeded. It is still possible that
+ * vports are not allocated at this point if the init task failed.
+ */
+ if (!err) {
+ idpf_attach_and_open(adapter);
+ idpf_idc_init(adapter);
+ }
}
/**
@@ -1997,7 +2053,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_vport_stop(vport, false);
}
- idpf_deinit_rss(vport);
/* We're passing in vport here because we need its wait_queue
* to send a message and it should be getting all the vport
* config data out of the adapter but we need to be careful not
@@ -2023,6 +2078,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (err)
goto err_open;
+ if (reset_cause == IDPF_SR_Q_CHANGE &&
+ !netif_is_rxfh_configured(vport->netdev))
+ idpf_fill_dflt_rss_lut(vport);
+
if (vport_is_up)
err = idpf_vport_open(vport, false);
@@ -2166,40 +2225,6 @@ static void idpf_set_rx_mode(struct net_device *netdev)
}
/**
- * idpf_vport_manage_rss_lut - disable/enable RSS
- * @vport: the vport being changed
- *
- * In the event of disable request for RSS, this function will zero out RSS
- * LUT, while in the event of enable request for RSS, it will reconfigure RSS
- * LUT with the default LUT configuration.
- */
-static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
-{
- bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
- struct idpf_rss_data *rss_data;
- u16 idx = vport->idx;
- int lut_size;
-
- rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
- lut_size = rss_data->rss_lut_size * sizeof(u32);
-
- if (ena) {
- /* This will contain the default or user configured LUT */
- memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
- } else {
- /* Save a copy of the current LUT to be restored later if
- * requested.
- */
- memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
-
- /* Zero out the current LUT to disable */
- memset(rss_data->rss_lut, 0, lut_size);
- }
-
- return idpf_config_rss(vport);
-}
-
-/**
* idpf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -2224,10 +2249,19 @@ static int idpf_set_features(struct net_device *netdev,
}
if (changed & NETIF_F_RXHASH) {
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
netdev->features ^= NETIF_F_RXHASH;
- err = idpf_vport_manage_rss_lut(vport);
- if (err)
- goto unlock_mutex;
+
+ /* If the interface is not up when changing the rxhash, update
+ * to the HW is skipped. The updated LUT will be committed to
+ * the HW when the interface is brought up.
+ */
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
+ err = idpf_config_rss(vport);
+ if (err)
+ goto unlock_mutex;
+ }
}
if (changed & NETIF_F_GRO_HW) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 3e1052d070cf..0a8b50350b86 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -108,11 +108,11 @@ static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter,
ptp_read_system_prets(sts);
idpf_ptp_enable_shtime(adapter);
+ lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
/* Read the system timestamp post PHC read */
ptp_read_system_postts(sts);
- lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
spin_unlock(&ptp->read_dev_clk_lock);
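The idpf_ptp change above moves the device-clock low-word read inside the ptp_read_system_prets()/ptp_read_system_postts() pair, so the two system timestamps actually bracket the PHC access they are meant to bound (as required for PTP_SYS_OFFSET_EXTENDED-style correlation). A sketch of that ordering, with type and register names loosely following the hunk:

	/* Sketch: keep the PHC latch inside the system-timestamp bracket */
	static u64 read_dev_clk(struct idpf_ptp *ptp, struct ptp_system_timestamp *sts)
	{
		u32 lo, hi;

		ptp_read_system_prets(sts);			/* sys time just before */
		lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);	/* PHC latched here     */
		ptp_read_system_postts(sts);			/* sys time just after  */
		hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);

		return ((u64)hi << 32) | lo;
	}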
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 1d91c56f7469..f58f616d87fc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -695,9 +695,10 @@ err:
static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
{
struct libeth_fq fq = {
- .count = rxq->desc_count,
- .type = LIBETH_FQE_MTU,
- .nid = idpf_q_vector_to_mem(rxq->q_vector),
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .buf_len = IDPF_RX_MAX_BUF_SZ,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
int ret;
@@ -754,6 +755,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.truesize = bufq->truesize,
.count = bufq->desc_count,
.type = type,
+ .buf_len = IDPF_RX_MAX_BUF_SZ,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
.xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
@@ -3939,7 +3941,7 @@ static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
static void idpf_net_dim(struct idpf_q_vector *q_vector)
{
struct dim_sample dim_sample = { };
- u64 packets, bytes;
+ u64 packets, bytes, pkts, bts;
u32 i;
if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
@@ -3951,9 +3953,12 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- packets += u64_stats_read(&txq->q_stats.packets);
- bytes += u64_stats_read(&txq->q_stats.bytes);
+ pkts = u64_stats_read(&txq->q_stats.packets);
+ bts = u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
+
+ packets += pkts;
+ bytes += bts;
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
@@ -3970,9 +3975,12 @@ check_rx_itr:
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- packets += u64_stats_read(&rxq->q_stats.packets);
- bytes += u64_stats_read(&rxq->q_stats.bytes);
+ pkts = u64_stats_read(&rxq->q_stats.packets);
+ bts = u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+
+ packets += pkts;
+ bytes += bts;
}
idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
@@ -4641,7 +4649,7 @@ int idpf_config_rss(struct idpf_vport *vport)
* idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
* @vport: virtual port structure
*/
-static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
u16 num_active_rxq = vport->num_rxq;
@@ -4650,57 +4658,47 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- for (i = 0; i < rss_data->rss_lut_size; i++) {
+ for (i = 0; i < rss_data->rss_lut_size; i++)
rss_data->rss_lut[i] = i % num_active_rxq;
- rss_data->cached_lut[i] = rss_data->rss_lut[i];
- }
}
/**
- * idpf_init_rss - Allocate and initialize RSS resources
+ * idpf_init_rss_lut - Allocate and initialize RSS LUT
* @vport: virtual port
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_init_rss(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rss_data *rss_data;
- u32 lut_size;
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ if (!rss_data->rss_lut) {
+ u32 lut_size;
- lut_size = rss_data->rss_lut_size * sizeof(u32);
- rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
- if (!rss_data->rss_lut)
- return -ENOMEM;
-
- rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
- if (!rss_data->cached_lut) {
- kfree(rss_data->rss_lut);
- rss_data->rss_lut = NULL;
-
- return -ENOMEM;
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->rss_lut)
+ return -ENOMEM;
}
/* Fill the default RSS lut values */
idpf_fill_dflt_rss_lut(vport);
- return idpf_config_rss(vport);
+ return 0;
}
/**
- * idpf_deinit_rss - Release RSS resources
+ * idpf_deinit_rss_lut - Release RSS LUT
* @vport: virtual port
*/
-void idpf_deinit_rss(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rss_data *rss_data;
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- kfree(rss_data->cached_lut);
- rss_data->cached_lut = NULL;
kfree(rss_data->rss_lut);
rss_data->rss_lut = NULL;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 75b977094741..423cc9486dce 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -101,6 +101,7 @@ do { \
idx = 0; \
} while (0)
+#define IDPF_RX_MAX_BUF_SZ (16384 - 128)
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
@@ -1085,9 +1086,10 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
void idpf_vport_intr_ena(struct idpf_vport *vport);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss(struct idpf_vport *vport);
-void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_init_rss_lut(struct idpf_vport *vport);
+void idpf_deinit_rss_lut(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 44cd4b466c48..cb702eac86c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1016,6 +1016,9 @@ static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
struct idpf_vc_xn_params xn_params = {
.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .send_buf.iov_len =
+ sizeof(struct virtchnl2_get_lan_memory_regions) +
+ sizeof(struct virtchnl2_mem_region),
.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
};
int num_regions, size;
@@ -1028,6 +1031,8 @@ static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
return -ENOMEM;
xn_params.recv_buf.iov_base = rcvd_regions;
+ rcvd_regions->num_memory_regions = cpu_to_le16(1);
+ xn_params.send_buf.iov_base = rcvd_regions;
reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -2799,6 +2804,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
* @vport: virtual port data structure
* @get: flag to set or get rss look up table
*
+ * When rxhash is disabled, the RSS LUT will be configured with zeros. If rxhash
+ * is enabled, the LUT values stored in the driver's soft copy will be used to
+ * set up the HW.
+ *
* Returns 0 on success, negative on failure.
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
@@ -2809,10 +2818,12 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
struct idpf_rss_data *rss_data;
int buf_size, lut_buf_size;
ssize_t reply_sz;
+ bool rxhash_ena;
int i;
rss_data =
&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+ rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
@@ -2834,7 +2845,8 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
} else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
- rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+ rl->lut[i] = rxhash_ena ?
+ cpu_to_le32(rss_data->rss_lut[i]) : 0;
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
@@ -3565,6 +3577,7 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
+ struct idpf_hw *hw = &adapter->hw;
bool remove_in_prog;
if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
@@ -3588,6 +3601,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_vport_params_buf_rel(adapter);
+ kfree(hw->lan_regs);
+ hw->lan_regs = NULL;
+
kfree(adapter->vports);
adapter->vports = NULL;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 498ba1522ca4..9482ab11f050 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -443,9 +443,10 @@
#define IGC_TXPBSIZE_DEFAULT ( \
IGC_TXPB0SIZE(20) | IGC_TXPB1SIZE(0) | IGC_TXPB2SIZE(0) | \
IGC_TXPB3SIZE(0) | IGC_OS2BMCPBSIZE(4))
+/* TSN value following I225/I226 SW User Manual Section 7.5.4 */
#define IGC_TXPBSIZE_TSN ( \
- IGC_TXPB0SIZE(7) | IGC_TXPB1SIZE(7) | IGC_TXPB2SIZE(7) | \
- IGC_TXPB3SIZE(7) | IGC_OS2BMCPBSIZE(4))
+ IGC_TXPB0SIZE(5) | IGC_TXPB1SIZE(5) | IGC_TXPB2SIZE(5) | \
+ IGC_TXPB3SIZE(5) | IGC_OS2BMCPBSIZE(4))
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index e94c1922b97a..3172cdbca9cc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1565,8 +1565,8 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
- /* Do not allow channel reconfiguration when mqprio is enabled */
- if (adapter->strict_priority_enable)
+ /* Do not allow channel reconfiguration when any TSN qdisc is enabled */
+ if (adapter->flags & IGC_FLAG_TSN_ANY_ENABLED)
return -EINVAL;
/* Verify the number of channels doesn't exceed hw limits */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 7aafa60ba0c8..89a321a344d2 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7759,6 +7759,11 @@ int igc_reinit_queues(struct igc_adapter *adapter)
if (netif_running(netdev))
err = igc_open(netdev);
+ if (!err) {
+ /* Restore default IEEE 802.1Qbv schedule after queue reinit */
+ igc_tsn_clear_schedule(adapter);
+ }
+
return err;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index b7b46d863bee..7aae83c108fd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -774,36 +774,43 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ u32 txstmpl_old;
u64 regval;
u32 mask;
int i;
+ /* Establish baseline of TXSTMPL_0 before checking TXTT_0.
+ * This baseline is used to detect if a new timestamp arrives in
+ * register 0 during the hardware bug workaround below.
+ */
+ txstmpl_old = rd32(IGC_TXSTMPL);
+
mask = rd32(IGC_TSYNCTXCTL) & IGC_TSYNCTXCTL_TXTT_ANY;
if (mask & IGC_TSYNCTXCTL_TXTT_0) {
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
} else {
- /* There's a bug in the hardware that could cause
- * missing interrupts for TX timestamping. The issue
- * is that for new interrupts to be triggered, the
- * IGC_TXSTMPH_0 register must be read.
+ /* TXTT_0 not set - register 0 has no new timestamp initially.
+ *
+ * Hardware bug: Future timestamp interrupts won't fire unless
+ * TXSTMPH_0 is read, even if the timestamp was captured in
+ * registers 1-3.
*
- * To avoid discarding a valid timestamp that just
- * happened at the "wrong" time, we need to confirm
- * that there was no timestamp captured, we do that by
- * assuming that no two timestamps in sequence have
- * the same nanosecond value.
+ * Workaround: Read TXSTMPH_0 here to enable future interrupts.
+ * However, this read clears TXTT_0. If a timestamp arrives in
+ * register 0 after checking TXTT_0 but before this read, it
+ * would be lost.
*
- * So, we read the "low" register, read the "high"
- * register (to latch a new timestamp) and read the
- * "low" register again, if "old" and "new" versions
- * of the "low" register are different, a valid
- * timestamp was captured, we can read the "high"
- * register again.
+ * To detect this race: We saved a baseline read of TXSTMPL_0
+ * before the TXTT_0 check. After performing the workaround read of
+ * TXSTMPH_0, we read TXSTMPL_0 again. Since consecutive
+ * timestamps never share the same nanosecond value, a change
+ * between the baseline and new TXSTMPL_0 indicates a timestamp
+ * arrived during the race window. If so, read the complete
+ * timestamp.
*/
- u32 txstmpl_old, txstmpl_new;
+ u32 txstmpl_new;
- txstmpl_old = rd32(IGC_TXSTMPL);
rd32(IGC_TXSTMPH);
txstmpl_new = rd32(IGC_TXSTMPL);
@@ -818,7 +825,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
done:
/* Now that the problematic first register was handled, we can
- * use retrieve the timestamps from the other registers
+ * retrieve the timestamps from the other registers
* (starting from '1') with less complications.
*/
for (i = 1; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 034618e79169..c58051e4350b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11468,20 +11468,17 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
*/
static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
- bool disable_dev;
int err = -EIO;
if (hw->mac.type != ixgbe_mac_e610)
- goto clean_up_probe;
+ return err;
ixgbe_get_hw_control(adapter);
- mutex_init(&hw->aci.lock);
err = ixgbe_get_flash_data(&adapter->hw);
if (err)
- goto shutdown_aci;
+ goto err_release_hw_control;
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task);
@@ -11504,16 +11501,8 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
devl_unlock(adapter->devlink);
return 0;
-shutdown_aci:
- mutex_destroy(&adapter->hw.aci.lock);
+err_release_hw_control:
ixgbe_release_hw_control(adapter);
-clean_up_probe:
- disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
- free_netdev(netdev);
- devlink_free(adapter->devlink);
- pci_release_mem_regions(pdev);
- if (disable_dev)
- pci_disable_device(pdev);
return err;
}
@@ -11655,8 +11644,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- if (ixgbe_check_fw_error(adapter))
- return ixgbe_recovery_probe(adapter);
+ if (ixgbe_check_fw_error(adapter)) {
+ err = ixgbe_recovery_probe(adapter);
+ if (err)
+ goto err_sw_init;
+
+ return 0;
+ }
if (adapter->hw.mac.type == ixgbe_mac_e610) {
err = ixgbe_get_caps(&adapter->hw);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 44b201817d94..c116da7d7f18 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1389,7 +1389,7 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
if (efs->rule.flow_type < 0) {
ret = efs->rule.flow_type;
- goto clean_rule;
+ goto clean_eth_rule;
}
ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index bcea3fc26a8c..57db7ea2f5be 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1338,7 +1338,7 @@ int octep_device_setup(struct octep_device *oct)
ret = octep_ctrl_net_init(oct);
if (ret)
- return ret;
+ goto unsupported_dev;
INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 420c3f4cf741..1d9760b4b8f4 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -218,7 +218,7 @@ static int octep_vf_request_irqs(struct octep_vf_device *oct)
ioq_irq_err:
while (i) {
--i;
- free_irq(oct->msix_entries[i].vector, oct);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
return -1;
}
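The octep_vf fix above matters because free_irq() must be passed the same dev_id cookie that was handed to request_irq(); each vector here was requested with the per-queue ioq_vector, not the global oct pointer. A minimal sketch of the pairing (loop bound, handler, and name identifiers are illustrative; error handling trimmed):

	/* Sketch: request_irq()/free_irq() must share the same dev_id cookie */
	for (i = 0; i < num_ioq_irqs; i++) {
		err = request_irq(oct->msix_entries[i].vector, ioq_intr_handler, 0,
				  irq_name, oct->ioq_vector[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i) {
		--i;
		/* pass back the per-queue vector, not the global oct pointer */
		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
	}
	return -1;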
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 2d78e08f985f..747fbdf2a908 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1551,8 +1551,8 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
return -ENODEV;
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
- int num_lfs, struct rsrc_attach *attach)
+static int rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1562,21 +1562,21 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
u64 cfg;
if (!num_lfs)
- return;
+ return -EINVAL;
blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
- return;
+ return -EFAULT;
block = &hw->block[blkaddr];
if (!block->lf.bmap)
- return;
+ return -ESRCH;
for (slot = 0; slot < num_lfs; slot++) {
/* Allocate the resource */
lf = rvu_alloc_rsrc(&block->lf);
if (lf < 0)
- return;
+ return -EFAULT;
cfg = (1ULL << 63) | (pcifunc << 8) | slot;
rvu_write64(rvu, blkaddr, block->lfcfg_reg |
@@ -1587,6 +1587,8 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
/* Set start MSIX vector for this LF within this PF/VF */
rvu_set_msix_offset(rvu, pfvf, block, lf);
}
+
+ return 0;
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
@@ -1724,22 +1726,31 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
int err;
/* If first request, detach all existing attached resources */
- if (!attach->modify)
- rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ if (!attach->modify) {
+ err = rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ if (err)
+ return err;
+ }
mutex_lock(&rvu->rsrc_lock);
/* Check if the request can be accommodated */
err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
if (err)
- goto exit;
+ goto fail1;
/* Now attach the requested resources */
- if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+ if (attach->npalf) {
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
+ if (err)
+ goto fail1;
+ }
- if (attach->nixlf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+ if (attach->nixlf) {
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
+ if (err)
+ goto fail2;
+ }
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1749,33 +1760,64 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
- attach->sso, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
+ if (err)
+ goto fail3;
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
- attach->ssow, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
+ if (err)
+ goto fail4;
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
- attach->timlfs, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
+ if (err)
+ goto fail5;
}
if (attach->cptlfs) {
if (attach->modify &&
rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
- attach->cptlfs, attach);
+ err = rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
+ if (err)
+ goto fail6;
}
-exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+
+fail6:
+ if (attach->timlfs)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
+
+fail5:
+ if (attach->ssow)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
+
+fail4:
+ if (attach->sso)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
+
+fail3:
+ if (attach->nixlf)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_NIX);
+
+fail2:
+ if (attach->npalf)
+ rvu_detach_block(rvu, pcifunc, BLKTYPE_NPA);
+
+fail1:
mutex_unlock(&rvu->rsrc_lock);
return err;
}
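
The rvu_attach_block() conversion above leans on the kernel's reverse-order goto unwind idiom: each successfully attached block gets a matching label, so a later failure detaches everything acquired so far in reverse. A minimal hedged sketch of that idiom follows; acquire_a()/release_a() and friends are hypothetical placeholders, not functions from this driver.

	#include <linux/errno.h>

	/* Hypothetical placeholder helpers so the sketch stands alone. */
	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return 0; }
	static int acquire_c(void) { return 0; }
	static void release_a(void) { }
	static void release_b(void) { }

	/* Sketch only: unwind in reverse order on failure. */
	static int toy_attach_all(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;

		err = acquire_b();
		if (err)
			goto err_release_a;

		err = acquire_c();
		if (err)
			goto err_release_b;

		return 0;

	err_release_b:
		release_b();
	err_release_a:
		release_a();
		return err;
	}
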
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 3abd750a4bd7..3d91a34f8b57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -1222,6 +1222,9 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
u8 cgx_idx, lmac;
void *cgxd;
+ if (!rvu->fwdata)
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
+
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index e4a5f9fa6fd4..bbfd8231aed5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -56,7 +56,7 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
- if (rvu->fwdata->channel_data.valid) {
+ if (rvu->fwdata && rvu->fwdata->channel_data.valid) {
sdp_pf_num[0] = 0;
pfvf = &rvu->pf[sdp_pf_num[0]];
pfvf->sdp_info = &rvu->fwdata->channel_data.info;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 4c7e0f345cb5..060c715ebad0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -328,7 +328,7 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
req->mask[0] = ~0ULL;
- req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
+ req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;
req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
req->mask[1] = ~0ULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e616a727a3a9..8cdfc36d79d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -940,13 +940,8 @@ static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
size_t offset, size_t size,
enum dma_data_direction dir)
{
- dma_addr_t iova;
-
- iova = dma_map_page_attrs(pfvf->dev, page,
+ return dma_map_page_attrs(pfvf->dev, page,
offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova)))
- return (dma_addr_t)NULL;
- return iova;
}
static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index b90e23dc49de..b6449f0a9e7d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -418,6 +418,14 @@ static int otx2_set_ringparam(struct net_device *netdev,
*/
if (rx_count < pfvf->hw.rq_skid)
rx_count = pfvf->hw.rq_skid;
+
+ if (ring->rx_pending < 16) {
+ netdev_err(netdev,
+ "rx ring size %u invalid, min is 16\n",
+ ring->rx_pending);
+ return -EINVAL;
+ }
+
rx_count = Q_COUNT(Q_SIZE(rx_count, 3));
/* Due pipelining impact minimum 2000 unused SQ CQE's
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a7feb4c392b3..6b2d8559f0eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3249,7 +3249,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
- netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 2a4c9df4eb79..e63d95c1842f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -387,6 +387,8 @@ struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev)
dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch),
dev->dev);
+ if (!dl)
+ return NULL;
return devlink_priv(dl);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 36806e813c33..1301c56e20d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -613,3 +613,19 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
cq->dbg = NULL;
}
}
+
+static int vhca_id_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+
+ seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vhca_id);
+
+void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev)
+{
+ debugfs_create_file("vhca_id", 0400, dev->priv.dbg.dbg_root, dev,
+ &vhca_id_fops);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 64c04f52990f..781e39b5aa1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -575,3 +575,17 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev
return plen && flen && flen == plen &&
!memcmp(fsystem_guid, psystem_guid, flen);
}
+
+void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+
+ if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH])
+ device_lock_assert(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]->adev.dev);
+ else
+ mlx5_core_err(dev, "ETH driver already removed\n");
+ if (priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP])
+ del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]->adev);
+ if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP])
+ del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]->adev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 262dc032e276..ff4ab4691baf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -962,7 +962,7 @@ struct mlx5e_priv {
};
struct mlx5e_dev {
- struct mlx5e_priv *priv;
+ struct net_device *netdev;
struct devlink_port dl_port;
};
@@ -1242,10 +1242,13 @@ struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
-void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
-int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
- const struct mlx5e_profile *new_profile, void *new_ppriv);
-void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
+void mlx5e_destroy_netdev(struct net_device *netdev);
+int mlx5e_netdev_change_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile,
+ void *new_ppriv);
+void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index a8fb4bec369c..9c7064187ed0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -430,7 +430,8 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT ||
+ x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
goto skip_replay_window;
switch (x->replay_esn->replay_window) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
index 38e7c77cc851..9a74438ce10a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -44,6 +44,7 @@ struct mlx5e_accel_fs_psp_prot {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
+ struct mlx5_modify_hdr *rx_modify_hdr;
struct mlx5_flow_destination default_dest;
struct mlx5e_psp_rx_err rx_err;
u32 refcnt;
@@ -286,13 +287,19 @@ out_err:
return err;
}
-static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
+static void accel_psp_fs_rx_fs_destroy(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot)
{
if (fs_prot->def_rule) {
mlx5_del_flow_rules(fs_prot->def_rule);
fs_prot->def_rule = NULL;
}
+ if (fs_prot->rx_modify_hdr) {
+ mlx5_modify_header_dealloc(fs->mdev, fs_prot->rx_modify_hdr);
+ fs_prot->rx_modify_hdr = NULL;
+ }
+
if (fs_prot->miss_rule) {
mlx5_del_flow_rules(fs_prot->miss_rule);
fs_prot->miss_rule = NULL;
@@ -396,6 +403,7 @@ static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
modify_hdr = NULL;
goto out_err;
}
+ fs_prot->rx_modify_hdr = modify_hdr;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
@@ -416,7 +424,7 @@ static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
goto out;
out_err:
- accel_psp_fs_rx_fs_destroy(fs_prot);
+ accel_psp_fs_rx_fs_destroy(fs, fs_prot);
out:
kvfree(flow_group_in);
kvfree(spec);
@@ -433,7 +441,7 @@ static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_ty
/* The netdev unreg already happened, so all offloaded rule are already removed */
fs_prot = &accel_psp->fs_prot[type];
- accel_psp_fs_rx_fs_destroy(fs_prot);
+ accel_psp_fs_rx_fs_destroy(fs, fs_prot);
accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
index c17ea0fcd8ef..ef7f5338540f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
@@ -177,8 +177,6 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct net *net = sock_net(skb->sk);
- const struct ipv6hdr *ip6;
- struct tcphdr *th;
if (!mlx5e_psp_set_state(priv, skb, psp_st))
return true;
@@ -190,11 +188,18 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
return false;
}
if (skb_is_gso(skb)) {
- ip6 = ipv6_hdr(skb);
- th = inner_tcp_hdr(skb);
+ int len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
+ struct tcphdr *th = inner_tcp_hdr(skb);
- th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr,
- &ip6->daddr, 0);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ const struct iphdr *ip = ip_hdr(skb);
+
+ th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ const struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+ th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
}
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 07fc4d2c8fad..4b2963bbe7ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4052,6 +4052,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_queue_update_stats(priv);
}
+ netdev_stats_to_stats64(stats, &dev->stats);
+
if (mlx5e_is_uplink_rep(priv)) {
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
@@ -4068,21 +4070,21 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
- stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
- stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards);
+ stats->rx_missed_errors += priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_dropped += PPORT_2863_GET(pstats, if_in_discards);
- stats->rx_length_errors =
+ stats->rx_length_errors +=
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
- stats->rx_crc_errors =
+ stats->rx_crc_errors +=
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
- stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
- stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
- stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
- stats->rx_frame_errors;
- stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+ stats->rx_frame_errors += PPORT_802_3_GET(pstats, a_alignment_errors);
+ stats->tx_aborted_errors += PPORT_2863_GET(pstats, if_out_discards);
+ stats->rx_errors += stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->tx_errors += stats->tx_aborted_errors + stats->tx_carrier_errors;
}
static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
@@ -6325,6 +6327,7 @@ err_free_cpumask:
void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
+ bool destroying = test_bit(MLX5E_STATE_DESTROYING, &priv->state);
int i;
/* bail if change profile failed and also rollback failed */
@@ -6352,6 +6355,8 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
}
memset(priv, 0, sizeof(*priv));
+ if (destroying) /* restore destroying bit, to allow unload */
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
}
static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
@@ -6584,19 +6589,28 @@ profile_cleanup:
return err;
}
-int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
- const struct mlx5e_profile *new_profile, void *new_ppriv)
+int mlx5e_netdev_change_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *new_profile,
+ void *new_ppriv)
{
- const struct mlx5e_profile *orig_profile = priv->profile;
- struct net_device *netdev = priv->netdev;
- struct mlx5_core_dev *mdev = priv->mdev;
- void *orig_ppriv = priv->ppriv;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ const struct mlx5e_profile *orig_profile;
int err, rollback_err;
+ void *orig_ppriv;
- /* cleanup old profile */
- mlx5e_detach_netdev(priv);
- priv->profile->cleanup(priv);
- mlx5e_priv_cleanup(priv);
+ orig_profile = priv->profile;
+ orig_ppriv = priv->ppriv;
+
+ /* NULL could happen if previous change_profile failed to rollback */
+ if (priv->profile) {
+ WARN_ON_ONCE(priv->mdev != mdev);
+ /* cleanup old profile */
+ mlx5e_detach_netdev(priv);
+ priv->profile->cleanup(priv);
+ mlx5e_priv_cleanup(priv);
+ }
+ /* priv members are not valid from this point ... */
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
@@ -6613,23 +6627,33 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
return 0;
rollback:
+ if (!orig_profile) {
+ netdev_warn(netdev, "no original profile to rollback to\n");
+ priv->profile = NULL;
+ return err;
+ }
+
rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
- if (rollback_err)
- netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
- __func__, rollback_err);
+ if (rollback_err) {
+ netdev_err(netdev, "failed to rollback to orig profile, %d\n",
+ rollback_err);
+ priv->profile = NULL;
+ }
return err;
}
-void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
+void mlx5e_netdev_attach_nic_profile(struct net_device *netdev,
+ struct mlx5_core_dev *mdev)
{
- mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
+ mlx5e_netdev_change_profile(netdev, mdev, &mlx5e_nic_profile, NULL);
}
-void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
+void mlx5e_destroy_netdev(struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_priv_cleanup(priv);
+ if (priv->profile)
+ mlx5e_priv_cleanup(priv);
free_netdev(netdev);
}
@@ -6637,8 +6661,8 @@ static int _mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
- struct mlx5e_priv *priv = mlx5e_dev->priv;
- struct net_device *netdev = priv->netdev;
+ struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
+ struct net_device *netdev = mlx5e_dev->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_core_dev *pos, *to;
int err, i;
@@ -6684,10 +6708,11 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
- struct mlx5e_priv *priv = mlx5e_dev->priv;
- struct net_device *netdev = priv->netdev;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_priv *priv = netdev_priv(mlx5e_dev->netdev);
+ struct net_device *netdev = mlx5e_dev->netdev;
+ struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5_core_dev *pos;
int i;
@@ -6748,11 +6773,11 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_devlink_port_unregister;
}
SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
+ mlx5e_dev->netdev = netdev;
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- mlx5e_dev->priv = priv;
priv->profile = profile;
priv->ppriv = NULL;
@@ -6785,7 +6810,7 @@ err_resume:
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
- mlx5e_destroy_netdev(priv);
+ mlx5e_destroy_netdev(netdev);
err_devlink_port_unregister:
mlx5e_devlink_port_unregister(mlx5e_dev);
err_devlink_unregister:
@@ -6815,17 +6840,21 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
- struct mlx5e_priv *priv = mlx5e_dev->priv;
+ struct net_device *netdev = mlx5e_dev->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = edev->mdev;
+ mlx5_eswitch_safe_aux_devs_remove(mdev);
mlx5_core_uplink_netdev_set(mdev, NULL);
- mlx5e_dcbnl_delete_app(priv);
+
+ if (priv->profile)
+ mlx5e_dcbnl_delete_app(priv);
/* When unload driver, the netdev is in registered state
* if it's from legacy mode. If from switchdev mode, it
* is already unregistered before changing to NIC profile.
*/
- if (priv->netdev->reg_state == NETREG_REGISTERED) {
- unregister_netdev(priv->netdev);
+ if (netdev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(netdev);
_mlx5e_suspend(adev, false);
} else {
struct mlx5_core_dev *pos;
@@ -6840,7 +6869,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
- mlx5e_destroy_netdev(priv);
+ mlx5e_destroy_netdev(netdev);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ee9595109649..6eec88fa6d10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1508,17 +1508,16 @@ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev;
- struct mlx5e_priv *priv;
int err;
netdev = mlx5_uplink_netdev_get(dev);
if (!netdev)
return 0;
- priv = netdev_priv(netdev);
- rpriv->netdev = priv->netdev;
- err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ /* must not use netdev_priv(netdev), it might not be initialized yet */
+ rpriv->netdev = netdev;
+ err = mlx5e_netdev_change_profile(netdev, dev,
+ &mlx5e_uplink_rep_profile, rpriv);
mlx5_uplink_netdev_put(dev, netdev);
return err;
}
@@ -1546,7 +1545,7 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
unregister_netdev(netdev);
- mlx5e_netdev_attach_nic_profile(priv);
+ mlx5e_netdev_attach_nic_profile(netdev, priv->mdev);
}
static int
@@ -1612,7 +1611,7 @@ err_cleanup_profile:
priv->profile->cleanup(priv);
err_destroy_netdev:
- mlx5e_destroy_netdev(netdev_priv(netdev));
+ mlx5e_destroy_netdev(netdev);
return err;
}
@@ -1667,7 +1666,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
mlx5e_rep_vnic_reporter_destroy(priv);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
- mlx5e_destroy_netdev(priv);
+ mlx5e_destroy_netdev(netdev);
free_ppriv:
kvfree(ppriv); /* mlx5e_rep_priv */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index a2802cfc9b98..a8af84fc9763 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1608,12 +1608,13 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
{
int mode = fec_active_mode(priv->mdev);
- if (mode == MLX5E_FEC_NOFEC ||
- !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ if (mode == MLX5E_FEC_NOFEC)
return;
- fec_set_corrected_bits_total(priv, fec_stats);
- fec_set_block_stats(priv, mode, fec_stats);
+ if (MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) {
+ fec_set_corrected_bits_total(priv, fec_stats);
+ fec_set_block_stats(priv, mode, fec_stats);
+ }
if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
fec_set_histograms_stats(priv, mode, hist);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a8773b2342c2..424786f489ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2147,11 +2147,14 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
{
+ struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *pos;
+ struct mlx5_eswitch *peer_esw;
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- if (i == mlx5_get_dev_index(flow->priv->mdev))
- continue;
+ devcom = flow->priv->mdev->priv.eswitch->devcom;
+ mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
+ i = mlx5_get_dev_index(peer_esw->dev);
mlx5e_tc_del_fdb_peer_flow(flow, i);
}
}
@@ -5513,12 +5516,16 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
+ struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *pos;
struct mlx5e_tc_flow *flow, *tmp;
+ struct mlx5_eswitch *peer_esw;
int i;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- if (i == mlx5_get_dev_index(esw->dev))
- continue;
+ devcom = esw->devcom;
+
+ mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
+ i = mlx5_get_dev_index(peer_esw->dev);
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
mlx5e_tc_del_fdb_peers_flow(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 1c37098e09ea..49a637829c59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -188,7 +188,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
if (IS_ERR(vport->ingress.acl)) {
err = PTR_ERR(vport->ingress.acl);
vport->ingress.acl = NULL;
- return err;
+ goto out;
}
err = esw_acl_ingress_lgcy_groups_create(esw, vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ad1073f7b79f..714ad28e8445 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -929,6 +929,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
+void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -1009,9 +1010,12 @@ mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
static inline bool
mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
{
- return -EOPNOTSUPP;
+ return false;
}
+static inline void
+mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) {}
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ea94a727633f..02b7e474586d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3981,6 +3981,32 @@ static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw,
return true;
}
+#define MLX5_ESW_HOLD_TIMEOUT_MS 7000
+#define MLX5_ESW_HOLD_RETRY_DELAY_MS 500
+
+void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev)
+{
+ unsigned long timeout;
+ bool hold_esw = true;
+
+ /* Wait for any concurrent eswitch mode transition to complete. */
+ if (!mlx5_esw_hold(dev)) {
+ timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS);
+ while (!mlx5_esw_hold(dev)) {
+ if (!time_before(jiffies, timeout)) {
+ hold_esw = false;
+ break;
+ }
+ msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS);
+ }
+ }
+ if (hold_esw) {
+ if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS)
+ mlx5_core_reps_aux_devs_remove(dev);
+ mlx5_esw_release(dev);
+ }
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ced747bef641..c348ee62cd3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -1198,7 +1198,8 @@ int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, boo
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
- if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ if (disconnect &&
+ !MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
return -EOPNOTSUPP;
MLX5_SET(set_flow_table_root_in, in, opcode,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index aee17fcf3b36..cdc99fe5c956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -173,10 +173,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
}
/* Handle multipath entry with lower priority value */
- if (mp->fib.mfi && mp->fib.mfi != fi &&
+ if (mp->fib.mfi &&
(mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) &&
- fi->fib_priority >= mp->fib.priority)
+ mp->fib.dst_len <= fen_info->dst_len &&
+ !(mp->fib.dst_len == fen_info->dst_len &&
+ fi->fib_priority < mp->fib.priority)) {
+ mlx5_core_dbg(ldev->pf[idx].dev,
+ "Multipath entry with lower priority was rejected\n");
return;
+ }
nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);
nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4209da722f9a..55b4e0cceae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1806,16 +1806,6 @@ err:
return -ENOMEM;
}
-static int vhca_id_show(struct seq_file *file, void *priv)
-{
- struct mlx5_core_dev *dev = file->private;
-
- seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(vhca_id);
-
static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
{
int err;
@@ -1884,7 +1874,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
- debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
+
INIT_LIST_HEAD(&priv->traps);
err = mlx5_cmd_init(dev);
@@ -2022,6 +2012,8 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_init_one;
}
+ mlx5_vhca_debugfs_init(dev);
+
pci_save_state(pdev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index cfebc110c02f..f2d74382fb85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -258,6 +258,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
+void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
@@ -290,6 +291,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
+void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev);
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 85a9e534f442..7f8bed353e67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -393,9 +393,11 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
if (err)
return err;
- *status = MLX5_GET(mcia_reg, out, status);
- if (*status)
+ if (MLX5_GET(mcia_reg, out, status)) {
+ if (status)
+ *status = MLX5_GET(mcia_reg, out, status);
return -EIO;
+ }
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
@@ -429,7 +431,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
- mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ mlx5_core_dbg(dev, "Module ID not recognized: 0x%x\n",
+ module_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index b706f1486504..c45540fe7d9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -76,6 +76,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
+ mlx5_vhca_debugfs_init(mdev);
return 0;
init_one_err:
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index efb4e412ec7e..0055c231acf6 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -481,7 +481,7 @@ static void mana_serv_reset(struct pci_dev *pdev)
/* Perform PCI rescan on device if we failed on HWC */
dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n");
mana_serv_rescan(pdev);
- goto out;
+ return;
}
if (ret)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 08bee56aea35..c345d9b17c89 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -2307,14 +2307,16 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
- struct net_device *bond = ocelot->ports[lag]->bond;
+ struct ocelot_port *ocelot_port = ocelot->ports[lag];
int num_active_ports = 0;
+ struct net_device *bond;
unsigned long bond_mask;
u8 aggr_idx[16];
- if (!bond || (visited & BIT(lag)))
+ if (!ocelot_port || !ocelot_port->bond || (visited & BIT(lag)))
continue;
+ bond = ocelot_port->bond;
bond_mask = ocelot_get_bond_mask(ocelot, bond);
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 36af94a2e062..2794f75df8fc 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1524,9 +1524,8 @@ static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (!wops->port_post_fini)
- return;
- wops->port_post_fini(rocker_port);
+ if (wops->port_post_fini)
+ wops->port_post_fini(rocker_port);
kfree(rocker_port->wpriv);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 6ef96292909a..3db589b90b68 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -2182,12 +2182,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
{
- int rc;
-
- mutex_lock(&efx->net_dev->ethtool->rss_lock);
- rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->net_dev->ethtool->rss_lock);
- return rc;
+ return efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
}
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 9d1a83a5fa7e..d16c178d1034 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -516,15 +516,7 @@ static inline void smc_rcv(struct net_device *dev)
* any other concurrent access and C would always interrupt B. But life
* isn't that easy in a SMP world...
*/
-#define smc_special_trylock(lock, flags) \
-({ \
- int __ret; \
- local_irq_save(flags); \
- __ret = spin_trylock(lock); \
- if (!__ret) \
- local_irq_restore(flags); \
- __ret; \
-})
+#define smc_special_trylock(lock, flags) spin_trylock_irqsave(lock, flags)
#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
#else
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
index 220eb5ce7583..88e9424d2d51 100644
--- a/drivers/net/ethernet/spacemit/k1_emac.c
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -1099,7 +1099,13 @@ static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
100, 10000);
if (ret) {
- netdev_err(priv->ndev, "Read stat timeout\n");
+ /*
+ * This could be caused by the PHY stopping its refclk even when
+ * the link is up, for power saving. See also comments in
+ * emac_stats_update().
+ */
+ dev_err_ratelimited(&priv->ndev->dev,
+ "Read stat timeout. PHY clock stopped?\n");
return ret;
}
@@ -1147,17 +1153,25 @@ static void emac_stats_update(struct emac_priv *priv)
assert_spin_locked(&priv->stats_lock);
- if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
- /* Not up, don't try to update */
+ /*
+ * We can't read statistics if the interface is not up. Also, some PHYs
+ * stop their reference clocks for link down power saving, which also
+ * causes reading statistics to time out. Don't update and don't
+ * reschedule in these cases.
+ */
+ if (!netif_running(priv->ndev) ||
+ !netif_carrier_ok(priv->ndev) ||
+ !netif_device_present(priv->ndev)) {
return;
}
for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
/*
- * If reading stats times out, everything is broken and there's
- * nothing we can do. Reading statistics also can't return an
- * error, so just return without updating and without
- * rescheduling.
+ * If reading stats times out anyway, the stat registers will be
+ * stuck, and we can't really recover from that.
+ *
+ * Reading statistics also can't return an error, so just return
+ * without updating and without rescheduling.
*/
if (emac_tx_read_stat_cnt(priv, i, &res))
return;
@@ -1636,6 +1650,12 @@ static void emac_adjust_link(struct net_device *dev)
emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
emac_set_fc_autoneg(priv);
+
+ /*
+ * Reschedule stats updates now that link is up. See comments in
+ * emac_stats_update().
+ */
+ mod_timer(&priv->stats_timer, jiffies);
}
phy_print_status(phydev);
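
The comments above describe the scheme: skip the periodic statistics read (and stop rescheduling it) while the carrier is down, since the PHY may have stopped its reference clock, then re-arm the update from the link-change handler once the link is back. A hedged sketch of that control flow using delayed work; struct toy_priv, TOY_STATS_INTERVAL and toy_read_hw_counters() are hypothetical names, not k1_emac symbols.

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	#define TOY_STATS_INTERVAL	(2 * HZ)

	struct toy_priv {
		struct net_device *ndev;
		struct delayed_work stats_work;
	};

	static void toy_read_hw_counters(struct toy_priv *priv)
	{
		/* placeholder for MIB/statistics register reads */
	}

	static void toy_stats_work(struct work_struct *work)
	{
		struct toy_priv *priv = container_of(to_delayed_work(work),
						     struct toy_priv, stats_work);

		/* Carrier down may mean the PHY refclk is stopped: reading
		 * the counters would time out, so bail out without
		 * rescheduling.
		 */
		if (!netif_running(priv->ndev) || !netif_carrier_ok(priv->ndev))
			return;

		toy_read_hw_counters(priv);
		schedule_delayed_work(&priv->stats_work, TOY_STATS_INTERVAL);
	}

	/* Called from the adjust_link handler once the link is up again. */
	static void toy_link_up(struct toy_priv *priv)
	{
		schedule_delayed_work(&priv->stats_work, 0);
	}
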
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da206b24aaed..3f42843cd9ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -89,6 +89,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_CONSUMED BIT(0)
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
+#define STMMAC_XSK_CONSUMED BIT(3)
static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
@@ -4358,11 +4359,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
+ bool set_ic, is_last_segment;
u32 pay_len, mss, queue;
int i, first_tx, nfrags;
u8 proto_hdr_len, hdr;
dma_addr_t des;
- bool set_ic;
/* Always insert VLAN tag to SKB payload for TSO frames.
*
@@ -4550,10 +4551,16 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_enable_tx_timestamp(priv, first);
}
+ /* If we only have one entry used, then the first entry is the last
+ * segment.
+ */
+ is_last_segment = ((tx_q->cur_tx - first_entry) &
+ (priv->dma_conf.dma_tx_size - 1)) == 1;
+
/* Complete the first descriptor before granting the DMA */
stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
- tx_q->tx_skbuff_dma[first_entry].last_segment,
- hdr / 4, (skb->len - proto_hdr_len));
+ is_last_segment, hdr / 4,
+ skb->len - proto_hdr_len);
/* If context desc is used to change MSS */
if (mss_desc) {
@@ -5126,6 +5133,7 @@ static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
struct xdp_buff *xdp)
{
+ bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
struct netdev_queue *nq;
@@ -5142,9 +5150,18 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
- res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
- if (res == STMMAC_XDP_TX)
+ /* For zero copy XDP_TX action, dma_map is true */
+ res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
+ if (res == STMMAC_XDP_TX) {
stmmac_flush_tx_descriptors(priv, queue);
+ } else if (res == STMMAC_XDP_CONSUMED && zc) {
+ /* xdp has been freed by xdp_convert_buff_to_frame(),
+ * no need to call xsk_buff_free() again, so return
+ * STMMAC_XSK_CONSUMED.
+ */
+ res = STMMAC_XSK_CONSUMED;
+ xdp_return_frame(xdpf);
+ }
__netif_tx_unlock(nq);
@@ -5494,6 +5511,8 @@ read_again:
break;
case STMMAC_XDP_CONSUMED:
xsk_buff_free(buf->xdp);
+ fallthrough;
+ case STMMAC_XSK_CONSUMED:
rx_dropped++;
break;
case STMMAC_XDP_TX:
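
The is_last_segment computation above is the usual power-of-two ring-buffer distance: the number of descriptors consumed is (cur - first) masked by (ring_size - 1), and a distance of 1 means the first descriptor is also the last. A small hedged sketch of that arithmetic; toy_ring_dist() is a hypothetical helper, not a stmmac function.

	/* Sketch only: distance between two ring indices when the ring size
	 * is a power of two, so the mask handles wrap-around for free.
	 */
	static inline unsigned int toy_ring_dist(unsigned int head,
						 unsigned int tail,
						 unsigned int ring_size)
	{
		return (head - tail) & (ring_size - 1);
	}

With head taken as cur_tx and tail as first_entry, a result of 1 corresponds to a single-descriptor frame, which is the case the patch flags as the last segment.
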
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index d138dea7d208..ec278f99d295 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -21,6 +21,7 @@ config LIBWX
depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
select DIMLIB
+ select PHYLINK
help
Common library for Wangxun(R) Ethernet drivers.
@@ -29,7 +30,6 @@ config NGBE
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
- select PHYLINK
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
@@ -48,7 +48,6 @@ config TXGBE
depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
- select PHYLINK
select HWMON if TXGBE=y
select SFP
select GPIOLIB
@@ -71,7 +70,6 @@ config TXGBEVF
depends on PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
- select PHYLINK
help
This driver supports virtual functions for SP1000A, WX1820AL,
WX5XXX, WX5XXXAL.
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
index 62d7f47d4f8d..f0514251d4f3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -70,7 +70,7 @@ int txgbe_test_hostif(struct wx *wx)
buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
- WX_HI_COMMAND_TIMEOUT, true);
+ WX_HI_COMMAND_TIMEOUT, false);
}
int txgbe_read_eeprom_hostif(struct wx *wx,
@@ -148,7 +148,7 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
buffer.duplex = duplex;
return wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
- WX_HI_COMMAND_TIMEOUT, true);
+ WX_HI_COMMAND_TIMEOUT, false);
}
static void txgbe_get_link_capabilities(struct wx *wx, int *speed,
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b9b5554ea862..5ad2673f213d 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -334,7 +334,7 @@ int fjes_hw_init(struct fjes_hw *hw)
ret = fjes_hw_reset(hw);
if (ret)
- return ret;
+ goto err_iounmap;
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
@@ -347,8 +347,10 @@ int fjes_hw_init(struct fjes_hw *hw)
hw->max_epid = fjes_hw_get_max_epid(hw);
hw->my_epid = fjes_hw_get_my_epid(hw);
- if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
- return -ENXIO;
+ if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid)) {
+ ret = -ENXIO;
+ goto err_iounmap;
+ }
ret = fjes_hw_setup(hw);
@@ -356,6 +358,10 @@ int fjes_hw_init(struct fjes_hw *hw)
hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
return ret;
+
+err_iounmap:
+ fjes_hw_iounmap(hw);
+ return ret;
}
void fjes_hw_exit(struct fjes_hw *hw)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3d47d749ef9f..cbd52cb79268 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1750,6 +1750,9 @@ static int netvsc_set_rxfh(struct net_device *dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (!ndc->rx_table_sz)
+ return -EOPNOTSUPP;
+
rndis_dev = ndev->extension;
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 50de3ee204db..80f84fc87008 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -69,7 +69,6 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
- spinlock_t addrs_lock;
};
struct ipvl_addr {
@@ -90,6 +89,7 @@ struct ipvl_port {
struct net_device *dev;
possible_net_t pnet;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+ spinlock_t addrs_lock; /* guards hash-table and addrs */
struct list_head ipvlans;
u16 mode;
u16 flags;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2efa3ba148aa..bdb3a46b327c 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -107,17 +107,15 @@ void ipvlan_ht_addr_del(struct ipvl_addr *addr)
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6)
{
- struct ipvl_addr *addr, *ret = NULL;
+ struct ipvl_addr *addr;
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
- if (addr_equal(is_v6, addr, iaddr)) {
- ret = addr;
- break;
- }
+ assert_spin_locked(&ipvlan->port->addrs_lock);
+
+ list_for_each_entry(addr, &ipvlan->addrs, anode) {
+ if (addr_equal(is_v6, addr, iaddr))
+ return addr;
}
- rcu_read_unlock();
- return ret;
+ return NULL;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 660f3db11766..baccdad695fd 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,6 +75,7 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+ spin_lock_init(&port->addrs_lock);
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
ida_init(&port->ida);
@@ -181,6 +182,7 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -189,10 +191,10 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+ spin_lock_bh(&port->addrs_lock);
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
- rcu_read_unlock();
+ spin_unlock_bh(&port->addrs_lock);
return 0;
}
@@ -206,10 +208,10 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
+ spin_lock_bh(&ipvlan->port->addrs_lock);
+ list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
- rcu_read_unlock();
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return 0;
}
@@ -579,7 +581,6 @@ int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
- spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -657,13 +658,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
ida_free(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
@@ -817,6 +818,8 @@ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
+ assert_spin_locked(&ipvlan->port->addrs_lock);
+
addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
if (!addr)
return -ENOMEM;
@@ -847,16 +850,16 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr) {
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return;
}
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
kfree_rcu(addr, rcu);
}
@@ -878,14 +881,14 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -924,21 +927,24 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
NL_SET_ERR_MSG(i6vi->extack,
"Address already assigned to an ipvlan device");
- return notifier_from_errno(-EADDRINUSE);
+ ret = notifier_from_errno(-EADDRINUSE);
}
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
- return NOTIFY_OK;
+ return ret;
}
#endif
@@ -946,14 +952,14 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -995,21 +1001,24 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
struct in_validator_info *ivi = (struct in_validator_info *)ptr;
struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ int ret = NOTIFY_OK;
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
NL_SET_ERR_MSG(ivi->extack,
"Address already assigned to an ipvlan device");
- return notifier_from_errno(-EADDRINUSE);
+ ret = notifier_from_errno(-EADDRINUSE);
}
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
break;
}
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 7966545512cf..b4df7e184791 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -59,7 +59,7 @@ struct macvlan_port {
struct macvlan_source_entry {
struct hlist_node hlist;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
unsigned char addr[6+2] __aligned(sizeof(u16));
struct rcu_head rcu;
};
@@ -146,7 +146,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source(
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
if (ether_addr_equal_64bits(entry->addr, addr) &&
- entry->vlan == vlan)
+ rcu_access_pointer(entry->vlan) == vlan)
return entry;
}
return NULL;
@@ -168,7 +168,7 @@ static int macvlan_hash_add_source(struct macvlan_dev *vlan,
return -ENOMEM;
ether_addr_copy(entry->addr, addr);
- entry->vlan = vlan;
+ RCU_INIT_POINTER(entry->vlan, vlan);
h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
hlist_add_head_rcu(&entry->hlist, h);
vlan->macaddr_count++;
@@ -187,6 +187,7 @@ static void macvlan_hash_add(struct macvlan_dev *vlan)
static void macvlan_hash_del_source(struct macvlan_source_entry *entry)
{
+ RCU_INIT_POINTER(entry->vlan, NULL);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
}
@@ -390,7 +391,7 @@ static void macvlan_flush_sources(struct macvlan_port *port,
int i;
hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist)
- if (entry->vlan == vlan)
+ if (rcu_access_pointer(entry->vlan) == vlan)
macvlan_hash_del_source(entry);
vlan->macaddr_count = 0;
@@ -433,9 +434,14 @@ static bool macvlan_forward_source(struct sk_buff *skb,
hlist_for_each_entry_rcu(entry, h, hlist) {
if (ether_addr_equal_64bits(entry->addr, addr)) {
- if (entry->vlan->flags & MACVLAN_FLAG_NODST)
+ struct macvlan_dev *vlan = rcu_dereference(entry->vlan);
+
+ if (!vlan)
+ continue;
+
+ if (vlan->flags & MACVLAN_FLAG_NODST)
consume = true;
- macvlan_forward_source_one(skb, entry->vlan);
+ macvlan_forward_source_one(skb, vlan);
}
}
@@ -1680,7 +1686,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb,
struct macvlan_source_entry *entry;
hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) {
- if (entry->vlan != vlan)
+ if (rcu_access_pointer(entry->vlan) != vlan)
continue;
if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
return 1;
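
Annotating entry->vlan as __rcu, as above, means readers must go through the RCU accessors: rcu_access_pointer() for comparisons that never dereference, rcu_dereference() inside an RCU read-side section for actual use, and RCU_INIT_POINTER()/kfree_rcu() on the update side. A hedged sketch of that pattern with hypothetical types; toy_entry and toy_owner are not macvlan structures.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct toy_owner {
		int id;
	};

	struct toy_entry {
		struct toy_owner __rcu *owner;
		struct rcu_head rcu;
	};

	/* Comparison only: no dereference, so rcu_access_pointer() suffices. */
	static bool toy_entry_owned_by(struct toy_entry *e, struct toy_owner *o)
	{
		return rcu_access_pointer(e->owner) == o;
	}

	/* Reader: dereference under rcu_read_lock() and tolerate NULL. */
	static int toy_entry_owner_id(struct toy_entry *e)
	{
		struct toy_owner *o;
		int id = -1;

		rcu_read_lock();
		o = rcu_dereference(e->owner);
		if (o)
			id = o->id;
		rcu_read_unlock();

		return id;
	}

	/* Updater: clear the pointer, then free after a grace period. */
	static void toy_entry_release(struct toy_entry *e)
	{
		RCU_INIT_POINTER(e->owner, NULL);
		kfree_rcu(e, rcu);
	}
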
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index e55be6dc9ae7..d6b9004c61dc 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -63,6 +63,13 @@ static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+ /* Workaround for read-after-write issue.
+ * The controller may return stale data if a read follows immediately
+ * after a write. A dummy read forces the hardware to update its
+ * internal state, ensuring that the next real read returns correct data.
+ */
+ ioread32(ctx->base + ASPEED_MDIO_CTRL);
+
return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
!(ctrl & ASPEED_MDIO_CTRL_FIRE),
ASPEED_MDIO_INTERVAL_US,
diff --git a/drivers/net/mdio/mdio-realtek-rtl9300.c b/drivers/net/mdio/mdio-realtek-rtl9300.c
index 33694c3ff9a7..405a07075dd1 100644
--- a/drivers/net/mdio/mdio-realtek-rtl9300.c
+++ b/drivers/net/mdio/mdio-realtek-rtl9300.c
@@ -354,7 +354,6 @@ static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_pri
struct fwnode_handle *node)
{
struct rtl9300_mdio_chan *chan;
- struct fwnode_handle *child;
struct mii_bus *bus;
u32 mdio_bus;
int err;
@@ -371,7 +370,7 @@ static int rtl9300_mdiobus_probe_one(struct device *dev, struct rtl9300_mdio_pri
* compatible = "ethernet-phy-ieee802.3-c45". This does mean we can't
* support both c45 and c22 on the same MDIO bus.
*/
- fwnode_for_each_child_node(node, child)
+ fwnode_for_each_child_node_scoped(node, child)
if (fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45"))
priv->smi_bus_is_c45[mdio_bus] = true;
@@ -409,7 +408,6 @@ static int rtl9300_mdiobus_map_ports(struct device *dev)
{
struct rtl9300_mdio_priv *priv = dev_get_drvdata(dev);
struct device *parent = dev->parent;
- struct fwnode_handle *port;
int err;
struct fwnode_handle *ports __free(fwnode_handle) =
@@ -418,7 +416,7 @@ static int rtl9300_mdiobus_map_ports(struct device *dev)
return dev_err_probe(dev, -EINVAL, "%pfwP missing ethernet-ports\n",
dev_fwnode(parent));
- fwnode_for_each_child_node(ports, port) {
+ fwnode_for_each_child_node_scoped(ports, port) {
struct device_node *mdio_dn;
u32 addr;
u32 bus;
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 49537d3c4120..5f17f68f3c08 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -244,7 +244,9 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+ mutex_lock(&nsim_dev->progs_list_lock);
list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
+ mutex_unlock(&nsim_dev->progs_list_lock);
prog->aux->offload->dev_priv = state;
@@ -273,12 +275,16 @@ static int nsim_bpf_translate(struct bpf_prog *prog)
static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
+ struct nsim_dev *nsim_dev;
state = prog->aux->offload->dev_priv;
+ nsim_dev = state->nsim_dev;
WARN(state->is_loaded,
"offload state destroyed while program still bound");
debugfs_remove_recursive(state->ddir);
+ mutex_lock(&nsim_dev->progs_list_lock);
list_del(&state->l);
+ mutex_unlock(&nsim_dev->progs_list_lock);
kfree(state);
}
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 70e8c38ddad6..d16b95304aa7 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -332,6 +332,11 @@ static ssize_t link_device_store(const struct bus_type *bus, const char *buf, si
rcu_assign_pointer(nsim_a->peer, nsim_b);
rcu_assign_pointer(nsim_b->peer, nsim_a);
+ if (netif_running(dev_a) && netif_running(dev_b)) {
+ netif_carrier_on(dev_a);
+ netif_carrier_on(dev_b);
+ }
+
out_err:
put_net(ns_b);
put_net(ns_a);
@@ -381,6 +386,9 @@ static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf,
if (!peer)
goto out_put_netns;
+ netif_carrier_off(dev);
+ netif_carrier_off(peer->netdev);
+
err = 0;
RCU_INIT_POINTER(nsim->peer, NULL);
RCU_INIT_POINTER(peer->peer, NULL);
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 2683a989873e..dfd571b22107 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1647,6 +1647,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
nsim_dev->test2 = NSIM_DEV_TEST2_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
+ mutex_init(&nsim_dev->progs_list_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
@@ -1785,6 +1786,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
devl_unregister(devlink);
kfree(nsim_dev->vfconfigs);
kfree(nsim_dev->fa_cookie);
+ mutex_destroy(&nsim_dev->progs_list_lock);
devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index d1a941e2b18f..46c67983c517 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -324,6 +324,7 @@ struct nsim_dev {
u32 prog_id_gen;
struct list_head bpf_bound_progs;
struct list_head bpf_bound_maps;
+ struct mutex progs_list_lock;
struct netdev_phys_item_id switch_id;
struct list_head port_list;
bool fw_update_status;
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 149ddf51d785..87df3a9dfc9b 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -93,12 +93,10 @@ static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
{
switch (interface) {
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
- case PHY_INTERFACE_MODE_2500BASEX:
- return LINK_INBAND_DISABLE;
-
default:
return 0;
}
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 9766dd99afaa..12ff4c1f285d 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -277,7 +277,7 @@ static int xway_gphy_init_leds(struct phy_device *phydev)
static int xway_gphy_config_init(struct phy_device *phydev)
{
- struct device_node *np = phydev->mdio.dev.of_node;
+ struct device_node *np;
int err;
/* Mask all interrupts */
@@ -286,7 +286,10 @@ static int xway_gphy_config_init(struct phy_device *phydev)
return err;
/* Use default LED configuration if 'leds' node isn't defined */
- if (!of_get_child_by_name(np, "leds"))
+ np = of_get_child_by_name(phydev->mdio.dev.of_node, "leds");
+ if (np)
+ of_node_put(np);
+ else
xway_gphy_init_leds(phydev);
/* Clear all pending interrupts */
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index cd09fbf92ef2..2c4bbc236202 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -1167,9 +1167,9 @@ static int mt798x_phy_calibration(struct phy_device *phydev)
}
buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
if (IS_ERR(buf))
return PTR_ERR(buf);
- nvmem_cell_put(cell);
if (!buf[0] || !buf[1] || !buf[2] || !buf[3] || len < 4 * sizeof(u32)) {
phydev_err(phydev, "invalid efuse data\n");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 05de68b9f719..8208ecbb575c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2643,11 +2643,21 @@ static int kszphy_probe(struct phy_device *phydev)
kszphy_parse_led_mode(phydev);
- clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, "rmii-ref");
+ clk = devm_clk_get_optional(&phydev->mdio.dev, "rmii-ref");
/* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
if (!IS_ERR_OR_NULL(clk)) {
- unsigned long rate = clk_get_rate(clk);
bool rmii_ref_clk_sel_25_mhz;
+ unsigned long rate;
+ int err;
+
+ err = clk_prepare_enable(clk);
+ if (err) {
+ phydev_err(phydev, "Failed to enable rmii-ref clock\n");
+ return err;
+ }
+
+ rate = clk_get_rate(clk);
+ clk_disable_unprepare(clk);
if (type)
priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel;
@@ -2665,13 +2675,12 @@ static int kszphy_probe(struct phy_device *phydev)
}
} else if (!clk) {
/* unnamed clock from the generic ethernet-phy binding */
- clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
+ clk = devm_clk_get_optional(&phydev->mdio.dev, NULL);
}
if (IS_ERR(clk))
return PTR_ERR(clk);
- clk_disable_unprepare(clk);
priv->clk = clk;
if (ksz8041_fiber_mode(phydev))
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 89b5b19a9bd2..42d46b5758fc 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1741,10 +1741,10 @@ static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
val |= YT8521_LED_1000_ON_EN;
if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
- val |= YT8521_LED_HDX_ON_EN;
+ val |= YT8521_LED_FDX_ON_EN;
if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
- val |= YT8521_LED_FDX_ON_EN;
+ val |= YT8521_LED_HDX_ON_EN;
if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
test_bit(TRIGGER_NETDEV_RX, &rules))
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
index e5d137a37a1d..42a5fe3f115f 100644
--- a/drivers/net/phy/mxl-86110.c
+++ b/drivers/net/phy/mxl-86110.c
@@ -938,6 +938,9 @@ static struct phy_driver mxl_phy_drvs[] = {
PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
.name = "MXL86110 Gigabit Ethernet",
.config_init = mxl86110_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = genphy_soft_reset,
.get_wol = mxl86110_get_wol,
.set_wol = mxl86110_set_wol,
.led_brightness_set = mxl86110_led_brightness_set,
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 6166e9196364..47f095bd91ce 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -519,6 +519,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+ SFP_QUIRK_F("H-COM", "SPP425H-GAB4", sfp_fixup_potron),
+
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
// 2600MBd in their EERPOM
SFP_QUIRK_S("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
@@ -765,7 +767,7 @@ static int sfp_smbus_byte_write(struct sfp *sfp, bool a2, u8 dev_addr,
dev_addr++;
}
- return 0;
+ return data - (u8 *)buf;
}
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index 4d5c9ae8f221..c08a5c1bd6e4 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -878,7 +878,7 @@ static void __team_queue_override_enabled_check(struct team *team)
static void team_queue_override_port_prio_changed(struct team *team,
struct team_port *port)
{
- if (!port->queue_id || team_port_enabled(port))
+ if (!port->queue_id || !team_port_enabled(port))
return;
__team_queue_override_port_del(team, port);
__team_queue_override_port_add(team, port);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7fd763917ae2..6ab3486072cb 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -335,6 +335,11 @@ int asix_read_phy_addr(struct usbnet *dev, bool internal)
offset = (internal ? 1 : 0);
ret = buf[offset];
+ if (ret >= PHY_MAX_ADDR) {
+ netdev_err(dev->net, "invalid PHY address: %d\n", ret);
+ return -ENODEV;
+ }
+
netdev_dbg(dev->net, "%s PHY address 0x%x\n",
internal ? "internal" : "external", ret);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index f613e4bc68c8..758a423a459b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -210,11 +210,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_phy_addr(dev, priv->use_embdphy);
if (ret < 0)
goto free;
- if (ret >= PHY_MAX_ADDR) {
- netdev_err(dev->net, "Invalid PHY address %#x\n", ret);
- ret = -ENODEV;
- goto free;
- }
+
priv->phy_addr = ret;
ax88172a_reset_phy(dev, priv->use_embdphy);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 8b6d6a1b3c2e..2b4716ccf0c5 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -604,10 +604,6 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
- USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
- .driver_info = (unsigned long)&dm9601_info,
- },
- {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 81ca64debc5b..c514483134f0 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -168,6 +168,8 @@ static int update_eth_regs_async(pegasus_t *pegasus)
netif_device_detach(pegasus->net);
netif_err(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ usb_free_urb(async_urb);
+ kfree(req);
}
return ret;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 278e6cb6f4d9..e40b0669d9f4 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -211,6 +211,8 @@ static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg)
if (res == -ENODEV)
netif_device_detach(dev->netdev);
dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res);
+ kfree(req);
+ usb_free_urb(async_urb);
}
return res;
}
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 091bc2aca7e8..820c4c506979 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -52,7 +52,7 @@ static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ return usbnet_write_cmd(dev, SR_WR_REG, SR_REQ_WR_REG,
value, reg, NULL, 0);
}
@@ -65,7 +65,7 @@ static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ usbnet_write_cmd_async(dev, SR_WR_REG, SR_REQ_WR_REG,
value, reg, NULL, 0);
}
@@ -539,6 +539,11 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */
.driver_info = (unsigned long)&sr9700_driver_info,
},
+ {
+ /* SR9700 with virtual driver CD-ROM - interface 0 is the CD-ROM device */
+ USB_DEVICE_INTERFACE_NUMBER(0x0fe6, 0x9702, 1),
+ .driver_info = (unsigned long)&sr9700_driver_info,
+ },
{}, /* END */
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1d9faa70ba3b..9280ef544bbb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -831,7 +831,6 @@ int usbnet_stop(struct net_device *net)
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue(net);
- netdev_reset_queue(net);
netif_info(dev, ifdown, dev->net,
"stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
@@ -875,6 +874,8 @@ int usbnet_stop(struct net_device *net)
timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->kevent);
+ netdev_reset_queue(net);
+
if (!pm)
usb_autopm_put_interface(dev->intf);
@@ -1820,9 +1821,12 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
if ((dev->driver_info->flags & FLAG_NOARP) != 0)
net->flags |= IFF_NOARP;
- /* maybe the remote can't receive an Ethernet MTU */
- if (net->mtu > (dev->hard_mtu - net->hard_header_len))
- net->mtu = dev->hard_mtu - net->hard_header_len;
+ if (net->max_mtu > (dev->hard_mtu - net->hard_header_len))
+ net->max_mtu = dev->hard_mtu - net->hard_header_len;
+
+ if (net->mtu > net->max_mtu)
+ net->mtu = net->max_mtu;
+
} else if (!info->in || !info->out)
status = usbnet_get_endpoints(dev, udev);
else {
@@ -1983,6 +1987,7 @@ int usbnet_resume(struct usb_interface *intf)
} else {
netif_trans_update(dev->net);
__skb_queue_tail(&dev->txq, skb);
+ netdev_sent_queue(dev->net, skb->len);
}
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 14e6f2a2fb77..9982412fd7f2 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -228,16 +228,20 @@ static void veth_get_ethtool_stats(struct net_device *dev,
const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
const void *base = (void *)&rq_stats->vs;
unsigned int start, tx_idx = idx;
+ u64 buf[VETH_TQ_STATS_LEN];
size_t offset;
- tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
do {
start = u64_stats_fetch_begin(&rq_stats->syncp);
for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
offset = veth_tq_stats_desc[j].offset;
- data[tx_idx + j] += *(u64 *)(base + offset);
+ buf[j] = *(u64 *)(base + offset);
}
} while (u64_stats_fetch_retry(&rq_stats->syncp, start));
+
+ tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
+ for (j = 0; j < VETH_TQ_STATS_LEN; j++)
+ data[tx_idx + j] += buf[j];
}
pp_idx = idx + dev->real_num_tx_queues * VETH_TQ_STATS_LEN;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1bb3aeca66c6..db88dcaefb20 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -425,9 +425,6 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
- struct virtio_net_rss_config_hdr *rss_hdr;
- struct virtio_net_rss_config_trailer rss_trailer;
- u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
/* Has control virtqueue */
bool has_cvq;
@@ -441,9 +438,6 @@ struct virtnet_info {
/* Packet virtio header size */
u8 hdr_len;
- /* Work struct for delayed refilling if we run low on memory. */
- struct delayed_work refill;
-
/* UDP tunnel support */
bool tx_tnl;
@@ -451,12 +445,6 @@ struct virtnet_info {
bool rx_tnl_csum;
- /* Is delayed refill enabled? */
- bool refill_enabled;
-
- /* The lock to synchronize the access to refill_enabled */
- spinlock_t refill_lock;
-
/* Work struct for config space updates */
struct work_struct config_work;
@@ -493,7 +481,16 @@ struct virtnet_info {
struct failover *failover;
u64 device_stats_cap;
+
+ struct virtio_net_rss_config_hdr *rss_hdr;
+
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct virtio_net_rss_config_trailer, rss_trailer, hash_key_data,
+ u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
+ );
};
+static_assert(offsetof(struct virtnet_info, rss_trailer.hash_key_data) ==
+ offsetof(struct virtnet_info, rss_hash_key_data));
struct padded_vnet_hdr {
struct virtio_net_hdr_v1_hash hdr;
@@ -720,20 +717,6 @@ static void virtnet_rq_free_buf(struct virtnet_info *vi,
put_page(virt_to_head_page(buf));
}
-static void enable_delayed_refill(struct virtnet_info *vi)
-{
- spin_lock_bh(&vi->refill_lock);
- vi->refill_enabled = true;
- spin_unlock_bh(&vi->refill_lock);
-}
-
-static void disable_delayed_refill(struct virtnet_info *vi)
-{
- spin_lock_bh(&vi->refill_lock);
- vi->refill_enabled = false;
- spin_unlock_bh(&vi->refill_lock);
-}
-
static void enable_rx_mode_work(struct virtnet_info *vi)
{
rtnl_lock();
@@ -2948,42 +2931,6 @@ static void virtnet_napi_disable(struct receive_queue *rq)
napi_disable(napi);
}
-static void refill_work(struct work_struct *work)
-{
- struct virtnet_info *vi =
- container_of(work, struct virtnet_info, refill.work);
- bool still_empty;
- int i;
-
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct receive_queue *rq = &vi->rq[i];
-
- /*
- * When queue API support is added in the future and the call
- * below becomes napi_disable_locked, this driver will need to
- * be refactored.
- *
- * One possible solution would be to:
- * - cancel refill_work with cancel_delayed_work (note:
- * non-sync)
- * - cancel refill_work with cancel_delayed_work_sync in
- * virtnet_remove after the netdev is unregistered
- * - wrap all of the work in a lock (perhaps the netdev
- * instance lock)
- * - check netif_running() and return early to avoid a race
- */
- napi_disable(&rq->napi);
- still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_do_enable(rq->vq, &rq->napi);
-
- /* In theory, this can happen: if we don't get any buffers in
- * we will *never* try to fill again.
- */
- if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
- }
-}
-
static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
struct receive_queue *rq,
int budget,
@@ -3046,16 +2993,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
else
packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
+ u64_stats_set(&stats.packets, packets);
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
- if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
- spin_lock(&vi->refill_lock);
- if (vi->refill_enabled)
- schedule_delayed_work(&vi->refill, 0);
- spin_unlock(&vi->refill_lock);
- }
+ if (!try_fill_recv(vi, rq, GFP_ATOMIC))
+ /* We need to retry refilling in the next NAPI poll so
+ * we must return budget to make sure the NAPI is
+ * repolled.
+ */
+ packets = budget;
}
- u64_stats_set(&stats.packets, packets);
u64_stats_update_begin(&rq->stats.syncp);
for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
size_t offset = virtnet_rq_stats_desc[i].offset;
@@ -3226,13 +3173,12 @@ static int virtnet_open(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i, err;
- enable_delayed_refill(vi);
-
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
- /* Make sure we have some buffers: if oom use wq. */
- if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
+ /* Pre-fill rq aggressively, to make sure we are ready to
+ * get packets immediately.
+ */
+ try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
err = virtnet_enable_queue_pair(vi, i);
if (err < 0)
@@ -3251,9 +3197,6 @@ static int virtnet_open(struct net_device *dev)
return 0;
err_enable_qp:
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
-
for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
virtnet_cancel_dim(vi, &vi->rq[i].dim);
@@ -3432,8 +3375,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void __virtnet_rx_pause(struct virtnet_info *vi,
- struct receive_queue *rq)
+static void virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
@@ -3447,62 +3390,37 @@ static void virtnet_rx_pause_all(struct virtnet_info *vi)
{
int i;
- /*
- * Make sure refill_work does not run concurrently to
- * avoid napi_disable race which leads to deadlock.
- */
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++)
- __virtnet_rx_pause(vi, &vi->rq[i]);
+ virtnet_rx_pause(vi, &vi->rq[i]);
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
- /*
- * Make sure refill_work does not run concurrently to
- * avoid napi_disable race which leads to deadlock.
- */
- disable_delayed_refill(vi);
- cancel_delayed_work_sync(&vi->refill);
- __virtnet_rx_pause(vi, rq);
-}
-
-static void __virtnet_rx_resume(struct virtnet_info *vi,
- struct receive_queue *rq,
- bool refill)
-{
- bool running = netif_running(vi->dev);
- bool schedule_refill = false;
+ if (netif_running(vi->dev)) {
+ /* Pre-fill rq aggressively, to make sure we are ready to get
+ * packets immediately.
+ */
+ if (refill)
+ try_fill_recv(vi, rq, GFP_KERNEL);
- if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
- schedule_refill = true;
- if (running)
virtnet_napi_enable(rq);
-
- if (schedule_refill)
- schedule_delayed_work(&vi->refill, 0);
+ }
}
static void virtnet_rx_resume_all(struct virtnet_info *vi)
{
int i;
- enable_delayed_refill(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (i < vi->curr_queue_pairs)
- __virtnet_rx_resume(vi, &vi->rq[i], true);
+ virtnet_rx_resume(vi, &vi->rq[i], true);
else
- __virtnet_rx_resume(vi, &vi->rq[i], false);
+ virtnet_rx_resume(vi, &vi->rq[i], false);
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
-{
- enable_delayed_refill(vi);
- __virtnet_rx_resume(vi, rq, true);
-}
-
static int virtnet_rx_resize(struct virtnet_info *vi,
struct receive_queue *rq, u32 ring_num)
{
@@ -3516,7 +3434,7 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
if (err)
netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
- virtnet_rx_resume(vi, rq);
+ virtnet_rx_resume(vi, rq, true);
return err;
}
@@ -3791,7 +3709,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
old_rss_hdr = vi->rss_hdr;
old_rss_trailer = vi->rss_trailer;
- vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ vi->rss_hdr = devm_kzalloc(&vi->vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
if (!vi->rss_hdr) {
vi->rss_hdr = old_rss_hdr;
return -ENOMEM;
@@ -3802,7 +3720,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (!virtnet_commit_rss_command(vi)) {
/* restore ctrl_rss if commit_rss_command failed */
- devm_kfree(&dev->dev, vi->rss_hdr);
+ devm_kfree(&vi->vdev->dev, vi->rss_hdr);
vi->rss_hdr = old_rss_hdr;
vi->rss_trailer = old_rss_trailer;
@@ -3810,7 +3728,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
queue_pairs);
return -EINVAL;
}
- devm_kfree(&dev->dev, old_rss_hdr);
+ devm_kfree(&vi->vdev->dev, old_rss_hdr);
goto succ;
}
@@ -3829,11 +3747,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
}
succ:
vi->curr_queue_pairs = queue_pairs;
- /* virtnet_open() will refill when device is going to up. */
- spin_lock_bh(&vi->refill_lock);
- if (dev->flags & IFF_UP && vi->refill_enabled)
- schedule_delayed_work(&vi->refill, 0);
- spin_unlock_bh(&vi->refill_lock);
+ if (dev->flags & IFF_UP) {
+ local_bh_disable();
+ for (int i = 0; i < vi->curr_queue_pairs; ++i)
+ virtqueue_napi_schedule(&vi->rq[i].napi, vi->rq[i].vq);
+ local_bh_enable();
+ }
return 0;
}
@@ -3843,10 +3762,6 @@ static int virtnet_close(struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int i;
- /* Make sure NAPI doesn't schedule refill work */
- disable_delayed_refill(vi);
- /* Make sure refill_work doesn't re-enable napi! */
- cancel_delayed_work_sync(&vi->refill);
/* Prevent the config change callback from changing carrier
* after close
*/
@@ -5802,7 +5717,6 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
- enable_delayed_refill(vi);
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
@@ -5892,7 +5806,7 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
rq->xsk_pool = pool;
- virtnet_rx_resume(vi, rq);
+ virtnet_rx_resume(vi, rq, true);
if (pool)
return 0;
@@ -6559,7 +6473,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
if (!vi->rq)
goto err_rq;
- INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
@@ -6901,7 +6814,6 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
- spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
vi->mergeable_rx_bufs = true;
@@ -7165,7 +7077,6 @@ free_failover:
net_failover_destroy(vi->failover);
free_vqs:
virtio_reset_device(vdev);
- cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
virtnet_del_vqs(vi);
free:
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 7bbda46cfd93..82f120ee1c66 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1727,8 +1727,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ ce_state->src_ring->base_addr_owner_space_unaligned,
+ ce_state->src_ring->base_addr_ce_space_unaligned);
kfree(ce_state->src_ring);
}
@@ -1737,8 +1737,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ ce_state->dest_ring->base_addr_owner_space_unaligned,
+ ce_state->dest_ring->base_addr_ce_space_unaligned);
kfree(ce_state->dest_ring);
}
@@ -1758,8 +1758,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
(ce_state->src_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
- ce_state->src_ring->base_addr_owner_space,
- ce_state->src_ring->base_addr_ce_space);
+ ce_state->src_ring->base_addr_owner_space_unaligned,
+ ce_state->src_ring->base_addr_ce_space_unaligned);
kfree(ce_state->src_ring);
}
@@ -1768,8 +1768,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
- ce_state->dest_ring->base_addr_owner_space,
- ce_state->dest_ring->base_addr_ce_space);
+ ce_state->dest_ring->base_addr_owner_space_unaligned,
+ ce_state->dest_ring->base_addr_ce_space_unaligned);
kfree(ce_state->dest_ring);
}
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index 9a63608838ac..4aea58446838 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -984,8 +984,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->src_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->src_ring->base_addr_owner_space,
- pipe->src_ring->base_addr_ce_space);
+ pipe->src_ring->base_addr_owner_space_unaligned,
+ pipe->src_ring->base_addr_ce_space_unaligned);
kfree(pipe->src_ring);
pipe->src_ring = NULL;
}
@@ -995,8 +995,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->dest_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->dest_ring->base_addr_owner_space,
- pipe->dest_ring->base_addr_ce_space);
+ pipe->dest_ring->base_addr_owner_space_unaligned,
+ pipe->dest_ring->base_addr_ce_space_unaligned);
kfree(pipe->dest_ring);
pipe->dest_ring = NULL;
}
@@ -1007,8 +1007,8 @@ void ath12k_ce_free_pipes(struct ath12k_base *ab)
dma_free_coherent(ab->dev,
pipe->status_ring->nentries * desc_sz +
CE_DESC_RING_ALIGN,
- pipe->status_ring->base_addr_owner_space,
- pipe->status_ring->base_addr_ce_space);
+ pipe->status_ring->base_addr_owner_space_unaligned,
+ pipe->status_ring->base_addr_ce_space_unaligned);
kfree(pipe->status_ring);
pipe->status_ring = NULL;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index f7a2a544bef2..e0e49f782bf8 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -5495,7 +5495,8 @@ static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
for_each_set_bit(link_id, &links_map, ATH12K_NUM_MAX_LINKS) {
arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- if (!arvif || arvif->is_started)
+ if (!arvif || !arvif->is_created ||
+ arvif->ar->scan.arvif != arvif)
continue;
ar = arvif->ar;
@@ -9172,7 +9173,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
} else {
- link_id = 0;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ link_id = ATH12K_FIRST_SCAN_LINK;
+ else
+ link_id = 0;
}
arvif = rcu_dereference(ahvif->link[link_id]);
@@ -12142,6 +12146,9 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
if (drop)
return;
+ for_each_ar(ah, ar, i)
+ wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
+
/* vif can be NULL when flush() is considered for hw */
if (!vif) {
for_each_ar(ah, ar, i)
@@ -12149,9 +12156,6 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
return;
}
- for_each_ar(ah, ar, i)
- wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
-
ahvif = ath12k_vif_to_ahvif(vif);
links = ahvif->links_map;
for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
@@ -13343,7 +13347,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
ath12k_scan_abort(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
- wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+ wiphy_work_flush(hw->wiphy, &ar->scan.vdev_clean_wk);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index be8b2943094f..3ce5fcb0e460 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -6575,16 +6575,9 @@ static int freq_to_idx(struct ath12k *ar, int freq)
if (!sband)
continue;
- for (ch = 0; ch < sband->n_channels; ch++, idx++) {
- if (sband->channels[ch].center_freq <
- KHZ_TO_MHZ(ar->freq_range.start_freq) ||
- sband->channels[ch].center_freq >
- KHZ_TO_MHZ(ar->freq_range.end_freq))
- continue;
-
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
if (sband->channels[ch].center_freq == freq)
goto exit;
- }
}
exit:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 3391f07b01de..f8fc6f30fbe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1597,7 +1597,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
*/
static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
- unsigned int min_core, max_core, loaded_core;
+ int min_core, max_core, loaded_core;
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
const struct iwl_ucode_header *ucode;
@@ -1676,7 +1676,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (loaded_core < min_core || loaded_core > max_core) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
- "Driver supports FW core %u..%u, firmware is %u.\n",
+ "Driver supports FW core %d..%d, firmware is %d.\n",
min_core, max_core, loaded_core);
goto try_again;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
index ffeb37a7f830..231920425c06 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
@@ -121,6 +121,12 @@ static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp,
return 0;
}
+static int iwl_mld_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
@@ -279,6 +285,7 @@ void iwl_mld_ptp_init(struct iwl_mld *mld)
mld->ptp_data.ptp_clock_info.owner = THIS_MODULE;
mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime;
+ mld->ptp_data.ptp_clock_info.settime64 = iwl_mld_ptp_settime;
mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime;
mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c b/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
index 06a4c9f74797..ad156b82eaa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ptp.c
@@ -220,6 +220,12 @@ static int iwl_mvm_ptp_gettime(struct ptp_clock_info *ptp,
return 0;
}
+static int iwl_mvm_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static int iwl_mvm_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm,
@@ -281,6 +287,7 @@ void iwl_mvm_ptp_init(struct iwl_mvm *mvm)
mvm->ptp_data.ptp_clock_info.adjfine = iwl_mvm_ptp_adjfine;
mvm->ptp_data.ptp_clock_info.adjtime = iwl_mvm_ptp_adjtime;
mvm->ptp_data.ptp_clock_info.gettime64 = iwl_mvm_ptp_gettime;
+ mvm->ptp_data.ptp_clock_info.settime64 = iwl_mvm_ptp_settime;
mvm->ptp_data.scaled_freq = SCALE_FACTOR;
/* Give a short 'friendly name' to identify the PHC clock */
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 354c5ce66045..f3397dc6c422 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -825,7 +825,7 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
bool coex_flag)
{
- u8 i;
+ u8 i, j;
u32 rx_win_size;
struct mwifiex_private *priv;
@@ -863,8 +863,8 @@ static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
if (rx_win_size != priv->add_ba_param.rx_win_size) {
if (!priv->media_connected)
continue;
- for (i = 0; i < MAX_NUM_TID; i++)
- mwifiex_11n_delba(priv, i);
+ for (j = 0; j < MAX_NUM_TID; j++)
+ mwifiex_11n_delba(priv, j);
}
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index ea99167765b0..0457712286d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -3019,7 +3019,7 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
- dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s\n",
+ dev_info(dev->dev, "WM Firmware Version: %.10s, Build Time: %.15s",
hdr->fw_ver, hdr->build_date);
ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, false);
@@ -3048,7 +3048,7 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
}
hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
- dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s\n",
+ dev_info(dev->dev, "WA Firmware Version: %.10s, Build Time: %.15s",
hdr->fw_ver, hdr->build_date);
ret = mt76_connac_mcu_send_ram_firmware(dev, hdr, fw->data, true);
@@ -3101,7 +3101,6 @@ int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
int i, ret, sem, max_len = mt76_is_sdio(dev) ? 2048 : 4096;
const struct mt76_connac2_patch_hdr *hdr;
const struct firmware *fw = NULL;
- char build_date[17];
sem = mt76_connac_mcu_patch_sem_ctrl(dev, true);
switch (sem) {
@@ -3125,11 +3124,8 @@ int mt76_connac2_load_patch(struct mt76_dev *dev, const char *fw_name)
}
hdr = (const void *)fw->data;
- strscpy(build_date, hdr->build_date, sizeof(build_date));
- build_date[16] = '\0';
- strim(build_date);
- dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
- be32_to_cpu(hdr->hw_sw_ver), build_date);
+ dev_info(dev->dev, "HW/SW Version: 0x%x, Build Time: %.16s",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
struct mt76_connac2_patch_sec *sec;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index aa702ba7c9f5..d6c35e8d02a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -511,7 +511,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
if (sta) {
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
tid = ieee80211_get_tid(hdr);
- agg_state = sta_entry->tids[tid].agg.agg_state;
+ if (tid < MAX_TID_COUNT)
+ agg_state = sta_entry->tids[tid].agg.agg_state;
ampdu_density = sta->deflink.ht_cap.ampdu_density;
}
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index 99d7c629eac6..e35de52d8eb4 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -144,8 +144,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
+ bool might_indirect_under_power_off = rtwdev->chip->id == RTW_CHIP_TYPE_8822C;
+
if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
- !rtw_sdio_is_bus_addr(addr))
+ !rtw_sdio_is_bus_addr(addr) && might_indirect_under_power_off)
return false;
return !rtw_sdio_is_sdio30_supported(rtwdev) ||
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 009202c627d2..3b5126ffc81a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -965,8 +965,7 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
struct sk_buff *rx_skb;
int i;
- rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH | WQ_UNBOUND,
- 0);
+ rtwusb->rxwq = alloc_workqueue("rtw88_usb: rx wq", WQ_BH, 0);
if (!rtwusb->rxwq) {
rtw_err(rtwdev, "failed to create RX work queue\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index f3a853edfc11..8c8e074a3a70 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -2035,6 +2035,7 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->queues = MAX_HW_QUEUES;
hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
+ hw->vif_data_size = sizeof(struct vif_priv);
hw->max_rates = 1;
hw->max_rate_tries = MAX_RETRIES;
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f76087be2f75..6241866d39df 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -207,6 +207,11 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
if (total_blocks <= wl->tx_blocks_available) {
+ if (skb_headroom(skb) < (total_len - skb->len) &&
+ pskb_expand_head(skb, (total_len - skb->len), 0, GFP_ATOMIC)) {
+ wl1271_free_tx_id(wl, id);
+ return -EAGAIN;
+ }
desc = skb_push(skb, total_len - skb->len);
wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 551f5eb4e747..79cc63272134 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4040,7 +4040,7 @@ mac80211_hwsim_nan_dw_start(struct hrtimer *timer)
ieee80211_vif_to_wdev(data->nan_device_vif);
if (data->nan_curr_dw_band == NL80211_BAND_5GHZ)
- ch = ieee80211_get_channel(hw->wiphy, 5475);
+ ch = ieee80211_get_channel(hw->wiphy, 5745);
else
ch = ieee80211_get_channel(hw->wiphy, 2437);
@@ -4112,14 +4112,14 @@ static int mac80211_hwsim_stop_nan(struct ieee80211_hw *hw,
hrtimer_cancel(&data->nan_timer);
data->nan_device_vif = NULL;
- spin_lock(&hwsim_radio_lock);
+ spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data2, &hwsim_radios, list) {
if (data2->nan_device_vif) {
nan_cluster_running = true;
break;
}
}
- spin_unlock(&hwsim_radio_lock);
+ spin_unlock_bh(&hwsim_radio_lock);
if (!nan_cluster_running)
memset(hwsim_nan_cluster_id, 0, ETH_ALEN);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index fc928b298a98..b846889fcb09 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -456,6 +456,7 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
struct sk_buff_head *free_list;
union mux_msg mux_msg;
struct sk_buff *skb;
+ int i;
if (!ipc_mux->initialized)
return;
@@ -479,5 +480,10 @@ void ipc_mux_deinit(struct iosm_mux *ipc_mux)
ipc_mux->channel->dl_pipe.is_open = false;
}
+ if (ipc_mux->protocol != MUX_LITE) {
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
+ kfree(ipc_mux->ul_adb.pp_qlt[i]);
+ }
+
kfree(ipc_mux);
}
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 0dace12f5ad0..f8bc9a39bfa3 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -78,9 +78,8 @@ struct mhi_mbim_context {
struct mbim_tx_hdr {
struct usb_cdc_ncm_nth16 nth16;
-
- /* Must be last as it ends in a flexible-array member. */
struct usb_cdc_ncm_ndp16 ndp16;
+ struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
@@ -108,20 +107,20 @@ static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
- DEFINE_RAW_FLEX(struct mbim_tx_hdr, mbim_hdr, ndp16.dpe16, 2);
unsigned int dgram_size = skb->len;
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
+ struct mbim_tx_hdr *mbim_hdr;
/* Only one NDP is sent, containing the IP packet (no aggregation) */
/* Ensure we have enough headroom for crafting MBIM header */
- if (skb_cow_head(skb, __struct_size(mbim_hdr))) {
+ if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
dev_kfree_skb_any(skb);
return NULL;
}
- mbim_hdr = skb_push(skb, __struct_size(mbim_hdr));
+ mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
/* Fill NTB header */
nth16 = &mbim_hdr->nth16;
@@ -134,11 +133,12 @@ static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
/* Fill the unique NDP */
ndp16 = &mbim_hdr->ndp16;
ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
- ndp16->wLength = cpu_to_le16(struct_size(ndp16, dpe16, 2));
+ ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
+ + sizeof(struct usb_cdc_ncm_dpe16) * 2);
ndp16->wNextNdpIndex = 0;
/* Datagram follows the mbim header */
- ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(__struct_size(mbim_hdr));
+ ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
/* null termination */
@@ -584,8 +584,7 @@ static void mhi_mbim_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
ndev->type = ARPHRD_RAWIP;
- ndev->needed_headroom =
- struct_size_t(struct mbim_tx_hdr, ndp16.dpe16, 2);
+ ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index b76bea6ab2d7..5af90ca6e063 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -395,6 +395,7 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
struct sk_buff *skb)
{
unsigned long long data_bus_addr, data_base_addr;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
struct device *dev = rxq->dpmaif_ctrl->dev;
struct dpmaif_bat_page *page_info;
unsigned int data_len;
@@ -402,18 +403,22 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
page_info = rxq->bat_frag->bat_skb;
page_info += t7xx_normal_pit_bid(pkt_info);
- dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
if (!page_info->page)
return -EINVAL;
+ if (shinfo->nr_frags >= MAX_SKB_FRAGS)
+ return -EINVAL;
+
+ dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
+
data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
data_base_addr = page_info->data_bus_addr;
data_offset = data_bus_addr - data_base_addr;
data_offset += page_info->offset;
data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ skb_add_rx_frag(skb, shinfo->nr_frags, page_info->page,
data_offset, data_len, page_info->data_len);
page_info->page = NULL;