Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/airoha/airoha_ppe.c | 2
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.h | 3
-rw-r--r-- drivers/net/ethernet/amd/pds_core/devlink.c | 3
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 22
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 19
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 34
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 55
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.c | 44
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sched.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 8
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 7
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 45
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc4_hw.h | 6
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf_common.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c | 198
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 31
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 72
-rw-r--r-- drivers/net/ethernet/freescale/fec_ptp.c | 64
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 91
-rw-r--r-- drivers/net/ethernet/freescale/fman/mac.h | 14
-rw-r--r-- drivers/net/ethernet/google/gve/gve.h | 7
-rw-r--r-- drivers/net/ethernet/google/gve/gve_dqo.h | 1
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 11
-rw-r--r-- drivers/net/ethernet/google/gve/gve_ptp.c | 12
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 73
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/Makefile | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h | 84
-rw-r--r-- drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 217
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_devlink.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 18
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/devlink/devlink.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fdir.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fw_update.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 22
-rw-r--r-- drivers/net/ethernet/intel/ice/virt/queues.c | 3
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf.h | 12
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 35
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 24
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 12
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 4
-rw-r--r-- drivers/net/ethernet/intel/idpf/xdp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 14
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 18
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 15
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 42
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 77
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c | 238
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/st.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 90
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 61
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c | 69
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 6
-rw-r--r-- drivers/net/ethernet/meta/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/meta/fbnic/Makefile | 1
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic.h | 15
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 2
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 2
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_irq.c | 34
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 81
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 41
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mdio.c | 195
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 11
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 8
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 9
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_phylink.c | 187
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 31
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c | 5
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 6
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 148
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 87
-rw-r--r-- drivers/net/ethernet/netronome/nfp/devlink_param.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_devlink.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 22
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 76
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.h | 6
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 105
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 13
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 28
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 48
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 58
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 245
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 295
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 35
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 30
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 33
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c | 48
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 164
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 81
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 99
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c | 2
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 3
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 6
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 469
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 394
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.h | 25
-rw-r--r-- drivers/net/ethernet/ti/netcp_ethss.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 45
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.h | 1
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_type.h | 2
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c | 260
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h | 5
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 38
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c | 10
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 23
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 2
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 34
204 files changed, 4485 insertions, 2084 deletions
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index c373f21d95f5..0caabb0c3aa0 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -308,7 +308,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
if (!airoha_is_valid_gdm_port(eth, port))
return -EINVAL;
- if (dsa_port >= 0)
+ if (dsa_port >= 0 || eth->ports[1])
pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
: port->id;
else
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 0b53a1fab46d..4a6b35c84dab 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -255,7 +255,8 @@ int pdsc_dl_flash_update(struct devlink *dl,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
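For context on this header change and the matching pds_core/devlink.c and bnxt_devlink.c hunks below: the devlink runtime-parameter "get" callback now receives a struct netlink_ext_ack *, matching what "set" already had, so a driver can attach an error message when a read fails. A minimal sketch of the updated callback shape, assuming a hypothetical driver (example_priv and its fields are illustrative, not from pds_core):

static int example_dl_enable_get(struct devlink *dl, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{
	struct example_priv *priv = devlink_priv(dl);	/* hypothetical */

	if (!priv->fw_ready) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware not ready");
		return -EBUSY;
	}

	ctx->val.vbool = priv->vif_enabled;	/* report current state */
	return 0;
}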
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index d8dc39da4161..b576be626a29 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -22,7 +22,8 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
}
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f3adf29b222b..0653e69f0ef7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1259,6 +1259,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
udp_tunnel_nic_reset_ntf(netdev);
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ goto err_txrx;
+
netif_tx_start_all_queues(netdev);
xgbe_start_timers(pdata);
@@ -1268,6 +1273,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
return 0;
+err_txrx:
+ hw_if->disable_rx(pdata);
+ hw_if->disable_tx(pdata);
+
err_irqs:
xgbe_free_irqs(pdata);
@@ -1574,11 +1583,6 @@ static int xgbe_open(struct net_device *netdev)
goto err_dev_wq;
}
- /* Reset the phy settings */
- ret = xgbe_phy_reset(pdata);
- if (ret)
- goto err_an_wq;
-
/* Enable the clocks */
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 35a381a83647..a68757e8fd22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -989,6 +989,7 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
return ret;
}
phy_data->phydev = phydev;
+ phy_data->phydev->mac_managed_pm = true;
xgbe_phy_external_phy_quirks(pdata);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index 1921741f7311..18b08277d2e1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
+#include "hw_atl/hw_atl_llh.h"
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)
@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
+{
+ int err;
+ u32 val;
+
+ /* Invalidate Descriptor Cache to prevent writing to the cached
+ * descriptors and to the data pointer of those descriptors
+ */
+ hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
+
+ err = aq_hw_err_from_flags(hw);
+ if (err)
+ goto err_exit;
+
+ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+ hw, val, val == 1, 1000U, 10000U);
+
+err_exit:
+ return err;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index ffa6e4067c21..d89c63d88e4a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
+int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f21de0c21e52..d23d23bed39f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
+
+ /* There will be an extra fragment */
+ if (buff->len > AQ_CFG_RX_HDR_SIZE)
+ frag_cnt++;
+
buff_ = buff;
do {
bool is_rsc_completed = true;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 493432d036b9..c7895bfb2ecf 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1198,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
- int err;
- u32 val;
-
hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
- /* Invalidate Descriptor Cache to prevent writing to the cached
- * descriptors and to the data pointer of those descriptors
- */
- hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
-
- err = aq_hw_err_from_flags(self);
-
- if (err)
- goto err_exit;
-
- readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
- self, val, val == 1, 1000U, 10000U);
-
-err_exit:
- return err;
+ return aq_hw_invalidate_descriptor_cache(self);
}
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index b0ed572e88c6..0ce9caae8799 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -759,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);
- return 0;
+ return aq_hw_invalidate_descriptor_cache(self);
}
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
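Both aquantia stop paths above now funnel through the new aq_hw_invalidate_descriptor_cache() helper, which toggles the cache-init bit and polls for completion with readx_poll_timeout_atomic() from <linux/iopoll.h>. A minimal sketch of that macro's contract, under illustrative names (read_init_done() stands in for the hw_atl register accessor):

#include <linux/iopoll.h>

static u32 read_init_done(void *hw)
{
	return *(volatile u32 *)hw;	/* stand-in for an MMIO read */
}

static int wait_cache_init_done(void *hw)
{
	u32 val;

	/* Re-evaluates read_init_done(hw) into val until val == 1,
	 * busy-waiting up to 1000us between reads and returning
	 * -ETIMEDOUT after 10000us; safe in atomic context since the
	 * _atomic variant uses udelay() rather than sleeping. */
	return readx_poll_timeout_atomic(read_init_done, hw, val,
					 val == 1, 1000U, 10000U);
}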
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc8dec37a9e4..3d853eeb976f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3355,19 +3355,11 @@ static int bnx2x_get_rxfh_fields(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 bnx2x_get_rx_ring_count(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = BNX2X_NUM_ETH_QUEUES(bp);
- return 0;
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return BNX2X_NUM_ETH_QUEUES(bp);
}
static int bnx2x_set_rxfh_fields(struct net_device *dev,
@@ -3674,7 +3666,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
@@ -3702,7 +3694,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
- .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rx_ring_count = bnx2x_get_rx_ring_count,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
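This bnx2x change (and the identical nicvf conversion further below) replaces a .get_rxnfc implementation that only answered ETHTOOL_GRXRINGS with the dedicated .get_rx_ring_count ethtool op, letting the core handle the rxnfc plumbing. A sketch of the minimal form such a driver ends up with (example_priv is hypothetical):

static u32 example_get_rx_ring_count(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

	return priv->num_rx_queues;	/* no switch on info->cmd needed */
}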
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f0f05d7315ac..aca4267babc8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -308,8 +308,11 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/****************************************************************************
* General service functions
****************************************************************************/
-
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config);
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
@@ -12813,14 +12816,9 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -EAGAIN;
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return bnx2x_hwtstamp_ioctl(bp, ifr);
- default:
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
- }
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
static int bnx2x_validate_addr(struct net_device *dev)
@@ -13036,6 +13034,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
+ .ndo_hwtstamp_get = bnx2x_hwtstamp_get,
+ .ndo_hwtstamp_set = bnx2x_hwtstamp_set,
};
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
@@ -15350,31 +15350,57 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
return 0;
}
-static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+static int bnx2x_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct bnx2x *bp = netdev_priv(dev);
int rc;
- DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+ DP(BNX2X_MSG_PTP, "HWTSTAMP SET called\n");
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ if (!netif_running(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
bp->hwtstamp_ioctl_called = true;
- bp->tx_type = config.tx_type;
- bp->rx_filter = config.rx_filter;
+ bp->tx_type = config->tx_type;
+ bp->rx_filter = config->rx_filter;
rc = bnx2x_configure_ptp_filters(bp);
- if (rc)
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "HW configuration failure");
return rc;
+ }
+
+ config->rx_filter = bp->rx_filter;
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct bnx2x *bp = netdev_priv(dev);
- config.rx_filter = bp->rx_filter;
+ config->rx_filter = bp->rx_filter;
+ config->tx_type = bp->tx_type;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
/* Configures HW for PTP */
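The bnx2x rework above follows the general recipe for retiring SIOCSHWTSTAMP handling from ndo_ioctl(): the core copies struct hwtstamp_config to and from userspace and hands drivers a kernel-space struct kernel_hwtstamp_config via dedicated ndos. A bare-bones sketch of the pair, assuming a hypothetical driver that just caches the settings:

static int example_hwtstamp_set(struct net_device *dev,
				struct kernel_hwtstamp_config *config,
				struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

	if (config->tx_type != HWTSTAMP_TX_ON &&
	    config->tx_type != HWTSTAMP_TX_OFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tx_type");
		return -ERANGE;
	}

	priv->tx_type = config->tx_type;
	priv->rx_filter = config->rx_filter;
	return 0;
}

static int example_hwtstamp_get(struct net_device *dev,
				struct kernel_hwtstamp_config *config)
{
	struct example_priv *priv = netdev_priv(dev);

	config->tx_type = priv->tx_type;
	config->rx_filter = priv->rx_filter;
	return 0;
}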
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a625e7c311dd..d17d0ea89c36 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -877,7 +877,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
next_tx_int:
cons = NEXT_TX(cons);
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, budget);
}
WRITE_ONCE(txr->tx_cons, cons);
@@ -4479,7 +4479,14 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+ RX_BD_TYPE_RX_AGG_BD;
+
+ /* On P7, setting EOP will cause the chip to disable
+ * Relaxed Ordering (RO) for TPA data. Disable EOP for
+ * potentially higher performance with RO.
+ */
+ if (BNXT_CHIP_P5_AND_MINUS(bp) || !(bp->flags & BNXT_FLAG_TPA))
+ type |= RX_BD_FLAGS_AGG_EOP;
bnxt_init_rxbd_pages(ring, type);
}
@@ -5688,6 +5695,10 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
u16 cmd = bnxt_vf_req_snif[i];
unsigned int bit, idx;
+ if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) &&
+ cmd == HWRM_PORT_PHY_QCFG)
+ continue;
+
idx = cmd / 32;
bit = cmd % 32;
data[idx] |= 1 << bit;
@@ -8506,6 +8517,11 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+ if (resp->roce_bidi_opt_mode &
+ FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED)
+ bp->cos0_cos1_shared = 1;
+ else
+ bp->cos0_cos1_shared = 0;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -9653,6 +9669,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
@@ -14020,11 +14038,19 @@ static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- int i = bnapi->index;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2;
+ int i = bnapi->index, j;
netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
+ for (j = 0; j < cpr->cp_ring_count; j++) {
+ cpr2 = &cpr->cp_ring_arr[j];
+ if (!cpr2->bnapi)
+ continue;
+ netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n",
+ i, j, cpr2->cp_ring_struct.fw_ring_id,
+ cpr2->cp_raw_cons);
+ }
}
static void bnxt_dbg_dump_states(struct bnxt *bp)
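One small but notable bnxt.c change above is the switch from dev_consume_skb_any() to napi_consume_skb() in the TX completion path: with a non-zero NAPI budget, the freed skb can go to the per-cpu NAPI skb cache instead of the general free path. Sketch of the idiom:

static void example_tx_complete(struct sk_buff *skb, int budget)
{
	/* budget > 0: called from NAPI poll, cache-friendly free;
	 * budget == 0: behaves like dev_consume_skb_any(). */
	napi_consume_skb(skb, budget);
}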
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3613a172483a..f5f07a7e6b29 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -131,6 +131,7 @@ struct rx_bd {
#define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
#define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
#define RX_BD_FLAGS_SOP (1 << 6)
+ #define RX_BD_FLAGS_AGG_EOP (1 << 6)
#define RX_BD_FLAGS_EOP (1 << 7)
#define RX_BD_FLAGS_BUFFERS (3 << 8)
#define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
@@ -2424,6 +2425,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 cos0_cos1_shared;
u8 num_tc;
u16 max_pfcwd_tmo_ms;
@@ -2482,6 +2484,7 @@ struct bnxt {
#define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
#define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
+ #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(9)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
#define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 67ca02d84c97..15de802bbac4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1086,7 +1086,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
@@ -1168,7 +1169,8 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 41686a6f84b5..efb9bf20e66b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -688,16 +688,22 @@ skip_ring_stats:
buf[j] = *(rx_port_stats_ext + n);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_bytes_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_bytes_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
for (i = 0; i < 8; i++, j++) {
- long n = bnxt_tx_pkts_pri_arr[i].base_off +
- bp->pri2cos_idx[i];
+ u8 cos_idx = bp->pri2cos_idx[i];
+ long n;
+ n = bnxt_tx_pkts_pri_arr[i].base_off + cos_idx;
buf[j] = *(tx_port_stats_ext + n);
+ if (bp->cos0_cos1_shared && !cos_idx)
+ buf[j] += *(tx_port_stats_ext + n + 1);
}
}
}
@@ -4617,6 +4623,11 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
return 0;
+ if (bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+ bp->link_info.phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE) {
+ NL_SET_ERR_MSG_MOD(extack, "Operation not supported as PHY type is Base-T");
+ return -EOPNOTSUPP;
+ }
switch (bp->link_info.module_status) {
case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 80fed2c07b9e..be7deb9cc410 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -332,6 +332,38 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return rc;
}
+static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input *req;
+ struct bnxt_vf_info *vf;
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ return 0;
+
+ vf = &bp->pf.vf[vf_id];
+
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(vf->fw_fid);
+ switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
+ case BNXT_VF_LINK_FORCED:
+ req->options =
+ FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
+ break;
+ case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
+ break;
+ default:
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ break;
+ }
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
struct bnxt *bp = netdev_priv(dev);
@@ -357,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
break;
default:
netdev_err(bp->dev, "Invalid link option\n");
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
- if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
+ rc = bnxt_set_vf_link_admin_state(bp, vf_id);
+ else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
@@ -666,15 +699,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
+ struct bnxt_vf_info *vf = &pf->vf[i];
+
+ vf->fw_fid = pf->first_vf_id + i;
+ rc = bnxt_set_vf_link_admin_state(bp, i);
+ if (rc)
+ break;
+
if (reset)
__bnxt_set_vf_params(bp, i);
- req->vf_id = cpu_to_le16(pf->first_vf_id + i);
+ req->vf_id = cpu_to_le16(vf->fw_fid);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
- pf->vf[i].fw_fid = pf->first_vf_id + i;
}
if (pf->active_vfs) {
@@ -741,6 +780,12 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+ if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
+ req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+ req->enables |=
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+ }
+
mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index f8c2c72b382d..927971c362f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -142,7 +142,6 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
- edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
netdev_unlock(dev);
@@ -159,8 +158,6 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
ulp = edev->ulp_tbl;
netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
- if (ulp->msix_requested)
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
@@ -298,7 +295,7 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
struct bnxt_ulp_ops *ops;
bool reset = false;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
@@ -321,7 +318,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
- if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ if (!edev)
return;
if (bnxt_ulp_registered(bp->edev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 7b9dd8ebe4bc..3c5b8a53f715 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -58,7 +58,6 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
- #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index fc6053414b7d..413028bdcacb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -541,21 +541,11 @@ static int nicvf_get_rxfh_fields(struct net_device *dev,
return 0;
}
-static int nicvf_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rules)
+static u32 nicvf_get_rx_ring_count(struct net_device *dev)
{
struct nicvf *nic = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = nic->rx_queues;
- ret = 0;
- break;
- default:
- break;
- }
- return ret;
+ return nic->rx_queues;
}
static int nicvf_set_rxfh_fields(struct net_device *dev,
@@ -861,7 +851,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
- .get_rxnfc = nicvf_get_rxnfc,
+ .get_rx_ring_count = nicvf_get_rx_ring_count,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7e2283c95b97..66b8854e059f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3485,7 +3485,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
struct adapter *adap = pi->adapter;
struct ch_sched_queue qe = { 0 };
struct ch_sched_params p = { 0 };
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 req_rate;
int err = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 1672d3afe5be..f8dcf0b4abcd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -56,7 +56,7 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct flow_action_entry *entry;
struct ch_sched_queue qe;
- struct sched_class *e;
+ struct ch_sched_class *e;
u64 max_link_rate;
u32 i, speed;
int ret;
@@ -180,7 +180,7 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *entry;
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u32 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 338b04f339b3..a2dcd2e24263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -330,7 +330,7 @@ static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
struct cxgb4_tc_port_mqprio *tc_port_mqprio;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- struct sched_class *e;
+ struct ch_sched_class *e;
int ret;
u8 i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index a1b14468d1ff..38a30aeee122 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -44,7 +44,7 @@ static int t4_sched_class_fw_cmd(struct port_info *pi,
{
struct adapter *adap = pi->adapter;
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
e = &s->tab[p->u.params.class];
@@ -122,7 +122,7 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
const u32 val)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *e, *end;
+ struct ch_sched_class *e, *end;
void *found = NULL;
/* Look for an entry with matching @val */
@@ -166,8 +166,8 @@ static void *t4_sched_entry_lookup(struct port_info *pi,
return found;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p)
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p)
{
struct port_info *pi = netdev2pinfo(dev);
struct sched_queue_entry *qe = NULL;
@@ -187,7 +187,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->queue < 0 || p->queue >= pi->nqsets)
@@ -218,7 +218,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
struct sched_queue_entry *qe = NULL;
struct adapter *adap = pi->adapter;
struct sge_eth_txq *txq;
- struct sched_class *e;
+ struct ch_sched_class *e;
unsigned int qid;
int err = 0;
@@ -260,7 +260,7 @@ static int t4_sched_flowc_unbind(struct port_info *pi, struct ch_sched_flowc *p)
{
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -288,7 +288,7 @@ static int t4_sched_flowc_bind(struct port_info *pi, struct ch_sched_flowc *p)
struct sched_table *s = pi->sched_tbl;
struct sched_flowc_entry *fe = NULL;
struct adapter *adap = pi->adapter;
- struct sched_class *e;
+ struct ch_sched_class *e;
int err = 0;
if (p->tid < 0 || p->tid >= adap->tids.neotids)
@@ -322,7 +322,7 @@ out_err:
}
static void t4_sched_class_unbind_all(struct port_info *pi,
- struct sched_class *e,
+ struct ch_sched_class *e,
enum sched_bind_type type)
{
if (!e)
@@ -476,12 +476,12 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
}
/* If @p is NULL, fetch any available unused class */
-static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
- const struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_lookup(struct port_info *pi,
+ const struct ch_sched_params *p)
{
struct sched_table *s = pi->sched_tbl;
- struct sched_class *found = NULL;
- struct sched_class *e, *end;
+ struct ch_sched_class *found = NULL;
+ struct ch_sched_class *e, *end;
if (!p) {
/* Get any available unused class */
@@ -522,10 +522,10 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
return found;
}
-static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
- struct ch_sched_params *p)
+static struct ch_sched_class *t4_sched_class_alloc(struct port_info *pi,
+ struct ch_sched_params *p)
{
- struct sched_class *e = NULL;
+ struct ch_sched_class *e = NULL;
u8 class_id;
int err;
@@ -579,8 +579,8 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
* scheduling class with matching @p is found, then the matching class is
* returned.
*/
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p)
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p)
{
struct port_info *pi = netdev2pinfo(dev);
u8 class_id;
@@ -607,7 +607,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
struct port_info *pi = netdev2pinfo(dev);
struct sched_table *s = pi->sched_tbl;
struct ch_sched_params p;
- struct sched_class *e;
+ struct ch_sched_class *e;
u32 speed;
int ret;
@@ -640,7 +640,7 @@ void cxgb4_sched_class_free(struct net_device *dev, u8 classid)
}
}
-static void t4_sched_class_free(struct net_device *dev, struct sched_class *e)
+static void t4_sched_class_free(struct net_device *dev, struct ch_sched_class *e)
{
struct port_info *pi = netdev2pinfo(dev);
@@ -660,7 +660,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
s->sched_size = sched_size;
for (i = 0; i < s->sched_size; i++) {
- memset(&s->tab[i], 0, sizeof(struct sched_class));
+ memset(&s->tab[i], 0, sizeof(struct ch_sched_class));
s->tab[i].idx = i;
s->tab[i].state = SCHED_STATE_UNUSED;
INIT_LIST_HEAD(&s->tab[i].entry_list);
@@ -682,7 +682,7 @@ void t4_cleanup_sched(struct adapter *adap)
continue;
for (i = 0; i < s->sched_size; i++) {
- struct sched_class *e;
+ struct ch_sched_class *e;
e = &s->tab[i];
if (e->state == SCHED_STATE_ACTIVE)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 6b3c778815f0..4d3b5a757536 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -71,7 +71,7 @@ struct sched_flowc_entry {
struct ch_sched_flowc param;
};
-struct sched_class {
+struct ch_sched_class {
u8 state;
u8 idx;
struct ch_sched_params info;
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[] __counted_by(sched_size);
+ struct ch_sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
@@ -103,15 +103,15 @@ static inline bool valid_class_id(struct net_device *dev, u8 class_id)
return true;
}
-struct sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
- struct ch_sched_queue *p);
+struct ch_sched_class *cxgb4_sched_queue_lookup(struct net_device *dev,
+ struct ch_sched_queue *p);
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
enum sched_bind_type type);
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
enum sched_bind_type type);
-struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
- struct ch_sched_params *p);
+struct ch_sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
+ struct ch_sched_params *p);
void cxgb4_sched_class_free(struct net_device *dev, u8 classid);
struct sched_table *t4_init_sched(unsigned int size);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 4036db466e18..ee19933e2cca 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -159,19 +159,13 @@ static u8 tcp_state_to_flowc_state(u8 state)
int send_tx_flowc_wr(struct sock *sk, int compl,
u32 snd_nxt, u32 rcv_nxt)
{
- struct flowc_packed {
- struct fw_flowc_wr fc;
- struct fw_flowc_mnemval mnemval[FW_FLOWC_MNEM_MAX];
- } __packed sflowc;
+ DEFINE_RAW_FLEX(struct fw_flowc_wr, flowc, mnemval, FW_FLOWC_MNEM_MAX);
int nparams, paramidx, flowclen16, flowclen;
- struct fw_flowc_wr *flowc;
struct chtls_sock *csk;
struct tcp_sock *tp;
csk = rcu_dereference_sk_user_data(sk);
tp = tcp_sk(sk);
- memset(&sflowc, 0, sizeof(sflowc));
- flowc = &sflowc.fc;
#define FLOWC_PARAM(__m, __v) \
do { \
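The chtls hunk above swaps a hand-rolled "struct plus fixed trailing array" wrapper for DEFINE_RAW_FLEX() from <linux/overflow.h>, which declares zero-initialized on-stack storage sized for the struct plus a bounded number of flexible-array elements (hence the dropped memset()). A self-contained sketch with a hypothetical flex-array struct:

#include <linux/overflow.h>

struct example_wr {		/* hypothetical work-request layout */
	u32 op;
	u32 nvals;
	u32 vals[];		/* flexible array member */
};

static void example_build_wr(void)
{
	/* Storage for struct example_wr plus 8 vals[] entries, zeroed;
	 * "wr" is a struct example_wr * into that buffer. */
	DEFINE_RAW_FLEX(struct example_wr, wr, vals, 8);

	wr->op = 1;
	wr->nvals = 1;
	wr->vals[0] = 42;
}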
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cb004fd16252..5bb31c8fab39 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
- struct sk_buff **skb)
+ struct sk_buff **skb,
+ struct be_wrb_params *wrb_params)
{
struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
bool os2bmc = false;
@@ -1360,7 +1361,7 @@ done:
* to BMC, asic expects the vlan to be inline in the packet.
*/
if (os2bmc)
- *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
return os2bmc;
}
@@ -1387,7 +1388,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
*/
- if (be_send_pkt_to_bmc(adapter, &skb)) {
+ if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index d09e456f14c0..ed3fa80af8c3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -467,6 +467,47 @@ revert_values:
return res;
}
+static void dpaa_get_pause_stats(struct net_device *net_dev,
+ struct ethtool_pause_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_pause_stats)
+ mac_dev->get_pause_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_rmon_stats(struct net_device *net_dev,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_rmon_stats)
+ mac_dev->get_rmon_stats(mac_dev->fman_mac, s, ranges);
+}
+
+static void dpaa_get_eth_ctrl_stats(struct net_device *net_dev,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_ctrl_stats)
+ mac_dev->get_eth_ctrl_stats(mac_dev->fman_mac, s);
+}
+
+static void dpaa_get_eth_mac_stats(struct net_device *net_dev,
+ struct ethtool_eth_mac_stats *s)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
+
+ if (mac_dev->get_eth_mac_stats)
+ mac_dev->get_eth_mac_stats(mac_dev->fman_mac, s);
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
@@ -487,4 +528,8 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
+ .get_pause_stats = dpaa_get_pause_stats,
+ .get_rmon_stats = dpaa_get_rmon_stats,
+ .get_eth_ctrl_stats = dpaa_get_eth_ctrl_stats,
+ .get_eth_mac_stats = dpaa_get_eth_mac_stats,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index ebea4298791c..3ed0f7a02767 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -170,6 +170,9 @@
/* Port MAC 0/1 Maximum Frame Length Register */
#define ENETC4_PM_MAXFRM(mac) (0x5014 + (mac) * 0x400)
+/* Port internal MDIO base address, use to access PCS */
+#define ENETC4_PM_IMDIO_BASE 0x5030
+
/* Port MAC 0/1 Pause Quanta Register */
#define ENETC4_PM_PAUSE_QUANTA(mac) (0x5054 + (mac) * 0x400)
@@ -198,6 +201,9 @@
#define SSP_1G 2
#define PM_IF_MODE_ENA BIT(15)
+/* Port external MDIO Base address, use to access off-chip PHY */
+#define ENETC4_EMDIO_BASE 0x5c00
+
/**********************ENETC Pseudo MAC port registers************************/
/* Port pseudo MAC receive octets counter (64-bit) */
#define ENETC4_PPMROCR 0x5080
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
index 9c634205e2a7..76263b8566bb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf_common.c
@@ -176,7 +176,12 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_EMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_EMDIO_BASE;
+
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
err = of_mdiobus_register(bus, np);
@@ -221,7 +226,12 @@ static int enetc_imdio_create(struct enetc_pf *pf)
bus->phy_mask = ~0;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
- mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+
+ if (is_enetc_rev1(pf->si))
+ mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+ else
+ mdio_priv->mdio_base = ENETC4_PM_IMDIO_BASE;
+
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
err = mdiobus_register(bus);
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
index d7aee3c934d3..443983fdecd9 100644
--- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -67,6 +67,9 @@
#define IERB_EMDIOFAUXR 0x344
#define IERB_T0FAUXR 0x444
#define IERB_ETBCR(a) (0x300c + 0x100 * (a))
+#define IERB_LBCR(a) (0x1010 + 0x40 * (a))
+#define LBCR_MDIO_PHYAD_PRTAD(addr) (((addr) & 0x1f) << 8)
+
#define IERB_EFAUXR(a) (0x3044 + 0x100 * (a))
#define IERB_VFAUXR(a) (0x4004 + 0x40 * (a))
#define FAUXR_LDID GENMASK(3, 0)
@@ -322,6 +325,142 @@ static int netc_unlock_ierb_with_warm_reset(struct netc_blk_ctrl *priv)
1000, 100000, true, priv->prb, PRB_NETCRR);
}
+static int netc_get_phy_addr(struct device_node *np)
+{
+ struct device_node *mdio_node, *phy_node;
+ u32 addr = 0;
+ int err = 0;
+
+ mdio_node = of_get_child_by_name(np, "mdio");
+ if (!mdio_node)
+ return 0;
+
+ phy_node = of_get_next_child(mdio_node, NULL);
+ if (!phy_node)
+ goto of_put_mdio_node;
+
+ err = of_property_read_u32(phy_node, "reg", &addr);
+ if (err)
+ goto of_put_phy_node;
+
+ if (addr >= PHY_MAX_ADDR)
+ err = -EINVAL;
+
+of_put_phy_node:
+ of_node_put(phy_node);
+
+of_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return err ? err : addr;
+}
+
+static int netc_parse_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ u32 mask = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ u32 addr;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &addr);
+ if (err)
+ return err;
+
+ if (addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mask |= BIT(addr);
+ }
+
+ *phy_mask = mask;
+
+ return 0;
+}
+
+static int netc_get_emdio_phy_mask(struct device_node *np, u32 *phy_mask)
+{
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,ee00"))
+ continue;
+
+ return netc_parse_emdio_phy_mask(gchild, phy_mask);
+ }
+ }
+
+ return 0;
+}
+
+static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
+{
+ struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int bus_devfn, addr, err;
+ u32 phy_mask = 0;
+
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
+ /* Update the port EMDIO PHY address through parsing phy properties.
+ * This is needed when using the port EMDIO but it's harmless when
+ * using the central EMDIO. So apply it on all cases.
+ */
+ for_each_child_of_node_scoped(np, child) {
+ for_each_child_of_node_scoped(child, gchild) {
+ if (!of_device_is_compatible(gchild, "pci1131,e101"))
+ continue;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(gchild);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(gchild);
+ if (addr < 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is
+ * 0, so no need to set the register.
+ */
+ if (!addr)
+ continue;
+
+ switch (bus_devfn) {
+ case IMX95_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(0),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(1),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX95_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(2),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int imx95_ierb_init(struct platform_device *pdev)
{
struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
@@ -349,7 +488,7 @@ static int imx95_ierb_init(struct platform_device *pdev)
/* NETC TIMER */
netc_reg_write(priv->ierb, IERB_T0FAUXR, 7);
- return 0;
+ return imx95_enetc_mdio_phyaddr_config(pdev);
}
static int imx94_get_enetc_id(struct device_node *np)
@@ -424,12 +563,64 @@ end:
return 0;
}
+static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
+ struct device_node *np,
+ u32 phy_mask)
+{
+ struct device *dev = &priv->pdev->dev;
+ int bus_devfn, addr;
+
+ bus_devfn = netc_of_pci_get_bus_devfn(np);
+ if (bus_devfn < 0) {
+ dev_err(dev, "Failed to get BDF number\n");
+ return bus_devfn;
+ }
+
+ addr = netc_get_phy_addr(np);
+ if (addr <= 0) {
+ dev_err(dev, "Failed to get PHY address\n");
+ return addr;
+ }
+
+ if (phy_mask & BIT(addr)) {
+ dev_err(dev,
+ "Find same PHY address in EMDIO and ENETC node\n");
+ return -EINVAL;
+ }
+
+ switch (bus_devfn) {
+ case IMX94_ENETC0_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC0_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC1_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC1_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ case IMX94_ENETC2_BUS_DEVFN:
+ netc_reg_write(priv->ierb, IERB_LBCR(IMX94_ENETC2_LINK),
+ LBCR_MDIO_PHYAD_PRTAD(addr));
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int imx94_ierb_init(struct platform_device *pdev)
{
struct netc_blk_ctrl *priv = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
+ u32 phy_mask = 0;
int err;
+ err = netc_get_emdio_phy_mask(np, &phy_mask);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get PHY address mask\n");
+ return err;
+ }
+
for_each_child_of_node_scoped(np, child) {
for_each_child_of_node_scoped(child, gchild) {
if (!of_device_is_compatible(gchild, "pci1131,e101"))
@@ -438,6 +629,11 @@ static int imx94_ierb_init(struct platform_device *pdev)
err = imx94_enetc_update_tid(priv, gchild);
if (err)
return err;
+
+ err = imx94_enetc_mdio_phyaddr_config(priv, gchild,
+ phy_mask);
+ if (err)
+ return err;
}
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 41e0d85d15da..fd9a93d02f8e 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -24,9 +24,7 @@
#include <linux/timecounter.h>
#include <net/xdp.h>
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -242,23 +240,6 @@ struct bufdesc_ex {
__fec16 res0[4];
};
-/*
- * The following definitions courtesy of commproc.h, which where
- * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
- */
-#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
-#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
-#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
-#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
-#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
-#define BD_SC_BR ((ushort)0x0020) /* Break received */
-#define BD_SC_FR ((ushort)0x0010) /* Framing error */
-#define BD_SC_PR ((ushort)0x0008) /* Parity error */
-#define BD_SC_OV ((ushort)0x0002) /* Overrun */
-#define BD_SC_CD ((ushort)0x0001) /* ?? */
-
/* Buffer descriptor control/status used by Ethernet receive.
*/
#define BD_ENET_RX_EMPTY ((ushort)0x8000)
@@ -530,12 +511,6 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
-struct fec_enet_priv_txrx_info {
- int offset;
- struct page *page;
- struct sk_buff *skb;
-};
-
enum {
RX_XDP_REDIRECT = 0,
RX_XDP_PASS,
@@ -575,7 +550,7 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+ struct page *rx_buf[RX_RING_SIZE];
/* page_pool */
struct page_pool *page_pool;
@@ -668,7 +643,6 @@ struct fec_enet_private {
struct pm_qos_request pm_qos_req;
unsigned int tx_align;
- unsigned int rx_align;
/* hw interrupt coalesce */
unsigned int rx_pkts_itr;
@@ -687,6 +661,7 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ bool perout_enable;
struct hrtimer perout_timer;
u64 perout_stime;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b6fbb84cfb06..c685a5c0cc51 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -253,9 +253,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64)
+#ifndef CONFIG_M5272
#define OPT_ARCH_HAS_MAX_FL 1
#else
#define OPT_ARCH_HAS_MAX_FL 0
@@ -1012,7 +1010,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
rxq->bd.cur = rxq->bd.base;
}
@@ -1062,7 +1060,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -1657,8 +1655,7 @@ static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
if (unlikely(!new_page))
return -ENOMEM;
- rxq->rx_skb_info[index].page = new_page;
- rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[index] = new_page;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
@@ -1773,7 +1770,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
__fec32 cbd_bufaddr;
u32 sub_len = 4;
-#if !defined(CONFIG_M5272)
/*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
* FEC_RACC_SHIFT16 is set by default in the probe function.
*/
@@ -1781,7 +1777,6 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
data_start += 2;
sub_len += 2;
}
-#endif
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
/*
@@ -1840,7 +1835,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
ndev->stats.rx_bytes -= 2;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- page = rxq->rx_skb_info[index].page;
+ page = rxq->rx_buf[index];
cbd_bufaddr = bdp->cbd_bufaddr;
if (fec_enet_update_cbd(rxq, bdp, index)) {
ndev->stats.rx_dropped++;
@@ -2517,9 +2512,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_set_max_speed(phy_dev, 1000);
phy_remove_link_mode(phy_dev,
ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-#if !defined(CONFIG_M5272)
phy_support_sym_pause(phy_dev);
-#endif
}
else
phy_set_max_speed(phy_dev, 100);
@@ -2710,9 +2703,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
}
/* List of registers that can safely be read to dump them with ethtool */
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
static __u32 fec_enet_register_version = 2;
static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
@@ -2786,30 +2777,22 @@ static u32 fec_enet_register_offset[] = {
static void fec_enet_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *regbuf)
{
+ u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
struct fec_enet_private *fep = netdev_priv(ndev);
u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *reg_list = fec_enet_register_offset;
struct device *dev = &fep->pdev->dev;
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
-#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
- defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
- u32 *reg_list;
- u32 reg_cnt;
-
- if (!of_machine_is_compatible("fsl,imx6ul")) {
- reg_list = fec_enet_register_offset;
- reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
- } else {
+
+#if !defined(CONFIG_M5272) || defined(CONFIG_COMPILE_TEST)
+ if (of_machine_is_compatible("fsl,imx6ul")) {
reg_list = fec_enet_register_offset_6ul;
reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
}
-#else
- /* coldfire */
- static u32 *reg_list = fec_enet_register_offset;
- static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
#endif
+
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -3328,7 +3311,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
for (i = 0; i < rxq->bd.ring_size; i++)
- page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_buf[i],
+ false);
for (i = 0; i < XDP_STATS_TOTAL; i++)
rxq->stats[i] = 0;
@@ -3454,6 +3438,19 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
return err;
}
+ /* Some platforms require the RX buffer to be 64-byte aligned,
+ * others require 16-byte or only 4-byte alignment. Since the page
+ * pool was introduced into the driver, the RX buffer address is
+ * always the page address plus FEC_ENET_XDP_HEADROOM, which is 256
+ * bytes and therefore satisfies all platforms. To prevent future
+ * modifications to FEC_ENET_XDP_HEADROOM from violating this
+ * hardware limitation, a BUILD_BUG_ON() check ensures that it
+ * provides the required alignment.
+ */
+ BUILD_BUG_ON(FEC_ENET_XDP_HEADROOM & 0x3f);
+
for (i = 0; i < rxq->bd.ring_size; i++) {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -3462,8 +3459,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skb_info[i].page = page;
- rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
+ rxq->rx_buf[i] = page;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3476,7 +3472,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_RX_WRAP);
return 0;
err_alloc:
@@ -3512,7 +3508,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
- bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+ bdp->cbd_sc |= cpu_to_fec16(BD_ENET_TX_WRAP);
return 0;
@@ -4089,10 +4085,8 @@ static int fec_enet_init(struct net_device *ndev)
WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- fep->rx_align = 0xf;
fep->tx_align = 0xf;
#else
- fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
@@ -4181,10 +4175,8 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES)
fep->tx_align = 0;
- fep->rx_align = 0x3f;
- }
ndev->hw_features = ndev->features;
@@ -4402,11 +4394,9 @@ fec_probe(struct platform_device *pdev)
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
-#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (fep->quirks & FEC_QUIRK_HAS_GBIT)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
-#endif
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index fa88b47d526c..4b7bad9a485d 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -128,6 +128,12 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->perout_enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ dev_err(&fep->pdev->dev, "PEROUT is running");
+ return -EBUSY;
+ }
+
if (fep->pps_enable == enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;
@@ -243,6 +249,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
* the FEC_TCCR register in time and missed the start time.
*/
if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ fep->perout_enable = false;
dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -1;
@@ -497,7 +504,10 @@ static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
unsigned long flags;
+ hrtimer_cancel(&fep->perout_timer);
+
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ fep->perout_enable = false;
writel(0, fep->hwp + FEC_TCSR(channel));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
@@ -529,6 +539,8 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return ret;
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ u32 reload_period;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -548,12 +560,14 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
- fep->reload_period = div_u64(period_ns, 2);
- if (on && fep->reload_period) {
+ reload_period = div_u64(period_ns, 2);
+ if (on && reload_period) {
+ u64 perout_stime;
+
/* Convert 1588 timestamp to ns*/
start_time.tv_sec = rq->perout.start.sec;
start_time.tv_nsec = rq->perout.start.nsec;
- fep->perout_stime = timespec64_to_ns(&start_time);
+ perout_stime = timespec64_to_ns(&start_time);
mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {
@@ -562,18 +576,41 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (fep->pps_enable) {
+ dev_err(&fep->pdev->dev, "PPS is running");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (fep->perout_enable) {
+ dev_err(&fep->pdev->dev,
+ "PEROUT has been enabled\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
/* Read current timestamp */
curr_time = timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
- mutex_unlock(&fep->ptp_clk_mutex);
+ if (perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev,
+ "Start time must be greater than current time\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
/* Calculate time difference */
- delta = fep->perout_stime - curr_time;
+ delta = perout_stime - curr_time;
+ fep->reload_period = reload_period;
+ fep->perout_stime = perout_stime;
+ fep->perout_enable = true;
- if (fep->perout_stime <= curr_time) {
- dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
- return -EINVAL;
- }
+unlock:
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ if (ret)
+ return ret;
/* Because the timer counter of FEC only has 31-bits, correspondingly,
* the time comparison register FEC_TCCR also only low 31 bits can be
@@ -681,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(fep->ptp_clock, &event);
+ if (fep->pps_enable) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 0291093f2e4e..c84f0336c94c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -649,6 +649,7 @@ static u32 memac_if_mode(phy_interface_t interface)
return IF_MODE_GMII | IF_MODE_RGMII;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
return IF_MODE_GMII;
case PHY_INTERFACE_MODE_10GBASER:
@@ -667,6 +668,7 @@ static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
return memac->sgmii_pcs;
case PHY_INTERFACE_MODE_QSGMII:
return memac->qsgmii_pcs;
@@ -685,6 +687,7 @@ static int memac_prepare(struct phylink_config *config, unsigned int mode,
switch (iface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_10GBASER:
return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
@@ -897,6 +900,89 @@ static int memac_set_exception(struct fman_mac *memac,
return 0;
}
+static u64 memac_read64(void __iomem *reg)
+{
+ u32 low, high, tmp;
+
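+ /* The 64-bit counter is split across two 32-bit registers that
+ * cannot be read atomically; re-read the high word until it is
+ * stable so a carry between the two reads cannot tear the value.
+ */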
+ do {
+ high = ioread32be(reg + 4);
+ low = ioread32be(reg);
+ tmp = ioread32be(reg + 4);
+ } while (high != tmp);
+
+ return ((u64)high << 32) | low;
+}
+
+static void memac_get_pause_stats(struct fman_mac *memac,
+ struct ethtool_pause_stats *s)
+{
+ s->tx_pause_frames = memac_read64(&memac->regs->txpf_l);
+ s->rx_pause_frames = memac_read64(&memac->regs->rxpf_l);
+}
+
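+/* Packet-size buckets for the RMON histogram counters below; ethtool
+ * expects the list to be terminated by an empty sentinel entry.
+ */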
+static const struct ethtool_rmon_hist_range memac_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 9600 },
+ {},
+};
+
+static void memac_get_rmon_stats(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = memac_read64(&memac->regs->rund_l);
+ s->oversize_pkts = memac_read64(&memac->regs->rovr_l);
+ s->fragments = memac_read64(&memac->regs->rfrg_l);
+ s->jabbers = memac_read64(&memac->regs->rjbr_l);
+
+ s->hist[0] = memac_read64(&memac->regs->r64_l);
+ s->hist[1] = memac_read64(&memac->regs->r127_l);
+ s->hist[2] = memac_read64(&memac->regs->r255_l);
+ s->hist[3] = memac_read64(&memac->regs->r511_l);
+ s->hist[4] = memac_read64(&memac->regs->r1023_l);
+ s->hist[5] = memac_read64(&memac->regs->r1518_l);
+ s->hist[6] = memac_read64(&memac->regs->r1519x_l);
+
+ s->hist_tx[0] = memac_read64(&memac->regs->t64_l);
+ s->hist_tx[1] = memac_read64(&memac->regs->t127_l);
+ s->hist_tx[2] = memac_read64(&memac->regs->t255_l);
+ s->hist_tx[3] = memac_read64(&memac->regs->t511_l);
+ s->hist_tx[4] = memac_read64(&memac->regs->t1023_l);
+ s->hist_tx[5] = memac_read64(&memac->regs->t1518_l);
+ s->hist_tx[6] = memac_read64(&memac->regs->t1519x_l);
+
+ *ranges = memac_rmon_ranges;
+}
+
+static void memac_get_eth_ctrl_stats(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = memac_read64(&memac->regs->tcnp_l);
+ s->MACControlFramesReceived = memac_read64(&memac->regs->rcnp_l);
+}
+
+static void memac_get_eth_mac_stats(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = memac_read64(&memac->regs->tfrm_l);
+ s->FramesReceivedOK = memac_read64(&memac->regs->rfrm_l);
+ s->FrameCheckSequenceErrors = memac_read64(&memac->regs->rfcs_l);
+ s->AlignmentErrors = memac_read64(&memac->regs->raln_l);
+ s->OctetsTransmittedOK = memac_read64(&memac->regs->teoct_l);
+ s->FramesLostDueToIntMACXmitError = memac_read64(&memac->regs->terr_l);
+ s->OctetsReceivedOK = memac_read64(&memac->regs->reoct_l);
+ s->FramesLostDueToIntMACRcvError = memac_read64(&memac->regs->rdrntp_l);
+ s->MulticastFramesXmittedOK = memac_read64(&memac->regs->tmca_l);
+ s->BroadcastFramesXmittedOK = memac_read64(&memac->regs->tbca_l);
+ s->MulticastFramesReceivedOK = memac_read64(&memac->regs->rmca_l);
+ s->BroadcastFramesReceivedOK = memac_read64(&memac->regs->rbca_l);
+}
+
static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
@@ -1089,6 +1175,10 @@ int memac_initialization(struct mac_device *mac_dev,
mac_dev->set_tstamp = memac_set_tstamp;
mac_dev->enable = memac_enable;
mac_dev->disable = memac_disable;
+ mac_dev->get_pause_stats = memac_get_pause_stats;
+ mac_dev->get_rmon_stats = memac_get_rmon_stats;
+ mac_dev->get_eth_ctrl_stats = memac_get_eth_ctrl_stats;
+ mac_dev->get_eth_mac_stats = memac_get_eth_mac_stats;
mac_dev->fman_mac = memac_config(mac_dev, params);
if (!mac_dev->fman_mac)
@@ -1226,6 +1316,7 @@ int memac_initialization(struct mac_device *mac_dev,
* those configuration modes don't use in-band autonegotiation.
*/
if (!of_property_present(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_2500BASEX &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
mac_dev->phylink_config.default_an_inband = true;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 955ace338965..63c2c5b4f99e 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -16,6 +16,11 @@
#include "fman.h"
#include "fman_mac.h"
+struct ethtool_eth_ctrl_stats;
+struct ethtool_eth_mac_stats;
+struct ethtool_pause_stats;
+struct ethtool_rmon_stats;
+struct ethtool_rmon_hist_range;
struct fman_mac;
struct mac_priv_s;
@@ -46,6 +51,15 @@ struct mac_device {
enet_addr_t *eth_addr);
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*get_pause_stats)(struct fman_mac *memac,
+ struct ethtool_pause_stats *s);
+ void (*get_rmon_stats)(struct fman_mac *memac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges);
+ void (*get_eth_ctrl_stats)(struct fman_mac *memac,
+ struct ethtool_eth_ctrl_stats *s);
+ void (*get_eth_mac_stats)(struct fman_mac *memac,
+ struct ethtool_eth_mac_stats *s);
void (*update_speed)(struct mac_device *mac_dev, int speed);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index a33b44c1eb86..970d5ca8cdde 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -205,6 +205,13 @@ struct gve_rx_buf_state_dqo {
s16 next;
};
+/* Wrapper for XDP Rx metadata */
+struct gve_xdp_buff {
+ struct xdp_buff xdp;
+ struct gve_priv *gve;
+ const struct gve_rx_compl_desc_dqo *compl_desc;
+};
+
/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
s16 head;
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 6eb442096e02..5871f773f0c7 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -36,6 +36,7 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
bool gve_xdp_poll_dqo(struct gve_notify_block *block);
bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6fb8fbb38a7d..a5a2b18d309b 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2188,10 +2188,6 @@ static int gve_set_ts_config(struct net_device *dev,
}
kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
- gve_clock_nic_ts_read(priv);
- ptp_schedule_worker(priv->ptp->clock, 0);
- } else {
- ptp_cancel_worker_sync(priv->ptp->clock);
}
priv->ts_config.rx_filter = kernel_config->rx_filter;
@@ -2352,6 +2348,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_set_features_flag_locked(priv->dev, xdp_features);
}
+static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
+ .xmo_rx_timestamp = gve_xdp_rx_timestamp,
+};
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -2447,6 +2447,9 @@ setup_device:
}
gve_set_netdev_xdp_features(priv);
+ if (!gve_is_gqi(priv))
+ priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
+
err = gve_setup_device_resources(priv);
if (err)
goto err_free_xsk_bitmap;
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
index a384a9ed4914..073677d82ee8 100644
--- a/drivers/net/ethernet/google/gve/gve_ptp.c
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -133,9 +133,21 @@ int gve_init_clock(struct gve_priv *priv)
err = -ENOMEM;
goto release_ptp;
}
+ err = gve_clock_nic_ts_read(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "failed to read NIC clock %d\n", err);
+ goto release_nic_ts_report;
+ }
+ ptp_schedule_worker(priv->ptp->clock,
+ msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS));
return 0;
+release_nic_ts_report:
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
release_ptp:
gve_ptp_release(priv);
return err;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 1aff3bbb8cfc..f1bd8f5d5732 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -240,6 +240,11 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->rx_headroom = 0;
}
+ /* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and uses
+ * its 24-byte cb field to store GVE-specific data.
+ */
+ XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
+
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
@@ -456,20 +461,38 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
* Note that this means if the time delta between packet reception and the last
* clock read is greater than ~2 seconds, this will provide invalid results.
*/
+static ktime_t gve_rx_get_hwtstamp(struct gve_priv *gve, u32 hwts)
+{
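+ /* Reconstruct the full 64-bit timestamp around the last synced
+ * counter value: the signed 32-bit difference is valid as long as
+ * the packet arrived within ~2 seconds of the last clock read.
+ */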
+ u64 last_read = READ_ONCE(gve->last_sync_nic_counter);
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ return ns_to_ktime(last_read + diff);
+}
+
static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *desc)
{
- u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
struct sk_buff *skb = rx->ctx.skb_head;
- u32 ts, low;
- s32 diff;
-
- if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID) {
- ts = le32_to_cpu(desc->ts);
- low = (u32)last_read;
- diff = ts - low;
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
- }
+
+ if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)
+ skb_hwtstamps(skb)->hwtstamp =
+ gve_rx_get_hwtstamp(rx->gve, le32_to_cpu(desc->ts));
+}
+
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
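+ /* The xdp_buff is the first member of struct gve_xdp_buff, so the
+ * metadata context can safely be cast back to the wrapper.
+ */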
+ const struct gve_xdp_buff *ctx = (void *)_ctx;
+
+ if (!ctx->gve->nic_ts_report)
+ return -ENODATA;
+
+ if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
+ return -ENODATA;
+
+ *timestamp = gve_rx_get_hwtstamp(ctx->gve,
+ le32_to_cpu(ctx->compl_desc->ts));
+ return 0;
}
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
@@ -683,16 +706,23 @@ err:
}
static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ struct gve_rx_buf_state_dqo *buf_state,
struct bpf_prog *xprog)
{
struct xdp_buff *xdp = buf_state->xsk_buff;
+ int buf_len = compl_desc->packet_len;
struct gve_priv *priv = rx->gve;
+ struct gve_xdp_buff *gve_xdp;
int xdp_act;
xdp->data_end = xdp->data + buf_len;
xsk_buff_dma_sync_for_cpu(xdp);
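+ /* Stash ring-level context in the buffer's private area so that
+ * metadata kfuncs such as gve_xdp_rx_timestamp() can reach it from
+ * BPF program context.
+ */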
+ gve_xdp = (void *)xdp;
+ gve_xdp->gve = priv;
+ gve_xdp->compl_desc = compl_desc;
+
if (xprog) {
xdp_act = bpf_prog_run_xdp(xprog, xdp);
buf_len = xdp->data_end - xdp->data;
@@ -782,7 +812,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
xprog = READ_ONCE(priv->xdp_prog);
if (buf_state->xsk_buff)
- return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+ return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog);
/* Page might have not been used for awhile and was likely last written
* by a different thread.
@@ -840,23 +870,26 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (xprog) {
- struct xdp_buff xdp;
+ struct gve_xdp_buff gve_xdp;
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ xdp_init_buff(&gve_xdp.xdp, buf_state->page_info.buf_size,
&rx->xdp_rxq);
- xdp_prepare_buff(&xdp,
+ xdp_prepare_buff(&gve_xdp.xdp,
buf_state->page_info.page_address +
buf_state->page_info.page_offset,
buf_state->page_info.pad,
buf_len, false);
- old_data = xdp.data;
- xdp_act = bpf_prog_run_xdp(xprog, &xdp);
- buf_state->page_info.pad += xdp.data - old_data;
- buf_len = xdp.data_end - xdp.data;
+ gve_xdp.gve = priv;
+ gve_xdp.compl_desc = compl_desc;
+
+ old_data = gve_xdp.xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &gve_xdp.xdp);
+ buf_state->page_info.pad += gve_xdp.xdp.data - old_data;
+ buf_len = gve_xdp.xdp.data_end - gve_xdp.xdp.data;
if (xdp_act != XDP_PASS) {
- gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ gve_xdp_done_dqo(priv, rx, &gve_xdp.xdp, xprog, xdp_act,
buf_state);
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index c6ff0968929d..97efc8d27e6f 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -730,7 +730,9 @@ unmap_drop:
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
drop:
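+ /* Bracket the counter update so 32-bit readers using
+ * u64_stats_fetch_begin() see a consistent value.
+ */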
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
return 0;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 6f1d515673d2..40b89b3e5a31 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -1002,7 +1002,9 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
dev_kfree_skb_any(skb);
return 0;
}
@@ -1324,7 +1326,11 @@ static void remove_miss_completions(struct gve_priv *priv,
/* This indicates the packet was dropped. */
dev_kfree_skb_any(pending_packet->skb);
pending_packet->skb = NULL;
+
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
+
net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
priv->dev->name,
(int)(pending_packet - tx->dqo.pending_packets));
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 38875c196cb6..18eca7d12c20 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -151,6 +151,7 @@ config HIBMCGE
select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
+ select PAGE_POOL
help
If you wish to compile a kernel for a BMC with HIBMC-xx_gmac
then you should answer Y to this. This makes this driver suitable for use
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index 1a9da564b306..d6610ba16855 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -3,6 +3,7 @@
# Makefile for the HISILICON BMC GE network device drivers.
#
+ccflags-y += -I$(src)
obj-$(CONFIG_HIBMCGE) += hibmcge.o
hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index 2097e4c2b3d7..8e134da3e217 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -7,6 +7,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/page_pool/helpers.h>
#include "hbg_reg.h"
#define HBG_STATUS_DISABLE 0x0
@@ -55,6 +56,12 @@ struct hbg_buffer {
dma_addr_t skb_dma;
u32 skb_len;
+ struct page *page;
+ void *page_addr;
+ dma_addr_t page_dma;
+ u32 page_size;
+ u32 page_offset;
+
enum hbg_dir dir;
struct hbg_ring *ring;
struct hbg_priv *priv;
@@ -78,6 +85,7 @@ struct hbg_ring {
struct hbg_priv *priv;
struct napi_struct napi;
char *tout_log_buf; /* tx timeout log buffer */
+ struct page_pool *page_pool; /* only for rx */
};
enum hbg_hw_event_type {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index a39d1e796e4a..30b3903c8f2d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -252,6 +252,8 @@ struct hbg_rx_desc {
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W3_IP_OFFSET_M GENMASK(23, 16)
+#define HBG_RX_DESC_W3_VLAN_M GENMASK(15, 0)
#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
@@ -269,6 +271,8 @@ struct hbg_rx_desc {
#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+#define HBG_RX_DESC_W4_PARSE_MODE_M GENMASK(6, 5)
+#define HBG_RX_DESC_W5_VALID_SIZE_M GENMASK(15, 0)
enum hbg_l3_err_code {
HBG_L3_OK = 0,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
new file mode 100644
index 000000000000..b70fd960da8d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+/* This must be outside ifdef _HBG_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hibmcge
+
+#if !defined(_HBG_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HBG_TRACE_H_
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+#include "hbg_reg.h"
+
+TRACE_EVENT(hbg_rx_desc,
+ TP_PROTO(struct hbg_priv *priv, u32 index,
+ struct hbg_rx_desc *rx_desc),
+ TP_ARGS(priv, index, rx_desc),
+
+ TP_STRUCT__entry(__field(u32, index)
+ __field(u8, port_num)
+ __field(u8, ip_offset)
+ __field(u8, parse_mode)
+ __field(u8, l4_error_code)
+ __field(u8, l3_error_code)
+ __field(u8, l2_error_code)
+ __field(u16, packet_len)
+ __field(u16, valid_size)
+ __field(u16, vlan)
+ __string(pciname, pci_name(priv->pdev))
+ __string(devname, priv->netdev->name)
+ ),
+
+ TP_fast_assign(__entry->index = index;
+ __entry->packet_len =
+ FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M,
+ rx_desc->word2);
+ __entry->port_num =
+ FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M,
+ rx_desc->word2);
+ __entry->ip_offset =
+ FIELD_GET(HBG_RX_DESC_W3_IP_OFFSET_M,
+ rx_desc->word3);
+ __entry->vlan =
+ FIELD_GET(HBG_RX_DESC_W3_VLAN_M,
+ rx_desc->word3);
+ __entry->parse_mode =
+ FIELD_GET(HBG_RX_DESC_W4_PARSE_MODE_M,
+ rx_desc->word4);
+ __entry->l4_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l3_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M,
+ rx_desc->word4);
+ __entry->l2_error_code =
+ FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B,
+ rx_desc->word4);
+ __entry->valid_size =
+ FIELD_GET(HBG_RX_DESC_W5_VALID_SIZE_M,
+ rx_desc->word5);
+ __assign_str(pciname);
+ __assign_str(devname);
+ ),
+
+ TP_printk("%s %s index:%u, port num:%u, len:%u, valid size:%u, ip_offset:%u, vlan:0x%04x, parse mode:%u, l4_err:0x%x, l3_err:0x%x, l2_err:0x%x",
+ __get_str(pciname), __get_str(devname), __entry->index,
+ __entry->port_num, __entry->packet_len,
+ __entry->valid_size, __entry->ip_offset, __entry->vlan,
+ __entry->parse_mode, __entry->l4_error_code,
+ __entry->l3_error_code, __entry->l2_error_code
+ )
+);
+
+#endif /* _HBG_TRACE_H_ */
+
+/* This must be outside ifdef _HBG_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hbg_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index 8d814c8f19ea..a4ea92c31c2f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -7,6 +7,9 @@
#include "hbg_reg.h"
#include "hbg_txrx.h"
+#define CREATE_TRACE_POINTS
+#include "hbg_trace.h"
+
#define netdev_get_tx_ring(netdev) \
(&(((struct hbg_priv *)netdev_priv(netdev))->tx_ring))
@@ -28,6 +31,11 @@
typeof(ring) _ring = (ring); \
_ring->p = hbg_queue_next_prt(_ring->p, _ring); })
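+/* Smallest page order whose size can hold one maximum-length frame */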
+#define hbg_get_page_order(ring) ({ \
+ typeof(ring) _ring = (ring); \
+ get_order(hbg_spec_max_frame_len(_ring->priv, _ring->dir)); })
+#define hbg_get_page_size(ring) (PAGE_SIZE << hbg_get_page_order((ring)))
+
#define HBG_TX_STOP_THRS 2
#define HBG_TX_START_THRS (2 * HBG_TX_STOP_THRS)
@@ -62,6 +70,43 @@ static void hbg_dma_unmap(struct hbg_buffer *buffer)
buffer->skb_dma = 0;
}
+static void hbg_buffer_free_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+
+ if (unlikely(!buffer->page))
+ return;
+
+ page_pool_put_full_page(ring->page_pool, buffer->page, false);
+
+ buffer->page = NULL;
+ buffer->page_dma = 0;
+ buffer->page_addr = NULL;
+ buffer->page_size = 0;
+ buffer->page_offset = 0;
+}
+
+static int hbg_buffer_alloc_page(struct hbg_buffer *buffer)
+{
+ struct hbg_ring *ring = buffer->ring;
+ u32 len = hbg_get_page_size(ring);
+ u32 offset;
+
+ if (unlikely(!ring->page_pool))
+ return 0;
+
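+ /* Carve a max-frame-sized fragment out of a pooled page; the pool
+ * returns the backing page and the fragment's offset within it.
+ */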
+ buffer->page = page_pool_dev_alloc_frag(ring->page_pool, &offset, len);
+ if (unlikely(!buffer->page))
+ return -ENOMEM;
+
+ buffer->page_dma = page_pool_get_dma_addr(buffer->page) + offset;
+ buffer->page_addr = page_address(buffer->page) + offset;
+ buffer->page_size = len;
+ buffer->page_offset = offset;
+
+ return 0;
+}
+
static void hbg_init_tx_desc(struct hbg_buffer *buffer,
struct hbg_tx_desc *tx_desc)
{
@@ -135,24 +180,14 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
buffer->skb = NULL;
}
-static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
-{
- u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
- struct hbg_priv *priv = buffer->priv;
-
- buffer->skb = netdev_alloc_skb(priv->netdev, len);
- if (unlikely(!buffer->skb))
- return -ENOMEM;
-
- buffer->skb_len = len;
- memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
- return 0;
-}
-
static void hbg_buffer_free(struct hbg_buffer *buffer)
{
- hbg_dma_unmap(buffer);
- hbg_buffer_free_skb(buffer);
+ if (buffer->skb) {
+ hbg_dma_unmap(buffer);
+ hbg_buffer_free_skb(buffer);
+ return;
+ }
+
+ hbg_buffer_free_page(buffer);
}
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
@@ -374,25 +409,44 @@ static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
struct hbg_buffer *buffer;
int ret;
- if (hbg_queue_is_full(ring->ntc, ring->ntu, ring))
+ if (hbg_queue_is_full(ring->ntc, ring->ntu, ring) ||
+ hbg_fifo_is_full(priv, ring->dir))
return 0;
buffer = &ring->queue[ring->ntu];
- ret = hbg_buffer_alloc_skb(buffer);
+ ret = hbg_buffer_alloc_page(buffer);
if (unlikely(ret))
return ret;
- ret = hbg_dma_map(buffer);
- if (unlikely(ret)) {
- hbg_buffer_free_skb(buffer);
- return ret;
- }
+ memset(buffer->page_addr, 0, HBG_PACKET_HEAD_SIZE);
+ dma_sync_single_for_device(&priv->pdev->dev, buffer->page_dma,
+ HBG_PACKET_HEAD_SIZE, DMA_TO_DEVICE);
- hbg_hw_fill_buffer(priv, buffer->skb_dma);
+ hbg_hw_fill_buffer(priv, buffer->page_dma);
hbg_queue_move_next(ntu, ring);
return 0;
}
+static int hbg_rx_fill_buffers(struct hbg_priv *priv)
+{
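+ /* Top the RX FIFO back up to its capacity, handing one buffer to
+ * the hardware per successful fill.
+ */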
+ u32 remained = hbg_hw_get_fifo_used_num(priv, HBG_DIR_RX);
+ u32 max_count = priv->dev_specs.rx_fifo_num;
+ u32 refill_count;
+ int ret;
+
+ if (unlikely(remained >= max_count))
+ return 0;
+
+ refill_count = max_count - remained;
+ while (refill_count--) {
+ ret = hbg_rx_fill_one_buffer(priv);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
struct hbg_buffer *buffer)
{
@@ -401,13 +455,29 @@ static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
/* make sure HW write desc complete */
dma_rmb();
- dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
- buffer->skb_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev, buffer->page_dma,
+ buffer->page_size, DMA_FROM_DEVICE);
- rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
}
+static int hbg_build_skb(struct hbg_priv *priv,
+ struct hbg_buffer *buffer, u32 pkt_len)
+{
+ net_prefetch(buffer->page_addr);
+
+ buffer->skb = napi_build_skb(buffer->page_addr, buffer->page_size);
+ if (unlikely(!buffer->skb))
+ return -ENOMEM;
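+
+ /* Have the page returned to the page pool rather than the page
+ * allocator when the skb is consumed.
+ */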
+ skb_mark_for_recycle(buffer->skb);
+
+ /* page will be freed together with the skb */
+ buffer->page = NULL;
+
+ return 0;
+}
+
static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
{
struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
@@ -417,33 +487,39 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
u32 packet_done = 0;
u32 pkt_len;
+ hbg_rx_fill_buffers(priv);
while (packet_done < budget) {
if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu, ring)))
break;
buffer = &ring->queue[ring->ntc];
- if (unlikely(!buffer->skb))
+ if (unlikely(!buffer->page))
goto next_buffer;
if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
break;
- rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+ rx_desc = (struct hbg_rx_desc *)buffer->page_addr;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+ trace_hbg_rx_desc(priv, ring->ntc, rx_desc);
+
+ if (unlikely(hbg_build_skb(priv, buffer, pkt_len))) {
+ hbg_buffer_free_page(buffer);
+ goto next_buffer;
+ }
if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
- hbg_buffer_free(buffer);
+ hbg_buffer_free_skb(buffer);
goto next_buffer;
}
- hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
priv->netdev);
-
dev_sw_netstats_rx_add(priv->netdev, pkt_len);
napi_gro_receive(napi, buffer->skb);
buffer->skb = NULL;
+ buffer->page = NULL;
next_buffer:
hbg_rx_fill_one_buffer(priv);
@@ -458,6 +534,42 @@ next_buffer:
return packet_done;
}
+static void hbg_ring_page_pool_destroy(struct hbg_ring *ring)
+{
+ if (!ring->page_pool)
+ return;
+
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+}
+
+static int hbg_ring_page_pool_init(struct hbg_priv *priv, struct hbg_ring *ring)
+{
+ u32 buf_size = hbg_spec_max_frame_len(priv, ring->dir);
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = hbg_get_page_order(ring),
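+ /* one page holds page_size / buf_size max-frame fragments,
+ * so this many pages can back every ring entry
+ */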
+ .pool_size = ring->len * buf_size / hbg_get_page_size(ring),
+ .nid = dev_to_node(&priv->pdev->dev),
+ .dev = &priv->pdev->dev,
+ .napi = &ring->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = hbg_get_page_size(ring),
+ };
+ int ret = 0;
+
+ ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(ring->page_pool)) {
+ ret = PTR_ERR(ring->page_pool);
+ dev_err(&priv->pdev->dev,
+ "failed to create page pool, ret = %d\n", ret);
+ ring->page_pool = NULL;
+ }
+
+ return ret;
+}
+
static void hbg_ring_uninit(struct hbg_ring *ring)
{
struct hbg_buffer *buffer;
@@ -476,6 +588,7 @@ static void hbg_ring_uninit(struct hbg_ring *ring)
buffer->priv = NULL;
}
+ hbg_ring_page_pool_destroy(ring);
dma_free_coherent(&ring->priv->pdev->dev,
ring->len * sizeof(*ring->queue),
ring->queue, ring->queue_dma);
@@ -491,8 +604,19 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
{
struct hbg_buffer *buffer;
u32 i, len;
+ int ret;
len = hbg_get_spec_fifo_max_num(priv, dir) + 1;
+ /* To improve receive performance under high load,
+ * hbg_napi_rx_poll() first hands the other half of the ring to
+ * the hardware via hbg_rx_fill_buffers(), and then processes the
+ * packets in the original half, minimizing packet loss caused by
+ * hardware FIFO overflow.
+ */
+ if (dir == HBG_DIR_RX)
+ len += hbg_get_spec_fifo_max_num(priv, dir);
+
ring->queue = dma_alloc_coherent(&priv->pdev->dev,
len * sizeof(*ring->queue),
&ring->queue_dma, GFP_KERNEL);
@@ -514,11 +638,23 @@ static int hbg_ring_init(struct hbg_priv *priv, struct hbg_ring *ring,
ring->ntu = 0;
ring->len = len;
- if (dir == HBG_DIR_TX)
+ if (dir == HBG_DIR_TX) {
netif_napi_add_tx(priv->netdev, &ring->napi, napi_poll);
- else
+ } else {
netif_napi_add(priv->netdev, &ring->napi, napi_poll);
+ ret = hbg_ring_page_pool_init(priv, ring);
+ if (ret) {
+ netif_napi_del(&ring->napi);
+ dma_free_coherent(&ring->priv->pdev->dev,
+ ring->len * sizeof(*ring->queue),
+ ring->queue, ring->queue_dma);
+ ring->queue = NULL;
+ ring->len = 0;
+ return ret;
+ }
+ }
+
napi_enable(&ring->napi);
return 0;
}
@@ -541,21 +677,16 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
static int hbg_rx_ring_init(struct hbg_priv *priv)
{
int ret;
- u32 i;
ret = hbg_ring_init(priv, &priv->rx_ring, hbg_napi_rx_poll, HBG_DIR_RX);
if (ret)
return ret;
- for (i = 0; i < priv->rx_ring.len - 1; i++) {
- ret = hbg_rx_fill_one_buffer(priv);
- if (ret) {
- hbg_ring_uninit(&priv->rx_ring);
- return ret;
- }
- }
+ ret = hbg_rx_fill_buffers(priv);
+ if (ret)
+ hbg_ring_uninit(&priv->rx_ring);
- return 0;
+ return ret;
}
int hbg_txrx_init(struct hbg_priv *priv)
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cee57a2149ab..7b1ac90b3de4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -551,9 +551,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- size_t total_len, max_len;
u16 *eeprom_buff;
int ret_val = 0;
+ size_t max_len;
int first_word;
int last_word;
void *ptr;
@@ -571,10 +571,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
max_len = hw->nvm.word_size * 2;
- if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) ||
- total_len > max_len)
- return -EFBIG;
-
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index bf2029144c1d..76e42abca965 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -734,22 +734,11 @@ static int fm10k_get_rssh_fields(struct net_device *dev,
return 0;
}
-static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static u32 fm10k_get_rx_ring_count(struct net_device *dev)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
- switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = interface->num_rx_queues;
- ret = 0;
- break;
- default:
- break;
- }
-
- return ret;
+ return interface->num_rx_queues;
}
static int fm10k_set_rssh_fields(struct net_device *dev,
@@ -1160,7 +1149,7 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_ringparam = fm10k_set_ringparam,
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
- .get_rxnfc = fm10k_get_rxnfc,
+ .get_rx_ring_count = fm10k_get_rx_ring_count,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
index bc205e3077c7..229179ccc131 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devlink.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -24,7 +24,8 @@ static int i40e_max_mac_per_vf_set(struct devlink *devlink,
static int i40e_max_mac_per_vf_get(struct devlink *devlink,
u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct i40e_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 86c72596617a..f2c2646ea298 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3522,6 +3522,20 @@ no_input_set:
}
/**
+ * i40e_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 i40e_get_rx_ring_count(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3538,10 +3552,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -5819,6 +5829,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_msglevel = i40e_set_msglevel,
.get_rxnfc = i40e_get_rxnfc,
.set_rxnfc = i40e_set_rxnfc,
+ .get_rx_ring_count = i40e_get_rx_ring_count,
.self_test = i40e_diag_test,
.get_strings = i40e_get_strings,
.get_eee = i40e_get_eee,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 9d91a382612d..8b30a3accd31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2967,7 +2967,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
dev_err(&pf->pdev->dev,
"Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
mac_add_max);
- return -EPERM;
+ return -EPERM;
}
if (!vf_trusted) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index a3f8ced23266..2cc21289a707 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1639,6 +1639,19 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * iavf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ **/
+static u32 iavf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->num_active_queues;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -1653,10 +1666,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_active_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
break;
@@ -1866,6 +1875,7 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
.set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
+ .get_rx_ring_count = iavf_get_rx_ring_count,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 34a422a4a29c..88156082a41d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -793,7 +793,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -838,7 +839,8 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN add (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
@@ -941,7 +943,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl, vlan_id, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v1) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl, vlan_id,
--count);
@@ -987,7 +990,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
len = virtchnl_struct_size(vvfl_v2, filters, count);
if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ dev_info(&adapter->pdev->dev,
+ "virtchnl: Too many VLAN delete (v2) requests; splitting into multiple messages to PF\n");
while (len > IAVF_MAX_AQ_BUF_SIZE)
len = virtchnl_struct_size(vvfl_v2, filters,
--count);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 938914abbe06..d88b7f3fd1f9 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -610,11 +610,13 @@ exit_release_res:
* @devlink: pointer to the devlink instance
* @id: the parameter ID to set
* @ctx: context to store the parameter value
+ * @extack: netlink extended ACK structure
*
* Return: zero on success and negative value on failure.
*/
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
int err;
@@ -1349,7 +1351,8 @@ static const struct devlink_ops ice_sf_devlink_ops;
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct iidc_rdma_core_dev_info *cdev;
@@ -1415,7 +1418,8 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct iidc_rdma_core_dev_info *cdev;
@@ -1522,11 +1526,13 @@ static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
* @devlink: Pointer to the devlink instance.
* @id: The parameter ID to set.
* @ctx: Context to store the parameter value.
+ * @extack: netlink extended ACK structure
*
* Return: Zero.
*/
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct ice_port_info *pi;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a1d9abee97e5..969d4f8f9c02 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3084,6 +3084,20 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 ice_get_rx_ring_count(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3103,10 +3117,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
hw = &vsi->back->hw;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = hw->fdir_active_fltr;
/* report total rule count */
@@ -4853,6 +4863,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.set_rxnfc = ice_set_rxnfc,
+ .get_rx_ring_count = ice_get_rx_ring_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 26b357c0ae15..b29fbdec9442 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -1121,7 +1121,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* ice_fdir_has_frag - does flow type have 2 ptypes
* @flow: flow ptype
*
- * returns true is there is a fragment packet for this ptype
+ * Return: true if there is a fragment packet for this ptype
*/
bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index d86db081579f..973a13d3d92a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -534,7 +534,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ dev_err(dev, "Firmware failed to erase %s (module 0x%02x), aq_err %s\n",
component, module,
libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 985b3e79b312..4c8d20f2d2c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -3253,7 +3253,7 @@ void ice_ptp_init(struct ice_pf *pf)
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
- goto err_exit;
+ goto err_clean_pf;
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
@@ -3270,13 +3270,19 @@ void ice_ptp_init(struct ice_pf *pf)
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
+err_clean_pf:
+ mutex_destroy(&ptp->port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
err_exit:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- ptp->state = ICE_PTP_ERROR;
+ /* Keep ICE_PTP_UNINIT state to avoid ambiguity at driver unload
+ * and to avoid releasing resources twice.
+ */
+ ptp->state = ICE_PTP_UNINIT;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3289,9 +3295,19 @@ err_exit:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (pf->ptp.state != ICE_PTP_READY)
+ if (pf->ptp.state == ICE_PTP_UNINIT)
return;
+ if (pf->ptp.state != ICE_PTP_READY) {
+ mutex_destroy(&pf->ptp.port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
+ return;
+ }
+
pf->ptp.state = ICE_PTP_UNINIT;
/* Disable timestamping for both Tx and Rx */
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
index 7928f4e8e788..f73d5a3e83d4 100644
--- a/drivers/net/ethernet/intel/ice/virt/queues.c
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -842,6 +842,9 @@ int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
goto error_param;
+
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+
if (qpi->rxq.max_pkt_size > max_frame_size ||
qpi->rxq.max_pkt_size < 64)
goto error_param;
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 50fa7be0c00d..8cfc68cbfa06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -131,14 +131,12 @@ enum idpf_cap_field {
/**
* enum idpf_vport_state - Current vport state
- * @__IDPF_VPORT_DOWN: Vport is down
- * @__IDPF_VPORT_UP: Vport is up
- * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ * @IDPF_VPORT_UP: Vport is up
+ * @IDPF_VPORT_STATE_NBITS: Must be last, number of states
*/
enum idpf_vport_state {
- __IDPF_VPORT_DOWN,
- __IDPF_VPORT_UP,
- __IDPF_VPORT_STATE_LAST,
+ IDPF_VPORT_UP,
+ IDPF_VPORT_STATE_NBITS
};
/**
@@ -162,7 +160,7 @@ struct idpf_netdev_priv {
u16 vport_idx;
u16 max_tx_hdr_size;
u16 tx_max_bufs;
- enum idpf_vport_state state;
+ DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
};
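Moving from a scalar enum to a one-bit bitmap lets vport state be read and written with the atomic bitops, without holding a lock; a minimal illustration (not driver code, netdev assumed):

	DECLARE_BITMAP(state, IDPF_VPORT_STATE_NBITS);

	set_bit(IDPF_VPORT_UP, state);		/* vport brought up */
	if (test_bit(IDPF_VPORT_UP, state))	/* atomic, lock-free query */
		netif_tx_start_all_queues(netdev);
	clear_bit(IDPF_VPORT_UP, state);	/* vport stopped */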
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index a5a1eec9ade8..2589e124e41c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -6,6 +6,25 @@
#include "idpf_virtchnl.h"
/**
+ * idpf_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 idpf_get_rx_ring_count(struct net_device *netdev)
+{
+ struct idpf_vport *vport;
+ u32 num_rxq;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ num_rxq = vport->num_rxq;
+ idpf_vport_ctrl_unlock(netdev);
+
+ return num_rxq;
+}
+
+/**
* idpf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -28,9 +47,6 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vport->num_rxq;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = user_config->num_fsteer_fltrs;
cmd->data = idpf_fsteer_max_rules(vport);
@@ -386,7 +402,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -436,7 +452,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
@@ -1167,7 +1183,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP) {
+ if (!test_bit(IDPF_VPORT_UP, np->state)) {
idpf_vport_ctrl_unlock(netdev);
return;
@@ -1319,7 +1335,7 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
@@ -1507,7 +1523,7 @@ static int idpf_set_coalesce(struct net_device *netdev,
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
@@ -1710,7 +1726,7 @@ static void idpf_get_ts_stats(struct net_device *netdev,
ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
} while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
goto exit;
for (u16 i = 0; i < vport->num_txq_grp; i++) {
@@ -1757,6 +1773,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
.set_rxnfc = idpf_set_rxnfc,
+ .get_rx_ring_count = idpf_get_rx_ring_count,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 8a941f0fb048..7a7e101afeb6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -519,7 +519,7 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
}
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (np->state == __IDPF_VPORT_UP) {
+ if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
err = idpf_add_del_mac_filters(vport, np, false, async);
@@ -590,7 +590,7 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
if (err)
return err;
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
err = idpf_add_del_mac_filters(vport, np, true, async);
return err;
@@ -894,7 +894,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (rtnl)
@@ -921,7 +921,7 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
idpf_vport_intr_rel(vport);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
rtnl_unlock();
@@ -1345,7 +1345,7 @@ static int idpf_up_complete(struct idpf_vport *vport)
netif_tx_start_all_queues(vport->netdev);
}
- np->state = __IDPF_VPORT_UP;
+ set_bit(IDPF_VPORT_UP, np->state);
return 0;
}
@@ -1391,7 +1391,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
struct idpf_vport_config *vport_config;
int err;
- if (np->state != __IDPF_VPORT_DOWN)
+ if (test_bit(IDPF_VPORT_UP, np->state))
return -EBUSY;
if (rtnl)
@@ -1602,7 +1602,7 @@ void idpf_init_task(struct work_struct *work)
/* Once the UP bit is cleared, driver is ready for dev_open */
np = netdev_priv(vport->netdev);
- np->state = __IDPF_VPORT_DOWN;
+ clear_bit(IDPF_VPORT_UP, np->state);
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
idpf_vport_open(vport, true);
@@ -1801,7 +1801,7 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
continue;
np = netdev_priv(adapter->netdevs[i]);
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
set_bit(IDPF_VPORT_UP_REQUESTED,
adapter->vport_config[i]->flags);
}
@@ -1939,7 +1939,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
- enum idpf_vport_state current_state = np->state;
+ bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
int err;
@@ -1990,7 +1990,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
- if (current_state <= __IDPF_VPORT_DOWN) {
+ if (!vport_is_up) {
idpf_send_delete_queues_msg(vport);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
@@ -2023,7 +2023,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
if (err)
goto err_open;
- if (current_state == __IDPF_VPORT_UP)
+ if (vport_is_up)
err = idpf_vport_open(vport, false);
goto free_vport;
@@ -2033,7 +2033,7 @@ err_reset:
vport->num_rxq, vport->num_bufq);
err_open:
- if (current_state == __IDPF_VPORT_UP)
+ if (vport_is_up)
idpf_vport_open(vport, false);
free_vport:
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index 7a06eaf46a08..de5d722cc21d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -141,6 +141,8 @@ destroy_wqs:
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->vport_config[i])
+ continue;
kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 61e613066140..e3ddf18dcbf5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -570,7 +570,7 @@ fetch_next_txq_desc:
np = netdev_priv(tx_q->netdev);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = np->state != __IDPF_VPORT_UP ||
+ dont_wake = !test_bit(IDPF_VPORT_UP, np->state) ||
!netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 828f7c444d30..1d91c56f7469 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -134,7 +134,7 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
{
idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- if (!complq->comp)
+ if (!complq->desc_ring)
return;
dma_free_coherent(complq->netdev->dev.parent, complq->size,
@@ -922,8 +922,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
err = idpf_rx_desc_alloc(vport, q);
if (err) {
pci_err(vport->adapter->pdev,
- "Memory allocation for Rx Queue %u failed\n",
- i);
+ "Memory allocation for Rx queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -939,8 +939,8 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
err = idpf_bufq_desc_alloc(vport, q);
if (err) {
pci_err(vport->adapter->pdev,
- "Memory allocation for Rx Buffer Queue %u failed\n",
- i);
+ "Memory allocation for Rx Buffer Queue %u from queue group %u failed\n",
+ j, i);
goto err_out;
}
}
@@ -2275,7 +2275,7 @@ fetch_next_desc:
/* Update BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
+ dont_wake = !complq_ok || !test_bit(IDPF_VPORT_UP, np->state) ||
!netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index cbb5fa30f5a0..44cd4b466c48 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -68,7 +68,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
vport->link_up = v2e->link_status;
- if (np->state != __IDPF_VPORT_UP)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return;
if (vport->link_up) {
@@ -2755,7 +2755,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
/* Don't send get_stats message if the link is down */
- if (np->state <= __IDPF_VPORT_DOWN)
+ if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 21ce25b0567f..958d16f87424 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -418,7 +418,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
!test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
!!vport->xdp_prog == !!prog) {
- if (np->state == __IDPF_VPORT_UP)
+ if (test_bit(IDPF_VPORT_UP, np->state))
idpf_xdp_copy_prog_to_rqs(vport, prog);
old = xchg(&vport->xdp_prog, prog);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 10e2445e0ded..b507576b28b2 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2541,6 +2541,13 @@ static int igb_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 igb_get_rx_ring_count(struct net_device *dev)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2548,10 +2555,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_filter_count;
ret = 0;
@@ -3473,6 +3476,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ts_info = igb_get_ts_info,
.get_rxnfc = igb_get_rxnfc,
.set_rxnfc = igb_set_rxnfc,
+ .get_rx_ring_count = igb_get_rx_ring_count,
.get_eee = igb_get_eee,
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index bb783042d1af..e94c1922b97a 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1091,15 +1091,19 @@ static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
return 0;
}
+static u32 igc_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
+ return adapter->num_rx_queues;
+}
+
static int igc_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
- return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->nfc_rule_count;
return 0;
@@ -2170,6 +2174,7 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_coalesce = igc_ethtool_set_coalesce,
.get_rxnfc = igc_ethtool_get_rxnfc,
.set_rxnfc = igc_ethtool_set_rxnfc,
+ .get_rx_ring_count = igc_ethtool_get_rx_ring_count,
.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
.get_rxfh = igc_ethtool_get_rxfh,
.set_rxfh = igc_ethtool_set_rxfh,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 2d660e9edb80..2ad81f687a84 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2805,6 +2805,14 @@ static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
return 64;
}
+static u32 ixgbe_get_rx_ring_count(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
+ return min_t(u32, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2812,11 +2820,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = min_t(int, adapter->num_rx_queues,
- ixgbe_rss_indir_tbl_max(adapter));
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->fdir_filter_count;
ret = 0;
@@ -3743,6 +3746,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
@@ -3791,6 +3795,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
+ .get_rx_ring_count = ixgbe_get_rx_ring_count,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
.get_rxfh_indir_size = ixgbe_rss_indir_size,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3190ce7e44c7..4af3b3e71ff1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7449,7 +7449,7 @@ int ixgbe_open(struct net_device *netdev)
adapter->hw.link.link_info.link_cfg_err);
err = ixgbe_non_sfp_link_config(&adapter->hw);
- if (ixgbe_non_sfp_link_config(&adapter->hw))
+ if (err)
e_dev_err("Link setup failed, err %d.\n", err);
}
@@ -12046,7 +12046,7 @@ err_dma:
* @pdev: PCI device information struct
*
* ixgbe_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
+ * that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index bebad564188e..537a60d5276f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -867,19 +867,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
return 0;
}
-static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 ixgbevf_get_rx_ring_count(struct net_device *dev)
{
struct ixgbevf_adapter *adapter = netdev_priv(dev);
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = adapter->num_rx_queues;
- return 0;
- default:
- hw_dbg(&adapter->hw, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
+ return adapter->num_rx_queues;
}
static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
@@ -987,7 +979,7 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
.get_coalesce = ixgbevf_get_coalesce,
.set_coalesce = ixgbevf_set_coalesce,
- .get_rxnfc = ixgbevf_get_rxnfc,
+ .get_rx_ring_count = ixgbevf_get_rx_ring_count,
.get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size,
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 039187607e98..516a6fdd23d0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -241,23 +241,7 @@ struct ixgbevf_q_vector {
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
- struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int state;
-#define IXGBEVF_QV_STATE_IDLE 0
-#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
-#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
-#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
-#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
-#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
-#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
- IXGBEVF_QV_STATE_POLL_YIELD)
-#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
- IXGBEVF_QV_STATE_POLL_YIELD)
- spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct ixgbevf_ring ring[] ____cacheline_internodealigned_in_smp;
};
/* microsecond values for various ITR rates shifted by 2 to fit itr register
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 89ccb8eb82c7..7af44f858fa3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5012,17 +5012,9 @@ static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
return MVNETA_RSS_LU_TABLE_SIZE;
}
-static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info,
- u32 *rules __always_unused)
+static u32 mvneta_ethtool_get_rx_ring_count(struct net_device *dev)
{
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = rxq_number;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ return rxq_number;
}
static int mvneta_config_rss(struct mvneta_port *pp)
@@ -5356,7 +5348,7 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.get_ethtool_stats = mvneta_ethtool_get_stats,
.get_sset_count = mvneta_ethtool_get_sset_count,
.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
- .get_rxnfc = mvneta_ethtool_get_rxnfc,
+ .get_rx_ring_count = mvneta_ethtool_get_rx_ring_count,
.get_rxfh = mvneta_ethtool_get_rxfh,
.set_rxfh = mvneta_ethtool_set_rxfh,
.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ab0c99aa9f9a..33426fded919 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5580,6 +5580,13 @@ static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
+static u32 mvpp2_ethtool_get_rx_ring_count(struct net_device *dev)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return port->nrxqs;
+}
+
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -5590,9 +5597,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = port->nrxqs;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = port->n_rfs_rules;
break;
@@ -5827,6 +5831,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.set_pauseparam = mvpp2_ethtool_set_pause_param,
.get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
.set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
+ .get_rx_ring_count = mvpp2_ethtool_get_rx_ring_count,
.get_rxnfc = mvpp2_ethtool_get_rxnfc,
.set_rxnfc = mvpp2_ethtool_set_rxnfc,
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 7370812ece2a..15d3cb0b9da6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1663,6 +1663,9 @@ static void print_tm_tree(struct seq_file *m,
int blkaddr;
u64 cfg;
+ if (!sq_ctx->ena)
+ return;
+
blkaddr = nix_hw->blkaddr;
schq = sq_ctx->smq;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 3735372539bd..0f9953eaf1b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1233,7 +1233,8 @@ static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1259,7 +1260,8 @@ enum rvu_af_dl_param_id {
};
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1314,7 +1316,8 @@ static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1376,7 +1379,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink
}
static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1402,7 +1406,8 @@ static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index e13ae5484c19..a72694219df4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -48,7 +48,8 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
}
static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -84,7 +85,8 @@ static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id,
}
static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a68cd3f0304c..ad6298456639 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1727,6 +1727,13 @@ static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
}
+static u32 mlx4_en_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->rx_ring_num;
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -1743,9 +1750,6 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return -EINVAL;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = priv->rx_ring_num;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = mlx4_en_get_num_flows(priv);
break;
@@ -2154,6 +2158,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
+ .get_rx_ring_count = mlx4_en_get_rx_ring_count,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 03d2fc7d9b09..2de226951e19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -174,7 +174,8 @@ MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is defaul
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vbool = !!mlx4_internal_err_reset;
return 0;
@@ -189,7 +190,8 @@ static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 722282cebce9..5b08e5ffe0e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -181,6 +181,7 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
lockdep_assert_held(&cmd->alloc_lock);
+ cmd->ent_arr[idx] = NULL;
set_bit(idx, &cmd->vars.bitmask);
}
@@ -1200,6 +1201,44 @@ out_err:
return err;
}
+/* Check if all command slots are stalled (timed out and not recovered).
+ * Returns true if all slots timed out on a recent command and have not
+ * been completed by FW yet (stalled state), false otherwise (at least
+ * one slot is not stalled).
+ *
+ * In the odd "all stalled" situation, this serves as a protection
+ * mechanism to avoid blocking the kernel for long periods of time when
+ * FW is not responding to commands.
+ */
+static bool mlx5_cmd_all_stalled(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ bool all_stalled = true;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+
+ /* at least one command slot is free */
+ if (bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds) > 0) {
+ all_stalled = false;
+ goto out;
+ }
+
+ for_each_clear_bit(i, &cmd->vars.bitmask, cmd->vars.max_reg_cmds) {
+ struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
+
+ if (!test_bit(MLX5_CMD_ENT_STATE_TIMEDOUT, &ent->state)) {
+ all_stalled = false;
+ break;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return all_stalled;
+}
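The bitmask convention behind the two checks above is visible in cmd_free_index() earlier in this file (freeing a slot sets its bit). Summarized as a comment, since it is easy to read backwards:

	/* cmd->vars.bitmask convention (see cmd_free_index() above):
	 *   bit set   -> slot free
	 *   bit clear -> slot holds an outstanding command
	 * hence:
	 *   bitmap_weight(&mask, n) > 0      => at least one free slot
	 *   for_each_clear_bit(i, &mask, n)  => iterate busy slots only
	 */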
+
/* Notes:
* 1. Callback functions may not sleep
* 2. page queue commands do not support asynchronous completion
@@ -1230,6 +1269,15 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (callback && page_queue)
return -EINVAL;
+ if (!page_queue && mlx5_cmd_all_stalled(dev)) {
+ mlx5_core_err_rl(dev,
+ "All CMD slots are stalled, aborting command\n");
+ /* There's no reason to wait and block the whole kernel if FW
+ * isn't currently responding on any slot; fail immediately
+ */
+ return -EAGAIN;
+ }
+
ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
callback, context, page_queue);
if (IS_ERR(ent))
@@ -1700,6 +1748,13 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
if (test_bit(i, &vector)) {
ent = cmd->ent_arr[i];
+ if (forced && ent->ret == -ETIMEDOUT)
+ set_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+ else if (!forced) /* real FW completion */
+ clear_bit(MLX5_CMD_ENT_STATE_TIMEDOUT,
+ &ent->state);
+
/* if we already completed the command, ignore it */
if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
&ent->state)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index c9555119a661..43b9bf8829cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -26,7 +26,8 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
- MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE
+ MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 080e7eab52c7..7bcf822a89f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -54,7 +54,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) {
mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 12e10feb30f0..424f8a2728a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -82,7 +82,7 @@ static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
struct skb_shared_hwtstamps hwts = {};
ktime_t diff;
@@ -92,8 +92,17 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
/* Maximal allowed diff is 1 / 128 second */
if (diff > (NSEC_PER_SEC >> 7)) {
- cq_stats->abort++;
- cq_stats->abort_abs_diff_ns += diff;
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+
+ ptpsq->cq_stats->abort++;
+ ptpsq->cq_stats->abort_abs_diff_ns += diff;
+ if (diff > (NSEC_PER_SEC >> 1) &&
+ !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+ netdev_warn(sq->channel->netdev,
+ "PTP TX timestamp difference between CQE and port exceeds threshold: %lld ns, recovering SQ %u\n",
+ (s64)diff, sq->sqn);
+ queue_work(sq->priv->wq, &ptpsq->report_unhealthy_work);
+ }
return;
}
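For reference, the two shift-derived thresholds evaluate to roughly (plain arithmetic, not from the patch):

	/* NSEC_PER_SEC >> 7 = 1000000000 / 128 ns ~= 7.8 ms  (count an abort)    */
	/* NSEC_PER_SEC >> 1 = 1000000000 / 2 ns   = 500 ms   (queue SQ recovery) */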
@@ -103,7 +112,7 @@ static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats)
+ struct mlx5e_ptpsq *ptpsq)
{
switch (hwtstamp_type) {
case (MLX5E_SKB_CB_CQE_HWTSTAMP):
@@ -121,7 +130,7 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
!mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
return;
- mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
+ mlx5e_skb_cb_hwtstamp_tx(skb, ptpsq);
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
@@ -209,7 +218,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
- hwtstamp, ptpsq->cq_stats);
+ hwtstamp, ptpsq);
ptpsq->cq_stats->cqe++;
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 1c0e0a86a9ac..2a457a2ed707 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -147,7 +147,7 @@ enum {
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
ktime_t hwtstamp,
- struct mlx5e_ptp_cq_stats *cq_stats);
+ struct mlx5e_ptpsq *ptpsq);
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
#endif /* __MLX5_EN_PTP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 9b93da4d52f6..cf8f14ce4cd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -627,7 +627,7 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
- } else if (max_bw_value[i] <= upper_limit_gbps) {
+ } else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 01b8f05a23db..fe67c73849f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -261,6 +261,11 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT,
ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_1600000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_1600000baseDR8_2_Full_BIT);
}
static void mlx5e_ethtool_get_speed_arr(bool ext,
@@ -2027,7 +2032,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
int size_read = 0;
u8 data[4] = {0};
- size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data, NULL);
if (size_read < 2)
return -EIO;
@@ -2069,6 +2074,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
int offset = ee->offset;
int size_read;
+ u8 status = 0;
int i = 0;
if (!ee->len)
@@ -2078,15 +2084,15 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
while (i < ee->len) {
size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
- data + i);
-
+ data + i, &status);
if (!size_read)
/* Done reading */
return 0;
if (size_read < 0) {
- netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
- __func__, size_read);
+ netdev_err(netdev,
+ "%s: mlx5_query_eeprom failed:0x%x, status %u\n",
+ __func__, size_read, status);
return size_read;
}
@@ -2106,6 +2112,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
u8 *data = page_data->data;
int size_read;
+ u8 status = 0;
int i = 0;
if (!page_data->length)
@@ -2119,7 +2126,8 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
query.page = page_data->page;
while (i < page_data->length) {
query.size = page_data->length - i;
- size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
+ size_read = mlx5_query_module_eeprom_by_page(mdev, &query,
+ data + i, &status);
/* Done reading, return how many bytes were read */
if (!size_read)
@@ -2128,8 +2136,8 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d",
- i, size_read);
+ "Query module eeprom by page failed, read %u bytes, err %d, status %u",
+ i, size_read, status);
return size_read;
}
@@ -2492,21 +2500,18 @@ static int mlx5e_set_rxfh_fields(struct net_device *dev,
return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
}
+static u32 mlx5e_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -2766,6 +2771,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.remove_rxfh_context = mlx5e_remove_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_rx_ring_count = mlx5e_get_rx_ring_count,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pause_stats = mlx5e_get_pause_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 79916f1abd14..63bdef5b4ba5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -704,7 +704,7 @@ static int validate_flow(struct mlx5e_priv *priv,
num_tuples += ret;
break;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
ret = validate_vlan(fs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2702b3885f06..14884b9ea7f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -755,7 +755,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
if (sq->ptpsq) {
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
- hwts.hwtstamp, sq->ptpsq->cq_stats);
+ hwts.hwtstamp, sq->ptpsq);
} else {
skb_tstamp_tx(skb, &hwts);
sq->stats->timestamps++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 56e6f54b1e2e..4278bcb04c72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -341,13 +341,6 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
if (max_guarantee)
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- /* If nodes max min_rate divider is 0 but their parent has bw_share
- * configured, then set bw_share for nodes to minimal value.
- */
-
- if (parent && parent->bw_share)
- return 1;
-
/* If no node has min_rate configured, a divider of 0 sets all
* nodes' bw_share to 0, effectively disabling min guarantees.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index e2ffb87b94cb..4b7a1ce7f406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -875,13 +875,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
vport_num, 1,
vport->info.link_state);
- /* Host PF has its own mac/guid. */
- if (vport_num) {
- mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
- vport->info.mac);
- mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
- vport->info.node_guid);
- }
+ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true,
+ vport->info.mac);
+ mlx5_query_nic_vport_node_guid(esw->dev, vport_num, true,
+ &vport->info.node_guid);
flags = (vport->info.vlan || vport->info.qos) ?
SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
@@ -947,12 +944,6 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
goto err_vhca_mapping;
}
- /* External controller host PF has factory programmed MAC.
- * Read it from the device.
- */
- if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
- mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1483,7 +1474,7 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
info.new_mode = mode;
- blocking_notifier_call_chain(&esw->n_head, 0, &info);
+ blocking_notifier_call_chain(&esw->dev->priv.esw_n_head, 0, &info);
}
static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
@@ -1978,7 +1969,8 @@ static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
}
static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -2059,7 +2051,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
else
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
- BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
@@ -2235,6 +2226,9 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->vf = vport - 1;
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport, true,
+ evport->info.mac);
ether_addr_copy(ivi->mac, evport->info.mac);
ivi->linkstate = evport->info.link_state;
ivi->vlan = evport->info.vlan;
@@ -2385,14 +2379,16 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&esw->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.esw_n_head, nb);
}
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&esw->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.esw_n_head, nb);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index beaec450a734..ad1073f7b79f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -403,7 +403,6 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
- struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
u16 enabled_ipsec_vf_count;
@@ -864,8 +863,10 @@ struct mlx5_esw_event_info {
u16 new_mode;
};
-int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
-void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
+int mlx5_esw_event_notifier_register(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_core_dev *dev,
+ struct notifier_block *n);
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0b1a180ef238..8de6c7f6c294 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2618,7 +2618,8 @@ done:
}
static int esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -4491,6 +4492,9 @@ int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
+
+ mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, true,
+ vport->info.mac);
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
mutex_unlock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index e5c1012921d2..1ec61164e6b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -211,7 +211,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
if (!max_num_qps) {
mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n");
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 1af76da8b132..ced747bef641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -239,6 +239,10 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
MLX5_SET(set_flow_table_root_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_flow_table_root_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(set_flow_table_root_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
if (!err &&
@@ -302,6 +306,10 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap);
@@ -360,6 +368,10 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
if (!err)
@@ -394,6 +406,10 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(modify_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(modify_flow_table_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(modify_flow_table_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -429,6 +445,10 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(create_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
@@ -451,6 +471,10 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(destroy_flow_group_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(destroy_flow_group_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
@@ -559,6 +583,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(set_fte_in, in, eswitch_owner_vhca_id, ft->esw_owner_vhca_id);
+ MLX5_SET(set_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -788,6 +815,10 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(delete_fte_in, in, eswitch_owner_vhca_id,
+ ft->esw_owner_vhca_id);
+ MLX5_SET(delete_fte_in, in, other_eswitch,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_ESWITCH));
return mlx5_cmd_exec_in(dev, delete_fte, in);
}
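All of the fs_cmd.c hunks above plumb the same two flow-table attributes into FW commands. A hedged sketch of a caller targeting a table owned by a peer eswitch; the flag and field names are taken from this patch, the surrounding caller (ns, peer_vhca_id) is hypothetical:

	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.level = 0;
	ft_attr.flags = MLX5_FLOW_TABLE_OTHER_ESWITCH;
	ft_attr.esw_owner_vhca_id = peer_vhca_id;	/* owning eswitch */
	ft = mlx5_create_flow_table(ns, &ft_attr);	/* ns: caller's namespace */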
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2ca3bddbdf05..0a6031a64c6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -939,10 +939,10 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
- enum fs_flow_table_type table_type,
- enum fs_flow_table_op_mod op_mod,
- u32 flags)
+static struct mlx5_flow_table *
+alloc_flow_table(struct mlx5_flow_table_attr *ft_attr, u16 vport,
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod)
{
struct mlx5_flow_table *ft;
int ret;
@@ -957,12 +957,13 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
return ERR_PTR(ret);
}
- ft->level = level;
+ ft->level = ft_attr->level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->flags = flags;
+ ft->esw_owner_vhca_id = ft_attr->esw_owner_vhca_id;
+ ft->flags = ft_attr->flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1370,10 +1371,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
/* The level is related to the
* priority level range.
*/
- ft = alloc_flow_table(ft_attr->level,
- vport,
- root->table_type,
- op_mod, ft_attr->flags);
+ ft = alloc_flow_table(ft_attr, vport, root->table_type, op_mod);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto unlock_root;
@@ -3310,6 +3308,62 @@ err:
return ret;
}
+static bool mlx5_fs_ns_is_empty(struct mlx5_flow_namespace *ns)
+{
+ struct fs_prio *iter_prio;
+
+ fs_for_each_prio(iter_prio, ns) {
+ if (iter_prio->num_ft)
+ return false;
+ }
+
+ return true;
+}
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type)
+{
+ struct mlx5_flow_root_namespace **root;
+ int total_vports;
+ int i;
+
+ switch (table_type) {
+ case FS_FT_RDMA_TRANSPORT_TX:
+ root = dev->priv.steering->rdma_transport_tx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_tx_vports;
+ break;
+ case FS_FT_RDMA_TRANSPORT_RX:
+ root = dev->priv.steering->rdma_transport_rx_root_ns;
+ total_vports = dev->priv.steering->rdma_transport_rx_vports;
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < total_vports; i++) {
+ mutex_lock(&root[i]->chain_lock);
+ if (!mlx5_fs_ns_is_empty(&root[i]->ns)) {
+ mutex_unlock(&root[i]->chain_lock);
+ goto err;
+ }
+ root[i]->dev = new_dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ return 0;
+err:
+ while (i--) {
+ mutex_lock(&root[i]->chain_lock);
+ root[i]->dev = dev;
+ mutex_unlock(&root[i]->chain_lock);
+ }
+ /* If you hit this error, destroy all flow tables and try again */
+ mlx5_core_err(dev, "Failed to set root device for RDMA TRANSPORT\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL(mlx5_fs_set_root_dev);
+
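A hedged usage sketch of the new export, assuming the fs_flow_table_type enum is visible to the caller after this patch moves it out of fs_core.h: migrate the RDMA_TRANSPORT steering roots to a peer device while their namespaces are empty, and point them back on teardown:

	/* fails with -EINVAL if any RDMA_TRANSPORT flow table still
	 * exists on @dev; the sketch ignores TX/RX partial failure
	 */
	err = mlx5_fs_set_root_dev(dev, peer_dev, FS_FT_RDMA_TRANSPORT_RX);
	if (!err)
		err = mlx5_fs_set_root_dev(dev, peer_dev, FS_FT_RDMA_TRANSPORT_TX);

	/* teardown: restore the original root device */
	mlx5_fs_set_root_dev(dev, dev, FS_FT_RDMA_TRANSPORT_RX);
	mlx5_fs_set_root_dev(dev, dev, FS_FT_RDMA_TRANSPORT_TX);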
static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
{
struct mlx5_core_dev *dev = steering->dev;
@@ -3779,7 +3833,8 @@ static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 8458ce203dac..1c6591425260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -103,24 +103,6 @@ enum fs_node_type {
FS_TYPE_FLOW_DEST
};
-enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_NIC_TX = 0x1,
- FS_FT_ESW_EGRESS_ACL = 0x2,
- FS_FT_ESW_INGRESS_ACL = 0x3,
- FS_FT_FDB = 0X4,
- FS_FT_SNIFFER_RX = 0X5,
- FS_FT_SNIFFER_TX = 0X6,
- FS_FT_RDMA_RX = 0X7,
- FS_FT_RDMA_TX = 0X8,
- FS_FT_PORT_SEL = 0X9,
- FS_FT_FDB_RX = 0xa,
- FS_FT_FDB_TX = 0xb,
- FS_FT_RDMA_TRANSPORT_RX = 0xd,
- FS_FT_RDMA_TRANSPORT_TX = 0xe,
- FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
-};
-
enum fs_flow_table_op_mod {
FS_FT_OP_MOD_NORMAL,
FS_FT_OP_MOD_LAG_DEMUX,
@@ -205,6 +187,7 @@ struct mlx5_flow_table {
};
u32 id;
u16 vport;
+ u16 esw_owner_vhca_id;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 89e399606877..2bceb42c98cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -73,7 +73,8 @@ static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u3
}
static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
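Several hunks in this series do nothing but widen devlink param "get" callbacks with a struct netlink_ext_ack * argument, as in the two callbacks above. A hedged kernel-style sketch of what the extra parameter enables; my_priv, my_foo_get and the failure condition are invented for illustration, not taken from mlx5.

#include <linux/errno.h>
#include <linux/netlink.h>
#include <net/devlink.h>

struct my_priv {
	bool fw_ready;
	bool foo_enabled;
};

static int my_foo_get(struct devlink *devlink, u32 id,
		      struct devlink_param_gset_ctx *ctx,
		      struct netlink_ext_ack *extack)
{
	struct my_priv *priv = devlink_priv(devlink);

	if (!priv->fw_ready) {
		/* New with this signature: a failing get can explain itself. */
		NL_SET_ERR_MSG_MOD(extack, "firmware not ready");
		return -EIO;
	}

	ctx->val.vbool = priv->foo_enabled;
	return 0;
}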
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 4b3430ac3905..3b2f54ca30a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -266,21 +266,18 @@ static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
+static u32 mlx5i_get_rx_ring_count(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return priv->channels.params.num_channels;
+}
+
static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
- * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
- * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
- * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
- */
- if (info->cmd == ETHTOOL_GRXRINGS) {
- info->data = priv->channels.params.num_channels;
- return 0;
- }
-
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
@@ -304,6 +301,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.set_rxfh_fields = mlx5i_set_rxfh_fields,
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
+ .get_rx_ring_count = mlx5i_get_rx_ring_count,
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
index 459a0b4d08e6..19bb620b7436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
@@ -8,6 +8,8 @@ enum {
MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP = 0x10b,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF = 0x11d,
MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
};
@@ -32,6 +34,12 @@ union mlx5_ifc_config_item_type_auto_bits {
u8 reserved_at_0[0x20];
};
+enum {
+ MLX5_ACCESS_MODE_NEXT = 0,
+ MLX5_ACCESS_MODE_CURRENT,
+ MLX5_ACCESS_MODE_DEFAULT,
+};
+
struct mlx5_ifc_config_item_bits {
u8 valid[0x2];
u8 priority[0x2];
@@ -123,6 +131,17 @@ struct mlx5_ifc_nv_sw_offload_conf_bits {
u8 lro_log_timeout0[0x4];
};
+struct mlx5_ifc_nv_sw_offload_cap_bits {
+ u8 reserved_at_0[0x19];
+ u8 swp_l4_csum_mode_l4_only[0x1];
+ u8 reserved_at_1a[0x6];
+};
+
+struct mlx5_ifc_nv_sw_accelerate_conf_bits {
+ u8 swp_l4_csum_mode[0x2];
+ u8 reserved_at_2[0x3e];
+};
+
#define MNVDA_HDR_SZ \
(MLX5_ST_SZ_BYTES(mnvda_reg) - \
MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))
@@ -195,12 +214,39 @@ mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
return mlx5_nv_param_read(dev, mnvda, len);
}
+static int
+mlx5_nv_param_read_sw_offload_cap(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int
+mlx5_nv_param_read_sw_accelerate_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len, int access_mode)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_ACCELERATE_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_accelerate_conf);
+ MLX5_SET(mnvda_reg, mnvda, configuration_item_header.access_mode,
+ access_mode);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
static const char *const
cqe_compress_str[] = { "balanced", "aggressive" };
static int
mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
@@ -268,6 +314,182 @@ mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
}
+enum swp_l4_csum_mode {
+ SWP_L4_CSUM_MODE_DEFAULT = 0,
+ SWP_L4_CSUM_MODE_FULL_CSUM = 1,
+ SWP_L4_CSUM_MODE_L4_ONLY = 2,
+};
+
+static const char *const
+ swp_l4_csum_mode_str[] = { "default", "full_csum", "l4_only" };
+
+static int
+mlx5_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ int access_mode, u8 *value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ access_mode);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ *value = MLX5_GET(nv_sw_accelerate_conf, data, swp_l4_csum_mode);
+
+ if (*value >= ARRAY_SIZE(swp_l4_csum_mode_str)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Invalid swp_l4_csum_mode value %u read from device",
+ *value);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_NEXT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(swp_l4_csum_mode_str); i++) {
+ if (!strcmp(val.vstr, swp_l4_csum_mode_str[i]))
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(swp_l4_csum_mode_str) ||
+ i == SWP_L4_CSUM_MODE_DEFAULT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are full_csum/l4_only");
+ return -EINVAL;
+ }
+
+ if (i == SWP_L4_CSUM_MODE_L4_ONLY) {
+ err = mlx5_nv_param_read_sw_offload_cap(dev, cap, sizeof(cap));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+ if (!MLX5_GET(nv_sw_offload_cap, data, swp_l4_csum_mode_l4_only)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "l4_only mode is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_swp_l4_csum_mode_set(struct devlink *devlink, u32 id, u8 value,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_accelerate_conf(dev, mnvda, sizeof(mnvda),
+ MLX5_ACCESS_MODE_NEXT);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_accelerate_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_accelerate_conf, data, swp_l4_csum_mode, value);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to write sw_accelerate_conf mnvda reg");
+
+ return err;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "full_csum"))
+ value = SWP_L4_CSUM_MODE_FULL_CSUM;
+ else
+ value = SWP_L4_CSUM_MODE_L4_ONLY;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_get_default(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ strscpy(ctx->val.vstr, swp_l4_csum_mode_str[value],
+ sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_devlink_swp_l4_csum_mode_set_default(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack)
+{
+ u8 value;
+ int err;
+
+ err = mlx5_swp_l4_csum_mode_get(devlink, id, MLX5_ACCESS_MODE_DEFAULT,
+ &value, extack);
+ if (err)
+ return err;
+
+ return mlx5_swp_l4_csum_mode_set(devlink, id, value, extack);
+}
+
static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
void *mnvda, size_t len)
{
@@ -302,7 +524,8 @@ static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
}
static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
@@ -413,7 +636,8 @@ static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
}
static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
@@ -545,6 +769,14 @@ static const struct devlink_param mlx5_nv_param_devlink_params[] = {
mlx5_nv_param_devlink_cqe_compress_get,
mlx5_nv_param_devlink_cqe_compress_set,
mlx5_nv_param_devlink_cqe_compress_validate),
+ DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(MLX5_DEVLINK_PARAM_ID_SWP_L4_CSUM_MODE,
+ "swp_l4_csum_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_swp_l4_csum_mode_get,
+ mlx5_devlink_swp_l4_csum_mode_set,
+ mlx5_devlink_swp_l4_csum_mode_validate,
+ mlx5_devlink_swp_l4_csum_mode_get_default,
+ mlx5_devlink_swp_l4_csum_mode_set_default),
};
int mlx5_nv_param_register_dl_params(struct devlink *devlink)
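The swp_l4_csum_mode get/set/validate callbacks above all round-trip between the strings "default"/"full_csum"/"l4_only" and a small firmware enum. A standalone sketch of that lookup in plain C; the names (mode_str, mode_from_str) are invented and only the mapping logic mirrors the patch.

#include <stdio.h>
#include <string.h>

enum { MODE_DEFAULT, MODE_FULL_CSUM, MODE_L4_ONLY };

static const char *const mode_str[] = { "default", "full_csum", "l4_only" };

/* Return the enum index for a mode string, or -1 if unknown. */
static int mode_from_str(const char *s)
{
	size_t i;

	for (i = 0; i < sizeof(mode_str) / sizeof(mode_str[0]); i++)
		if (!strcmp(s, mode_str[i]))
			return (int)i;
	return -1;
}

int main(void)
{
	/* Prints "1 2 -1"; like the validate callback, anything outside
	 * the table is rejected.
	 */
	printf("%d %d %d\n", mode_from_str("full_csum"),
	       mode_from_str("l4_only"), mode_from_str("bogus"));
	return 0;
}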
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
index 47fe215f66bf..ef06fe6cbb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/st.c
@@ -19,13 +19,16 @@ struct mlx5_st {
struct mutex lock;
struct xa_limit index_limit;
struct xarray idx_xa; /* key == index, value == struct mlx5_st_idx_data */
+ u8 direct_mode : 1;
};
struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
{
struct pci_dev *pdev = dev->pdev;
struct mlx5_st *st;
+ u8 direct_mode = 0;
u16 num_entries;
+ u32 tbl_loc;
int ret;
if (!MLX5_CAP_GEN(dev, mkey_pcie_tph))
@@ -40,10 +43,16 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
if (!pdev->tph_cap)
return NULL;
- num_entries = pcie_tph_get_st_table_size(pdev);
- /* We need a reserved entry for non TPH cases */
- if (num_entries < 2)
- return NULL;
+ tbl_loc = pcie_tph_get_st_table_loc(pdev);
+ if (tbl_loc == PCI_TPH_LOC_NONE)
+ direct_mode = 1;
+
+ if (!direct_mode) {
+ num_entries = pcie_tph_get_st_table_size(pdev);
+ /* We need a reserved entry for non-TPH cases */
+ if (num_entries < 2)
+ return NULL;
+ }
/* The OS doesn't support ST */
ret = pcie_enable_tph(pdev, PCI_TPH_ST_DS_MODE);
@@ -56,6 +65,10 @@ struct mlx5_st *mlx5_st_create(struct mlx5_core_dev *dev)
mutex_init(&st->lock);
xa_init_flags(&st->idx_xa, XA_FLAGS_ALLOC);
+ st->direct_mode = direct_mode;
+ if (st->direct_mode)
+ return st;
+
/* entry 0 is reserved for non-TPH cases */
st->index_limit.min = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX + 1;
st->index_limit.max = num_entries - 1;
@@ -96,6 +109,11 @@ int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
if (ret)
return ret;
+ if (st->direct_mode) {
+ *st_index = tag;
+ return 0;
+ }
+
mutex_lock(&st->lock);
xa_for_each(&st->idx_xa, index, idx_data) {
@@ -145,6 +163,9 @@ int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
if (!st)
return -EOPNOTSUPP;
+ if (st->direct_mode)
+ return 0;
+
mutex_lock(&st->lock);
idx_data = xa_load(&st->idx_xa, st_index);
if (WARN_ON_ONCE(!idx_data)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index d55e15c1f380..304912637c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -149,7 +149,7 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
struct mlx5_vxlan *vxlan;
if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
if (!vxlan)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c904696cbc3a..024339ce41f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1010,16 +1010,10 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_irq_cleanup;
}
- err = mlx5_events_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize events\n");
- goto err_eq_cleanup;
- }
-
err = mlx5_fw_reset_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize fw reset events\n");
- goto err_events_cleanup;
+ goto err_eq_cleanup;
}
mlx5_cq_debugfs_init(dev);
@@ -1121,8 +1115,6 @@ err_tables_cleanup:
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
-err_events_cleanup:
- mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
@@ -1155,7 +1147,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_fw_reset_cleanup(dev);
- mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
@@ -1386,12 +1377,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_vhca_event_start(dev);
- err = mlx5_sf_hw_table_create(dev);
- if (err) {
- mlx5_core_err(dev, "sf table create failed %d\n", err);
- goto err_vhca;
- }
-
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1420,8 +1405,6 @@ err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
err_ec:
- mlx5_sf_hw_table_destroy(dev);
-err_vhca:
mlx5_vhca_event_stop(dev);
err_set_hca:
mlx5_fs_core_cleanup(dev);
@@ -1447,12 +1430,12 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
+ mlx5_vhca_event_stop(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
- mlx5_vhca_event_stop(dev);
mlx5_fs_core_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
@@ -1833,6 +1816,50 @@ static int vhca_id_show(struct seq_file *file, void *priv)
DEFINE_SHOW_ATTRIBUTE(vhca_id);
+static int mlx5_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_events_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize events\n");
+ return err;
+ }
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.esw_n_head);
+ mlx5_vhca_state_notifier_init(dev);
+
+ err = mlx5_sf_hw_notifier_init(dev);
+ if (err)
+ goto err_sf_hw_notifier;
+
+ err = mlx5_sf_notifiers_init(dev);
+ if (err)
+ goto err_sf_notifiers;
+
+ err = mlx5_sf_dev_notifier_init(dev);
+ if (err)
+ goto err_sf_dev_notifier;
+
+ return 0;
+
+err_sf_dev_notifier:
+ mlx5_sf_notifiers_cleanup(dev);
+err_sf_notifiers:
+ mlx5_sf_hw_notifier_cleanup(dev);
+err_sf_hw_notifier:
+ mlx5_events_cleanup(dev);
+ return err;
+}
+
+static void mlx5_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ mlx5_sf_dev_notifier_cleanup(dev);
+ mlx5_sf_notifiers_cleanup(dev);
+ mlx5_sf_hw_notifier_cleanup(dev);
+ mlx5_events_cleanup(dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1888,6 +1915,10 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ err = mlx5_notifiers_init(dev);
+ if (err)
+ goto err_hca_caps;
+
/* The conjunction of sw_vhca_id with sw_owner_id will be a globally
* unique id per function that uses mlx5_core.
* Those values are supplied to FW as part of the init HCA command to
@@ -1930,6 +1961,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
if (priv->sw_vhca_id > 0)
ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+ mlx5_notifiers_cleanup(dev);
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index acef7d0ffa09..cfebc110c02f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -357,11 +357,11 @@ int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
bool *enabled);
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data);
+ u16 offset, u16 size, u8 *data, u8 *status);
int
mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
- u8 *data);
+ u8 *data, u8 *status);
int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index e18a850c615c..aa3b5878e3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -324,10 +324,8 @@ err_xa:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
- if (i && rmap && *rmap) {
- free_irq_cpu_rmap(*rmap);
- *rmap = NULL;
- }
+ if (i && rmap && *rmap)
+ irq_cpu_rmap_remove(*rmap, irq->map.virq);
err_irq_rmap:
#endif
if (i && pci_msix_can_alloc_dyn(dev->pdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index aa9f2b0a77d3..85a9e534f442 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -289,11 +289,11 @@ int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
}
static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
- u8 *module_id)
+ u8 *module_id, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int err, status;
+ int err;
u8 *ptr;
MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
@@ -308,12 +308,12 @@ static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ if (MLX5_GET(mcia_reg, out, status)) {
+ if (status)
+ *status = MLX5_GET(mcia_reg, out, status);
return -EIO;
}
+
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
*module_id = ptr[0];
@@ -370,13 +370,14 @@ static int mlx5_mcia_max_bytes(struct mlx5_core_dev *dev)
}
static int mlx5_query_mcia(struct mlx5_core_dev *dev,
- struct mlx5_module_eeprom_query_params *params, u8 *data)
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data, u8 *status)
{
u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- int status, err;
void *ptr;
u16 size;
+ int err;
size = min_t(int, params->size, mlx5_mcia_max_bytes(dev));
@@ -392,12 +393,9 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
if (err)
return err;
- status = MLX5_GET(mcia_reg, out, status);
- if (status) {
- mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
- status);
+ *status = MLX5_GET(mcia_reg, out, status);
+ if (*status)
return -EIO;
- }
ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
@@ -406,7 +404,7 @@ static int mlx5_query_mcia(struct mlx5_core_dev *dev,
}
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data)
+ u16 offset, u16 size, u8 *data, u8 *status)
{
struct mlx5_module_eeprom_query_params query = {0};
u8 module_id;
@@ -416,7 +414,8 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
if (err)
return err;
- err = mlx5_query_module_id(dev, query.module_number, &module_id);
+ err = mlx5_query_module_id(dev, query.module_number, &module_id,
+ status);
if (err)
return err;
@@ -441,12 +440,12 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
query.size = size;
query.offset = offset;
- return mlx5_query_mcia(dev, &query, data);
+ return mlx5_query_mcia(dev, &query, data, status);
}
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
- u8 *data)
+ u8 *data, u8 *status)
{
int err;
@@ -460,7 +459,7 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return -EINVAL;
}
- return mlx5_query_mcia(dev, params, data);
+ return mlx5_query_mcia(dev, params, data, status);
}
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
@@ -1109,6 +1108,7 @@ mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
[MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
[MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
+ [MLX5E_1600TAUI_8_1600TBASE_CR8_KR8] = {.speed = 1600000, .lanes = 8},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
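The port.c rework above stops logging the MCIA status byte in the core helpers and instead hands it to the caller through a new out-parameter, presumably so the ethtool module-EEPROM paths can surface it. A toy standalone sketch of that error-reporting shape; dev_read_byte and the 0x2c status value are invented.

#include <errno.h>
#include <stdio.h>

/* Low level: on a device-reported failure, return -EIO and pass the raw
 * status byte up instead of printing it here.
 */
static int dev_read_byte(unsigned char *data, unsigned char *status)
{
	unsigned char hw_status = 0x2c;	/* pretend the device said this */

	if (hw_status) {
		if (status)	/* the out-parameter stays optional */
			*status = hw_status;
		return -EIO;
	}
	*data = 0;
	return 0;
}

int main(void)
{
	unsigned char byte, status = 0;

	/* The caller now decides how (and whether) to report the status. */
	if (dev_read_byte(&byte, &status) == -EIO)
		fprintf(stderr, "read failed, device status 0x%x\n", status);
	return 0;
}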
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 99219ea52c4b..f310bde3d11f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -16,7 +16,6 @@ struct mlx5_sf_dev_table {
struct xarray devices;
phys_addr_t base_address;
u64 sf_bar_length;
- struct notifier_block nb;
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
@@ -156,18 +155,23 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
static int
mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
{
- struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_dev_nb);
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_dev *sf_dev;
u16 max_functions;
u16 sf_index;
u16 base_id;
- max_functions = mlx5_sf_max_functions(table->dev);
+ if (!table)
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
if (!max_functions)
return 0;
- base_id = mlx5_sf_start_function_id(table->dev);
+ base_id = mlx5_sf_start_function_id(dev);
if (event->function_id < base_id || event->function_id >= (base_id + max_functions))
return 0;
@@ -177,19 +181,19 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
break;
case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
if (sf_dev)
- mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ mlx5_sf_dev_del(dev, sf_dev, sf_index);
else
- mlx5_core_err(table->dev,
+ mlx5_core_err(dev,
"SF DEV: teardown state for invalid dev index=%d sfnum=0x%x\n",
sf_index, event->sw_function_id);
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
- mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
+ mlx5_sf_dev_add(dev, sf_index, event->function_id,
event->sw_function_id);
break;
default:
@@ -315,6 +319,15 @@ static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
}
}
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_dev_nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ return mlx5_vhca_event_notifier_register(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -329,17 +342,12 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
goto table_err;
}
- table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
xa_init(&table->devices);
dev->priv.sf_dev_table = table;
- err = mlx5_vhca_event_notifier_register(dev, &table->nb);
- if (err)
- goto vhca_err;
-
err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
@@ -351,10 +359,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
arm_err:
mlx5_sf_dev_destroy_active_works(table);
-add_active_err:
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
mlx5_vhca_event_work_queues_flush(dev);
-vhca_err:
+add_active_err:
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -372,6 +378,14 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
}
}
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_dev_nb);
+}
+
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
@@ -380,8 +394,6 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
return;
mlx5_sf_dev_destroy_active_works(table);
- mlx5_vhca_event_notifier_unregister(dev, &table->nb);
- mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
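The recurring move in dev.c above (and in the SF and VHCA files below) is to relocate the notifier_block from a freeable table into mlx5_core_dev's priv, recover the device with container_of() in the handler, and tolerate a table that does not exist yet. A self-contained toy version of that pattern; toy_dev, handler and the hand-rolled container_of are illustrative only.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb, int ev);
};

struct toy_dev {
	struct notifier_block sf_dev_nb;
	void *sf_dev_table;	/* may be NULL between init and table create */
};

static int handler(struct notifier_block *nb, int ev)
{
	struct toy_dev *dev = container_of(nb, struct toy_dev, sf_dev_nb);

	if (!dev->sf_dev_table)	/* registered early; table may not exist */
		return 0;
	printf("event %d handled for dev %p\n", ev, (void *)dev);
	return 0;
}

int main(void)
{
	struct toy_dev dev = { .sf_dev_nb = { .notifier_call = handler } };

	dev.sf_dev_nb.notifier_call(&dev.sf_dev_nb, 1);	/* ignored: no table */
	dev.sf_dev_table = &dev;
	dev.sf_dev_nb.notifier_call(&dev.sf_dev_nb, 2);	/* handled */
	return 0;
}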
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index b99131e95e37..3ab0449c770c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -25,7 +25,9 @@ struct mlx5_sf_peer_devlink_event_ctx {
int err;
};
+int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_driver_register(void);
@@ -35,10 +37,19 @@ bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
#else
+static inline int mlx5_sf_dev_notifier_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
}
+static inline void mlx5_sf_dev_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3304f25cc805..b82323b8449e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -31,9 +31,6 @@ struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
- struct notifier_block esw_nb;
- struct notifier_block vhca_nb;
- struct notifier_block mdev_nb;
};
static struct mlx5_sf *
@@ -391,11 +388,16 @@ static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_vhca_nb);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
const struct mlx5_vhca_state_event *event = data;
bool update = false;
struct mlx5_sf *sf;
+ if (!table)
+ return 0;
+
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
@@ -407,7 +409,7 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
- trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
+ trace_mlx5_sf_update_state(dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
unlock:
mutex_unlock(&table->sf_state_lock);
@@ -425,12 +427,16 @@ static void mlx5_sf_del_all(struct mlx5_sf_table *table)
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_esw_nb);
const struct mlx5_esw_event_info *mode = data;
+ if (!dev->priv.sf_table)
+ return 0;
+
switch (mode->new_mode) {
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_del_all(table);
+ mlx5_sf_del_all(dev->priv.sf_table);
break;
default:
break;
@@ -441,15 +447,16 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_table_mdev_nb);
struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int ret = NOTIFY_DONE;
struct mlx5_sf *sf;
- if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ if (!table || event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
return NOTIFY_DONE;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
if (!sf)
@@ -464,10 +471,40 @@ out:
return ret;
}
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ dev->priv.sf_table_esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev, &dev->priv.sf_table_esw_nb);
+ if (err)
+ return err;
+
+ dev->priv.sf_table_vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_table_vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ dev->priv.sf_table_mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ err = mlx5_blocking_notifier_register(dev, &dev->priv.sf_table_mdev_nb);
+ if (err)
+ goto mdev_err;
+
+ return 0;
+mdev_err:
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
+ return err;
+}
+
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_table *table;
- int err;
if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
return 0;
@@ -480,28 +517,18 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
table->dev = dev;
xa_init(&table->function_ids);
dev->priv.sf_table = table;
- table->esw_nb.notifier_call = mlx5_sf_esw_event;
- err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
- if (err)
- goto reg_err;
-
- table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
- err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
- if (err)
- goto vhca_err;
-
- table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
- mlx5_blocking_notifier_register(dev, &table->mdev_nb);
return 0;
+}
-vhca_err:
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
-reg_err:
- mutex_destroy(&table->sf_state_lock);
- kfree(table);
- dev->priv.sf_table = NULL;
- return err;
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_blocking_notifier_unregister(dev, &dev->priv.sf_table_mdev_nb);
+ mlx5_vhca_event_notifier_unregister(dev, &dev->priv.sf_table_vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev, &dev->priv.sf_table_esw_nb);
}
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
@@ -511,9 +538,6 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
- mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
- mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
mutex_destroy(&table->sf_state_lock);
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index 1f613320fe07..bd968f3b3855 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -30,9 +30,7 @@ enum mlx5_sf_hwc_index {
};
struct mlx5_sf_hw_table {
- struct mlx5_core_dev *dev;
struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
- struct notifier_block vhca_nb;
struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
@@ -71,14 +69,16 @@ mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
return NULL;
}
-static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
+static int mlx5_sf_hw_table_id_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller,
u32 usr_sfnum)
{
struct mlx5_sf_hwc_table *hwc;
int free_idx = -1;
int i;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
if (!hwc->sfs)
return -ENOSPC;
@@ -100,11 +100,13 @@ static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 control
return free_idx;
}
-static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
+static void mlx5_sf_hw_table_id_free(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table,
+ u32 controller, int id)
{
struct mlx5_sf_hwc_table *hwc;
- hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
+ hwc = mlx5_sf_controller_to_hwc(dev, controller);
hwc->sfs[id].allocated = false;
hwc->sfs[id].pending_delete = false;
}
@@ -120,7 +122,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
return -EOPNOTSUPP;
mutex_lock(&table->table_lock);
- sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
+ sw_id = mlx5_sf_hw_table_id_alloc(dev, table, controller, usr_sfnum);
if (sw_id < 0) {
err = sw_id;
goto exist_err;
@@ -151,7 +153,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
vhca_err:
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
- mlx5_sf_hw_table_id_free(table, controller, sw_id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, sw_id);
exist_err:
mutex_unlock(&table->table_lock);
return err;
@@ -165,7 +167,7 @@ void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
mutex_lock(&table->table_lock);
hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
mlx5_cmd_dealloc_sf(dev, hw_fn_id);
- mlx5_sf_hw_table_id_free(table, controller, id);
+ mlx5_sf_hw_table_id_free(dev, table, controller, id);
mutex_unlock(&table->table_lock);
}
@@ -216,10 +218,12 @@ static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
}
}
-static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
+static void mlx5_sf_hw_table_dealloc_all(struct mlx5_core_dev *dev,
+ struct mlx5_sf_hw_table *table)
{
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
- mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev,
+ &table->hwc[MLX5_SF_HWC_EXTERNAL]);
+ mlx5_sf_hw_table_hwc_dealloc_all(dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
@@ -301,7 +305,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
}
mutex_init(&table->table_lock);
- table->dev = dev;
dev->priv.sf_hw_table = table;
base_id = mlx5_sf_start_function_id(dev);
@@ -338,19 +341,22 @@ void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
mutex_destroy(&table->table_lock);
kfree(table);
+ dev->priv.sf_hw_table = NULL;
res_unregister:
mlx5_sf_hw_table_res_unregister(dev);
}
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
- struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ struct mlx5_core_dev *dev = container_of(nb, struct mlx5_core_dev,
+ priv.sf_hw_table_vhca_nb);
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
const struct mlx5_vhca_state_event *event = data;
struct mlx5_sf_hwc_table *hwc;
struct mlx5_sf_hw *sf_hw;
u16 sw_id;
- if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ if (!table || event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
return 0;
hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
@@ -365,20 +371,28 @@ static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode
* Hence recycle the sf hardware id for reuse.
*/
if (sf_hw->allocated && sf_hw->pending_delete)
- mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
+ mlx5_sf_hw_table_hwc_sf_free(dev, hwc, sw_id);
mutex_unlock(&table->table_lock);
return 0;
}
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
- struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
-
- if (!table)
+ if (mlx5_core_is_sf(dev))
return 0;
- table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
- return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
+ dev->priv.sf_hw_table_vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
+}
+
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev,
+ &dev->priv.sf_hw_table_vhca_nb);
}
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
@@ -388,9 +402,8 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
/* Dealloc SFs whose firmware events have been missed. */
- mlx5_sf_hw_table_dealloc_all(table);
+ mlx5_sf_hw_table_dealloc_all(dev, table);
}
bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 89559a37997a..d8a934a0e968 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -12,10 +12,13 @@
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
-int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
@@ -44,20 +47,33 @@ static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+static inline int mlx5_sf_hw_notifier_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_hw_notifier_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
}
+static inline int mlx5_sf_notifiers_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
return 0;
}
+static inline void mlx5_sf_notifiers_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index cda01ba441ae..b04cf6cf8956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -9,15 +9,9 @@
#define CREATE_TRACE_POINTS
#include "diag/vhca_tracepoint.h"
-struct mlx5_vhca_state_notifier {
- struct mlx5_core_dev *dev;
- struct mlx5_nb nb;
- struct blocking_notifier_head n_head;
-};
-
struct mlx5_vhca_event_work {
struct work_struct work;
- struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_core_dev *dev;
struct mlx5_vhca_state_event event;
};
@@ -95,16 +89,14 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
mlx5_vhca_event_arm(dev, event->function_id);
trace_mlx5_sf_vhca_event(dev, event);
- blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+ blocking_notifier_call_chain(&dev->priv.vhca_state_n_head, 0, event);
}
static void mlx5_vhca_state_work_handler(struct work_struct *_work)
{
struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
- struct mlx5_vhca_state_notifier *notifier = work->notifier;
- struct mlx5_core_dev *dev = notifier->dev;
- mlx5_vhca_event_notify(dev, &work->event);
+ mlx5_vhca_event_notify(work->dev, &work->event);
kfree(work);
}
@@ -116,8 +108,8 @@ void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct wo
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
- struct mlx5_vhca_state_notifier *notifier =
- mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_core_dev *dev = mlx5_nb_cof(nb, struct mlx5_core_dev,
+ priv.vhca_state_nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
int wq_idx;
@@ -126,10 +118,10 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
if (!work)
return NOTIFY_DONE;
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
- work->notifier = notifier;
+ work->dev = dev;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
- mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
+ mlx5_vhca_events_work_enqueue(dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -145,9 +137,15 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
}
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+ BLOCKING_INIT_NOTIFIER_HEAD(&dev->priv.vhca_state_n_head);
+ MLX5_NB_INIT(&dev->priv.vhca_state_nb, mlx5_vhca_state_change_notifier,
+ VHCA_STATE_CHANGE);
+}
+
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct mlx5_vhca_events *events;
int err, i;
@@ -160,7 +158,6 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
return -ENOMEM;
events->dev = dev;
- dev->priv.vhca_events = events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
events->handler[i].wq = create_singlethread_workqueue(wq_name);
@@ -169,20 +166,10 @@ int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
goto err_create_wq;
}
}
+ dev->priv.vhca_events = events;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier) {
- err = -ENOMEM;
- goto err_notifier;
- }
-
- dev->priv.vhca_state_notifier = notifier;
- notifier->dev = dev;
- BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
- MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
-err_notifier:
err_create_wq:
for (--i; i >= 0; i--)
destroy_workqueue(events->handler[i].wq);
@@ -211,8 +198,6 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_vhca_event_supported(dev))
return;
- kfree(dev->priv.vhca_state_notifier);
- dev->priv.vhca_state_notifier = NULL;
vhca_events = dev->priv.vhca_events;
for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
destroy_workqueue(vhca_events->handler[i].wq);
@@ -221,34 +206,30 @@ void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_register(dev, &notifier->nb);
+ mlx5_eq_notifier_register(dev, &dev->priv.vhca_state_nb);
}
void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
{
- struct mlx5_vhca_state_notifier *notifier;
-
- if (!dev->priv.vhca_state_notifier)
+ if (!mlx5_vhca_event_supported(dev))
return;
- notifier = dev->priv.vhca_state_notifier;
- mlx5_eq_notifier_unregister(dev, &notifier->nb);
+ mlx5_eq_notifier_unregister(dev, &dev->priv.vhca_state_nb);
+
+ /* Flush workqueues of all pending events. */
+ mlx5_vhca_event_work_queues_flush(dev);
}
int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- if (!dev->priv.vhca_state_notifier)
- return -EOPNOTSUPP;
- return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_n_head,
+ nb);
}
void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_n_head, nb);
}
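With the dedicated mlx5_vhca_state_notifier allocation gone, the chain head lives directly in priv and is initialized unconditionally, so registration above can no longer fail with -EOPNOTSUPP. A hedged kernel-style sketch of that shape; my_dev and my_head are invented names.

#include <linux/notifier.h>

struct my_dev {
	struct blocking_notifier_head my_head;
};

static void my_dev_init(struct my_dev *dev)
{
	/* Head always exists once the device does. */
	BLOCKING_INIT_NOTIFIER_HEAD(&dev->my_head);
}

static int my_dev_notifier_register(struct my_dev *dev,
				    struct notifier_block *nb)
{
	/* No "is the head allocated?" check needed anymore. */
	return blocking_notifier_chain_register(&dev->my_head, nb);
}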
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
index 1725ba64f8af..52790423874c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -18,6 +18,7 @@ static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
}
void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev);
int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
@@ -37,6 +38,10 @@ static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *s
{
}
+static inline void mlx5_vhca_state_notifier_init(struct mlx5_core_dev *dev)
+{
+}
+
static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
index 65740bb68b09..e8c67ed9f748 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
@@ -410,7 +410,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
@@ -419,7 +419,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.supp_sw_steering = true;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
@@ -428,10 +428,10 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
break;
case MLX5DR_DOMAIN_TYPE_FDB:
if (!dmn->info.caps.eswitch_manager)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 992873536c1b..306affbcfd3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -78,15 +78,14 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
}
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
- u32 *out)
+ bool other_vport, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport);
return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
@@ -97,7 +96,7 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
int err;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (!err)
*min_inline = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.min_wqe_inline_mode);
@@ -219,7 +218,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (!err)
*mtu = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.mtu);
@@ -429,7 +428,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -451,7 +450,7 @@ int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -462,7 +461,8 @@ out:
return err;
}
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
@@ -472,7 +472,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out);
if (err)
goto out;
@@ -529,7 +529,7 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
@@ -804,7 +804,7 @@ int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, vport, out);
+ err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
if (err)
goto out;
@@ -908,7 +908,7 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
if (!out)
return -ENOMEM;
- err = mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, false, out);
if (err)
goto out;
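vport.c now takes other_vport explicitly rather than inferring it from vport != 0. The plausible motivation, illustrated below with invented names, is that the old inference could never express "vport 0 of another function", which the new node-GUID query signature appears to need.

#include <stdbool.h>
#include <stdio.h>

static void query(unsigned int vport, bool other_vport)
{
	printf("query vport %u as %s\n", vport,
	       other_vport ? "other function" : "own function");
}

int main(void)
{
	query(0, false);	/* own vport, same as before */
	query(7, true);		/* other vport, same as before */
	query(0, true);		/* newly expressible combination */
	return 0;
}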
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index b032d5a4b3b8..10f5bc4892fc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -601,6 +601,8 @@ int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
info->psid);
+ if (err)
+ goto unlock;
sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
info->fw_sub_minor);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index b1d08e958bf9..69f9da9fb305 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1489,7 +1489,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam *tcam;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6a4a81c63451..353fd9ca89a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -830,8 +830,10 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!rule)
- return -EINVAL;
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
&drops, &lastuse, &used_hw_stats);
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
index dff51f23d295..ca5c7ac2a5bc 100644
--- a/drivers/net/ethernet/meta/Kconfig
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -26,6 +26,7 @@ config FBNIC
depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select PAGE_POOL
+ select PCS_XPCS
select PHYLINK
select PLDMFW
help
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 15e8ff649615..72c41af65364 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -21,6 +21,7 @@ fbnic-y := fbnic_csr.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_mdio.o \
fbnic_time.o \
fbnic_tlv.o \
fbnic_txrx.o \
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index b03e5a3d5144..779a083b9215 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -34,7 +34,7 @@ struct fbnic_dev {
u32 __iomem *uc_addr4;
const struct fbnic_mac *mac;
unsigned int fw_msix_vector;
- unsigned int pcs_msix_vector;
+ unsigned int mac_msix_vector;
unsigned short num_irqs;
struct {
@@ -83,6 +83,10 @@ struct fbnic_dev {
/* Last @time_high refresh time in jiffies (to catch stalls) */
unsigned long last_read;
+ /* PMD specific data */
+ unsigned long end_of_pmd_training;
+ u8 pmd_state;
+
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
@@ -91,6 +95,9 @@ struct fbnic_dev {
u64 prev_firmware_time;
struct fbnic_fw_log fw_log;
+
+ /* MDIO bus for PHYs */
+ struct mii_bus *mdio_bus;
};
/* Reserve entry 0 in the MSI-X "others" array until we have filled all
@@ -175,8 +182,8 @@ void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
void fbnic_hwmon_register(struct fbnic_dev *fbd);
void fbnic_hwmon_unregister(struct fbnic_dev *fbd);
-int fbnic_pcs_request_irq(struct fbnic_dev *fbd);
-void fbnic_pcs_free_irq(struct fbnic_dev *fbd);
+int fbnic_mac_request_irq(struct fbnic_dev *fbd);
+void fbnic_mac_free_irq(struct fbnic_dev *fbd);
void fbnic_napi_name_irqs(struct fbnic_dev *fbd);
int fbnic_napi_request_irq(struct fbnic_dev *fbd,
@@ -200,6 +207,8 @@ void fbnic_dbg_exit(void);
void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd);
+int fbnic_mdiobus_create(struct fbnic_dev *fbd);
+
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index d3a7ad921f18..422265dc7abd 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -787,6 +787,8 @@ enum {
/* MAC PCS registers */
#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */
+#define FBNIC_PCS_PAGE(n) (0x10000 + 0x400 * (n)) /* 0x10000 + 1024*n */
+#define FBNIC_PCS(reg, n) ((reg) + FBNIC_PCS_PAGE(n))
#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
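For reference, each PCS instance occupies one 1 KiB (0x400) page starting at the 0x10000 section base, so page 2 would already land on the RSFEC section at 0x10800. A stand-alone sketch of the address math:

#include <stdio.h>

#define FBNIC_PCS_PAGE(n)	(0x10000 + 0x400 * (n))	/* 0x10000 + 1024*n */
#define FBNIC_PCS(reg, n)	((reg) + FBNIC_PCS_PAGE(n))

int main(void)
{
	/* Page 0 sits at the PCS section base, page 1 one page later;
	 * page 2 (0x10800) would already be the RSFEC section.
	 */
	printf("page 0: 0x%x\n", FBNIC_PCS_PAGE(0));	/* 0x10000 */
	printf("page 1: 0x%x\n", FBNIC_PCS_PAGE(1));	/* 0x10400 */
	printf("reg 0x200, page 1: 0x%x\n", FBNIC_PCS(0x200, 1)); /* 0x10600 */
	return 0;
}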
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 95fac020eb93..693ebdf38705 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -1863,6 +1863,14 @@ fbnic_get_rmon_stats(struct net_device *netdev,
*ranges = fbnic_rmon_ranges;
}
+static void fbnic_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ stats->link_down_events = fbn->link_down_events;
+}
+
static const struct ethtool_ops fbnic_ethtool_ops = {
.cap_link_lanes_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -1874,6 +1882,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_regs_len = fbnic_get_regs_len,
.get_regs = fbnic_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = fbnic_get_link_ext_stats,
.get_coalesce = fbnic_get_coalesce,
.set_coalesce = fbnic_set_coalesce,
.get_ringparam = fbnic_get_ringparam,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 1166fa17438d..d8d9b6cfde82 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -201,7 +201,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
return -ENODEV;
/* Fill all but 1 unused descriptors in the Rx queue. */
- count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
+ count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
while (!err && count--) {
struct fbnic_tlv_msg *msg;
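The change from "%" to "&" above is an equivalence only because FBNIC_IPC_MBX_DESC_LEN is a power of two: masking with N - 1 keeps the low bits, which matches modulo solely in that case. A quick user-space check, with 8 as an illustrative stand-in for the real descriptor count:

#include <assert.h>

#define DESC_LEN 8	/* stand-in for FBNIC_IPC_MBX_DESC_LEN; assumed power of 2 */

int main(void)
{
	unsigned int v;

	/* For power-of-two moduli, "v % N" and "v & (N - 1)" agree for all v,
	 * including the wrapped values produced by "head - tail - 1".
	 * For N = 6 they diverge (7 % 6 == 1, but 7 & 5 == 5).
	 */
	for (v = 0; v < 64; v++)
		assert(v % DESC_LEN == (v & (DESC_LEN - 1)));
	return 0;
}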
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
index 1c88a2bf3a7a..02e8b0b257fe 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -118,12 +118,12 @@ void fbnic_fw_free_mbx(struct fbnic_dev *fbd)
fbd->fw_msix_vector = 0;
}
-static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
+static irqreturn_t fbnic_mac_msix_intr(int __always_unused irq, void *data)
{
struct fbnic_dev *fbd = data;
struct fbnic_net *fbn;
- if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ if (fbd->mac->get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
1u << FBNIC_PCS_MSIX_ENTRY);
return IRQ_HANDLED;
@@ -131,26 +131,28 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
fbn = netdev_priv(fbd->netdev);
- phylink_pcs_change(&fbn->phylink_pcs, false);
+ /* Record link down events */
+ if (!fbd->mac->get_link(fbd, fbn->aui, fbn->fec))
+ phylink_pcs_change(fbn->pcs, false);
return IRQ_HANDLED;
}
/**
- * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link
+ * fbnic_mac_request_irq - Configure the MAC to enable it to advertise link
* @fbd: Pointer to device to initialize
*
- * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
+ * This function provides basic bringup for the MAC/PHY IRQ. For now the IRQ
* will remain disabled until we start the MAC/PCS/PHY logic via phylink.
*
* Return: non-zero on failure.
**/
-int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
+int fbnic_mac_request_irq(struct fbnic_dev *fbd)
{
struct pci_dev *pdev = to_pci_dev(fbd->dev);
int vector, err;
- WARN_ON(fbd->pcs_msix_vector);
+ WARN_ON(fbd->mac_msix_vector);
vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
if (vector < 0)
@@ -159,7 +161,7 @@ int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
/* Request the IRQ for PCS link vector.
* Map PCS cause to it, and unmask it
*/
- err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
+ err = request_irq(vector, &fbnic_mac_msix_intr, 0,
fbd->netdev->name, fbd);
if (err)
return err;
@@ -168,22 +170,22 @@ int fbnic_pcs_request_irq(struct fbnic_dev *fbd)
fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
- fbd->pcs_msix_vector = vector;
+ fbd->mac_msix_vector = vector;
return 0;
}
/**
- * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping
+ * fbnic_mac_free_irq - Teardown the MAC IRQ to prepare for stopping
* @fbd: Pointer to device that is stopping
*
- * This function undoes the work done in fbnic_pcs_request_irq and prepares
+ * This function undoes the work done in fbnic_mac_request_irq and prepares
* the device to no longer receive traffic on the host interface.
**/
-void fbnic_pcs_free_irq(struct fbnic_dev *fbd)
+void fbnic_mac_free_irq(struct fbnic_dev *fbd)
{
/* Vector has already been freed */
- if (!fbd->pcs_msix_vector)
+ if (!fbd->mac_msix_vector)
return;
/* Disable interrupt */
@@ -192,14 +194,14 @@ void fbnic_pcs_free_irq(struct fbnic_dev *fbd)
fbnic_wrfl(fbd);
/* Synchronize IRQ to prevent race that would unmask vector */
- synchronize_irq(fbd->pcs_msix_vector);
+ synchronize_irq(fbd->mac_msix_vector);
/* Mask the vector */
fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
/* Free the vector */
- free_irq(fbd->pcs_msix_vector, fbd);
- fbd->pcs_msix_vector = 0;
+ free_irq(fbd->mac_msix_vector, fbd);
+ fbd->mac_msix_vector = 0;
}
void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 2a84bd1d7e26..fc7abea4ef5b 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -434,14 +434,14 @@ static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
}
-static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
+static int fbnic_mac_get_link_event(struct fbnic_dev *fbd)
{
- u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+ u32 intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
- if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ if (intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
return FBNIC_LINK_EVENT_DOWN;
- return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ return (intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
}
@@ -466,9 +466,8 @@ static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
return command_config;
}
-static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
+static bool fbnic_mac_get_link_status(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
u32 pcs_status, lane_mask = ~0;
pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
@@ -476,7 +475,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return false;
/* Define the expected lane mask for the status bits we need to check */
- switch (fbn->aui) {
+ switch (aui) {
case FBNIC_AUI_100GAUI2:
lane_mask = 0xf;
break;
@@ -484,7 +483,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
lane_mask = 3;
break;
case FBNIC_AUI_LAUI2:
- switch (fbn->fec) {
+ switch (fec) {
case FBNIC_FEC_OFF:
lane_mask = 0x63;
break;
@@ -502,7 +501,7 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
}
/* Use an XOR to remove the bits we expect to see set */
- switch (fbn->fec) {
+ switch (fec) {
case FBNIC_FEC_OFF:
lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
pcs_status);
@@ -521,7 +520,46 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return !lane_mask;
}
-static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
+static bool fbnic_pmd_update_state(struct fbnic_dev *fbd, bool signal_detect)
+{
+ /* Delay link up for 4 seconds to allow for link training.
+ * The state transitions for this are as follows:
+ *
+ * All states have the following two transitions in common:
+ * Loss of signal -> FBNIC_PMD_INITIALIZE
+ * The condition handled below (!signal)
+ * Reconfiguration -> FBNIC_PMD_INITIALIZE
+ * Occurs when mac_prepare starts a PHY reconfig
+ * FBNIC_PMD_TRAINING:
+ * signal still detected && 4s have passed -> Report link up
+ * When link is brought up in link_up -> FBNIC_PMD_SEND_DATA
+ * FBNIC_PMD_INITIALIZE:
+ * signal detected -> FBNIC_PMD_TRAINING
+ */
+ if (!signal_detect) {
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
+ return false;
+ }
+
+ switch (fbd->pmd_state) {
+ case FBNIC_PMD_TRAINING:
+ return time_before(fbd->end_of_pmd_training, jiffies);
+ case FBNIC_PMD_LINK_READY:
+ case FBNIC_PMD_SEND_DATA:
+ return true;
+ }
+
+ fbd->end_of_pmd_training = jiffies + 4 * HZ;
+
+ /* Ensure end_of_pmd_training is visible before the state change */
+ smp_wmb();
+
+ fbd->pmd_state = FBNIC_PMD_TRAINING;
+
+ return false;
+}
+
+static bool fbnic_mac_get_link(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
bool link;
@@ -538,7 +576,8 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
- link = fbnic_mac_get_pcs_link_status(fbd);
+ link = fbnic_mac_get_link_status(fbd, aui, fec);
+ link = fbnic_pmd_update_state(fbd, link);
/* Enable interrupt to only capture changes in link state */
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
@@ -586,20 +625,15 @@ void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
}
}
-static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
+static void fbnic_mac_prepare(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
/* Mask and clear the PCS interrupt, will be enabled by link handler */
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
- return 0;
-}
-
-static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
-{
- /* Mask and clear the PCS interrupt */
- wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
- wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+ /* If we don't have link tear it all down and start over */
+ if (!fbnic_mac_get_link_status(fbd, aui, fec))
+ fbd->pmd_state = FBNIC_PMD_INITIALIZE;
}
static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
@@ -867,10 +901,9 @@ exit_free:
static const struct fbnic_mac fbnic_mac_asic = {
.init_regs = fbnic_mac_init_regs,
- .pcs_enable = fbnic_pcs_enable_asic,
- .pcs_disable = fbnic_pcs_disable_asic,
- .pcs_get_link = fbnic_pcs_get_link_asic,
- .pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_link = fbnic_mac_get_link,
+ .get_link_event = fbnic_mac_get_link_event,
+ .prepare = fbnic_mac_prepare,
.get_fec_stats = fbnic_mac_get_fec_stats,
.get_pcs_stats = fbnic_mac_get_pcs_stats,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
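The PMD state machine above leans on jiffies arithmetic (time_before()) to hold the reported link down for 4 seconds of training. A compact user-space model of the same logic, using a plain counter in place of jiffies and ignoring wraparound, which time_before() handles in the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the enum added in fbnic_mac.h; TRAINING_TICKS stands in for 4 * HZ */
enum { PMD_SEND_DATA, PMD_INITIALIZE, PMD_TRAINING, PMD_LINK_READY };

#define TRAINING_TICKS 4

static int state = PMD_INITIALIZE;
static unsigned long end_of_training;

static bool pmd_update_state(bool signal_detect, unsigned long now)
{
	if (!signal_detect) {
		state = PMD_INITIALIZE;
		return false;
	}
	switch (state) {
	case PMD_TRAINING:
		/* Report link only once the training delay has elapsed;
		 * in the driver, link_up later moves us to SEND_DATA.
		 */
		return end_of_training < now;
	case PMD_LINK_READY:
	case PMD_SEND_DATA:
		return true;
	}
	/* PMD_INITIALIZE with signal present: arm the timer, start training */
	end_of_training = now + TRAINING_TICKS;
	state = PMD_TRAINING;
	return false;
}

int main(void)
{
	unsigned long t;

	for (t = 0; t < 8; t++)	/* link reads 0 until t = 5, then 1 */
		printf("t=%lu link=%d\n", t, pmd_update_state(true, t));
	return 0;
}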
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index ede5ff0dae22..f08fe8b7c497 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -10,6 +10,24 @@ struct fbnic_dev;
#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+/* States loosely based on section 136.8.11.7.5 of IEEE 802.3-2022 Ethernet
+ * Standard. These are needed to track the state of the PHY as it has a delay
+ * of several seconds from the time link comes up until it has completed
+ * training that we need to wait to report the link.
+ *
+ * Currently we treat training as a single block as this is managed by the
+ * firmware.
+ *
+ * We have FBNIC_PMD_SEND_DATA set to 0 as the expected default at driver load
+ * and we initialize the structure containing it to zero at allocation.
+ */
+enum {
+ FBNIC_PMD_SEND_DATA = 0x0,
+ FBNIC_PMD_INITIALIZE = 0x1,
+ FBNIC_PMD_TRAINING = 0x2,
+ FBNIC_PMD_LINK_READY = 0x3,
+};
+
enum {
FBNIC_LINK_EVENT_NONE = 0,
FBNIC_LINK_EVENT_UP = 1,
@@ -38,6 +56,7 @@ enum {
FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */
FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */
FBNIC_AUI_UNKNOWN = 4,
+ __FBNIC_AUI_MAX__
};
#define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2)
@@ -55,15 +74,15 @@ enum fbnic_sensor_id {
* void (*init_regs)(struct fbnic_dev *fbd);
* Initialize MAC registers to enable Tx/Rx paths and FIFOs.
*
- * void (*pcs_enable)(struct fbnic_dev *fbd);
- * Configure and enable PCS to enable link if not already enabled
- * void (*pcs_disable)(struct fbnic_dev *fbd);
- * Shutdown the link if we are the only consumer of it.
- * bool (*pcs_get_link)(struct fbnic_dev *fbd);
- * Check PCS link status
- * int (*pcs_get_link_event)(struct fbnic_dev *fbd)
+ * int (*get_link_event)(struct fbnic_dev *fbd)
* Get the current link event status; returns FBNIC_LINK_EVENT_DOWN or
* FBNIC_LINK_EVENT_UP if the link state changed, else FBNIC_LINK_EVENT_NONE
+ * bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Check link status
+ *
+ * void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+ * Prepare PHY for init by fetching settings, disabling interrupts,
+ * and sending an updated PHY config to FW if needed.
*
* void (*link_down)(struct fbnic_dev *fbd);
* Configure MAC for link down event
@@ -74,10 +93,10 @@ enum fbnic_sensor_id {
struct fbnic_mac {
void (*init_regs)(struct fbnic_dev *fbd);
- int (*pcs_enable)(struct fbnic_dev *fbd);
- void (*pcs_disable)(struct fbnic_dev *fbd);
- bool (*pcs_get_link)(struct fbnic_dev *fbd);
- int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ int (*get_link_event)(struct fbnic_dev *fbd);
+ bool (*get_link)(struct fbnic_dev *fbd, u8 aui, u8 fec);
+
+ void (*prepare)(struct fbnic_dev *fbd, u8 aui, u8 fec);
void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_fec_stats *fec_stats);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
new file mode 100644
index 000000000000..709041f7fc43
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mdio.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/mdio.h>
+#include <linux/pcs/pcs-xpcs.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+
+#define DW_VENDOR BIT(15)
+#define FBNIC_PCS_VENDOR BIT(9)
+#define FBNIC_PCS_ZERO_MASK (DW_VENDOR - FBNIC_PCS_VENDOR)
+
+static int
+fbnic_mdio_read_pmd(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ u8 aui = FBNIC_AUI_UNKNOWN;
+ struct fbnic_net *fbn;
+ int ret = 0;
+
+ /* We don't need a second PMD; one can handle both lanes */
+ if (addr)
+ return 0;
+
+ if (fbd->netdev) {
+ fbn = netdev_priv(fbd->netdev);
+ if (fbn->aui < FBNIC_AUI_UNKNOWN)
+ aui = fbn->aui;
+ }
+
+ switch (regnum) {
+ case MDIO_DEVID1:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID >> 16;
+ break;
+ case MDIO_DEVID2:
+ ret = MP_FBNIC_XPCS_PMA_100G_ID & 0xffff;
+ break;
+ case MDIO_DEVS1:
+ ret = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+ break;
+ case MDIO_STAT2:
+ ret = MDIO_STAT2_DEVPRST_VAL;
+ break;
+ case MDIO_PMA_RXDET:
+ /* If training isn't complete, default to 0 */
+ if (fbd->pmd_state != FBNIC_PMD_SEND_DATA)
+ break;
+ /* Report either 1 or 2 lanes detected depending on config */
+ ret = (MDIO_PMD_RXDET_GLOBAL | MDIO_PMD_RXDET_0) |
+ ((aui & FBNIC_AUI_MODE_R2) *
+ (MDIO_PMD_RXDET_1 / FBNIC_AUI_MODE_R2));
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(fbd->dev,
+ "SWMII PMD Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_pcs(struct fbnic_dev *fbd, int addr, int regnum)
+{
+ int ret, offset = 0;
+
+ /* We will need access to both PCS instances to get config info */
+ if (addr >= 2)
+ return 0;
+
+ /* Report 0 for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return 0;
+
+ /* Intercept and return correct ID for PCS */
+ if (regnum == MDIO_DEVID1)
+ return DW_XPCS_ID >> 16;
+ if (regnum == MDIO_DEVID2)
+ return DW_XPCS_ID & 0xffff;
+ if (regnum == MDIO_DEVS1)
+ return MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ offset ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ ret = fbnic_rd32(fbd, FBNIC_PCS_PAGE(addr) + (regnum ^ offset));
+
+ dev_dbg(fbd->dev,
+ "SWMII PCS Rd: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, ret);
+
+ return ret;
+}
+
+static int
+fbnic_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ return fbnic_mdio_read_pmd(fbd, addr, regnum);
+
+ if (devnum == MDIO_MMD_PCS)
+ return fbnic_mdio_read_pcs(fbd, addr, regnum);
+
+ return 0;
+}
+
+static void
+fbnic_mdio_write_pmd(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PMD Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+}
+
+static void
+fbnic_mdio_write_pcs(struct fbnic_dev *fbd, int addr, int regnum, u16 val)
+{
+ dev_dbg(fbd->dev,
+ "SWMII PCS Wr: Addr: %d RegNum: %d Value: 0x%04x\n",
+ addr, regnum, val);
+
+ /* Allow access to both halves of PCS for 50R2 config */
+ if (addr >= 2)
+ return;
+
+ /* Skip write for reserved registers */
+ if (regnum & FBNIC_PCS_ZERO_MASK)
+ return;
+
+ /* Swap vendor page bit for FBNIC PCS vendor page bit */
+ if (regnum & DW_VENDOR)
+ regnum ^= DW_VENDOR | FBNIC_PCS_VENDOR;
+
+ fbnic_wr32(fbd, FBNIC_PCS_PAGE(addr) + regnum, val);
+}
+
+static int
+fbnic_mdio_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct fbnic_dev *fbd = bus->priv;
+
+ if (devnum == MDIO_MMD_PMAPMD)
+ fbnic_mdio_write_pmd(fbd, addr, regnum, val);
+
+ if (devnum == MDIO_MMD_PCS)
+ fbnic_mdio_write_pcs(fbd, addr, regnum, val);
+
+ return 0;
+}
+
+/**
+ * fbnic_mdiobus_create - Create an MDIO bus to allow interfacing w/ PHYs
+ * @fbd: Pointer to FBNIC device structure to populate bus on
+ *
+ * Initialize an MDIO bus and place a pointer to it on the fbd struct. This bus
+ * will be used to interface with the PMA/PMD and PCS.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_mdiobus_create(struct fbnic_dev *fbd)
+{
+ struct mii_bus *bus;
+ int err;
+
+ bus = devm_mdiobus_alloc(fbd->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "fbnic_mii_bus";
+ bus->read_c45 = &fbnic_mdio_read_c45;
+ bus->write_c45 = &fbnic_mdio_write_c45;
+
+ /* Disable PHY auto probing. We will add PCS manually */
+ bus->phy_mask = ~0;
+
+ bus->parent = fbd->dev;
+ bus->priv = fbd;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(fbd->dev));
+
+ err = devm_mdiobus_register(fbd->dev, bus);
+ if (err) {
+ dev_err(fbd->dev, "Failed to create MDIO bus: %d\n", err);
+ return err;
+ }
+
+ fbd->mdio_bus = bus;
+
+ return 0;
+}
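Two bit tricks in fbnic_mdio.c are worth unpacking: FBNIC_PCS_ZERO_MASK spans exactly the bits between the two vendor-page selectors (BIT(15) - BIT(9) = 0x7e00, i.e. bits 9-14), and XOR-ing a register number with both selector bits relocates the vendor page in a single operation. A small stand-alone demonstration:

#include <assert.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define DW_VENDOR		BIT(15)
#define FBNIC_PCS_VENDOR	BIT(9)
#define FBNIC_PCS_ZERO_MASK	(DW_VENDOR - FBNIC_PCS_VENDOR)	/* 0x7e00 */

int main(void)
{
	unsigned int regnum = DW_VENDOR | 0x10;	/* vendor-page register 0x10 */

	/* Registers with any of bits 9-14 set are treated as reserved */
	assert(FBNIC_PCS_ZERO_MASK == 0x7e00);

	/* XOR-ing with both vendor bits moves the page selector from the
	 * XPCS convention (bit 15) to the FBNIC one (bit 9) in one step.
	 */
	if (regnum & DW_VENDOR)
		regnum ^= DW_VENDOR | FBNIC_PCS_VENDOR;

	printf("mapped register: 0x%x\n", regnum);	/* 0x210 */
	return 0;
}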
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index e95be0e7bd9e..81c9d5c9a4b2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -44,7 +44,7 @@ int __fbnic_open(struct fbnic_net *fbn)
if (err)
goto time_stop;
- err = fbnic_pcs_request_irq(fbd);
+ err = fbnic_mac_request_irq(fbd);
if (err)
goto time_stop;
@@ -86,10 +86,10 @@ static int fbnic_stop(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ fbnic_mac_free_irq(fbn->fbd);
phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
fbnic_down(fbn);
- fbnic_pcs_free_irq(fbn->fbd);
fbnic_time_stop(fbn);
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
@@ -697,10 +697,7 @@ void fbnic_reset_queues(struct fbnic_net *fbn,
**/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
-
- if (fbn->phylink)
- phylink_destroy(fbn->phylink);
+ fbnic_phylink_destroy(fbd->netdev);
free_netdev(fbd->netdev);
fbd->netdev = NULL;
@@ -802,7 +799,7 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netif_tx_stop_all_queues(netdev);
- if (fbnic_phylink_init(netdev)) {
+ if (fbnic_phylink_create(netdev)) {
fbnic_netdev_free(fbd);
return NULL;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index b0a87c57910f..9129a658f8fa 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -44,7 +44,7 @@ struct fbnic_net {
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs *pcs;
u8 aui;
u8 fec;
@@ -73,6 +73,8 @@ struct fbnic_net {
/* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
+
+ bool tx_pause;
};
int __fbnic_open(struct fbnic_net *fbn);
@@ -106,8 +108,10 @@ int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
struct ethtool_link_ksettings *cmd);
int fbnic_phylink_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam);
+int fbnic_phylink_create(struct net_device *netdev);
+void fbnic_phylink_destroy(struct net_device *netdev);
int fbnic_phylink_init(struct net_device *netdev);
-
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev);
bool fbnic_check_split_frames(struct bpf_prog *prog,
unsigned int mtu, u32 hds_threshold);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index 4620f1847f2e..861d98099c44 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -207,6 +207,10 @@ static void fbnic_service_task(struct work_struct *work)
{
struct fbnic_dev *fbd = container_of(to_delayed_work(work),
struct fbnic_dev, service_task);
+ struct net_device *netdev = fbd->netdev;
+
+ if (netif_running(netdev))
+ fbnic_phylink_pmd_training_complete_notify(netdev);
rtnl_lock();
@@ -224,7 +228,7 @@ static void fbnic_service_task(struct work_struct *work)
netdev_unlock(fbd->netdev);
}
- if (netif_running(fbd->netdev))
+ if (netif_running(netdev))
schedule_delayed_work(&fbd->service_task, HZ);
rtnl_unlock();
@@ -335,6 +339,9 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_failure_mode;
}
+ if (fbnic_mdiobus_create(fbd))
+ goto init_failure_mode;
+
netdev = fbnic_netdev_alloc(fbd);
if (!netdev) {
dev_err(&pdev->dev, "Netdev allocation failed\n");
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 7ce3fdd25282..09c5225111be 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/phy.h>
#include <linux/phylink.h>
@@ -101,88 +102,47 @@ int fbnic_phylink_get_fecparam(struct net_device *netdev,
return 0;
}
-static struct fbnic_net *
-fbnic_pcs_to_net(struct phylink_pcs *pcs)
-{
- return container_of(pcs, struct fbnic_net, phylink_pcs);
-}
-
-static void
-fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
- struct phylink_link_state *state)
+static struct phylink_pcs *
+fbnic_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
-
- switch (fbn->aui) {
- case FBNIC_AUI_25GAUI:
- state->speed = SPEED_25000;
- break;
- case FBNIC_AUI_LAUI2:
- case FBNIC_AUI_50GAUI1:
- state->speed = SPEED_50000;
- break;
- case FBNIC_AUI_100GAUI2:
- state->speed = SPEED_100000;
- break;
- default:
- state->link = 0;
- return;
- }
-
- state->duplex = DUPLEX_FULL;
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
- state->link = fbd->mac->pcs_get_link(fbd);
+ return fbn->pcs;
}
static int
-fbnic_phylink_pcs_enable(struct phylink_pcs *pcs)
+fbnic_phylink_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
- return fbd->mac->pcs_enable(fbd);
+ fbd->mac->prepare(fbd, fbn->aui, fbn->fec);
+
+ return 0;
}
static void
-fbnic_phylink_pcs_disable(struct phylink_pcs *pcs)
+fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
- struct fbnic_dev *fbd = fbn->fbd;
-
- return fbd->mac->pcs_disable(fbd);
}
static int
-fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
- phy_interface_t interface,
- const unsigned long *advertising,
- bool permit_pause_to_mac)
-{
- return 0;
-}
-
-static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = {
- .pcs_config = fbnic_phylink_pcs_config,
- .pcs_enable = fbnic_phylink_pcs_enable,
- .pcs_disable = fbnic_phylink_pcs_disable,
- .pcs_get_state = fbnic_phylink_pcs_get_state,
-};
-
-static struct phylink_pcs *
-fbnic_phylink_mac_select_pcs(struct phylink_config *config,
- phy_interface_t interface)
+fbnic_phylink_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
{
struct net_device *netdev = to_net_dev(config->dev);
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
- return &fbn->phylink_pcs;
-}
+ /* Retest the link state and restart interrupts */
+ fbd->mac->get_link(fbd, fbn->aui, fbn->fec);
-static void
-fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
-{
+ return 0;
}
static void
@@ -208,23 +168,48 @@ fbnic_phylink_mac_link_up(struct phylink_config *config,
struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
+ fbn->tx_pause = tx_pause;
+ fbnic_config_drop_mode(fbn, tx_pause);
+
fbd->mac->link_up(fbd, tx_pause, rx_pause);
}
static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
.mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_prepare = fbnic_phylink_mac_prepare,
.mac_config = fbnic_phylink_mac_config,
+ .mac_finish = fbnic_phylink_mac_finish,
.mac_link_down = fbnic_phylink_mac_link_down,
.mac_link_up = fbnic_phylink_mac_link_up,
};
-int fbnic_phylink_init(struct net_device *netdev)
+/**
+ * fbnic_phylink_create - Phylink device creation
+ * @netdev: Network Device struct to attach phylink device
+ *
+ * Initialize and attach a phylink instance to the device. The phylink
+ * device will make use of the netdev struct to track carrier and will
+ * eventually be used to expose the current state of the MAC and PCS
+ * setup.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_phylink_create(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
+ struct phylink_pcs *pcs;
struct phylink *phylink;
+ int err;
+
+ pcs = xpcs_create_pcs_mdiodev(fbd->mdio_bus, 0);
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err(fbd->dev, "Failed to create PCS device: %d\n", err);
+ return err;
+ }
- fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
+ fbn->pcs = pcs;
fbn->phylink_config.dev = &netdev->dev;
fbn->phylink_config.type = PHYLINK_NETDEV;
@@ -247,10 +232,80 @@ int fbnic_phylink_init(struct net_device *netdev)
phylink = phylink_create(&fbn->phylink_config, NULL,
fbnic_phylink_select_interface(fbn->aui),
&fbnic_phylink_mac_ops);
- if (IS_ERR(phylink))
- return PTR_ERR(phylink);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ dev_err(netdev->dev.parent,
+ "Failed to create Phylink interface, err: %d\n", err);
+ xpcs_destroy_pcs(pcs);
+ return err;
+ }
fbn->phylink = phylink;
return 0;
}
+
+/**
+ * fbnic_phylink_destroy - Teardown phylink related interfaces
+ * @netdev: Network Device struct containing phylink device
+ *
+ * Detach and free resources related to phylink interface.
+ **/
+void fbnic_phylink_destroy(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+ if (fbn->pcs)
+ xpcs_destroy_pcs(fbn->pcs);
+}
+
+/**
+ * fbnic_phylink_pmd_training_complete_notify - PMD training complete notifier
+ * @netdev: Netdev struct phylink device attached to
+ *
+ * When the link first comes up the PMD will have a period of 2 to 3 seconds
+ * where the link will flutter due to link training. To avoid spamming the
+ * kernel log with messages about this, we add a delay of 4 seconds from the
+ * time of the last PCS report of link, after which we are unlikely to see
+ * any further link loss events due to link training.
+ **/
+void fbnic_phylink_pmd_training_complete_notify(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ if (fbd->pmd_state != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Prevent reading end_of_pmd_training until we have verified the state */
+ smp_rmb();
+
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* At this point we have verified that the link has been up for
+ * the full training duration. As a first step we will try
+ * transitioning to link ready.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_TRAINING,
+ FBNIC_PMD_LINK_READY) != FBNIC_PMD_TRAINING)
+ return;
+
+ /* Perform a follow-up check to verify that the link didn't flap
+ * just before our transition by rechecking the training timer.
+ */
+ if (!time_before(READ_ONCE(fbd->end_of_pmd_training), jiffies))
+ return;
+
+ /* The training timeout has been completed. We are good to swap out
+ * link_ready for send_data assuming no other events have occurred
+ * that would have pulled us back into initialization or training.
+ */
+ if (cmpxchg(&fbd->pmd_state, FBNIC_PMD_LINK_READY,
+ FBNIC_PMD_SEND_DATA) != FBNIC_PMD_LINK_READY)
+ return;
+
+ phylink_pcs_change(fbn->pcs, false);
+}
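The notifier above uses a check / cmpxchg / re-check sequence so that a link flap racing with the service task can never promote the state all the way to SEND_DATA. A user-space model of that two-step promotion with C11 atomics; plain comparisons stand in for the wraparound-safe time_before(), and the names are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SEND_DATA, INITIALIZE, TRAINING, LINK_READY };

/* A concurrent flap re-arms *end and resets *state, so one of the four
 * checks below fails and we never report SEND_DATA for a dropped link.
 */
static bool try_complete_training(_Atomic int *state,
				  _Atomic unsigned long *end,
				  unsigned long now)
{
	int expected = TRAINING;

	if (atomic_load(end) >= now)		/* delay not elapsed yet */
		return false;
	if (!atomic_compare_exchange_strong(state, &expected, LINK_READY))
		return false;			/* state moved under us */
	if (atomic_load(end) >= now)		/* deadline re-armed by a flap */
		return false;
	expected = LINK_READY;
	return atomic_compare_exchange_strong(state, &expected, SEND_DATA);
}

int main(void)
{
	_Atomic int state = TRAINING;
	_Atomic unsigned long end = 4;

	printf("t=3: %d\n", try_complete_training(&state, &end, 3)); /* 0 */
	printf("t=5: %d\n", try_complete_training(&state, &end, 5)); /* 1 */
	return 0;
}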
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index 57e18a68f5d2..13d508ce637f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -653,7 +653,8 @@ static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
FBNIC_TWD_TYPE_AL;
total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
- page_pool_put_page(page->pp, page, -1, pp_allow_direct);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page, -1,
+ pp_allow_direct);
next_desc:
head++;
head &= ring->size_mask;
@@ -1807,7 +1808,7 @@ int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
free_vectors:
fbnic_free_napi_vectors(fbn);
- return -ENOMEM;
+ return err;
}
static void fbnic_free_ring_resources(struct device *dev,
@@ -2574,11 +2575,15 @@ write_ctl:
}
static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
- struct fbnic_ring *rcq)
+ struct fbnic_ring *rcq, bool tx_pause)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 drop_mode, rcq_ctl;
- drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ if (!tx_pause && fbn->num_rx_queues > 1)
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+ else
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_NEVER;
/* Specify packet layout */
rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
@@ -2588,6 +2593,21 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause)
+{
+ int i, t;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ for (t = 0; t < nv->rxt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[nv->txt_count + t];
+
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl, tx_pause);
+ }
+ }
+}
+
static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
{
u32 threshold;
@@ -2637,7 +2657,7 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
u32 hds_thresh = fbn->hds_thresh;
u32 rcq_ctl = 0;
- fbnic_config_drop_mode_rcq(nv, rcq);
+ fbnic_config_drop_mode_rcq(nv, rcq, fbn->tx_pause);
/* Force lower bound on MAX_HEADER_BYTES. Below this, all frames should
* be split at L4. It would also result in the frames being split at
@@ -2700,7 +2720,6 @@ static void __fbnic_nv_enable(struct fbnic_napi_vector *nv)
&nv->napi);
fbnic_enable_bdq(&qt->sub0, &qt->sub1);
- fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
fbnic_enable_rcq(nv, &qt->cmpl);
}
}
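The drop-mode selection above reduces to a single predicate: drop incoming frames eagerly only when Tx pause is off and more than one Rx queue exists, otherwise never drop. A stand-alone sketch of the resulting table:

#include <stdio.h>

enum { DROP_IMMEDIATE, DROP_NEVER };

/* Mirrors the condition in fbnic_config_drop_mode_rcq() above */
static int pick_drop_mode(int tx_pause, int num_rx_queues)
{
	return (!tx_pause && num_rx_queues > 1) ? DROP_IMMEDIATE : DROP_NEVER;
}

int main(void)
{
	printf("pause off, 8 queues: %s\n",
	       pick_drop_mode(0, 8) == DROP_IMMEDIATE ? "drop" : "never");
	printf("pause on,  8 queues: %s\n",
	       pick_drop_mode(1, 8) == DROP_IMMEDIATE ? "drop" : "never");
	printf("pause off, 1 queue:  %s\n",
	       pick_drop_mode(0, 1) == DROP_IMMEDIATE ? "drop" : "never");
	return 0;
}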
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index ca37da5a0b17..27776e844e29 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -184,6 +184,7 @@ void fbnic_reset_netif_queues(struct fbnic_net *fbn);
irqreturn_t fbnic_msix_clean_rings(int irq, void *data);
void fbnic_napi_enable(struct fbnic_net *fbn);
void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_config_drop_mode(struct fbnic_net *fbn, bool tx_pause);
void fbnic_enable(struct fbnic_net *fbn);
void fbnic_disable(struct fbnic_net *fbn);
void fbnic_flush(struct fbnic_net *fbn);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index b4377b8613c3..8c40db90ee8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/ptp_classify.h>
+#include <linux/units.h>
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define LAN966X_CLOCK_RATE 165617754
+
#define LAN966X_MAX_PTP_ID 512
/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference
@@ -1126,5 +1129,5 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
- return 15125;
+ return PICO / LAN966X_CLOCK_RATE;
}
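A quick check of the constant: PICO (from linux/units.h) is 10^12, so the function now returns 10^12 / 165617754 = 6038 ps, about 6.038 ns, consistent with the 6.037735849 ns reference period quoted earlier in this file; the old hardcoded 15125 ps would have corresponded to a ~66 MHz clock. A one-liner to confirm:

#include <stdio.h>

#define PICO			1000000000000ULL	/* as in linux/units.h */
#define LAN966X_CLOCK_RATE	165617754

int main(void)
{
	/* 10^12 / 165617754 = 6038 ps, i.e. ~6.038 ns per clock cycle */
	printf("period = %llu ps\n", PICO / LAN966X_CLOCK_RATE);
	return 0;
}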
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index effe0a2f207a..8fd70b34807a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1300,7 +1300,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
struct gdma_posted_wqe_info *wqe_info)
{
u32 client_oob_size = wqe_req->inline_oob_size;
- struct gdma_context *gc;
u32 sgl_data_size;
u32 max_wqe_size;
u32 wqe_size;
@@ -1330,11 +1329,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
if (wqe_size > max_wqe_size)
return -EINVAL;
- if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
- gc = wq->gdma_dev->gdma_context;
- dev_err(gc->dev, "unsuccessful flow control!\n");
+ if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
return -ENOSPC;
- }
if (wqe_info)
wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index cccd5b63cee6..1ad154f9db1a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
@@ -329,6 +330,21 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cq = &apc->tx_qp[txq_idx].tx_cq;
tx_stats = &txq->stats;
+ BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+ if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
+ skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+ /* GSO skbs that exceed the hardware SGE limit are not expected here,
+ * as they are handled in the mana_features_check() callback
+ */
+ if (skb_linearize(skb)) {
+ netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
+ skb_shinfo(skb)->nr_frags,
+ skb_is_gso(skb));
+ goto tx_drop_count;
+ }
+ apc->eth_stats.tx_linear_pkt_cnt++;
+ }
+
pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -442,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
pkg.wqe_req.sgl = pkg.sgl_array;
} else {
@@ -478,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (err) {
(void)skb_dequeue_tail(&txq->pending_skbs);
+ mana_unmap_skb(skb, apc);
netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
- err = NETDEV_TX_BUSY;
- goto tx_busy;
+ goto free_sgl_ptr;
}
err = NETDEV_TX_OK;
@@ -500,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
-tx_busy:
if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
netif_tx_wake_queue(net_txq);
apc->eth_stats.wake_queue++;
@@ -518,6 +531,25 @@ tx_drop:
return NETDEV_TX_OK;
}
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+static netdev_features_t mana_features_check(struct sk_buff *skb,
+ struct net_device *ndev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+ /* Exceeds HW SGE limit.
+ * GSO case:
+ * Disable GSO so the stack will software-segment the skb
+ * into smaller skbs that fit the SGE budget.
+ * Non-GSO case:
+ * The xmit path will attempt skb_linearize() as a fallback.
+ */
+ features &= ~NETIF_F_GSO_MASK;
+ }
+ return features;
+}
+#endif
+
static void mana_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *st)
{
@@ -534,6 +566,11 @@ static void mana_get_stats64(struct net_device *ndev,
netdev_stats_to_stats64(st, &ndev->stats);
+ if (apc->ac->hwc_timeout_occurred)
+ netdev_warn_once(ndev, "HWC timeout occurred\n");
+
+ st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -878,6 +915,9 @@ static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
.ndo_select_queue = mana_select_queue,
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+ .ndo_features_check = mana_features_check,
+#endif
.ndo_start_xmit = mana_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_get_stats64 = mana_get_stats64,
@@ -1646,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
return 0;
}
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -2809,11 +2849,12 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
return 0;
}
-void mana_query_gf_stats(struct mana_port_context *apc)
+int mana_query_gf_stats(struct mana_context *ac)
{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_gf_stat_resp resp = {};
struct mana_query_gf_stat_req req = {};
- struct net_device *ndev = apc->ndev;
+ struct device *dev = gc->dev;
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
@@ -2847,52 +2888,54 @@ void mana_query_gf_stats(struct mana_port_context *apc)
STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
- err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ err = mana_send_request(ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to query GF stats: %d\n", err);
- return;
+ dev_err(dev, "Failed to query GF stats: %d\n", err);
+ return err;
}
err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
sizeof(resp));
if (err || resp.hdr.status) {
- netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
- resp.hdr.status);
- return;
+ dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
+ resp.hdr.status);
+ return err;
}
- apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
- apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
- apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
- apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
- apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
- apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
- apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
- apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
- apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
- apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
- apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+ ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+ ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+ ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
+ ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+ ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+ ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+ ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+ ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+ ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+ ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+ ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
resp.tx_err_inval_vport_offset_pkt;
- apc->eth_stats.hc_tx_err_vlan_enforcement =
+ ac->hc_stats.hc_tx_err_vlan_enforcement =
resp.tx_err_vlan_enforcement;
- apc->eth_stats.hc_tx_err_eth_type_enforcement =
+ ac->hc_stats.hc_tx_err_eth_type_enforcement =
resp.tx_err_ethtype_enforcement;
- apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
- apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+ ac->hc_stats.hc_tx_err_sqpdid_enforcement =
resp.tx_err_SQPDID_enforcement;
- apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+ ac->hc_stats.hc_tx_err_cqpdid_enforcement =
resp.tx_err_CQPDID_enforcement;
- apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
- apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
- apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
- apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
- apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
- apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
- apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
- apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
- apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
- apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+ ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+ ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
+ ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
+ ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+ ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+ ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+ ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+ ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+ ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+ ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+
+ return 0;
}
void mana_query_phy_stats(struct mana_port_context *apc)
@@ -3427,6 +3470,24 @@ int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type even
return 0;
}
+#define MANA_GF_STATS_PERIOD (2 * HZ)
+
+static void mana_gf_stats_work_handler(struct work_struct *work)
+{
+ struct mana_context *ac =
+ container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+ int err;
+
+ err = mana_query_gf_stats(ac);
+ if (err == -ETIMEDOUT) {
+ /* HWC timeout detected - reset stats and stop rescheduling */
+ ac->hwc_timeout_occurred = true;
+ memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+ return;
+ }
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+}
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
@@ -3519,6 +3580,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = add_adev(gd, "eth");
+
+ INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+ schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+
out:
if (err) {
mana_remove(gd, false);
@@ -3543,6 +3608,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
int i;
disable_work_sync(&ac->link_change_work);
+ cancel_delayed_work_sync(&ac->gf_stats_work);
/* adev currently doesn't support suspending, always remove it */
if (gd->adev)
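The compile-time guard around mana_features_check() mirrors the runtime test in mana_start_xmit(): the hook only exists on kernels where the worst-case fragment count can exceed the hardware SGL, with the +2 apparently reserved for the non-fragment portion of the skb (an assumption based on the check's shape). A stand-alone illustration of how such a guard compiles in or out; both constants here are placeholders, not the driver's real values:

#include <stdio.h>

#define MAX_SKB_FRAGS			45	/* e.g. raised CONFIG_MAX_SKB_FRAGS */
#define MANA_MAX_TX_WQE_SGL_ENTRIES	30	/* illustrative SGL budget */

int main(void)
{
#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
	/* Worst case can overflow the SGL: the driver must hook
	 * .ndo_features_check and be ready to linearize non-GSO skbs.
	 */
	printf("overflow possible: features_check + linearize needed\n");
#else
	/* The stack can never hand us too many fragments; hooks compile out */
	printf("SGL always fits\n");
#endif
	return 0;
}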
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index a1afa75a9463..0e2f4343ac67 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -15,66 +15,71 @@ struct mana_stats_desc {
static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
- {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+ {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+ {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ tx_cqe_unknown_type)},
+ {"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
+ tx_linear_pkt_cnt)},
+ {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+ rx_coalesced_err)},
+ {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+ rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_hc_stats[] = {
+ {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
hc_rx_discards_no_wqe)},
- {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_rx_err_vport_disabled)},
- {"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
- {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
+ {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_pkts)},
- {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_ucast_bytes)},
- {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_pkts)},
- {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_bcast_bytes)},
- {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
- hc_rx_mcast_pkts)},
- {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
+ hc_rx_mcast_pkts)},
+ {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_rx_mcast_bytes)},
- {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gf_disabled)},
- {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vport_disabled)},
{"hc_tx_err_inval_vportoffset_pkt",
- offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_vportoffset_pkt)},
- {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_vlan_enforcement)},
{"hc_tx_err_eth_type_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
- {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
+ {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_sa_enforcement)},
{"hc_tx_err_sqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
{"hc_tx_err_cqpdid_enforcement",
- offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
- {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+ offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
+ {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_mtu_violation)},
- {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_inval_oob)},
- {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
hc_tx_err_gdma)},
- {"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
- {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
+ {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_pkts)},
- {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_ucast_bytes)},
- {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_pkts)},
- {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_bcast_bytes)},
- {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_pkts)},
- {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+ {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
hc_tx_mcast_bytes)},
- {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
- {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- tx_cqe_unknown_type)},
- {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
- rx_coalesced_err)},
- {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
- rx_cqe_unknown_type)},
};
static const struct mana_stats_desc mana_phy_stats[] = {
@@ -138,7 +143,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+ return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + ARRAY_SIZE(mana_hc_stats) +
num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
@@ -150,10 +155,12 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
if (stringset != ETH_SS_STATS)
return;
-
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
ethtool_puts(&data, mana_eth_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
+ ethtool_puts(&data, mana_hc_stats[i].name);
+
for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
ethtool_puts(&data, mana_phy_stats[i].name);
@@ -186,6 +193,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *hc_stats = &apc->ac->hc_stats;
void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
@@ -207,8 +215,6 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
if (!apc->port_is_up)
return;
- /* we call mana function to update stats from GDMA */
- mana_query_gf_stats(apc);
/* We call this mana function to get the phy stats from GDMA and includes
* aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause
@@ -219,6 +225,9 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
+ data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);
+
for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
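The stats tables above all follow the same descriptor pattern: pair a display string with an offsetof() into the backing struct, then emit every counter from one generic loop. A self-contained miniature of the pattern:

#include <stddef.h>
#include <stdio.h>

struct hc_stats {
	unsigned long long rx_bytes;
	unsigned long long tx_bytes;
};

struct stats_desc {
	const char *name;
	size_t offset;
};

static const struct stats_desc descs[] = {
	{ "hc_rx_bytes", offsetof(struct hc_stats, rx_bytes) },
	{ "hc_tx_bytes", offsetof(struct hc_stats, tx_bytes) },
};

int main(void)
{
	struct hc_stats s = { .rx_bytes = 1234, .tx_bytes = 5678 };
	void *base = &s;	/* as with hc_stats/phy_stats above */
	size_t i;

	/* One loop renders any counter the table describes */
	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s: %llu\n", descs[i].name,
		       *(unsigned long long *)((char *)base + descs[i].offset));
	return 0;
}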
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 0e1a3800f371..85e3b19e6165 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -81,7 +81,8 @@ static const struct nfp_devlink_param_u8_arg nfp_devlink_u8_args[] = {
static int
nfp_devlink_param_u8_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 94c5689b5abd..0c5278c0598c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -121,7 +121,8 @@ void qed_fw_reporters_destroy(struct devlink *devlink)
}
static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 847fa62c80df..e338bfc8b7b2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -4,6 +4,7 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
+#include <linux/array_size.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -960,7 +961,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
{
int i;
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; i < ARRAY_SIZE(cqe->len_list) && cqe->len_list[i]; i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -985,7 +986,7 @@ static int qede_tpa_end(struct qede_dev *edev,
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
PAGE_SIZE, rxq->data_direction);
- for (i = 0; cqe->len_list[i]; i++)
+ for (i = 0; i < ARRAY_SIZE(cqe->len_list) && cqe->len_list[i]; i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
if (unlikely(i > 1))
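In the bounded loops above the order of the && operands matters: the index test must run first so the final iteration terminates on i alone, without reading len_list[i] one slot past the array. A minimal demonstration of the safe ordering:

#include <stdio.h>

#define LEN_LIST_SIZE 4	/* stand-in for ARRAY_SIZE(cqe->len_list) */

int main(void)
{
	unsigned short len_list[LEN_LIST_SIZE] = {100, 200, 300, 400};
	int i;

	/* Bounds test first: when every entry is non-zero, the loop stops
	 * at i == LEN_LIST_SIZE without ever evaluating len_list[i] there.
	 */
	for (i = 0; i < LEN_LIST_SIZE && len_list[i]; i++)
		printf("frag %d: %u\n", i, len_list[i]);
	return 0;
}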
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b5d744d2586f..66ab1b9d65a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -506,25 +506,6 @@ static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
}
#endif
-static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct qede_dev *edev = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EAGAIN;
-
- switch (cmd) {
- case SIOCSHWTSTAMP:
- return qede_ptp_hw_ts(edev, ifr);
- default:
- DP_VERBOSE(edev, QED_MSG_DEBUG,
- "default IOCTL cmd 0x%x\n", cmd);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
char *p_sb = (char *)fp->sb_info->sb_virt;
@@ -717,7 +698,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
- .ndo_eth_ioctl = qede_ioctl,
.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
@@ -742,6 +722,8 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
.ndo_xdp_xmit = qede_xdp_transmit,
.ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_hwtstamp_get = qede_hwtstamp_get,
+ .ndo_hwtstamp_set = qede_hwtstamp_set,
};
static const struct net_device_ops qede_netdev_vf_ops = {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index a38f1e72c62b..d351be5fbda1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -199,18 +199,15 @@ static u64 qede_ptp_read_cc(struct cyclecounter *cc)
return phc_cycles;
}
-static int qede_ptp_cfg_filters(struct qede_dev *edev)
+static void qede_ptp_cfg_filters(struct qede_dev *edev)
{
enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
- if (!ptp)
- return -EIO;
-
if (!ptp->hw_ts_ioctl_called) {
DP_INFO(edev, "TS IOCTL not called\n");
- return 0;
+ return;
}
switch (ptp->tx_type) {
@@ -223,11 +220,6 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
-
- case HWTSTAMP_TX_ONESTEP_SYNC:
- case HWTSTAMP_TX_ONESTEP_P2P:
- DP_ERR(edev, "One-step timestamping is not supported\n");
- return -ERANGE;
}
spin_lock_bh(&ptp->lock);
@@ -286,39 +278,65 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
spin_unlock_bh(&ptp->lock);
-
- return 0;
}
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct qede_dev *edev = netdev_priv(netdev);
struct qede_ptp *ptp;
- int rc;
+
+ if (!netif_running(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is down");
+ return -EAGAIN;
+ }
ptp = edev->ptp;
- if (!ptp)
+ if (!ptp) {
+ NL_SET_ERR_MSG_MOD(extack, "HW timestamping is not supported");
return -EIO;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
+ }
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
- config.tx_type, config.rx_filter);
+ "HWTSTAMP SET: Requested tx_type = %d, requested rx_filters = %d\n",
+ config->tx_type, config->rx_filter);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "One-step timestamping is not supported");
+ return -ERANGE;
+ }
ptp->hw_ts_ioctl_called = 1;
- ptp->tx_type = config.tx_type;
- ptp->rx_filter = config.rx_filter;
+ ptp->tx_type = config->tx_type;
+ ptp->rx_filter = config->rx_filter;
- rc = qede_ptp_cfg_filters(edev);
- if (rc)
- return rc;
+ qede_ptp_cfg_filters(edev);
+
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
+}
- config.rx_filter = ptp->rx_filter;
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+ struct qede_ptp *ptp;
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EIO;
+
+ config->tx_type = ptp->tx_type;
+ config->rx_filter = ptp->rx_filter;
+
+ return 0;
}
int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index adafc894797e..88f168395812 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -14,7 +14,11 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
-int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+int qede_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int qede_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts);
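For context, a minimal sketch of the ndo_hwtstamp_get/set contract the qede conversion adopts. The callback shapes follow include/linux/netdevice.h; the foo_* names are placeholders, not qede symbols, and the hardware programming is elided:

/* Hedged sketch of a driver wiring the hwtstamp ndo pair; the core
 * marshals the uapi struct, so no copy_{from,to}_user is needed.
 */
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg)
{
	/* Report the currently programmed state. */
	cfg->tx_type = HWTSTAMP_TX_OFF;
	cfg->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}

static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tx_type");
		return -ERANGE;
	}
	/* Program the hardware here, then echo back what was applied. */
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_get = foo_hwtstamp_get,
	.ndo_hwtstamp_set = foo_hwtstamp_set,
};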
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0b96b6aa4214..630319604211 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -59,6 +59,7 @@
#define FIRMWARE_8125D_2 "rtl_nic/rtl8125d-2.fw"
#define FIRMWARE_8125K_1 "rtl_nic/rtl8125k-1.fw"
#define FIRMWARE_8125BP_2 "rtl_nic/rtl8125bp-2.fw"
+#define FIRMWARE_9151A_1 "rtl_nic/rtl9151a-1.fw"
#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define FIRMWARE_8126A_3 "rtl_nic/rtl8126a-3.fw"
#define FIRMWARE_8127A_1 "rtl_nic/rtl8127a-1.fw"
@@ -111,6 +112,7 @@ static const struct rtl_chip_info {
{ 0x7cf, 0x681, RTL_GIGA_MAC_VER_66, "RTL8125BP", FIRMWARE_8125BP_2 },
/* 8125D family. */
+ { 0x7cf, 0x68b, RTL_GIGA_MAC_VER_64, "RTL9151A", FIRMWARE_9151A_1 },
{ 0x7cf, 0x68a, RTL_GIGA_MAC_VER_64, "RTL8125K", FIRMWARE_8125K_1 },
{ 0x7cf, 0x689, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_2 },
{ 0x7cf, 0x688, RTL_GIGA_MAC_VER_64, "RTL8125D", FIRMWARE_8125D_1 },
@@ -774,6 +776,7 @@ MODULE_FIRMWARE(FIRMWARE_8125D_1);
MODULE_FIRMWARE(FIRMWARE_8125D_2);
MODULE_FIRMWARE(FIRMWARE_8125K_1);
MODULE_FIRMWARE(FIRMWARE_8125BP_2);
+MODULE_FIRMWARE(FIRMWARE_9151A_1);
MODULE_FIRMWARE(FIRMWARE_8126A_2);
MODULE_FIRMWARE(FIRMWARE_8126A_3);
MODULE_FIRMWARE(FIRMWARE_8127A_1);
@@ -1509,6 +1512,7 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_52:
return RTL_DASH_EP;
case RTL_GIGA_MAC_VER_66:
+ case RTL_GIGA_MAC_VER_80:
return RTL_DASH_25_BP;
default:
return RTL_DASH_NONE;
@@ -1517,11 +1521,20 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_25 &&
- tp->mac_version != RTL_GIGA_MAC_VER_28 &&
- tp->mac_version != RTL_GIGA_MAC_VER_31 &&
- tp->mac_version != RTL_GIGA_MAC_VER_38)
- r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_38:
+ break;
+ case RTL_GIGA_MAC_VER_80:
+ r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, true);
+ break;
+ default:
+ r8169_mod_reg8_cond(tp, PMCH, D3HOT_NO_PLL_DOWN, true);
+ r8169_mod_reg8_cond(tp, PMCH, D3COLD_NO_PLL_DOWN, !enable);
+ break;
+ }
}
static void rtl_reset_packet_filter(struct rtl8169_private *tp)
@@ -2376,26 +2389,6 @@ void r8169_apply_firmware(struct rtl8169_private *tp)
}
}
-static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
-{
- /* Adjust EEE LED frequency */
- if (tp->mac_version != RTL_GIGA_MAC_VER_38)
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
-
- rtl_eri_set_bits(tp, 0x1b0, 0x0003);
-}
-
-static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
- r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
-}
-
-static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
-{
- r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
-}
-
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
@@ -3173,8 +3166,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
@@ -3199,8 +3190,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
rtl_mod_config5(tp, Spi_en, 0);
-
- rtl8168_config_eee_mac(tp);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -3250,8 +3239,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
@@ -3392,8 +3379,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3441,8 +3426,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -3498,8 +3481,6 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
- rtl8168_config_eee_mac(tp);
-
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
@@ -3740,11 +3721,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- rtl8125a_config_eee_mac(tp);
- else
- rtl8125b_config_eee_mac(tp);
-
rtl_disable_rxdvgate(tp);
}
@@ -4747,6 +4723,41 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void rtl_enable_tx_lpi(struct rtl8169_private *tp, bool enable)
+{
+ if (!rtl_supports_eee(tp))
+ return;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_52:
+ /* Adjust EEE LED frequency */
+ if (tp->mac_version != RTL_GIGA_MAC_VER_38)
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ if (enable)
+ rtl_eri_set_bits(tp, 0x1b0, 0x0003);
+ else
+ rtl_eri_clear_bits(tp, 0x1b0, 0x0003);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ if (enable) {
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0, 0x0006);
+ } else {
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ r8168_mac_ocp_modify(tp, 0xeb62, 0x0006, 0);
+ }
+ break;
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_LAST:
+ if (enable)
+ r8168_mac_ocp_modify(tp, 0xe040, 0, 0x0003);
+ else
+ r8168_mac_ocp_modify(tp, 0xe040, 0x0003, 0);
+ break;
+ default:
+ break;
+ }
+}
+
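The r8168_mac_ocp_modify(tp, reg, clear, set) calls above follow the usual read-modify-write pattern; a standalone model, where a plain array merely stands in for MAC OCP space:

#include <stdint.h>
#include <stdio.h>

static uint16_t regs[0x10000 / 2];

/* Clear the 'clear' bits, then set the 'set' bits, of one 16-bit reg. */
static void mac_ocp_modify(uint16_t reg, uint16_t clear, uint16_t set)
{
	uint16_t val = regs[reg / 2];

	regs[reg / 2] = (val & ~clear) | set;
}

int main(void)
{
	mac_ocp_modify(0xe040, 0, 0x0003);	/* enable TX LPI bits */
	printf("0xe040 = 0x%04x\n", regs[0xe040 / 2]);
	mac_ocp_modify(0xe040, 0x0003, 0);	/* disable them again */
	printf("0xe040 = 0x%04x\n", regs[0xe040 / 2]);
	return 0;
}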
static void r8169_phylink_handler(struct net_device *ndev)
{
struct rtl8169_private *tp = netdev_priv(ndev);
@@ -4754,6 +4765,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
+ rtl_enable_tx_lpi(tp, tp->phydev->enable_tx_lpi);
pm_request_resume(d);
} else {
pm_runtime_idle(d);
@@ -5451,6 +5463,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
tp->aspm_manageable = !rc;
+ /* Fiber mode on RTL8127AF isn't supported */
+ if (rtl_is_8125(tp)) {
+ u16 data = r8168_mac_ocp_read(tp, 0xd006);
+
+ if ((data & 0xff) == 0x07)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Fiber mode not supported\n");
+ }
+
tp->dash_type = rtl_get_dash_type(tp);
tp->dash_enabled = rtl_dash_is_enabled(tp);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 75bad561b352..849c5a6c2af1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1521,8 +1521,10 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
skb = priv->rxq[qnum]->rx_skbuff[entry];
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
+ break;
+ }
prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 87c5bea6c2a2..907fe2e927f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -349,6 +349,11 @@ config DWMAC_VISCONTI
endif
+config STMMAC_LIBPCI
+ tristate
+ help
+ This option enables the PCI bus helpers for the stmmac driver.
+
config DWMAC_INTEL
tristate "Intel GMAC support"
default X86
@@ -362,16 +367,18 @@ config DWMAC_INTEL
config DWMAC_LOONGSON
tristate "Loongson PCI DWMAC support"
default MACH_LOONGSON64
- depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the LOONGSON PCI bus support for the stmmac driver,
covering the Ethernet controller on the Loongson-2K1000 SoC and the
LS7A1000 bridge.
config STMMAC_PCI
tristate "STMMAC PCI bus support"
- depends on STMMAC_ETH && PCI
+ depends on PCI
depends on COMMON_CLK
+ select STMMAC_LIBPCI
help
This selects the platform specific bus support for the stmmac driver.
This driver was tested on Xilinx XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 1681a8a28313..7bf528731034 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := dwmac-socfpga.o
+obj-$(CONFIG_STMMAC_LIBPCI) += stmmac_libpci.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index fb55efd52240..120a009c9992 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -83,14 +83,13 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
+ bool ret = false;
if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
- (!enh_desc && (len > BUF_SIZE_2KiB))) {
- ret = 1;
- }
+ (!enh_desc && (len > BUF_SIZE_2KiB)))
+ ret = true;
return ret;
}
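For comparison only, the converted helper could collapse to a single expression; the patch deliberately keeps the original control flow (BUF_SIZE_* come from the driver headers):

static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
	return len > (enh_desc ? BUF_SIZE_8KiB : BUF_SIZE_2KiB);
}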
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7395bbb94aea..49df46be3669 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -548,6 +548,19 @@ struct dma_features {
#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+/* Common definitions for AXI Master Bus Mode */
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_AXI_BLEN_MASK GENMASK(7, 1)
+
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len);
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
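One plausible body for the stmmac_axi_blen_to_mask() helper declared above, inferred from the DMA_AXI_BLEN* bit layout (bit 1 = burst 4 through bit 7 = burst 256); the actual definition lands elsewhere in the series:

/* Hedged sketch: translate a legacy axi_blen[] array into the register
 * bitmask the common definitions above describe.
 */
void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		switch (blen[i]) {
		case 4:   *regval |= DMA_AXI_BLEN4;   break;
		case 8:   *regval |= DMA_AXI_BLEN8;   break;
		case 16:  *regval |= DMA_AXI_BLEN16;  break;
		case 32:  *regval |= DMA_AXI_BLEN32;  break;
		case 64:  *regval |= DMA_AXI_BLEN64;  break;
		case 128: *regval |= DMA_AXI_BLEN128; break;
		case 256: *regval |= DMA_AXI_BLEN256; break;
		default:  break;
		}
	}
}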
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 84072c8ed741..5e0fc31762d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -34,7 +34,7 @@ static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val)
writel(val, gmac->ctl_block + reg);
}
-static int anarion_gmac_init(struct platform_device *pdev, void *priv)
+static int anarion_gmac_init(struct device *dev, void *priv)
{
uint32_t sw_config;
struct anarion_gmac *gmac = priv;
@@ -52,7 +52,7 @@ static int anarion_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
+static void anarion_gmac_exit(struct device *dev, void *priv)
{
struct anarion_gmac *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index c7cd6497d42d..d043bad4a862 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -38,8 +38,6 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
u32 burst_map = 0;
- u32 bit_index = 0;
- u32 a_index = 0;
if (!plat_dat->axi) {
plat_dat->axi = devm_kzalloc(&pdev->dev,
@@ -83,30 +81,8 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
}
device_property_read_u32(dev, "snps,burst-map", &burst_map);
- /* converts burst-map bitmask to burst array */
- for (bit_index = 0; bit_index < 7; bit_index++) {
- if (burst_map & (1 << bit_index)) {
- switch (bit_index) {
- case 0:
- plat_dat->axi->axi_blen[a_index] = 4; break;
- case 1:
- plat_dat->axi->axi_blen[a_index] = 8; break;
- case 2:
- plat_dat->axi->axi_blen[a_index] = 16; break;
- case 3:
- plat_dat->axi->axi_blen[a_index] = 32; break;
- case 4:
- plat_dat->axi->axi_blen[a_index] = 64; break;
- case 5:
- plat_dat->axi->axi_blen[a_index] = 128; break;
- case 6:
- plat_dat->axi->axi_blen[a_index] = 256; break;
- default:
- break;
- }
- a_index++;
- }
- }
+ plat_dat->axi->axi_blen_regval = FIELD_PREP(DMA_AXI_BLEN_MASK,
+ burst_map);
/* dwc-qos needs GMAC4, AAL, TSO and PMT */
plat_dat->core_type = DWMAC_CORE_GMAC4;
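The FIELD_PREP() above works because the DT snps,burst-map bits (bit 0 = burst 4 through bit 6 = burst 256) line up one-for-one with register bits 7:1, so the whole switch statement reduces to a shift. A runnable check, where field_prep() is a local stand-in for the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define DMA_AXI_BLEN_MASK	0x000000feU	/* GENMASK(7, 1) */

/* Shift a value into the position selected by a contiguous mask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t burst_map = 0x7;	/* bursts 4, 8 and 16 */
	uint32_t regval = field_prep(DMA_AXI_BLEN_MASK, burst_map);

	printf("regval = 0x%02x\n", regval);	/* 0x0e: BLEN4|BLEN8|BLEN16 */
	return 0;
}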
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
index 1dcf2037001e..bcb8e000e720 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c
@@ -58,14 +58,14 @@ static int eic7700_clks_config(void *priv, bool enabled)
return ret;
}
-static int eic7700_dwmac_init(struct platform_device *pdev, void *priv)
+static int eic7700_dwmac_init(struct device *dev, void *priv)
{
struct eic7700_qos_priv *dwc = priv;
return eic7700_clks_config(dwc, true);
}
-static void eic7700_dwmac_exit(struct platform_device *pdev, void *priv)
+static void eic7700_dwmac_exit(struct device *dev, void *priv)
{
struct eic7700_qos_priv *dwc = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index b2194e414ec1..aad1be1ec4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -569,26 +569,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
@@ -629,22 +609,12 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].chan = i;
-
- /* Disable Priority config by default */
- plat->rx_queues_cfg[i].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- }
for (i = 0; i < plat->tx_queues_to_use; i++) {
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- /* Disable Priority config by default */
- plat->tx_queues_cfg[i].use_prio = false;
/* Default TX Q0 to use TSO and rest TXQ for TBS */
if (i > 0)
plat->tx_queues_cfg[i].tbs_en = 1;
@@ -680,9 +650,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_xit_frm = 0;
plat->axi->axi_wr_osr_lmt = 1;
plat->axi->axi_rd_osr_lmt = 1;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16;
plat->ptp_max_adj = plat->clk_ptp_rate;
@@ -706,15 +675,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
plat->flags |= STMMAC_FLAG_VLAN_FAIL_Q_EN;
/* Use the last Rx queue */
@@ -1286,7 +1246,7 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (!intel_priv)
return -ENOMEM;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index dd2fc39ec3e2..107a7c84ace8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
#include "dwmac_dma.h"
#include "dwmac1000.h"
@@ -95,28 +96,12 @@ static void loongson_default_data(struct pci_dev *pdev,
plat->core_type = DWMAC_CORE_GMAC;
plat->force_sf_dma_mode = 1;
- /* Set default value for multicast hash bins */
+ /* Increase the default value for multicast hash bins */
plat->multicast_filter_bins = 256;
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
-
plat->clk_ref_rate = 125000000;
plat->clk_ptp_rate = 125000000;
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
-
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
@@ -140,8 +125,6 @@ static void loongson_default_data(struct pci_dev *pdev,
break;
default:
ld->multichan = 0;
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
break;
}
}
@@ -520,37 +503,6 @@ static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioad
10000, 2000000);
}
-static int loongson_dwmac_suspend(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int loongson_dwmac_resume(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -559,7 +511,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
struct loongson_data *ld;
int ret;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -595,8 +547,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->mac_setup = loongson_dwmac_setup;
plat->fix_soc_reset = loongson_dwmac_fix_reset;
- plat->suspend = loongson_dwmac_suspend;
- plat->resume = loongson_dwmac_resume;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
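The stmmac_pci_plat_suspend/resume helpers referenced above presumably carry the same bodies as the removed loongson_dwmac_* functions; a sketch on that assumption (their exact form in stmmac_libpci.c is not shown in this hunk):

#include <linux/pci.h>

int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	/* Save config space, then power the function down for wake. */
	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}

int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);
	return 0;
}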
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 894ee66f5c9b..de9aba756aac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -48,7 +48,7 @@ struct ls1x_dwmac {
struct ls1x_data {
int (*setup)(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat);
- int (*init)(struct platform_device *pdev, void *bsp_priv);
+ int (*init)(struct device *dev, void *bsp_priv);
};
static int ls1b_dwmac_setup(struct platform_device *pdev,
@@ -79,7 +79,7 @@ static int ls1b_dwmac_setup(struct platform_device *pdev,
return 0;
}
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1b_dwmac_syscon_init(struct device *dev, void *priv)
{
struct ls1x_dwmac *dwmac = priv;
struct plat_stmmacenet_data *plat = dwmac->plat_dat;
@@ -98,7 +98,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
@@ -122,7 +122,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
break;
default:
- dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ dev_err(dev, "Unsupported PHY mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
@@ -133,7 +133,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
return 0;
}
-static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+static int ls1c_dwmac_syscon_init(struct device *dev, void *priv)
{
struct ls1x_dwmac *dwmac = priv;
struct plat_stmmacenet_data *plat = dwmac->plat_dat;
@@ -143,7 +143,7 @@ static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
phy_intf_sel = stmmac_get_phy_intf_sel(plat->phy_interface);
if (phy_intf_sel != PHY_INTF_SEL_GMII_MII &&
phy_intf_sel != PHY_INTF_SEL_RMII) {
- dev_err(&pdev->dev, "Unsupported PHY-mode %u\n",
+ dev_err(dev, "Unsupported PHY-mode %u\n",
plat->phy_interface);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 1a616a71c36a..0826a7bd32ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -116,27 +116,39 @@ struct qcom_ethqos {
bool needs_sgmii_loopback;
};
-static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+static u32 rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
return readl(ethqos->rgmii_base + offset);
}
-static void rgmii_writel(struct qcom_ethqos *ethqos,
- int value, unsigned int offset)
+static void rgmii_writel(struct qcom_ethqos *ethqos, u32 value,
+ unsigned int offset)
{
writel(value, ethqos->rgmii_base + offset);
}
-static void rgmii_updatel(struct qcom_ethqos *ethqos,
- int mask, int val, unsigned int offset)
+static void rgmii_updatel(struct qcom_ethqos *ethqos, u32 mask, u32 val,
+ unsigned int offset)
{
- unsigned int temp;
+ u32 temp;
temp = rgmii_readl(ethqos, offset);
temp = (temp & ~(mask)) | val;
rgmii_writel(ethqos, temp, offset);
}
+static void rgmii_setmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, mask, offset);
+}
+
+static void rgmii_clrmask(struct qcom_ethqos *ethqos, u32 mask,
+ unsigned int offset)
+{
+ rgmii_updatel(ethqos, mask, 0, offset);
+}
+
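rgmii_setmask()/rgmii_clrmask() simply pin the value argument of rgmii_updatel() to mask or 0, which removes a large amount of repetition in the speed-programming paths below. A tiny standalone model of the trio:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;	/* stands in for one RGMII macro register */

static void updatel(uint32_t mask, uint32_t val)
{
	reg = (reg & ~mask) | val;
}

static void setmask(uint32_t mask) { updatel(mask, mask); }
static void clrmask(uint32_t mask) { updatel(mask, 0); }

int main(void)
{
	setmask(1U << 3);
	printf("after set: 0x%08x\n", reg);
	clrmask(1U << 3);
	printf("after clr: 0x%08x\n", reg);
	return 0;
}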
static void rgmii_dump(void *priv)
{
struct qcom_ethqos *ethqos = priv;
@@ -194,8 +206,7 @@ qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
qcom_ethqos_set_sgmii_loopback(ethqos, true);
- rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
- RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
static const struct ethqos_emac_por emac_v2_3_0_por[] = {
@@ -300,69 +311,55 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
struct device *dev = &ethqos->pdev->dev;
- unsigned int val;
- int retry = 1000;
+ u32 val;
/* Set CDR_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
- SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
/* Set CDR_EXT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
- SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Clear CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ SDCC_HC_REG_DLL_CONFIG);
}
/* Wait for CK_OUT_EN clear */
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (!val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Clear CK_OUT_EN timedout\n");
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Wait for CK_OUT_EN set */
- retry = 1000;
- do {
- val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
- val &= SDCC_DLL_CONFIG_CK_OUT_EN;
- if (val)
- break;
- mdelay(1);
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDCC_DLL_CONFIG_CK_OUT_EN,
+ 1000, 1000000, false,
+ ethqos, SDCC_HC_REG_DLL_CONFIG))
dev_err(dev, "Set CK_OUT_EN timedout\n");
/* Set DDR_CAL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
- SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_HC_REG_DLL_CONFIG2);
if (!ethqos->has_emac_ge_3) {
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
- 0, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ SDCC_HC_REG_DLL_CONFIG2);
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
@@ -370,8 +367,7 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
BIT(2), SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
SDCC_HC_REG_DLL_CONFIG2);
}
@@ -392,8 +388,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
phase_shift = RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN;
/* Disable loopback mode */
- rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG2);
/* Determine if this platform wants loopback enabled after programming */
if (ethqos->rgmii_config_loopback_en)
@@ -402,29 +398,26 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
loopback = 0;
/* Select RGMII, write 0 to interface select */
- rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_INTF_SEL, RGMII_IO_MACRO_CONFIG);
switch (speed) {
case SPEED_1000:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- RGMII_CONFIG_POS_NEG_DATA_SEL,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
/* PRG_RCLK_DLY = TCXO period * TCXO_CYCLES_CNT / 2 * RX delay ns,
@@ -439,87 +432,78 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
57, SDCC_HC_REG_DDR_CONFIG);
}
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
- SDCC_DDR_CONFIG_PRG_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
BIT(6), RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
break;
case SPEED_10:
- rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
- RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
- RGMII_CONFIG_BYPASS_TX_ID_EN,
+ rgmii_setmask(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_setmask(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_clrmask(ethqos, RGMII_CONFIG_PROG_SWAP,
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
phase_shift, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
BIT(12) | GENMASK(9, 8),
RGMII_IO_MACRO_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ RGMII_IO_MACRO_CONFIG2);
if (ethqos->has_emac_ge_3)
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- RGMII_CONFIG2_RX_PROG_SWAP,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
RGMII_IO_MACRO_CONFIG2);
else
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_clrmask(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
- SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ rgmii_setmask(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
loopback, RGMII_IO_MACRO_CONFIG);
@@ -535,8 +519,8 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos, int speed)
static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
{
struct device *dev = &ethqos->pdev->dev;
- volatile unsigned int dll_lock;
- unsigned int i, retry = 1000;
+ unsigned int i;
+ u32 val;
/* Reset to POR values and enable clk */
for (i = 0; i < ethqos->num_por; i++)
@@ -547,12 +531,12 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
/* Initialize the DLL first */
/* Set DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
- SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
- SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_HC_REG_DLL_CONFIG);
if (ethqos->has_emac_ge_3) {
if (speed == SPEED_1000) {
@@ -566,21 +550,18 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
}
/* Clear DLL_RST */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
/* Clear PDN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
- SDCC_HC_REG_DLL_CONFIG);
+ rgmii_clrmask(ethqos, SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
if (speed != SPEED_100 && speed != SPEED_10) {
/* Set DLL_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
- SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_HC_REG_DLL_CONFIG);
/* Set CK_OUT_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
- SDCC_DLL_CONFIG_CK_OUT_EN,
+ rgmii_setmask(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
SDCC_HC_REG_DLL_CONFIG);
/* Set USR_CTL bit 26 with mask of 3 bits */
@@ -589,14 +570,10 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos, int speed)
SDCC_USR_CTL);
/* wait for DLL LOCK */
- do {
- mdelay(1);
- dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
- if (dll_lock & SDC4_STATUS_DLL_LOCK)
- break;
- retry--;
- } while (retry > 0);
- if (!retry)
+ if (read_poll_timeout_atomic(rgmii_readl, val,
+ val & SDC4_STATUS_DLL_LOCK,
+ 1000, 1000000, true,
+ ethqos, SDC4_STATUS))
dev_err(dev, "Timeout while waiting for DLL lock\n");
}
@@ -631,15 +608,13 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
switch (speed) {
case SPEED_2500:
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_2500);
ethqos_pcs_set_inband(priv, false);
break;
case SPEED_1000:
- rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
- RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ rgmii_setmask(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
ethqos_pcs_set_inband(priv, true);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index bc7bb975803c..be7f5eb2cdcf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -91,7 +91,7 @@ static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
-static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
+static int renesas_gbeth_init(struct device *dev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
struct renesas_gbeth *gbeth = priv;
@@ -113,7 +113,7 @@ static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
return ret;
}
-static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
+static void renesas_gbeth_exit(struct device *dev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
struct renesas_gbeth *gbeth = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index a5c7e03ebc63..0a95f54e725e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -149,11 +149,13 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
return clk_set_rate(clk_mac_speed, rate);
}
-#define HIWORD_UPDATE(val, mask, shift) \
- (FIELD_PREP_WM16((mask) << (shift), (val)))
+#define GRF_FIELD(hi, lo, val) \
+ FIELD_PREP_WM16(GENMASK_U16(hi, lo), val)
+#define GRF_FIELD_CONST(hi, lo, val) \
+ FIELD_PREP_WM16_CONST(GENMASK_U16(hi, lo), val)
-#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
-#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
#define DELAY_ENABLE(soc, tx, rx) \
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
@@ -167,9 +169,9 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
#define RK_MACPHY_ENABLE GRF_BIT(0)
#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+#define RK_GMAC2PHY_RMII_MODE GRF_FIELD(7, 6, 1)
+#define RK_GRF_CON2_MACPHY_ID GRF_FIELD(15, 0, 0x1234)
+#define RK_GRF_CON3_MACPHY_ID GRF_FIELD(5, 0, 0x35)
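GRF registers take hiword-mask writes: the upper 16 bits of the written word enable the corresponding lower bits, so a field can be updated without a read-modify-write cycle. A runnable model of what GRF_FIELD() encodes and what the hardware does with it (genmask16/grf_apply are local stand-ins, not kernel symbols):

#include <stdint.h>
#include <stdio.h>

static uint32_t genmask16(unsigned int hi, unsigned int lo)
{
	return (0xffffU >> (15 - hi)) & (0xffffU << lo);
}

/* Build the write word: mask in the high half, value in the low half. */
static uint32_t grf_field(unsigned int hi, unsigned int lo, uint32_t val)
{
	uint32_t mask = genmask16(hi, lo);

	return (mask << 16) | ((val << lo) & mask);
}

/* What the GRF hardware does with such a write. */
static uint16_t grf_apply(uint16_t reg, uint32_t word)
{
	uint16_t mask = word >> 16;

	return (reg & ~mask) | (word & mask);
}

int main(void)
{
	/* RK_GMAC2PHY_RMII_MODE: field 7:6 = 1 -> bit 6 set, bit 7 clear */
	uint32_t w = grf_field(7, 6, 1);

	printf("write word 0x%08x, reg 0x0000 -> 0x%04x\n",
	       w, grf_apply(0, w));
	return 0;
}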
static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
{
@@ -203,7 +205,7 @@ static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
-#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_24M_CLK_SEL GRF_FIELD(9, 8, 3)
#define RK_FEPHY_PHY_ID GRF_BIT(11)
static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
@@ -232,15 +234,14 @@ static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
-#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define PX30_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define PX30_GMAC_SPEED_100M GRF_BIT(2)
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_PHY_INTF_SEL_RMII);
+ PX30_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static int px30_set_speed(struct rk_priv_data *bsp_priv,
@@ -285,23 +286,20 @@ static const struct rk_gmac_ops px30_ops = {
#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3128_GRF_MAC_CON1 */
-#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
-#define RK3128_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3128_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3128_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
@@ -309,7 +307,7 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3128_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
@@ -320,7 +318,8 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+ RK3128_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3128_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3128_reg_speed_data = {
@@ -350,23 +349,20 @@ static const struct rk_gmac_ops rk3128_ops = {
#define RK3228_GRF_CON_MUX 0x50
/* RK3228_GRF_MAC_CON0 */
-#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3228_GRF_MAC_CON1 */
-#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3228_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
-#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
-#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_125M GRF_FIELD_CONST(9, 8, 0)
+#define RK3228_GMAC_CLK_25M GRF_FIELD_CONST(9, 8, 3)
+#define RK3228_GMAC_CLK_2_5M GRF_FIELD_CONST(9, 8, 2)
#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
@@ -381,7 +377,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3228_GMAC_RMII_MODE_CLR |
DELAY_ENABLE(RK3228, tx_delay, rx_delay));
@@ -393,7 +389,7 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3228_GMAC_RMII_MODE);
/* set MAC to RMII mode */
@@ -435,19 +431,16 @@ static const struct rk_gmac_ops rk3228_ops = {
#define RK3288_GRF_SOC_CON3 0x0250
/*RK3288_GRF_SOC_CON1*/
-#define RK3288_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_CLR_BIT(8))
-#define RK3288_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
- GRF_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL(val) GRF_FIELD(8, 6, val)
#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
-#define RK3288_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
-#define RK3288_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
-#define RK3288_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_125M GRF_FIELD_CONST(13, 12, 0)
+#define RK3288_GMAC_CLK_25M GRF_FIELD_CONST(13, 12, 3)
+#define RK3288_GMAC_CLK_2_5M GRF_FIELD_CONST(13, 12, 2)
#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
@@ -456,14 +449,14 @@ static const struct rk_gmac_ops rk3228_ops = {
#define RK3288_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
#define RK3288_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3288_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3288_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3288_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RGMII |
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3288_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
@@ -474,7 +467,8 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+ RK3288_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3288_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3288_reg_speed_data = {
@@ -501,8 +495,7 @@ static const struct rk_gmac_ops rk3288_ops = {
#define RK3308_GRF_MAC_CON0 0x04a0
/* RK3308_GRF_MAC_CON0 */
-#define RK3308_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
- GRF_BIT(4))
+#define RK3308_GMAC_PHY_INTF_SEL(val) GRF_FIELD(4, 2, val)
#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
@@ -511,7 +504,7 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_PHY_INTF_SEL_RMII);
+ RK3308_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_reg_speed_data rk3308_reg_speed_data = {
@@ -537,23 +530,20 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GRF_MACPHY_CON1 0xb04
/* RK3328_GRF_MAC_CON0 */
-#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
-#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3328_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(13, 7, val)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RK3328_GRF_MAC_CON1 */
-#define RK3328_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3328_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
-#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
-#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12))
-#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_125M GRF_FIELD_CONST(12, 11, 0)
+#define RK3328_GMAC_CLK_25M GRF_FIELD_CONST(12, 11, 3)
+#define RK3328_GMAC_CLK_2_5M GRF_FIELD_CONST(12, 11, 2)
#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
@@ -566,7 +556,7 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_PHY_INTF_SEL_RGMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3328_GMAC_RMII_MODE_CLR |
RK3328_GMAC_RXCLK_DLY_ENABLE |
RK3328_GMAC_TXCLK_DLY_ENABLE);
@@ -584,7 +574,7 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3328_GRF_MAC_CON1;
regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_PHY_INTF_SEL_RMII |
+ RK3328_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
RK3328_GMAC_RMII_MODE);
}
@@ -630,19 +620,16 @@ static const struct rk_gmac_ops rk3328_ops = {
#define RK3366_GRF_SOC_CON7 0x041c
/* RK3366_GRF_SOC_CON6 */
-#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3366_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3366_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -651,14 +638,14 @@ static const struct rk_gmac_ops rk3328_ops = {
#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3366_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
@@ -669,7 +656,8 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+ RK3366_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3366_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3366_reg_speed_data = {
@@ -697,19 +685,16 @@ static const struct rk_gmac_ops rk3366_ops = {
#define RK3368_GRF_SOC_CON16 0x0440
/* RK3368_GRF_SOC_CON15 */
-#define RK3368_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3368_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3368_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3368_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3368_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3368_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3368_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -718,14 +703,14 @@ static const struct rk_gmac_ops rk3366_ops = {
#define RK3368_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3368_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3368_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3368_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3368_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RGMII |
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3368_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
@@ -736,7 +721,8 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+ RK3368_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3368_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3368_reg_speed_data = {
@@ -764,19 +750,16 @@ static const struct rk_gmac_ops rk3368_ops = {
#define RK3399_GRF_SOC_CON6 0xc218
/* RK3399_GRF_SOC_CON5 */
-#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_CLR_BIT(11))
-#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
- GRF_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL(val) GRF_FIELD(11, 9, val)
#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
-#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
-#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
-#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_125M GRF_FIELD_CONST(5, 4, 0)
+#define RK3399_GMAC_CLK_25M GRF_FIELD_CONST(5, 4, 3)
+#define RK3399_GMAC_CLK_2_5M GRF_FIELD_CONST(5, 4, 2)
#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
@@ -785,14 +768,14 @@ static const struct rk_gmac_ops rk3368_ops = {
#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
-#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3399_GMAC_RMII_MODE_CLR);
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
@@ -803,7 +786,8 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+ RK3399_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII) |
+ RK3399_GMAC_RMII_MODE);
}
static const struct rk_reg_speed_data rk3399_reg_speed_data = {
@@ -901,8 +885,8 @@ static const struct rk_gmac_ops rk3506_ops = {
#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
-#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
-#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
@@ -916,9 +900,9 @@ static const struct rk_gmac_ops rk3506_ops = {
#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
-#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
-#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
-#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV1 GRF_FIELD_CONST(11, 10, 0)
+#define RK3528_GMAC1_CLK_RGMII_DIV5 GRF_FIELD_CONST(11, 10, 3)
+#define RK3528_GMAC1_CLK_RGMII_DIV50 GRF_FIELD_CONST(11, 10, 2)
#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
@@ -1029,10 +1013,7 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GRF_GMAC1_CON1 0x038c
/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
-#define RK3568_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RK3568_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3568_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1041,8 +1022,8 @@ static const struct rk_gmac_ops rk3528_ops = {
#define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
-#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3568_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3568_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
@@ -1059,7 +1040,7 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
regmap_write(bsp_priv->grf, con1,
- RK3568_GMAC_PHY_INTF_SEL_RGMII |
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RK3568_GMAC_RXCLK_DLY_ENABLE |
RK3568_GMAC_TXCLK_DLY_ENABLE);
}
@@ -1070,7 +1051,8 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
- regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
+ regmap_write(bsp_priv->grf, con1,
+ RK3568_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rk3568_ops = {
@@ -1096,8 +1078,8 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
#define RK3576_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
-#define RK3576_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RK3576_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RK3576_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RK3576_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* SDGMAC_GRF */
#define RK3576_GRF_GMAC_CON0 0X0020
@@ -1112,12 +1094,9 @@ static const struct rk_gmac_ops rk3568_ops = {
#define RK3576_GMAC_CLK_RMII_DIV2 GRF_BIT(5)
#define RK3576_GMAC_CLK_RMII_DIV20 GRF_CLR_BIT(5)
-#define RK3576_GMAC_CLK_RGMII_DIV1 \
- (GRF_CLR_BIT(6) | GRF_CLR_BIT(5))
-#define RK3576_GMAC_CLK_RGMII_DIV5 \
- (GRF_BIT(6) | GRF_BIT(5))
-#define RK3576_GMAC_CLK_RGMII_DIV50 \
- (GRF_BIT(6) | GRF_CLR_BIT(5))
+#define RK3576_GMAC_CLK_RGMII_DIV1 GRF_FIELD_CONST(6, 5, 0)
+#define RK3576_GMAC_CLK_RGMII_DIV5 GRF_FIELD_CONST(6, 5, 3)
+#define RK3576_GMAC_CLK_RGMII_DIV50 GRF_FIELD_CONST(6, 5, 2)
#define RK3576_GMAC_CLK_RMII_GATE GRF_BIT(4)
#define RK3576_GMAC_CLK_RMII_NOGATE GRF_CLR_BIT(4)
@@ -1220,17 +1199,15 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
-#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
-#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) GRF_FIELD(15, 8, val)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) GRF_FIELD(7, 0, val)
/* php_grf */
#define RK3588_GRF_GMAC_CON0 0X0008
#define RK3588_GRF_CLK_CON1 0X0070
-#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
- (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
-#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
- (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL(id, val) \
+ (GRF_FIELD(5, 3, val) << ((id) * 6))
#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
@@ -1242,11 +1219,11 @@ static const struct rk_gmac_ops rk3576_ops = {
#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 0) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
- (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 3) << ((id) * 5))
#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
- (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+ (GRF_FIELD_CONST(3, 2, 2) << ((id) * 5))
#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
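The RK3588 variant shifts the same 3-bit field by the per-port stride of six
bits, write-enable bits included. The PHY_INTF_SEL_* values are not shown in
this excerpt; RGMII = 1 and RMII = 4 can be inferred from the removed per-bit
macros. A hypothetical compile-time check of the equivalence for GMAC1:

static_assert(RK3588_GMAC_PHY_INTF_SEL(1, PHY_INTF_SEL_RMII) ==
	      (GRF_BIT(11) | GRF_CLR_BIT(10) | GRF_CLR_BIT(9)));

Both sides set bit 11, clear bits 10:9, and raise the matching write-enable
bits 27:25, just like the old RK3588_GMAC_PHY_INTF_SEL_RMII(1).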
@@ -1260,7 +1237,7 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
RK3588_GRF_GMAC_CON8;
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+ RK3588_GMAC_PHY_INTF_SEL(id, PHY_INTF_SEL_RGMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RGMII_MODE(id));
@@ -1277,7 +1254,7 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
- RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+ RK3588_GMAC_PHY_INTF_SEL(bsp_priv->id, PHY_INTF_SEL_RMII));
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
@@ -1347,8 +1324,7 @@ static const struct rk_gmac_ops rk3588_ops = {
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
-#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
- GRF_BIT(6))
+#define RV1108_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
@@ -1359,7 +1335,7 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_PHY_INTF_SEL_RMII);
+ RV1108_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_reg_speed_data rv1108_reg_speed_data = {
@@ -1384,10 +1360,7 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GRF_GMAC_CON2 0X0078
/* RV1126_GRF_GMAC_CON0 */
-#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
- (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
-#define RV1126_GMAC_PHY_INTF_SEL_RMII \
- (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL(val) GRF_FIELD(6, 4, val)
#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
@@ -1400,17 +1373,17 @@ static const struct rk_gmac_ops rv1108_ops = {
#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
/* RV1126_GRF_GMAC_CON1 */
-#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
/* RV1126_GRF_GMAC_CON2 */
-#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
-#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) GRF_FIELD(14, 8, val)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) GRF_FIELD(6, 0, val)
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RGMII) |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
@@ -1428,7 +1401,7 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
- RV1126_GMAC_PHY_INTF_SEL_RMII);
+ RV1126_GMAC_PHY_INTF_SEL(PHY_INTF_SEL_RMII));
}
static const struct rk_gmac_ops rv1126_ops = {
@@ -1762,8 +1735,7 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
struct rk_priv_data *bsp_priv = bsp_priv_;
if (bsp_priv->ops->set_speed)
- return bsp_priv->ops->set_speed(bsp_priv, bsp_priv->phy_iface,
- speed);
+ return bsp_priv->ops->set_speed(bsp_priv, interface, speed);
return -EINVAL;
}
@@ -1790,6 +1762,22 @@ static int rk_gmac_resume(struct device *dev, void *bsp_priv_)
return 0;
}
+static int rk_gmac_init(struct device *dev, void *bsp_priv)
+{
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct device *dev, void *bsp_priv_)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ rk_gmac_powerdown(bsp_priv);
+
+ if (priv->plat->phy_node && bsp_priv->integrated_phy)
+ clk_put(bsp_priv->clk_phy);
+}
+
static int rk_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -1822,6 +1810,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->get_interfaces = rk_get_interfaces;
plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
+ plat_dat->init = rk_gmac_init;
+ plat_dat->exit = rk_gmac_exit;
plat_dat->suspend = rk_gmac_suspend;
plat_dat->resume = rk_gmac_resume;
@@ -1833,33 +1823,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = rk_gmac_powerup(plat_dat->bsp_priv);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_gmac_powerdown;
-
- return 0;
-
-err_gmac_powerdown:
- rk_gmac_powerdown(plat_dat->bsp_priv);
-
- return ret;
-}
-
-static void rk_gmac_remove(struct platform_device *pdev)
-{
- struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
- struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
-
- stmmac_dvr_remove(&pdev->dev);
-
- rk_gmac_powerdown(bsp_priv);
-
- if (priv->plat->phy_node && bsp_priv->integrated_phy)
- clk_put(bsp_priv->clk_phy);
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
static const struct of_device_id rk_gmac_dwmac_match[] = {
@@ -1885,7 +1849,6 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
static struct platform_driver rk_gmac_dwmac_driver = {
.probe = rk_gmac_probe,
- .remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
.pm = &stmmac_simple_pm_ops,
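The conversion moves dwmac-rk's power sequencing out of probe/remove and into
the core's init/exit hooks; the resulting ordering, sketched from the
stmmac_dvr_probe() and stmmac_dvr_remove() hunks later in this patch:

/*
 * devm_stmmac_pltfr_probe()
 *   -> stmmac_dvr_probe()
 *        -> plat_dat->init()      rk_gmac_init() -> rk_gmac_powerup()
 *        -> __stmmac_dvr_probe()  plat_dat->exit() runs on failure
 * unbind (devm-managed remove)
 *   -> stmmac_dvr_remove()
 *        -> plat_dat->exit()      rk_gmac_exit() -> rk_gmac_powerdown()
 */

Suspend/resume keep their dedicated callbacks (rk_gmac_suspend/resume), so the
init/exit pair only covers probe-time bring-up and teardown.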
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 2b7ad64bfdf7..5a485ee98fa7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -47,7 +47,7 @@ static int s32_gmac_write_phy_intf_select(struct s32_priv_data *gmac)
return 0;
}
-static int s32_gmac_init(struct platform_device *pdev, void *priv)
+static int s32_gmac_init(struct device *dev, void *priv)
{
struct s32_priv_data *gmac = priv;
int ret;
@@ -55,31 +55,31 @@ static int s32_gmac_init(struct platform_device *pdev, void *priv)
/* Set initial TX interface clock */
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- dev_err(&pdev->dev, "Can't enable tx clock\n");
+ dev_err(dev, "Can't enable tx clock\n");
return ret;
}
ret = clk_set_rate(gmac->tx_clk, GMAC_INTF_RATE_125M);
if (ret) {
- dev_err(&pdev->dev, "Can't set tx clock\n");
+ dev_err(dev, "Can't set tx clock\n");
goto err_tx_disable;
}
/* Set initial RX interface clock */
ret = clk_prepare_enable(gmac->rx_clk);
if (ret) {
- dev_err(&pdev->dev, "Can't enable rx clock\n");
+ dev_err(dev, "Can't enable rx clock\n");
goto err_tx_disable;
}
ret = clk_set_rate(gmac->rx_clk, GMAC_INTF_RATE_125M);
if (ret) {
- dev_err(&pdev->dev, "Can't set rx clock\n");
+ dev_err(dev, "Can't set rx clock\n");
goto err_txrx_disable;
}
/* Set interface mode */
ret = s32_gmac_write_phy_intf_select(gmac);
if (ret) {
- dev_err(&pdev->dev, "Can't set PHY interface mode\n");
+ dev_err(dev, "Can't set PHY interface mode\n");
goto err_txrx_disable;
}
@@ -92,7 +92,7 @@ err_tx_disable:
return ret;
}
-static void s32_gmac_exit(struct platform_device *pdev, void *priv)
+static void s32_gmac_exit(struct device *dev, void *priv)
{
struct s32_priv_data *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 49d651948e2b..a2b52d2c4eb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -551,7 +551,7 @@ static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
return priv->hw->phylink_pcs;
}
-static int socfpga_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+static int socfpga_dwmac_init(struct device *dev, void *bsp_priv)
{
struct socfpga_dwmac *dwmac = bsp_priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
index 3b7947a7a7ba..44d4ceb8415f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -7,11 +7,16 @@
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "stmmac_platform.h"
+struct sophgo_dwmac_data {
+ bool has_internal_rx_delay;
+};
+
static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
@@ -24,7 +29,6 @@ static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
- plat_dat->unicast_filter_entries = 1;
return 0;
}
@@ -32,6 +36,7 @@ static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
static int sophgo_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
+ const struct sophgo_dwmac_data *data;
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
int ret;
@@ -50,11 +55,23 @@ static int sophgo_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
+ data = device_get_match_data(&pdev->dev);
+ if (data && data->has_internal_rx_delay) {
+ plat_dat->phy_interface = phy_fix_phy_mode_for_mac_delays(plat_dat->phy_interface,
+ false, true);
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_NA)
+ return -EINVAL;
+ }
+
return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
}
+static const struct sophgo_dwmac_data sg2042_dwmac_data = {
+ .has_internal_rx_delay = true,
+};
+
static const struct of_device_id sophgo_dwmac_match[] = {
- { .compatible = "sophgo,sg2042-dwmac" },
+ { .compatible = "sophgo,sg2042-dwmac", .data = &sg2042_dwmac_data },
{ .compatible = "sophgo,sg2044-dwmac" },
{ /* sentinel */ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index b0509ab6b31c..f50547b67fbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -229,14 +229,14 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
return 0;
}
-static int sti_dwmac_init(struct platform_device *pdev, void *bsp_priv)
+static int sti_dwmac_init(struct device *dev, void *bsp_priv)
{
struct sti_dwmac *dwmac = bsp_priv;
return clk_prepare_enable(dwmac->clk);
}
-static void sti_dwmac_exit(struct platform_device *pdev, void *bsp_priv)
+static void sti_dwmac_exit(struct device *dev, void *bsp_priv)
{
struct sti_dwmac *dwmac = bsp_priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 7434d4bbb526..8aa496ac85cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -571,16 +571,16 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
-static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+static int sun8i_dwmac_init(struct device *dev, void *priv)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct sunxi_priv_data *gmac = priv;
int ret;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
if (ret) {
- dev_err(&pdev->dev, "Fail to enable regulator\n");
+ dev_err(dev, "Fail to enable regulator\n");
return ret;
}
}
@@ -1005,7 +1005,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
(H3_EPHY_SHUTDOWN | H3_EPHY_SELECT));
}
-static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+static void sun8i_dwmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
@@ -1265,7 +1265,7 @@ static void sun8i_dwmac_shutdown(struct platform_device *pdev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- sun8i_dwmac_exit(pdev, gmac);
+ sun8i_dwmac_exit(&pdev->dev, gmac);
}
static const struct of_device_id sun8i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 7f560d78209d..52593ba3a3a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -27,7 +27,7 @@ struct sunxi_priv_data {
#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
#define SUN7I_GMAC_MII_RATE 25000000
-static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+static int sun7i_gmac_init(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
int ret = 0;
@@ -58,7 +58,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+static void sun7i_gmac_exit(struct device *dev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index a3378046b061..e291028ba56e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -186,7 +186,7 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
return 0;
}
-static int thead_dwmac_init(struct platform_device *pdev, void *priv)
+static int thead_dwmac_init(struct device *dev, void *priv)
{
struct thead_dwmac *dwmac = priv;
unsigned int reg;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 118a22406a2e..5877fec9f6c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -19,7 +19,6 @@
static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
- int i;
pr_info("dwmac1000: Master AXI performs %s burst length\n",
!(value & DMA_AXI_UNDEF) ? "fixed" : "any");
@@ -39,33 +38,10 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is read-only, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_AXI_BUS_MODE);
}
@@ -159,10 +135,10 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv,
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
- csr6 |= DMA_CONTROL_RSF;
+ csr6 |= DMA_CONTROL_RSF | DMA_CONTROL_DFF;
} else {
pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
- csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= ~(DMA_CONTROL_RSF | DMA_CONTROL_DFF);
csr6 &= DMA_CONTROL_TC_RX_MASK;
if (mode <= 32)
csr6 |= DMA_CONTROL_RTC_32;
@@ -286,6 +262,7 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
.dma_rx_mode = dwmac1000_dma_operation_mode_rx,
.dma_tx_mode = dwmac1000_dma_operation_mode_tx,
.enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_reception = dwmac_enable_dma_reception,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
.start_tx = dwmac_dma_start_tx,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d87a8b595e6a..7b513324cfb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -18,7 +18,6 @@
static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
- int i;
pr_info("dwmac4: Master AXI performs %s burst length\n",
(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
@@ -38,33 +37,10 @@ static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
/* Depending on the UNDEF bit the Master AXI will perform any burst
* length according to the BLEN programmed (by default all BLEN are
- * set).
+ * set). Note that the UNDEF bit is read-only, and is the inverse of
+ * Bus Mode bit 16.
*/
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= DMA_AXI_BLEN256;
- break;
- case 128:
- value |= DMA_AXI_BLEN128;
- break;
- case 64:
- value |= DMA_AXI_BLEN64;
- break;
- case 32:
- value |= DMA_AXI_BLEN32;
- break;
- case 16:
- value |= DMA_AXI_BLEN16;
- break;
- case 8:
- value |= DMA_AXI_BLEN8;
- break;
- case 4:
- value |= DMA_AXI_BLEN4;
- break;
- }
- }
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + DMA_SYS_BUS_MODE);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 4f980dcd3958..f27126f05551 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -69,15 +69,8 @@
#define DMA_SYS_BUS_MB BIT(14)
#define DMA_AXI_1KBBE BIT(13)
-#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_AAL DMA_AXI_AAL
#define DMA_SYS_BUS_EAME BIT(11)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_SYS_BUS_FB BIT(0)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
@@ -85,8 +78,6 @@
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
-
/* DMA TBS Control */
#define DMA_TBS_FTOS GENMASK(31, 8)
#define DMA_TBS_FTOV BIT(0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 5d9c18f5bbf5..054ecb20ce3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -68,23 +68,14 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define DMA_AXI_OSR_MAX 0xf
#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
(DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
-#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_AAL BIT(12)
-#define DMA_AXI_BLEN256 BIT(7)
-#define DMA_AXI_BLEN128 BIT(6)
-#define DMA_AXI_BLEN64 BIT(5)
-#define DMA_AXI_BLEN32 BIT(4)
-#define DMA_AXI_BLEN16 BIT(3)
-#define DMA_AXI_BLEN8 BIT(2)
-#define DMA_AXI_BLEN4 BIT(1)
#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
DMA_AXI_BLEN4)
-#define DMA_AXI_UNDEF BIT(0)
+#define DMA_AXI_1KBBE BIT(13)
-#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+#define DMA_AXI_UNDEF BIT(0)
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
@@ -178,6 +169,7 @@ static inline u32 dma_chan_base_addr(u32 base, u32 chan)
#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan);
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan);
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 467f1a05747e..97a803d68e3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -33,6 +33,11 @@ void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan)
writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan));
}
+void dwmac_enable_dma_reception(void __iomem *ioaddr, u32 chan)
+{
+ writel(1, ioaddr + DMA_CHAN_RCV_POLL_DEMAND(chan));
+}
+
void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index e48cfa05000c..fecda3034d36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -338,16 +338,9 @@
#define XGMAC_RD_OSR_LMT_SHIFT 16
#define XGMAC_EN_LPI BIT(15)
#define XGMAC_LPI_XIT_PKT BIT(14)
-#define XGMAC_AAL BIT(12)
+#define XGMAC_AAL DMA_AXI_AAL
#define XGMAC_EAME BIT(11)
-#define XGMAC_BLEN GENMASK(7, 1)
-#define XGMAC_BLEN256 BIT(7)
-#define XGMAC_BLEN128 BIT(6)
-#define XGMAC_BLEN64 BIT(5)
-#define XGMAC_BLEN32 BIT(4)
-#define XGMAC_BLEN16 BIT(3)
-#define XGMAC_BLEN8 BIT(2)
-#define XGMAC_BLEN4 BIT(1)
+/* XGMAC_BLEN* are now defined as DMA_AXI_BLEN* in common.h */
#define XGMAC_UNDEF BIT(0)
#define XGMAC_TX_EDMA_CTRL 0x00003040
#define XGMAC_TDPS GENMASK(29, 0)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 4d6bb995d8d8..cc1bdc0975d5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -84,7 +84,6 @@ static void dwxgmac2_dma_init_tx_chan(struct stmmac_priv *priv,
static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
- int i;
if (axi->axi_lpi_en)
value |= XGMAC_EN_LPI;
@@ -102,32 +101,12 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
if (!axi->axi_fb)
value |= XGMAC_UNDEF;
- value &= ~XGMAC_BLEN;
- for (i = 0; i < AXI_BLEN; i++) {
- switch (axi->axi_blen[i]) {
- case 256:
- value |= XGMAC_BLEN256;
- break;
- case 128:
- value |= XGMAC_BLEN128;
- break;
- case 64:
- value |= XGMAC_BLEN64;
- break;
- case 32:
- value |= XGMAC_BLEN32;
- break;
- case 16:
- value |= XGMAC_BLEN16;
- break;
- case 8:
- value |= XGMAC_BLEN8;
- break;
- case 4:
- value |= XGMAC_BLEN4;
- break;
- }
- }
+ /* Depending on the UNDEF bit the Master AXI will perform any burst
+ * length according to the BLEN programmed (by default all BLEN are
+ * set). Note that the UNDEF bit is read-only, and is the inverse of
+ * Bus Mode bit 16.
+ */
+ value = (value & ~DMA_AXI_BLEN_MASK) | axi->axi_blen_regval;
writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index d359722100fa..df6e8a567b1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -201,6 +201,7 @@ struct stmmac_dma_ops {
void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x,
void __iomem *ioaddr);
void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan);
+ void (*enable_dma_reception)(void __iomem *ioaddr, u32 chan);
void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 chan, bool rx, bool tx);
void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -261,6 +262,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
#define stmmac_enable_dma_transmission(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
+#define stmmac_enable_dma_reception(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_reception, __args)
#define stmmac_enable_dma_irq(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_dma_irq, __priv, __args)
#define stmmac_disable_dma_irq(__priv, __args...) \
@@ -541,7 +544,7 @@ struct stmmac_rx_queue;
struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ bool (*is_jumbo_frm)(unsigned int len, bool enh_desc);
int (*jumbo_frm)(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
int csum);
int (*set_16kib_bfsize)(int mtu);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d218412ca832..382d94a3b972 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -91,14 +91,9 @@ static int jumbo_frm(struct stmmac_tx_queue *tx_q, struct sk_buff *skb,
return entry;
}
-static unsigned int is_jumbo_frm(int len, int enh_desc)
+static bool is_jumbo_frm(unsigned int len, bool enh_desc)
{
- unsigned int ret = 0;
-
- if (len >= BUF_SIZE_4KiB)
- ret = 1;
-
- return ret;
+ return len >= BUF_SIZE_4KiB;
}
static void refill_desc3(struct stmmac_rx_queue *rx_q, struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 0ea74c88a779..012b0a477255 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -254,8 +254,8 @@ struct stmmac_priv {
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
- int sph;
- int sph_cap;
+ bool sph_active;
+ bool sph_capable;
u32 sarc_type;
u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
@@ -408,6 +408,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev);
+
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
return !!priv->xdp_prog;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
new file mode 100644
index 000000000000..5c5dd502f79a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCI bus helpers for STMMAC driver
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include "stmmac_libpci.h"
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_suspend);
+
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_pci_plat_resume);
+
+MODULE_DESCRIPTION("STMMAC PCI helper library");
+MODULE_AUTHOR("Yao Zi <ziyao@disroot.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
new file mode 100644
index 000000000000..71553184f982
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Yao Zi <ziyao@disroot.org>
+ */
+
+#ifndef __STMMAC_LIBPCI_H__
+#define __STMMAC_LIBPCI_H__
+
+int stmmac_pci_plat_suspend(struct device *dev, void *bsp_priv);
+int stmmac_pci_plat_resume(struct device *dev, void *bsp_priv);
+
+#endif /* __STMMAC_LIBPCI_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d202f604161e..da206b24aaed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -190,6 +190,44 @@ int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
/**
+ * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
+ * @regval: pointer to a u32 for the resulting register value
+ * @blen: pointer to an array of u32 containing the burst length values
+ *        (4 to 256 beats, powers of two; zero entries are skipped)
+ * @len: the number of entries in the @blen array
+ */
+void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
+{
+ size_t i;
+ u32 val;
+
+ for (val = i = 0; i < len; i++) {
+ u32 burst = blen[i];
+
+ /* Burst values of zero must be skipped. */
+ if (!burst)
+ continue;
+
+ /* The valid range for the burst length is 4 to 256 inclusive,
+ * and it must be a power of two.
+ */
+ if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
+ pr_err("stmmac: invalid burst length %u at index %zu\n",
+ burst, i);
+ continue;
+ }
+
+ /* Since burst is a power of two, and the register field starts
+ * with burst = 4, shift right by two bits so bit 0 of the field
+ * corresponds with the minimum value.
+ */
+ val |= burst >> 2;
+ }
+
+ *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
+}
+EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
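An illustrative conversion, assuming DMA_AXI_BLEN_MASK is GENMASK(7, 1) so the
field lines up with the BLEN4..BLEN256 bits consolidated elsewhere in this
patch:

	u32 regval;
	static const u32 blen[AXI_BLEN] = { 4, 8, 16, 32 };

	stmmac_axi_blen_to_mask(&regval, blen, AXI_BLEN);
	/*
	 * Each valid burst contributes burst >> 2 to the field value:
	 * 4 -> 1, 8 -> 2, 16 -> 4, 32 -> 8, so the field is 0xf and
	 * regval == DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
	 *           DMA_AXI_BLEN16 | DMA_AXI_BLEN32
	 * -- the value snps_gmac5_default_data() now assigns directly.
	 */

The zero entries at the tail of the array are skipped, which is what lets
callers pass a fixed-size AXI_BLEN array regardless of how many burst lengths
are actually configured.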
+
+/**
* stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
* errors.
@@ -1245,7 +1283,11 @@ static int stmmac_phylink_setup(struct stmmac_priv *priv)
/* Stmmac always requires an RX clock for hardware initialization */
config->mac_requires_rxc = true;
- if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
+ /* Disable EEE RX clock stop to ensure VLAN register access works
+ * correctly.
+ */
+ if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
+ !(priv->dev->features & NETIF_F_VLAN_FEATURES))
config->eee_rx_clk_stop_enable = true;
/* Set the default transmit clock stop bit based on the platform glue */
@@ -1523,7 +1565,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
buf->page_offset = stmmac_rx_offset(priv);
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -2109,7 +2151,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
pp_params.offset = stmmac_rx_offset(priv);
pp_params.max_len = dma_conf->dma_buf_sz;
- if (priv->sph) {
+ if (priv->sph_active) {
pp_params.offset = 0;
pp_params.max_len += stmmac_rx_offset(priv);
}
@@ -3603,7 +3645,7 @@ static int stmmac_hw_setup(struct net_device *dev)
}
/* Enable Split Header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
for (chan = 0; chan < rx_cnt; chan++)
stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
@@ -4579,18 +4621,18 @@ static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
*/
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned int first_entry, tx_packets, enh_desc;
+ bool enh_desc, has_vlan, set_ic, is_jumbo = false;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int nopaged_len = skb_headlen(skb);
- int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, tx_packets;
int gso = skb_shinfo(skb)->gso_type;
struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ int i, csum_insertion = 0;
int entry, first_tx;
dma_addr_t des;
u32 sdu_len;
@@ -4895,7 +4937,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
- if (priv->sph && !buf->sec_page) {
+ if (priv->sph_active && !buf->sec_page) {
buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4906,7 +4948,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->sph)
+ if (priv->sph_active)
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
else
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
@@ -4931,6 +4973,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
(rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ /* Wake up Rx DMA from the suspend state if required */
+ stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
@@ -4941,12 +4985,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
- if (priv->sph && len)
+ if (priv->sph_active && len)
return 0;
/* First descriptor, get split header length */
stmmac_get_rx_header_len(priv, p, &hlen);
- if (priv->sph && hlen) {
+ if (priv->sph_active && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
}
@@ -4969,7 +5013,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
unsigned int plen = 0;
/* Not split header, buffer is not available */
- if (!priv->sph)
+ if (!priv->sph_active)
return 0;
/* Not last descriptor */
@@ -5352,10 +5396,10 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
+read_again:
if (count >= limit)
break;
-read_again:
buf1_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -6037,8 +6081,8 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- if (priv->sph_cap) {
- bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_capable) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
u32 chan;
for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
@@ -6987,7 +7031,7 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* Adjust Split header */
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
@@ -7489,7 +7533,8 @@ static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
}
static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
struct stmmac_priv *priv = dl_priv->stmmac_priv;
@@ -7555,19 +7600,43 @@ static void stmmac_unregister_devlink(struct stmmac_priv *priv)
devlink_free(priv->devlink);
}
-/**
- * stmmac_dvr_probe
- * @device: device pointer
- * @plat_dat: platform data pointer
- * @res: stmmac resource pointer
- * Description: this is the main probe function used to
- * call the alloc_etherdev, allocate the priv structure.
- * Return:
- * returns 0 on success, otherwise errno.
- */
-int stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *res)
+struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ int i;
+
+ plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
+ if (!plat_dat)
+ return NULL;
+
+ /* Set the defaults:
+ * - phy autodetection
+ * - determine GMII_Address CR field from CSR clock
+ * - allow MTU up to JUMBO_LEN
+ * - hash table size
+ * - one unicast filter entry
+ */
+ plat_dat->phy_addr = -1;
+ plat_dat->clk_csr = -1;
+ plat_dat->maxmtu = JUMBO_LEN;
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+ plat_dat->unicast_filter_entries = 1;
+
+ /* Set the mtl defaults */
+ plat_dat->tx_queues_to_use = 1;
+ plat_dat->rx_queues_to_use = 1;
+
+ /* Setup the default RX queue channel map */
+ for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
+ plat_dat->rx_queues_cfg[i].chan = i;
+
+ return plat_dat;
+}
+EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
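A sketch of a glue driver adopting the allocator; foo_dwmac_probe() and the
queue-count overrides are placeholders, while the stmmac calls mirror the
stmmac_pci.c hunk later in this patch:

static int foo_dwmac_probe(struct platform_device *pdev)
{
	struct stmmac_resources res = {};
	struct plat_stmmacenet_data *plat;

	plat = stmmac_plat_dat_alloc(&pdev->dev);
	if (!plat)
		return -ENOMEM;

	/* Override only what differs from the common defaults. */
	plat->tx_queues_to_use = 4;
	plat->rx_queues_to_use = 4;

	/* res.addr, res.irq etc. must still be filled in by the glue. */
	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}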
+
+static int __stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
@@ -7702,8 +7771,8 @@ int stmmac_dvr_probe(struct device *device,
if (priv->dma_cap.sphen &&
!(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
ndev->hw_features |= NETIF_F_GRO;
- priv->sph_cap = true;
- priv->sph = priv->sph_cap;
+ priv->sph_capable = true;
+ priv->sph_active = priv->sph_capable;
dev_info(priv->device, "SPH feature enabled\n");
}
@@ -7868,6 +7937,34 @@ error_wq_init:
return ret;
}
+
+/**
+ * stmmac_dvr_probe
+ * @dev: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * returns 0 on success, otherwise errno.
+ */
+int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ if (plat_dat->init) {
+ ret = plat_dat->init(dev, plat_dat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = __stmmac_dvr_probe(dev, plat_dat, res);
+ if (ret && plat_dat->exit)
+ plat_dat->exit(dev, plat_dat->bsp_priv);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
@@ -7906,6 +8003,9 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 94b3a3b27270..270ad066ced3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include "stmmac.h"
+#include "stmmac_libpci.h"
struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
@@ -27,26 +28,6 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->force_sf_dma_mode = 1;
plat->mdio_bus_data->needs_reset = true;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default number of RX and TX queues to use */
- plat->tx_queues_to_use = 1;
- plat->rx_queues_to_use = 1;
-
- /* Disable Priority config by default */
- plat->tx_queues_cfg[0].use_prio = false;
- plat->rx_queues_cfg[0].use_prio = false;
-
- /* Disable RX queues routing by default */
- plat->rx_queues_cfg[0].pkt_route = 0x0;
}
static int stmmac_default_data(struct pci_dev *pdev,
@@ -81,22 +62,12 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->flags |= STMMAC_FLAG_TSO_EN;
plat->pmt = 1;
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
/* Set default number of RX and TX queues to use */
plat->tx_queues_to_use = 4;
plat->rx_queues_to_use = 4;
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
for (i = 0; i < plat->tx_queues_to_use; i++) {
- plat->tx_queues_cfg[i].use_prio = false;
plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[i].weight = 25;
if (i > 0)
@@ -104,15 +75,10 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
}
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
- for (i = 0; i < plat->rx_queues_to_use; i++) {
- plat->rx_queues_cfg[i].use_prio = false;
+ for (i = 0; i < plat->rx_queues_to_use; i++)
plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
- plat->rx_queues_cfg[i].pkt_route = 0x0;
- plat->rx_queues_cfg[i].chan = i;
- }
plat->bus_id = 1;
- plat->phy_addr = -1;
plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
@@ -127,10 +93,8 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->axi->axi_rd_osr_lmt = 31;
plat->axi->axi_fb = false;
- plat->axi->axi_blen[0] = 4;
- plat->axi->axi_blen[1] = 8;
- plat->axi->axi_blen[2] = 16;
- plat->axi->axi_blen[3] = 32;
+ plat->axi->axi_blen_regval = DMA_AXI_BLEN4 | DMA_AXI_BLEN8 |
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN32;
return 0;
}
@@ -139,37 +103,6 @@ static const struct stmmac_pci_info snps_gmac5_pci_info = {
.setup = snps_gmac5_default_data,
};
-static int stmmac_pci_suspend(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int stmmac_pci_resume(struct device *dev, void *bsp_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
/**
* stmmac_pci_probe
*
@@ -191,7 +124,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
int ret;
int i;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return -ENOMEM;
@@ -249,8 +182,8 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 1;
plat->safety_feat_cfg->tmouten = 1;
- plat->suspend = stmmac_pci_suspend;
- plat->resume = stmmac_pci_resume;
+ plat->suspend = stmmac_pci_plat_suspend;
+ plat->resume = stmmac_pci_plat_resume;
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6483d52b4c0f..8979a50b5507 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -95,6 +95,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
struct device_node *np;
struct stmmac_axi *axi;
+	u32 axi_blen[AXI_BLEN] = {};	/* zeroed: "snps,blen" may be absent */
np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
if (!np)
@@ -117,7 +118,8 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_wr_osr_lmt = 1;
if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
axi->axi_rd_osr_lmt = 1;
- of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_property_read_u32_array(np, "snps,blen", axi_blen, AXI_BLEN);
+ stmmac_axi_blen_to_mask(&axi->axi_blen_regval, axi_blen, AXI_BLEN);
of_node_put(np);
return axi;
@@ -137,13 +139,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
u8 queue = 0;
int ret = 0;
- /* For backwards-compatibility with device trees that don't have any
- * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
- * to one RX and TX queues each.
- */
- plat->rx_queues_to_use = 1;
- plat->tx_queues_to_use = 1;
-
/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
* to always set this, otherwise Queue will be classified as AVB
* (because MTL_QUEUE_AVB = 0).
@@ -162,9 +157,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing RX queues common config */
- if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
- &plat->rx_queues_to_use))
- plat->rx_queues_to_use = 1;
+ of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use);
if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
@@ -185,18 +179,13 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
else
plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
- if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
- &plat->rx_queues_cfg[queue].chan))
- plat->rx_queues_cfg[queue].chan = queue;
+ of_property_read_u32(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan);
/* TODO: Dynamic mapping to be included in the future */
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->rx_queues_cfg[queue].prio)) {
- plat->rx_queues_cfg[queue].prio = 0;
- plat->rx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio))
plat->rx_queues_cfg[queue].use_prio = true;
- }
/* RX queue specific packet type routing */
if (of_property_read_bool(q_node, "snps,route-avcp"))
@@ -209,8 +198,6 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
- else
- plat->rx_queues_cfg[queue].pkt_route = 0x0;
queue++;
}
@@ -221,9 +208,8 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing TX queues common config */
- if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
- &plat->tx_queues_to_use))
- plat->tx_queues_to_use = 1;
+ of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use);
if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
@@ -268,13 +254,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
- if (of_property_read_u32(q_node, "snps,priority",
- &plat->tx_queues_cfg[queue].prio)) {
- plat->tx_queues_cfg[queue].prio = 0;
- plat->tx_queues_cfg[queue].use_prio = false;
- } else {
+ if (!of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio))
plat->tx_queues_cfg[queue].use_prio = true;
- }
plat->tx_queues_cfg[queue].coe_unsupported =
of_property_read_bool(q_node, "snps,coe-unsupported");
@@ -436,7 +418,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
void *ret;
int rc;
- plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ plat = stmmac_plat_dat_alloc(&pdev->dev);
if (!plat)
return ERR_PTR(-ENOMEM);
@@ -480,13 +462,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->bus_id = ++bus_id;
}
- /* Default to phy auto-detection */
- plat->phy_addr = -1;
-
- /* Default to get clk_csr from stmmac_clk_csr_set(),
- * or get clk_csr from device tree.
- */
- plat->clk_csr = -1;
if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
of_property_read_u32(np, "clk_csr", &plat->clk_csr);
@@ -515,17 +490,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
}
- /* Set the maxmtu to a default of JUMBO_LEN in case the
- * parameter is not present in the device tree.
- */
- plat->maxmtu = JUMBO_LEN;
-
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
/*
* Currently only the properties needed on SPEAr600
* are provided. All other properties should be added
@@ -785,40 +749,40 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
/**
* stmmac_pltfr_init
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's init callback (if any) and propagate
* the return value.
*/
-static int stmmac_pltfr_init(struct platform_device *pdev,
+static int stmmac_pltfr_init(struct device *dev,
struct plat_stmmacenet_data *plat)
{
int ret = 0;
if (plat->init)
- ret = plat->init(pdev, plat->bsp_priv);
+ ret = plat->init(dev, plat->bsp_priv);
return ret;
}
/**
* stmmac_pltfr_exit
- * @pdev: pointer to the platform device
+ * @dev: pointer to the device structure
* @plat: driver data platform structure
* Description: Call the platform's exit callback (if any).
*/
-static void stmmac_pltfr_exit(struct platform_device *pdev,
+static void stmmac_pltfr_exit(struct device *dev,
struct plat_stmmacenet_data *plat)
{
if (plat->exit)
- plat->exit(pdev, plat->bsp_priv);
+ plat->exit(dev, plat->bsp_priv);
}
static int stmmac_plat_suspend(struct device *dev, void *bsp_priv)
{
struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
- stmmac_pltfr_exit(to_platform_device(dev), priv->plat);
+ stmmac_pltfr_exit(dev, priv->plat);
return 0;
}
@@ -827,7 +791,7 @@ static int stmmac_plat_resume(struct device *dev, void *bsp_priv)
{
struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
- return stmmac_pltfr_init(to_platform_device(dev), priv->plat);
+ return stmmac_pltfr_init(dev, priv->plat);
}
/**
@@ -842,24 +806,12 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res)
{
- int ret;
-
if (!plat->suspend && plat->exit)
plat->suspend = stmmac_plat_suspend;
if (!plat->resume && plat->init)
plat->resume = stmmac_plat_resume;
- ret = stmmac_pltfr_init(pdev, plat);
- if (ret)
- return ret;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat, res);
- if (ret) {
- stmmac_pltfr_exit(pdev, plat);
- return ret;
- }
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat, res);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
@@ -901,12 +853,7 @@ EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe);
*/
void stmmac_pltfr_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct plat_stmmacenet_data *plat = priv->plat;
-
stmmac_dvr_remove(&pdev->dev);
- stmmac_pltfr_exit(pdev, plat);
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index a01bc394d1ac..e90a2c469b9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -1721,7 +1721,7 @@ static int stmmac_test_sph(struct stmmac_priv *priv)
struct stmmac_packet_attrs attr = { };
int ret;
- if (!priv->sph)
+ if (!priv->sph_active)
return -EOPNOTSUPP;
/* Check for UDP first */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
index ff02a79c00d4..b18404dd5a8b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -122,7 +122,8 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
/* Extended Rx VLAN Filter Enable */
for (i = 0; i < hw->num_vlan; i++) {
- if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) &&
+ ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) {
ret = vlan_write_filter(dev, hw, i, 0);
if (!ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
index aa6f16d3df64..d7e4db7224b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
bpf_prog_put(old_prog);
/* Disable RX SPH for XDP operation */
- priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
+ priv->sph_active = priv->sph_capable && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update)
stmmac_xdp_open(dev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d5f358ec9820..5924db6be3fe 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -3068,7 +3068,8 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
}
static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *common = dl_priv->common;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 8b9e2078c602..ab88d4c02cbd 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1618,7 +1618,8 @@ static const struct devlink_ops cpsw_devlink_ops = {
};
static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1753,7 +1754,8 @@ exit:
}
static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 0eed29d6187a..090aa74d3ce7 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -93,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
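+/* Transmit pending AF_XDP zero-copy frames on TX queue @q_idx.
+ * MAX_SKB_FRAGS descriptors are left in reserve for the normal TX
+ * path; the rest are filled with frames peeked from the XSK TX ring,
+ * each wrapped in a CPPI5 host descriptor tagged PRUETH_SWDATA_XSK
+ * and pushed to the UDMA channel. Returns the number of frames
+ * queued for transmission.
+ */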
+static int emac_xsk_xmit_zc(struct prueth_emac *emac,
+ unsigned int q_idx)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
+ struct xsk_buff_pool *pool = tx_chn->xsk_pool;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *host_desc;
+ dma_addr_t dma_desc, dma_buf;
+ struct prueth_swdata *swdata;
+ struct xdp_desc xdp_desc;
+ int num_tx = 0, pkt_len;
+ int descs_avail, ret;
+ u32 *epib;
+ int i;
+
+ descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
+	/* Ensure the TX ring is not filled up by XDP: MAX_SKB_FRAGS
+	 * descriptors are always kept available for the normal TX path,
+	 * which stops the queue there if necessary.
+	 */
+ if (descs_avail <= MAX_SKB_FRAGS)
+ return 0;
+
+ descs_avail -= MAX_SKB_FRAGS;
+
+ for (i = 0; i < descs_avail; i++) {
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ pkt_len = xdp_desc.len;
+ xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc))
+ break;
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, 0);
+ epib = host_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
+ (emac->port_id | (q_idx << 8)));
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
+ pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ swdata->type = PRUETH_SWDATA_XSK;
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ host_desc);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
+ host_desc, dma_desc);
+
+ if (ret) {
+ ndev->stats.tx_errors++;
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ break;
+ }
+
+ num_tx++;
+ }
+
+ xsk_tx_release(tx_chn->xsk_pool);
+ return num_tx;
+}
+
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (swdata->type == PRUETH_SWDATA_XSK)
+ goto free_pool;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -126,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_pool:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -139,7 +216,9 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ int xsk_frames_done = 0;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -176,6 +255,11 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
total_bytes += xdpf->len;
xdp_return_frame(xdpf);
break;
+ case PRUETH_SWDATA_XSK:
+ pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ xsk_frames_done++;
+ break;
default:
prueth_xmit_free(tx_chn, desc_tx);
ndev->stats.tx_dropped++;
@@ -204,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
__netif_tx_unlock(netif_txq);
}
+ if (tx_chn->xsk_pool) {
+ if (xsk_frames_done)
+ xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ txq_trans_cond_update(netif_txq);
+ emac_xsk_xmit_zc(emac, chn);
+ }
+
return num_tx;
}
@@ -212,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
struct prueth_tx_chn *tx_chns =
container_of(timer, struct prueth_tx_chn, tx_hrtimer);
- enable_irq(tx_chns->irq);
+ if (tx_chns->irq_disabled) {
+ tx_chns->irq_disabled = false;
+ enable_irq(tx_chns->irq);
+ }
return HRTIMER_NORESTART;
}
@@ -235,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
ns_to_ktime(tx_chn->tx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(tx_chn->irq);
+ if (tx_chn->irq_disabled) {
+ tx_chn->irq_disabled = false;
+ enable_irq(tx_chn->irq);
+ }
}
}
@@ -246,6 +348,7 @@ static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
struct prueth_tx_chn *tx_chn = dev_id;
+ tx_chn->irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&tx_chn->napi_tx);
@@ -362,6 +465,29 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
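+/* Create the page_pool backing the RX ring: order-0 pages, DMA mapped
+ * and synced for the device, sized to the number of RX descriptors
+ * and bound to the RX NAPI context.
+ */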
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
@@ -371,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct device *dev = emac->prueth->dev;
struct net_device *ndev = emac->ndev;
u32 fdqring_id, hdesc_size;
+ struct page_pool *pool;
int i, ret = 0, slice;
int flow_id_base;
@@ -413,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
goto fail;
}
+ pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto fail;
+ }
+
+ rx_chn->pg_pool = pool;
+
flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
emac->rx_mgm_flow_id_base = flow_id_base;
@@ -544,15 +679,15 @@ void emac_rx_timestamp(struct prueth_emac *emac,
* emac_xmit_xdp_frame - transmits an XDP frame
* @emac: emac device
* @xdpf: data to transmit
- * @page: page from page pool if already DMA mapped
* @q_idx: queue id
+ * @buff_type: Type of buffer to be transmitted
*
* Return: XDP state
*/
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
- struct page *page,
- unsigned int q_idx)
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type)
{
struct cppi5_host_desc_t *first_desc;
struct net_device *ndev = emac->ndev;
@@ -560,6 +695,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
struct prueth_swdata *swdata;
+ struct page *page;
u32 *epib;
int ret;
@@ -576,7 +712,12 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
return ICSSG_XDP_CONSUMED; /* drop */
}
- if (page) { /* already DMA mapped by page_pool */
+ if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
+ page = virt_to_head_page(xdpf->data);
+ if (unlikely(!page)) {
+ netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
+ goto drop_free_descs;
+ }
buf_dma = page_pool_get_dma_addr(page);
buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
} else { /* Map the linear buffer */
@@ -631,13 +772,11 @@ EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
* emac_run_xdp - run an XDP program
* @emac: emac device
* @xdp: XDP buffer containing the frame
- * @page: page with RX data if already DMA mapped
* @len: Rx descriptor packet length
*
* Return: XDP state
*/
-static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
- struct page *page, u32 *len)
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
{
struct net_device *ndev = emac->ndev;
struct netdev_queue *netif_txq;
@@ -664,7 +803,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
q_idx = cpu % emac->tx_ch_num;
netif_txq = netdev_get_tx_queue(ndev, q_idx);
__netif_tx_lock(netif_txq, cpu);
- result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_TX);
__netif_tx_unlock(netif_txq);
if (result == ICSSG_XDP_CONSUMED) {
ndev->stats.tx_dropped++;
@@ -689,11 +829,188 @@ drop:
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
ndev->stats.rx_dropped++;
- page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
return ICSSG_XDP_CONSUMED;
}
}
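+/* Attach one XSK buffer to a newly allocated CPPI5 RX descriptor and
+ * push it to the RX ring so the hardware can fill it. Returns 0 on
+ * success or a negative errno.
+ */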
+static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct xdp_buff *xdp)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ int buf_len;
+
+ buf_dma = xsk_buff_xdp_get_dma(xdp);
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ swdata->type = PRUETH_SWDATA_XSK;
+ swdata->data.xdp = xdp;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
+ desc_rx, desc_dma);
+}
+
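+/* Refill the RX ring with up to @budget buffers from the XSK pool.
+ * Stops early if the pool runs dry or a push fails; returns the
+ * number of buffers actually queued.
+ */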
+static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ struct xdp_buff *xdp;
+ int i, ret;
+
+ for (i = 0; i < budget; i++) {
+ xdp = xsk_buff_alloc(rx_chn->xsk_pool);
+ if (!xdp)
+ break;
+
+ ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
+ if (ret) {
+ netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
+ xsk_buff_free(xdp);
+ break;
+ }
+ }
+
+ return i;
+}
+
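+/* Copy a received zero-copy frame out of its XSK buffer into a newly
+ * allocated skb and pass it up the stack, applying the RX hardware
+ * timestamp and offload forwarding mark where applicable.
+ */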
+static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
+{
+ unsigned int headroom = xdp->data - xdp->data_hard_start;
+ unsigned int pkt_len = xdp->data_end - xdp->data;
+ struct net_device *ndev = emac->ndev;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
+ return;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+}
+
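+/* Zero-copy RX path: pop up to @budget completed descriptors from
+ * flow @flow_id, run the XDP program on each frame if one is
+ * attached, dispatch XDP_PASS frames as skbs, then refill the ring
+ * and manage the XSK need_wakeup state. Returns packets processed.
+ */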
+static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
+ int budget)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct prueth_swdata *swdata;
+ dma_addr_t desc_dma, buf_dma;
+ struct xdp_buff *xdp;
+ int xdp_status = 0;
+ int count = 0;
+ u32 *psdata;
+ int ret;
+
+ while (count < budget) {
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ break;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
+ break;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ if (swdata->type != PRUETH_SWDATA_XSK) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ break;
+ }
+
+ xdp = swdata->data.xdp;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ count++;
+ xsk_buff_set_size(xdp, pkt_len);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (prueth_xdp_is_enabled(emac)) {
+ ret = emac_run_xdp(emac, xdp, &pkt_len);
+ switch (ret) {
+ case ICSSG_XDP_PASS:
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_CONSUMED:
+ xsk_buff_free(xdp);
+ break;
+ case ICSSG_XDP_TX:
+ case ICSSG_XDP_REDIR:
+ xdp_status |= ret;
+ break;
+ }
+ } else {
+ /* prepare skb and send to n/w stack */
+ emac_dispatch_skb_zc(emac, xdp, psdata);
+ xsk_buff_free(xdp);
+ }
+ }
+
+ if (xdp_status & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
+	/* Refill the RX ring with one xsk buffer for each of the "count"
+	 * packets just processed, so that more packets can be received.
+	 */
+ ret = prueth_rx_alloc_zc(emac, count);
+
+ if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
+		/* If user space doesn't provide enough buffers, it must
+		 * explicitly wake up the kernel when new buffers become
+		 * available.
+		 */
+ if (ret < count)
+ xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
+ }
+
+ return count;
+}
+
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
@@ -719,8 +1036,10 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
return ret;
}
- if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ complete(&emac->tdown_complete);
return 0;
+ }
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
@@ -738,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
/* firmware adds 4 CRC bytes, strip them */
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
/* if allocation fails we drop the packet but push the
@@ -752,11 +1070,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
}
pa = page_address(page);
- if (emac->xdp_prog) {
+ if (prueth_xdp_is_enabled(emac)) {
xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
- *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
if (*xdp_state != ICSSG_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
@@ -804,24 +1122,29 @@ requeue:
return ret;
}
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
struct prueth_swdata *swdata;
struct page_pool *pool;
+ struct xdp_buff *xdp;
struct page *page;
pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- if (swdata->type == PRUETH_SWDATA_PAGE) {
+ if (rx_chn->xsk_pool) {
+ xdp = swdata->data.xdp;
+ xsk_buff_free(xdp);
+ } else {
page = swdata->data.page;
page_pool_recycle_direct(pool, page);
}
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
}
+EXPORT_SYMBOL_GPL(prueth_rx_cleanup);
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
@@ -1025,10 +1348,11 @@ drop_stop_q_busy:
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct xsk_buff_pool *xsk_pool;
struct prueth_swdata *swdata;
struct xdp_frame *xdpf;
struct sk_buff *skb;
@@ -1045,17 +1369,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
xdpf = swdata->data.xdpf;
xdp_return_frame(xdpf);
break;
+ case PRUETH_SWDATA_XSK:
+ xsk_pool = tx_chn->xsk_pool;
+ xsk_tx_completed(xsk_pool, 1);
+ break;
default:
break;
}
prueth_xmit_free(tx_chn, desc_tx);
}
+EXPORT_SYMBOL_GPL(prueth_tx_cleanup);
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
+ emac->rx_chns.irq_disabled = true;
disable_irq_nosync(irq);
napi_schedule(&emac->napi_rx);
@@ -1083,6 +1413,7 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
@@ -1090,14 +1421,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
int ret;
while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = emac_rx_packet(emac, flow, &xdp_state);
- xdp_state_or |= xdp_state;
- if (ret)
- break;
- num_rx++;
+ if (rx_chn->xsk_pool) {
+ num_rx = emac_rx_packet_zc(emac, flow, budget);
+ } else {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
+ break;
+ num_rx++;
+ }
}
if (num_rx >= budget)
@@ -1113,7 +1448,11 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
ns_to_ktime(emac->rx_pace_timeout_ns),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
}
}
@@ -1121,62 +1460,48 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
-static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
- struct device *dma_dev,
- int size)
-{
- struct page_pool_params pp_params = { 0 };
- struct page_pool *pool;
-
- pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = size;
- pp_params.nid = dev_to_node(emac->prueth->dev);
- pp_params.dma_dir = DMA_BIDIRECTIONAL;
- pp_params.dev = dma_dev;
- pp_params.napi = &emac->napi_rx;
- pp_params.max_len = PAGE_SIZE;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- netdev_err(emac->ndev, "cannot create rx page pool\n");
-
- return pool;
-}
-
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct page_pool *pool;
struct page *page;
+ int desc_avail;
int i, ret;
- pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
-
- chn->pg_pool = pool;
+ desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
+ if (desc_avail < chn->descs_num)
+ netdev_warn(emac->ndev,
+ "not enough RX descriptors available %d < %d\n",
+ desc_avail, chn->descs_num);
- for (i = 0; i < chn->descs_num; i++) {
- /* NOTE: we're not using memory efficiently here.
- * 1 full page (4KB?) used here instead of
- * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+ if (chn->xsk_pool) {
+		/* Get buffers from the xsk_pool and push as many as
+		 * possible to the RX ring.
+		 */
- page = page_pool_dev_alloc_pages(pool);
- if (!page) {
- netdev_err(emac->ndev, "couldn't allocate rx page\n");
- ret = -ENOMEM;
+ ret = prueth_rx_alloc_zc(emac, desc_avail);
+ if (!ret)
goto recycle_alloc_pg;
- }
+ } else {
+ for (i = 0; i < desc_avail; i++) {
+ /* NOTE: we're not using memory efficiently here.
+ * 1 full page (4KB?) used here instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+ */
+ page = page_pool_dev_alloc_pages(chn->pg_pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
- ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
- if (ret < 0) {
- netdev_err(emac->ndev,
- "cannot submit page for rx chan %s ret %d\n",
- chn->name, ret);
- page_pool_recycle_direct(pool, page);
- goto recycle_alloc_pg;
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit page for rx chan %s ret %d\n",
+ chn->name, ret);
+ page_pool_recycle_direct(chn->pg_pool, page);
+ goto recycle_alloc_pg;
+ }
}
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 57a7d1ceab08..f65041662173 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -47,6 +47,9 @@
NETIF_F_HW_HSR_TAG_INS | \
NETIF_F_HW_HSR_TAG_RM)
+#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
+ DMA_ATTR_WEAK_ORDERING)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -392,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
return HRTIMER_NORESTART;
}
@@ -566,31 +573,41 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+}
+
static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
struct page_pool *pool = emac->rx_chns.pg_pool;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int ret;
ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
if (ret)
return ret;
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- xdp_rxq_info_unreg(rxq);
-
- return ret;
-}
-
-static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
-{
- struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ if (rx_chn->xsk_pool) {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (ret)
+ goto xdp_unreg;
+ xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
+ } else {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto xdp_unreg;
+ }
- if (!xdp_rxq_info_is_reg(rxq))
- return;
+ return 0;
- xdp_rxq_info_unreg(rxq);
+xdp_unreg:
+ prueth_destroy_xdp_rxqs(emac);
+ return ret;
}
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
@@ -735,6 +752,128 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
return 0;
}
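+/* Bind the XSK pool to the RX/TX channel pair for @queue_id when it
+ * matches the registered XSK queue (emac->xsk_qid), otherwise clear
+ * the pool pointers.
+ */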
+static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != queue_id) {
+ rx_chn->xsk_pool = NULL;
+ tx_chn->xsk_pool = NULL;
+ } else {
+ rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ }
+}
+
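+/* Tear down all TX channels: request teardown, wait up to 1s for
+ * completion, then disable NAPI, cancel the pacing timers and reset
+ * and disable the UDMA channels.
+ */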
+static void prueth_destroy_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "tx teardown timeout\n");
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+
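+/* Tear down the RX side: initiate channel teardown, kick the RX NAPI
+ * so the teardown completion marker is popped, then disable NAPI,
+ * reset the RX flows and unregister the XDP rxq.
+ */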
+static void prueth_destroy_rxq(struct prueth_emac *emac)
+{
+ int i, ret;
+
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ /* When RX DMA Channel Teardown is initiated, it will result in an
+ * interrupt and a Teardown Completion Marker (TDCM) is queued into
+ * the RX Completion queue. Acknowledging the interrupt involves
+ * popping the TDCM descriptor from the RX Completion queue via the
+ * RX NAPI Handler. To avoid timing out when waiting for the TDCM to
+ * be popped, schedule the RX NAPI handler to run immediately.
+ */
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (napi_schedule_prep(&emac->napi_rx))
+ __napi_schedule(&emac->napi_rx);
+ }
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "rx teardown timeout\n");
+
+ for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) {
+ napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
+ &emac->rx_chns,
+ prueth_rx_cleanup);
+ }
+
+ prueth_destroy_xdp_rxqs(emac);
+ k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
+}
+
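+/* Enable all TX UDMA channels and their NAPI contexts; on failure,
+ * reset the channels that were already enabled.
+ */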
+static int prueth_create_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ }
+ return 0;
+
+reset_tx_chan:
+	/* Since the interface is not yet up, there wouldn't be
+	 * any SKB for completion. So pass false for free_skb.
+	 */
+ prueth_reset_tx_chan(emac, i, false);
+ return ret;
+}
+
+static int prueth_create_rxq(struct prueth_emac *emac)
+{
+ int ret;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ return ret;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = prueth_create_xdp_rxqs(emac);
+ if (ret)
+ goto reset_rx_chn;
+
+ napi_enable(&emac->napi_rx);
+ return 0;
+
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+ return ret;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -746,7 +885,7 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- int ret, i, num_data_chn = emac->tx_ch_num;
+ int ret, num_data_chn = emac->tx_ch_num;
struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
@@ -767,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
+ emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -819,28 +959,13 @@ static int emac_ndo_open(struct net_device *ndev)
goto stop;
/* Prepare RX */
- ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ ret = prueth_create_rxq(emac);
if (ret)
goto free_tx_ts_irq;
- ret = prueth_create_xdp_rxqs(emac);
- if (ret)
- goto reset_rx_chn;
-
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_txq(emac);
if (ret)
- goto destroy_xdp_rxqs;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
- if (ret)
- goto reset_tx_chan;
- }
-
- /* Enable NAPI in Tx and Rx direction */
- for (i = 0; i < emac->tx_ch_num; i++)
- napi_enable(&emac->tx_chns[i].napi_tx);
- napi_enable(&emac->napi_rx);
+ goto destroy_rxq;
/* start PHY */
phy_start(ndev->phydev);
@@ -851,15 +976,8 @@ static int emac_ndo_open(struct net_device *ndev)
return 0;
-reset_tx_chan:
- /* Since interface is not yet up, there is wouldn't be
- * any SKB for completion. So set false to free_skb
- */
- prueth_reset_tx_chan(emac, i, false);
-destroy_xdp_rxqs:
- prueth_destroy_xdp_rxqs(emac);
-reset_rx_chn:
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+destroy_rxq:
+ prueth_destroy_rxq(emac);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
@@ -889,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
- int rx_flow = PRUETH_RX_FLOW_DATA;
- int max_rx_flows;
- int ret, i;
/* inform the upper layers. */
netif_tx_stop_all_queues(ndev);
@@ -905,32 +1020,8 @@ static int emac_ndo_stop(struct net_device *ndev)
else
__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
- atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- /* tear down and disable UDMA channels */
- reinit_completion(&emac->tdown_complete);
- for (i = 0; i < emac->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
-
- ret = wait_for_completion_timeout(&emac->tdown_complete,
- msecs_to_jiffies(1000));
- if (!ret)
- netdev_err(ndev, "tx teardown timeout\n");
-
- prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++) {
- napi_disable(&emac->tx_chns[i].napi_tx);
- hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
- }
-
- max_rx_flows = PRUETH_MAX_RX_FLOWS;
- k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
-
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
- prueth_destroy_xdp_rxqs(emac);
- napi_disable(&emac->napi_rx);
- hrtimer_cancel(&emac->rx_hrtimer);
+ prueth_destroy_txq(emac);
+ prueth_destroy_rxq(emac);
cancel_work_sync(&emac->rx_mode_work);
@@ -943,10 +1034,10 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->tx_ts_irq, emac);
- free_irq(emac->rx_chns.irq[rx_flow], emac);
+ free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
prueth_cleanup_tx_chns(emac);
prueth->emacs_initialized--;
@@ -1108,7 +1199,8 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
__netif_tx_lock(netif_txq, cpu);
for (i = 0; i < n; i++) {
xdpf = frames[i];
- err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ err = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO);
if (err != ICSSG_XDP_TX) {
ndev->stats.tx_dropped++;
break;
@@ -1141,6 +1233,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
return 0;
}
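+/* Enable AF_XDP zero-copy on @queue_id: validate the queue, DMA-map
+ * the pool, and if the interface is running quiesce the port, rebuild
+ * the RX queue with the pool bound, re-enable forwarding and kick the
+ * RX NAPI so buffers get posted.
+ */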
+static int prueth_xsk_pool_enable(struct prueth_emac *emac,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 frame_size;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < PRUETH_MAX_PKT_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
+ return ret;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ emac->xsk_qid = queue_id;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
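+/* Disable AF_XDP zero-copy on @queue_id: quiesce the port if running,
+ * rebuild the RX queue without the pool and DMA-unmap it.
+ */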
+static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (emac->xsk_qid != queue_id) {
+ netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
+ return -EINVAL;
+ }
+
+ pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ if (!pool) {
+ netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ emac->xsk_qid = -EINVAL;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* emac_ndo_bpf - implements ndo_bpf for icssg_prueth
* @ndev: network adapter device
@@ -1155,11 +1350,58 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return emac_xdp_setup(emac, bpf);
+ case XDP_SETUP_XSK_POOL:
+ return bpf->xsk.pool ?
+ prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
+ prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
default:
return -EINVAL;
}
}
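+/* ndo_xsk_wakeup() handler: validate that @qid is the registered XSK
+ * queue with RX and TX pools bound, then schedule the TX and/or RX
+ * NAPI as requested by @flags so pending descriptors are processed.
+ */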
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != qid) {
+ netdev_err(ndev, "XSK queue %d not registered\n", qid);
+ return -EINVAL;
+ }
+
+ if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
+ netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!tx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!rx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (flags & XDP_WAKEUP_TX) {
+ if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) {
+ if (likely(napi_schedule_prep(&tx_chn->napi_tx)))
+ __napi_schedule(&tx_chn->napi_tx);
+ }
+ }
+
+ if (flags & XDP_WAKEUP_RX) {
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (likely(napi_schedule_prep(&emac->napi_rx)))
+ __napi_schedule(&emac->napi_rx);
+ }
+ }
+
+ return 0;
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1178,6 +1420,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_xdp_xmit = emac_xdp_xmit,
.ndo_hwtstamp_get = icssg_ndo_get_ts_config,
.ndo_hwtstamp_set = icssg_ndo_set_ts_config,
+ .ndo_xsk_wakeup = prueth_xsk_wakeup,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1311,7 +1554,8 @@ static int prueth_netdev_init(struct prueth *prueth,
xdp_set_features_flag(ndev,
NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT);
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f0fa9688d9a0..10eadd356650 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -38,6 +38,8 @@
#include <net/devlink.h>
#include <net/xdp.h>
#include <net/page_pool/helpers.h>
+#include <net/xsk_buff_pool.h>
+#include <net/xdp_sock_drv.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -126,6 +128,8 @@ struct prueth_tx_chn {
char name[32];
struct hrtimer tx_hrtimer;
unsigned long tx_pace_timeout_ns;
+ struct xsk_buff_pool *xsk_pool;
+ bool irq_disabled;
};
struct prueth_rx_chn {
@@ -138,6 +142,8 @@ struct prueth_rx_chn {
char name[32];
struct page_pool *pg_pool;
struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
+ bool irq_disabled;
};
enum prueth_swdata_type {
@@ -146,6 +152,12 @@ enum prueth_swdata_type {
PRUETH_SWDATA_PAGE,
PRUETH_SWDATA_CMD,
PRUETH_SWDATA_XDPF,
+ PRUETH_SWDATA_XSK,
+};
+
+enum prueth_tx_buff_type {
+ PRUETH_TX_BUFF_TYPE_XDP_TX,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO,
};
struct prueth_swdata {
@@ -155,6 +167,7 @@ struct prueth_swdata {
struct page *page;
u32 cmd;
struct xdp_frame *xdpf;
+ struct xdp_buff *xdp;
} data;
};
@@ -241,6 +254,7 @@ struct prueth_emac {
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
struct bpf_prog *xdp_prog;
struct xdp_attachment_info xdpi;
+ int xsk_qid;
};
/* The buf includes headroom compatible with both skb and xdpf */
@@ -499,7 +513,14 @@ void prueth_put_cores(struct prueth *prueth, int slice);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
struct xdp_frame *xdpf,
- struct page *page,
- unsigned int q_idx);
+ unsigned int q_idx,
+ enum prueth_tx_buff_type buff_type);
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma);
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma);
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags);
+static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac)
+{
+ return !!READ_ONCE(emac->xdp_prog);
+}
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 4f6cc6cd1f03..8f46e9be76b1 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2657,7 +2657,7 @@ static int gbe_hwtstamp_set(void *intf_priv, struct kernel_hwtstamp_config *cfg,
phy = gbe_intf->slave->phy;
if (phy_has_hwtstamp(phy))
- return phy->mii_ts->hwtstamp(phy->mii_ts, cfg, extack);
+ return phy->mii_ts->hwtstamp_set(phy->mii_ts, cfg, extack);
switch (cfg->tx_type) {
case HWTSTAMP_TX_OFF:
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 5ee8e8980393..591866fc9055 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -260,6 +260,7 @@ void gelic_card_down(struct gelic_card *card)
if (atomic_dec_if_positive(&card->users) == 0) {
pr_debug("%s: real do\n", __func__);
napi_disable(&card->napi);
+ timer_delete_sync(&card->rx_oom_timer);
/*
* Disable irq. Wireless interrupts will
* be disabled later if any
@@ -970,7 +971,8 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
* gelic_card_decode_one_descr - processes an rx descriptor
* @card: card structure
*
- * returns 1 if a packet has been sent to the stack, otherwise 0
+ * returns 1 if a packet has been sent to the stack, -ENOMEM on skb alloc
+ * failure, otherwise 0
*
* processes an rx descriptor by iommu-unmapping the data buffer and passing
* the packet up to the stack
@@ -981,16 +983,18 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
struct gelic_descr_chain *chain = &card->rx_chain;
struct gelic_descr *descr = chain->head;
struct net_device *netdev = NULL;
- int dmac_chain_ended;
+ int dmac_chain_ended = 0;
+ int prepare_rx_ret;
status = gelic_descr_get_status(descr);
if (status == GELIC_DESCR_DMA_CARDOWNED)
return 0;
- if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
+ if (status == GELIC_DESCR_DMA_NOT_IN_USE || !descr->skb) {
dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
- return 0;
+ dmac_chain_ended = 1;
+ goto refill;
}
/* netdevice select */
@@ -1048,9 +1052,10 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
refill:
/* is the current descriptor terminated with next_descr == NULL? */
- dmac_chain_ended =
- be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
- GELIC_DESCR_RX_DMA_CHAIN_END;
+ if (!dmac_chain_ended)
+ dmac_chain_ended =
+ be32_to_cpu(descr->hw_regs.dmac_cmd_status) &
+ GELIC_DESCR_RX_DMA_CHAIN_END;
/*
* So that always DMAC can see the end
* of the descriptor chain to avoid
@@ -1062,10 +1067,11 @@ refill:
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
/*
- * this call can fail, but for now, just leave this
- * descriptor without skb
+ * this call can fail, propagate the error
*/
- gelic_descr_prepare_rx(card, descr);
+ prepare_rx_ret = gelic_descr_prepare_rx(card, descr);
+ if (prepare_rx_ret)
+ return prepare_rx_ret;
chain->tail = descr;
chain->head = descr->next;
@@ -1087,6 +1093,13 @@ refill:
return 1;
}
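+/* Timer armed after an rx skb allocation failure: re-schedule NAPI so
+ * the descriptor refill can be retried once memory pressure eases.
+ */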
+static void gelic_rx_oom_timer(struct timer_list *t)
+{
+ struct gelic_card *card = timer_container_of(card, t, rx_oom_timer);
+
+ napi_schedule(&card->napi);
+}
+
/**
* gelic_net_poll - NAPI poll function called by the stack to return packets
* @napi: napi structure
@@ -1099,14 +1112,22 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
{
struct gelic_card *card = container_of(napi, struct gelic_card, napi);
int packets_done = 0;
+ int work_result = 0;
while (packets_done < budget) {
- if (!gelic_card_decode_one_descr(card))
+ work_result = gelic_card_decode_one_descr(card);
+ if (work_result != 1)
break;
packets_done++;
}
+ if (work_result == -ENOMEM) {
+ napi_complete_done(napi, packets_done);
+ mod_timer(&card->rx_oom_timer, jiffies + 1);
+ return packets_done;
+ }
+
if (packets_done < budget) {
napi_complete_done(napi, packets_done);
gelic_card_rx_irq_on(card);
@@ -1576,6 +1597,8 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
mutex_init(&card->updown_lock);
atomic_set(&card->users, 0);
+ timer_setup(&card->rx_oom_timer, gelic_rx_oom_timer, 0);
+
return card;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index f7d7931e51b7..c10f1984a5a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -268,6 +268,7 @@ struct gelic_vlan_id {
struct gelic_card {
struct napi_struct napi;
struct net_device *netdev[GELIC_PORT_MAX];
+ struct timer_list rx_oom_timer;
/*
* hypervisor requires irq_status should be
* 8 bytes aligned, but u64 member is
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index 9aa3964187e1..f362e51c73ee 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -240,9 +240,6 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
@@ -261,9 +258,6 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
@@ -273,9 +267,6 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return;
-
phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
@@ -285,9 +276,6 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b1a6ef5709a9..29e5c5470c94 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1249,7 +1249,7 @@ enum wx_pf_flags {
WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
WX_FLAG_PTP_PPS_ENABLED,
WX_FLAG_NEED_LINK_CONFIG,
- WX_FLAG_NEED_SFP_RESET,
+ WX_FLAG_NEED_MODULE_RESET,
WX_FLAG_NEED_UPDATE_LINK,
WX_FLAG_NEED_DO_RESET,
WX_FLAG_RX_MERGE_ENABLED,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
index 35eebdb07761..62d7f47d4f8d 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c
@@ -17,10 +17,15 @@
void txgbe_gpio_init_aml(struct wx *wx)
{
- u32 status;
+ u32 status, mod_rst;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_rst = TXGBE_GPIOBIT_4;
+ else
+ mod_rst = TXGBE_GPIOBIT_2;
- wr32(wx, WX_GPIO_INTTYPE_LEVEL, TXGBE_GPIOBIT_2);
- wr32(wx, WX_GPIO_INTEN, TXGBE_GPIOBIT_2);
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, mod_rst);
+ wr32(wx, WX_GPIO_INTEN, mod_rst);
status = rd32(wx, WX_GPIO_INTSTATUS);
for (int i = 0; i < 6; i++) {
@@ -33,13 +38,18 @@ irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data)
{
struct txgbe *txgbe = data;
struct wx *wx = txgbe->wx;
- u32 status;
+ u32 status, mod_rst;
+
+ if (wx->mac.type == wx_mac_aml40)
+ mod_rst = TXGBE_GPIOBIT_4;
+ else
+ mod_rst = TXGBE_GPIOBIT_2;
wr32(wx, WX_GPIO_INTMASK, 0xFF);
status = rd32(wx, WX_GPIO_INTSTATUS);
- if (status & TXGBE_GPIOBIT_2) {
- set_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
- wr32(wx, WX_GPIO_EOI, TXGBE_GPIOBIT_2);
+ if (status & mod_rst) {
+ set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+ wr32(wx, WX_GPIO_EOI, mod_rst);
wx_service_event_schedule(wx);
}
@@ -51,7 +61,7 @@ int txgbe_test_hostif(struct wx *wx)
{
struct txgbe_hic_ephy_getlink buffer;
- if (wx->mac.type != wx_mac_aml)
+ if (wx->mac.type == wx_mac_sp)
return 0;
buffer.hdr.cmd = FW_PHY_GET_LINK_CMD;
@@ -63,15 +73,49 @@ int txgbe_test_hostif(struct wx *wx)
WX_HI_COMMAND_TIMEOUT, true);
}
-static int txgbe_identify_sfp_hostif(struct wx *wx, struct txgbe_hic_i2c_read *buffer)
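+/* Read @length bytes of module EEPROM data into @data through the
+ * firmware host interface, copying the response dwords back from the
+ * FW2SW mailbox.
+ */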
+int txgbe_read_eeprom_hostif(struct wx *wx,
+ struct txgbe_hic_i2c_read *buffer,
+ u32 length, u8 *data)
{
- buffer->hdr.cmd = FW_READ_SFP_INFO_CMD;
+ u32 dword_len, offset, value, i;
+ int err;
+
+ buffer->hdr.cmd = FW_READ_EEPROM_CMD;
buffer->hdr.buf_len = sizeof(struct txgbe_hic_i2c_read) -
sizeof(struct wx_hic_hdr);
buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ err = wx_host_interface_command(wx, (u32 *)buffer,
+ sizeof(struct txgbe_hic_i2c_read),
+ WX_HI_COMMAND_TIMEOUT, false);
+ if (err != 0)
+ return err;
+
+ /* buffer length offset to read return data */
+ offset = sizeof(struct txgbe_hic_i2c_read) >> 2;
+ dword_len = round_up(length, 4) >> 2;
+
+ for (i = 0; i < dword_len; i++) {
+ value = rd32a(wx, WX_FW2SW_MBOX, i + offset);
+ le32_to_cpus(&value);
+
+ memcpy(data, &value, 4);
+ data += 4;
+ }
+
+ return 0;
+}
+
+static int txgbe_identify_module_hostif(struct wx *wx,
+ struct txgbe_hic_get_module_info *buffer)
+{
+ buffer->hdr.cmd = FW_GET_MODULE_INFO_CMD;
+ buffer->hdr.buf_len = sizeof(struct txgbe_hic_get_module_info) -
+ sizeof(struct wx_hic_hdr);
+ buffer->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+
return wx_host_interface_command(wx, (u32 *)buffer,
- sizeof(struct txgbe_hic_i2c_read),
+ sizeof(struct txgbe_hic_get_module_info),
WX_HI_COMMAND_TIMEOUT, true);
}
@@ -85,6 +129,9 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
buffer.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
switch (speed) {
+ case SPEED_40000:
+ buffer.speed = TXGBE_LINK_SPEED_40GB_FULL;
+ break;
case SPEED_25000:
buffer.speed = TXGBE_LINK_SPEED_25GB_FULL;
break;
@@ -104,17 +151,21 @@ static int txgbe_set_phy_link_hostif(struct wx *wx, int speed, int autoneg, int
WX_HI_COMMAND_TIMEOUT, true);
}
-static void txgbe_get_link_capabilities(struct wx *wx, int *speed, int *duplex)
+static void txgbe_get_link_capabilities(struct wx *wx, int *speed,
+ int *autoneg, int *duplex)
{
struct txgbe *txgbe = wx->priv;
- if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->sfp_interfaces))
+ if (test_bit(PHY_INTERFACE_MODE_XLGMII, txgbe->link_interfaces))
+ *speed = SPEED_40000;
+ else if (test_bit(PHY_INTERFACE_MODE_25GBASER, txgbe->link_interfaces))
*speed = SPEED_25000;
- else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->sfp_interfaces))
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, txgbe->link_interfaces))
*speed = SPEED_10000;
else
*speed = SPEED_UNKNOWN;
+ *autoneg = phylink_test(txgbe->advertising, Autoneg);
*duplex = *speed == SPEED_UNKNOWN ? DUPLEX_HALF : DUPLEX_FULL;
}
@@ -125,6 +176,8 @@ static void txgbe_get_mac_link(struct wx *wx, int *speed)
status = rd32(wx, TXGBE_CFG_PORT_ST);
if (!(status & TXGBE_CFG_PORT_ST_LINK_UP))
*speed = SPEED_UNKNOWN;
+ else if (status & TXGBE_CFG_PORT_ST_LINK_AML_40G)
+ *speed = SPEED_40000;
else if (status & TXGBE_CFG_PORT_ST_LINK_AML_25G)
*speed = SPEED_25000;
else if (status & TXGBE_CFG_PORT_ST_LINK_AML_10G)
@@ -135,11 +188,11 @@ static void txgbe_get_mac_link(struct wx *wx, int *speed)
int txgbe_set_phy_link(struct wx *wx)
{
- int speed, duplex, err;
+ int speed, autoneg, duplex, err;
- txgbe_get_link_capabilities(wx, &speed, &duplex);
+ txgbe_get_link_capabilities(wx, &speed, &autoneg, &duplex);
- err = txgbe_set_phy_link_hostif(wx, speed, 0, duplex);
+ err = txgbe_set_phy_link_hostif(wx, speed, autoneg, duplex);
if (err) {
wx_err(wx, "Failed to setup link\n");
return err;
@@ -148,40 +201,128 @@ int txgbe_set_phy_link(struct wx *wx)
return 0;
}
-static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
+static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
DECLARE_PHY_INTERFACE_MASK(interfaces);
struct txgbe *txgbe = wx->priv;
- if (id->com_25g_code & (TXGBE_SFF_25GBASESR_CAPABLE |
- TXGBE_SFF_25GBASEER_CAPABLE |
- TXGBE_SFF_25GBASELR_CAPABLE)) {
- phylink_set(modes, 25000baseSR_Full);
+ if (id->cable_tech & TXGBE_SFF_DA_PASSIVE_CABLE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ if (id->com_25g_code == TXGBE_SFF_25GBASECR_91FEC ||
+ id->com_25g_code == TXGBE_SFF_25GBASECR_74FEC ||
+ id->com_25g_code == TXGBE_SFF_25GBASECR_NOFEC) {
+ phylink_set(modes, 25000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ } else {
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ } else if (id->cable_tech & TXGBE_SFF_DA_ACTIVE_CABLE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 25000baseCR_Full);
__set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ } else {
+ if (id->com_25g_code == TXGBE_SFF_25GBASESR_CAPABLE ||
+ id->com_25g_code == TXGBE_SFF_25GBASEER_CAPABLE ||
+ id->com_25g_code == TXGBE_SFF_25GBASELR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 25000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 10000baseSR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 10000baseLR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
}
- if (id->com_10g_code & TXGBE_SFF_10GBASESR_CAPABLE) {
- phylink_set(modes, 10000baseSR_Full);
- __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+
+ if (phy_interface_empty(interfaces)) {
+ wx_err(wx, "unsupported SFP module\n");
+ return -EINVAL;
+ }
+
+ phylink_set(modes, Pause);
+ phylink_set(modes, Asym_Pause);
+ phylink_set(modes, FIBRE);
+
+ if (!linkmode_equal(txgbe->link_support, modes)) {
+ linkmode_copy(txgbe->link_support, modes);
+ phy_interface_and(txgbe->link_interfaces,
+ wx->phylink_config.supported_interfaces,
+ interfaces);
+ linkmode_copy(txgbe->advertising, modes);
+
+ set_bit(WX_FLAG_NEED_LINK_CONFIG, wx->flags);
}
- if (id->com_10g_code & TXGBE_SFF_10GBASELR_CAPABLE) {
- phylink_set(modes, 10000baseLR_Full);
+
+ return 0;
+}
+
+static int txgbe_qsfp_to_linkmodes(struct wx *wx, struct txgbe_sff_id *id)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct txgbe *txgbe = wx->priv;
+
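+	/* transceiver_type carries the module's Ethernet compliance bits
+	 * (SFF-8636 byte layout is assumed); map each advertised 40G medium
+	 * to link modes and PHY interface modes.
+	 */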
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_CR4) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
__set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
}
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_SR4) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 40000baseSR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_LR4) {
+ txgbe->link_port = PORT_FIBRE;
+ phylink_set(modes, 40000baseLR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_40G_ACTIVE) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ }
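+	/* Bit 7 flags an extended module: sff_opt1 is then assumed to hold
+	 * the SFF-8024 extended compliance code (0xB = 100GBASE-CR4), which
+	 * this port drives at 40G/25G/10G over copper.
+	 */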
+ if (id->transceiver_type & TXGBE_SFF_ETHERNET_RSRVD) {
+ if (id->sff_opt1 & TXGBE_SFF_ETHERNET_100G_CR4) {
+ txgbe->link_port = PORT_DA;
+ phylink_set(modes, Autoneg);
+ phylink_set(modes, 40000baseCR4_Full);
+ phylink_set(modes, 25000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ }
if (phy_interface_empty(interfaces)) {
- wx_err(wx, "unsupported SFP module\n");
+ wx_err(wx, "unsupported QSFP module\n");
return -EINVAL;
}
phylink_set(modes, Pause);
phylink_set(modes, Asym_Pause);
phylink_set(modes, FIBRE);
- txgbe->link_port = PORT_FIBRE;
- if (!linkmode_equal(txgbe->sfp_support, modes)) {
- linkmode_copy(txgbe->sfp_support, modes);
- phy_interface_and(txgbe->sfp_interfaces,
+ if (!linkmode_equal(txgbe->link_support, modes)) {
+ linkmode_copy(txgbe->link_support, modes);
+ phy_interface_and(txgbe->link_interfaces,
wx->phylink_config.supported_interfaces,
interfaces);
linkmode_copy(txgbe->advertising, modes);
@@ -192,40 +333,53 @@ static int txgbe_sfp_to_linkmodes(struct wx *wx, struct txgbe_sfp_id *id)
return 0;
}
-int txgbe_identify_sfp(struct wx *wx)
+int txgbe_identify_module(struct wx *wx)
{
- struct txgbe_hic_i2c_read buffer;
- struct txgbe_sfp_id *id;
+ struct txgbe_hic_get_module_info buffer;
+ struct txgbe_sff_id *id;
int err = 0;
+ u32 mod_abs;
u32 gpio;
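+	/* Module-absent is wired to a different GPIO on 40G (aml40) parts. */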
+ if (wx->mac.type == wx_mac_aml40)
+ mod_abs = TXGBE_GPIOBIT_4;
+ else
+ mod_abs = TXGBE_GPIOBIT_2;
+
gpio = rd32(wx, WX_GPIO_EXT);
- if (gpio & TXGBE_GPIOBIT_2)
+ if (gpio & mod_abs)
return -ENODEV;
- err = txgbe_identify_sfp_hostif(wx, &buffer);
+ err = txgbe_identify_module_hostif(wx, &buffer);
if (err) {
- wx_err(wx, "Failed to identify SFP module\n");
+ wx_err(wx, "Failed to identify module\n");
return err;
}
id = &buffer.id;
- if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP) {
- wx_err(wx, "Invalid SFP module\n");
+ if (id->identifier != TXGBE_SFF_IDENTIFIER_SFP &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP_PLUS &&
+ id->identifier != TXGBE_SFF_IDENTIFIER_QSFP28) {
+ wx_err(wx, "Invalid module\n");
return -ENODEV;
}
- return txgbe_sfp_to_linkmodes(wx, id);
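+	/* SFP modules are assumed to report 0xFF in the QSFP
+	 * transceiver-type byte, so use it to pick the parser.
+	 */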
+ if (id->transceiver_type == 0xFF)
+ return txgbe_sfp_to_linkmodes(wx, id);
+
+ return txgbe_qsfp_to_linkmodes(wx, id);
}
void txgbe_setup_link(struct wx *wx)
{
struct txgbe *txgbe = wx->priv;
- phy_interface_zero(txgbe->sfp_interfaces);
- linkmode_zero(txgbe->sfp_support);
+ phy_interface_zero(txgbe->link_interfaces);
+ linkmode_zero(txgbe->link_support);
- txgbe_identify_sfp(wx);
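+	/* Defer module identification to the service task, which waits
+	 * for the module to settle before reading it.
+	 */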
+ set_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
+ wx_service_event_schedule(wx);
}
static void txgbe_get_link_state(struct phylink_config *config,
@@ -278,6 +432,9 @@ static void txgbe_mac_link_up_aml(struct phylink_config *config,
txcfg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
switch (speed) {
+ case SPEED_40000:
+ txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
+ break;
case SPEED_25000:
txcfg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
break;
@@ -342,7 +499,18 @@ int txgbe_phylink_init_aml(struct txgbe *txgbe)
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
config->get_fixed_state = txgbe_get_link_state;
- phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ if (wx->mac.type == wx_mac_aml40) {
+ config->mac_capabilities |= MAC_40000FD;
+ phy_mode = PHY_INTERFACE_MODE_XLGMII;
+ __set_bit(PHY_INTERFACE_MODE_XLGMII, config->supported_interfaces);
+ state.speed = SPEED_40000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ phy_mode = PHY_INTERFACE_MODE_25GBASER;
+ state.speed = SPEED_25000;
+ state.duplex = DUPLEX_FULL;
+ }
+
__set_bit(PHY_INTERFACE_MODE_25GBASER, config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_10GBASER, config->supported_interfaces);
@@ -350,8 +518,6 @@ int txgbe_phylink_init_aml(struct txgbe *txgbe)
if (IS_ERR(phylink))
return PTR_ERR(phylink);
- state.speed = SPEED_25000;
- state.duplex = DUPLEX_FULL;
err = phylink_set_fixed_link(phylink, &state);
if (err) {
wx_err(wx, "Failed to set fixed link\n");
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
index 25d4971ca0d9..4f6df0ee860b 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h
@@ -7,8 +7,11 @@
void txgbe_gpio_init_aml(struct wx *wx);
irqreturn_t txgbe_gpio_irq_handler_aml(int irq, void *data);
int txgbe_test_hostif(struct wx *wx);
+int txgbe_read_eeprom_hostif(struct wx *wx,
+ struct txgbe_hic_i2c_read *buffer,
+ u32 length, u8 *data);
int txgbe_set_phy_link(struct wx *wx);
-int txgbe_identify_sfp(struct wx *wx);
+int txgbe_identify_module(struct wx *wx);
void txgbe_setup_link(struct wx *wx);
int txgbe_phylink_init_aml(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index e285b088c7b2..f3cb00109529 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -10,6 +10,7 @@
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"
+#include "txgbe_aml.h"
#include "txgbe_ethtool.h"
int txgbe_get_link_ksettings(struct net_device *netdev,
@@ -19,9 +20,6 @@ int txgbe_get_link_ksettings(struct net_device *netdev,
struct txgbe *txgbe = wx->priv;
int err;
- if (wx->mac.type == wx_mac_aml40)
- return -EOPNOTSUPP;
-
err = wx_get_link_ksettings(netdev, cmd);
if (err)
return err;
@@ -30,8 +28,9 @@ int txgbe_get_link_ksettings(struct net_device *netdev,
return 0;
cmd->base.port = txgbe->link_port;
- cmd->base.autoneg = AUTONEG_DISABLE;
- linkmode_copy(cmd->link_modes.supported, txgbe->sfp_support);
+ cmd->base.autoneg = phylink_test(txgbe->advertising, Autoneg) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ linkmode_copy(cmd->link_modes.supported, txgbe->link_support);
linkmode_copy(cmd->link_modes.advertising, txgbe->advertising);
return 0;
@@ -536,6 +535,34 @@ static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static int
+txgbe_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct txgbe_hic_i2c_read buffer;
+ int err;
+
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return -EOPNOTSUPP;
+
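+	/* The hostif message carries offset/length in big-endian. */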
+ buffer.length = cpu_to_be32(page_data->length);
+ buffer.offset = cpu_to_be32(page_data->offset);
+ buffer.page = page_data->page;
+ buffer.bank = page_data->bank;
+ buffer.i2c_address = page_data->i2c_address;
+
+ err = txgbe_read_eeprom_hostif(wx, &buffer, page_data->length,
+ page_data->data);
+ if (err) {
+ wx_err(wx, "Failed to read module EEPROM\n");
+ return err;
+ }
+
+ return page_data->length;
+}
+
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
@@ -570,6 +597,7 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_msglevel = wx_set_msglevel,
.get_ts_info = wx_get_ts_info,
.get_ts_stats = wx_get_ptp_stats,
+ .get_module_eeprom_by_page = txgbe_get_module_eeprom_by_page,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 3885283681ec..aa14958d439a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -23,7 +23,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
{
u32 misc_ien = TXGBE_PX_MISC_IEN_MASK;
- if (wx->mac.type == wx_mac_aml) {
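+	/* Both AML variants use the GPIO interrupt for module presence. */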
+ if (wx->mac.type != wx_mac_sp) {
misc_ien |= TXGBE_PX_MISC_GPIO;
txgbe_gpio_init_aml(wx);
}
@@ -201,10 +201,7 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
- if (txgbe->wx->mac.type == wx_mac_aml40)
- return;
-
- if (txgbe->wx->mac.type == wx_mac_aml)
+ if (txgbe->wx->mac.type != wx_mac_sp)
free_irq(txgbe->gpio_irq, txgbe);
free_irq(txgbe->link_irq, txgbe);
@@ -219,9 +216,6 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
- if (wx->mac.type == wx_mac_aml40)
- goto skip_sp_irq;
-
txgbe->misc.nirqs = TXGBE_IRQ_MAX;
txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index daa761e48f9d..0de051450a82 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -89,21 +89,21 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-static void txgbe_sfp_detection_subtask(struct wx *wx)
+static void txgbe_module_detection_subtask(struct wx *wx)
{
int err;
- if (!test_bit(WX_FLAG_NEED_SFP_RESET, wx->flags))
+ if (!test_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags))
return;
- /* wait for SFP module ready */
+	/* wait for the SFP/QSFP module to become ready */
msleep(200);
- err = txgbe_identify_sfp(wx);
+ err = txgbe_identify_module(wx);
if (err)
return;
- clear_bit(WX_FLAG_NEED_SFP_RESET, wx->flags);
+ clear_bit(WX_FLAG_NEED_MODULE_RESET, wx->flags);
}
static void txgbe_link_config_subtask(struct wx *wx)
@@ -128,7 +128,7 @@ static void txgbe_service_task(struct work_struct *work)
{
struct wx *wx = container_of(work, struct wx, service_task);
- txgbe_sfp_detection_subtask(wx);
+ txgbe_module_detection_subtask(wx);
txgbe_link_config_subtask(wx);
wx_service_event_complete(wx);
@@ -144,7 +144,6 @@ static void txgbe_init_service(struct wx *wx)
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
- u32 reg;
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -155,12 +154,8 @@ static void txgbe_up_complete(struct wx *wx)
switch (wx->mac.type) {
case wx_mac_aml40:
- reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
- reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
- reg |= TXGBE_AML_MAC_TX_CFG_SPEED_40G;
- wr32(wx, WX_MAC_TX_CFG, reg);
- txgbe_enable_sec_tx_path(wx);
- netif_carrier_on(wx->netdev);
+ txgbe_setup_link(wx);
+ phylink_start(wx->phylink);
break;
case wx_mac_aml:
/* Enable TX laser */
@@ -276,7 +271,7 @@ void txgbe_down(struct wx *wx)
switch (wx->mac.type) {
case wx_mac_aml40:
- netif_carrier_off(wx->netdev);
+ phylink_stop(wx->phylink);
break;
case wx_mac_aml:
phylink_stop(wx->phylink);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 03f1b9bc604d..8ea7aa07ae4e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -579,7 +579,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
switch (wx->mac.type) {
case wx_mac_aml40:
- return 0;
case wx_mac_aml:
return txgbe_phylink_init_aml(txgbe);
case wx_mac_sp:
@@ -653,7 +652,6 @@ void txgbe_remove_phy(struct txgbe *txgbe)
{
switch (txgbe->wx->mac.type) {
case wx_mac_aml40:
- return;
case wx_mac_aml:
phylink_destroy(txgbe->wx->phylink);
return;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index b9a4ba48f5b9..82433e9cb0e3 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -98,6 +98,7 @@
/* Port cfg registers */
#define TXGBE_CFG_PORT_ST 0x14404
#define TXGBE_CFG_PORT_ST_LINK_UP BIT(0)
+#define TXGBE_CFG_PORT_ST_LINK_AML_40G BIT(2)
#define TXGBE_CFG_PORT_ST_LINK_AML_25G BIT(3)
#define TXGBE_CFG_PORT_ST_LINK_AML_10G BIT(4)
#define TXGBE_CFG_VXLAN 0x14410
@@ -317,8 +318,12 @@ void txgbe_do_reset(struct net_device *netdev);
#define TXGBE_LINK_SPEED_UNKNOWN 0
#define TXGBE_LINK_SPEED_10GB_FULL 4
#define TXGBE_LINK_SPEED_25GB_FULL 0x10
+#define TXGBE_LINK_SPEED_40GB_FULL 0x20
#define TXGBE_SFF_IDENTIFIER_SFP 0x3
+#define TXGBE_SFF_IDENTIFIER_QSFP 0xC
+#define TXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define TXGBE_SFF_IDENTIFIER_QSFP28 0x11
#define TXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define TXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define TXGBE_SFF_DA_SPEC_ACTIVE_LIMIT 0x4
@@ -331,6 +336,12 @@ void txgbe_do_reset(struct net_device *netdev);
#define TXGBE_SFF_25GBASECR_91FEC 0xB
#define TXGBE_SFF_25GBASECR_74FEC 0xC
#define TXGBE_SFF_25GBASECR_NOFEC 0xD
+#define TXGBE_SFF_ETHERNET_RSRVD BIT(7)
+#define TXGBE_SFF_ETHERNET_40G_CR4 BIT(3)
+#define TXGBE_SFF_ETHERNET_40G_SR4 BIT(2)
+#define TXGBE_SFF_ETHERNET_40G_LR4 BIT(1)
+#define TXGBE_SFF_ETHERNET_40G_ACTIVE BIT(0)
+#define TXGBE_SFF_ETHERNET_100G_CR4 0xB
#define TXGBE_PHY_FEC_RS BIT(0)
#define TXGBE_PHY_FEC_BASER BIT(1)
@@ -341,9 +352,10 @@ void txgbe_do_reset(struct net_device *netdev);
#define FW_PHY_GET_LINK_CMD 0xC0
#define FW_PHY_SET_LINK_CMD 0xC1
-#define FW_READ_SFP_INFO_CMD 0xC5
+#define FW_GET_MODULE_INFO_CMD 0xC5
+#define FW_READ_EEPROM_CMD 0xC6
-struct txgbe_sfp_id {
+struct txgbe_sff_id {
u8 identifier; /* A0H 0x00 */
u8 com_1g_code; /* A0H 0x06 */
u8 com_10g_code; /* A0H 0x03 */
@@ -358,9 +370,9 @@ struct txgbe_sfp_id {
u8 reserved[5];
};
-struct txgbe_hic_i2c_read {
+struct txgbe_hic_get_module_info {
struct wx_hic_hdr hdr;
- struct txgbe_sfp_id id;
+ struct txgbe_sff_id id;
};
struct txgbe_hic_ephy_setlink {
@@ -383,6 +395,16 @@ struct txgbe_hic_ephy_getlink {
u8 resv[6];
};
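+
+/* Buffer for the FW_READ_EEPROM_CMD hostif message; multi-byte fields
+ * are big-endian.
+ */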
+struct txgbe_hic_i2c_read {
+ struct wx_hic_hdr hdr;
+ __be32 offset;
+ __be32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 resv;
+};
+
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
.name = _NAME, \
@@ -451,8 +473,8 @@ struct txgbe {
int fdir_filter_count;
spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
- DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(link_interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 link_port;
};