60 files changed, 941 insertions, 1203 deletions
diff --git a/Documentation/networking/smc-sysctl.rst b/Documentation/networking/smc-sysctl.rst
index a874d007f2db..904a910f198e 100644
--- a/Documentation/networking/smc-sysctl.rst
+++ b/Documentation/networking/smc-sysctl.rst
@@ -71,3 +71,43 @@ smcr_max_conns_per_lgr - INTEGER
 	acceptable value ranges from 16 to 255. Only for SMC-R v2.1 and later.
 	Default: 255
+
+smcr_max_send_wr - INTEGER
+	So-called work request buffers are SMCR link (and RDMA queue pair) level
+	resources necessary for performing RDMA operations. Since up to 255
+	connections can share a link group (and thus also a link), and the
+	number of work request buffers is fixed when the link is allocated,
+	they can become a bottleneck under some workloads, in the sense that
+	threads have to wait for work request buffers to become available.
+	Before the introduction of this control, the maximum number of work
+	request buffers available on the send path was hard-coded to 16. With
+	this control it becomes configurable. The acceptable range is between
+	2 and 2048.
+
+	Please be aware that all the buffers need to be allocated as one
+	physically contiguous array, in which each element is a single buffer
+	of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails, we keep
+	retrying with half of the buffer count until it either succeeds or
+	(unlikely) we dip below the old hard-coded value of 16, at which point
+	we give up, much like before this control existed.
+
+	Default: 16
+
+smcr_max_recv_wr - INTEGER
+	So-called work request buffers are SMCR link (and RDMA queue pair) level
+	resources necessary for performing RDMA operations. Since up to 255
+	connections can share a link group (and thus also a link), and the
+	number of work request buffers is fixed when the link is allocated,
+	they can become a bottleneck under some workloads, in the sense that
+	threads have to wait for work request buffers to become available.
+	Before the introduction of this control, the maximum number of work
+	request buffers available on the receive path was hard-coded to 16.
+	With this control it becomes configurable. The acceptable range is
+	between 2 and 2048.
+
+	Please be aware that all the buffers need to be allocated as one
+	physically contiguous array, in which each element is a single buffer
+	of SMC_WR_BUF_SIZE (48) bytes. If the allocation fails, we keep
+	retrying with half of the buffer count until it either succeeds or
+	(unlikely) we dip below the old hard-coded value of 16, at which point
+	we give up, much like before this control existed.
+
+	Default: 48
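The retry policy described by the two entries above is easy to model. The following is a minimal user-space C sketch of that halving loop; the helper name and the allocation call are illustrative stand-ins derived from the documentation text, not the actual SMC implementation (which must allocate physically contiguous kernel memory).

/*
 * Minimal model of the documented fallback: try the configured count,
 * halve on failure, give up once the count would drop below the old
 * hard-coded value. smc_wr_alloc_bufs() is a hypothetical helper.
 */
#include <stdio.h>
#include <stdlib.h>

#define SMC_WR_BUF_SIZE	48	/* per-buffer size, from the text above */
#define SMC_WR_MIN_CNT	16	/* old hard-coded count, the give-up point */

static void *smc_wr_alloc_bufs(unsigned int *count)
{
	do {
		/* one contiguous array, one element per work request buffer */
		void *bufs = calloc(*count, SMC_WR_BUF_SIZE);

		if (bufs)
			return bufs;
		*count /= 2;
	} while (*count >= SMC_WR_MIN_CNT);

	return NULL;	/* no luck, behave as before the control existed */
}

int main(void)
{
	unsigned int count = 2048;	/* e.g. a large smcr_max_send_wr setting */
	void *bufs = smc_wr_alloc_bufs(&count);

	if (bufs)
		printf("allocated %u work request buffers\n", count);
	else
		printf("gave up below %d buffers\n", SMC_WR_MIN_CNT);
	free(bufs);
	return 0;
}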
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index a563a94e2780..7c6ee1e604aa 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -296,6 +296,7 @@ config ICE depends on GNSS || GNSS = n select AUXILIARY_BUS select DIMLIB + select LIBETH_XDP select LIBIE select LIBIE_ADMINQ select LIBIE_FWLOG diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 081a4526a2f0..0fe0d52c796b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -4788,6 +4788,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) unsigned long q_map; struct i40e_vf *vf; int abs_vf_id; + int old_link; int ret = 0; int tmp; @@ -4806,6 +4807,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf = &pf->vf[vf_id]; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* skip VF link state change if requested state is already set */ + if (!vf->link_forced) + old_link = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + old_link = IFLA_VF_LINK_STATE_ENABLE; + else + old_link = IFLA_VF_LINK_STATE_DISABLE; + + if (link == old_link) + goto error_out; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 9ee596773f34..147aaee192a7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -351,6 +351,7 @@ struct ice_vsi { u16 num_q_vectors; /* tell if only dynamic irq allocation is allowed */ bool irq_dyn_alloc; + bool hsplit:1; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -374,6 +375,8 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; + u16 max_frame; + struct ice_aqc_vsi_props info; /* VSI properties */ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ @@ -509,7 +512,6 @@ enum ice_pf_flags { ICE_FLAG_MOD_POWER_UNSUPPORTED, ICE_FLAG_PHY_FW_LOAD_FAILED, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ - ICE_FLAG_LEGACY_RX, ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, ICE_FLAG_VF_VLAN_PRUNING, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 2d35a278c555..eadb1e3d12b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include <net/xdp_sock_drv.h> +#include <linux/net/intel/libie/rx.h> #include "ice_base.h" #include "ice_lib.h" #include "ice_dcb_lib.h" @@ -462,19 +463,6 @@ u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring) } /** - * ice_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer.
- */ -static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring) -{ - if (ice_ring_uses_build_skb(rx_ring)) - return ICE_SKB_PAD; - return 0; -} - -/** * ice_setup_rx_ctx - Configure a receive ring context * @ring: The Rx ring to configure * @@ -536,8 +524,29 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) else rlan_ctx.l2tsel = 1; - rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; - rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + if (ring->hdr_pp) { + rlan_ctx.hbuf = ring->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rlan_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; + + /* + * If the frame is TCP/UDP/SCTP, it will be split by the + * payload. + * If not, but it's an IPv4/IPv6 frame, it will be split by + * the IP header. + * If not IP, it will be split by the Ethernet header. + * + * In any case, the header buffer will never be left empty. + */ + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 | + ICE_RLAN_RX_HSPLIT_0_SPLIT_IP | + ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP | + ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; + } else { + rlan_ctx.hbuf = 0; + rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; + rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; + } + rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; /* This controls whether VLAN is stripped from inner headers @@ -549,7 +558,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ - rlan_ctx.rxmax = min_t(u32, ring->max_frame, + rlan_ctx.rxmax = min_t(u32, vsi->max_frame, ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ @@ -586,14 +595,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) if (vsi->type == ICE_VSI_VF) return 0; - /* configure Rx buffer alignment */ - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - ice_clear_ring_build_skb_ena(ring); - else - ice_set_ring_build_skb_ena(ring); - - ring->rx_offset = ice_rx_offset(ring); - /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); @@ -601,36 +602,51 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) return 0; } -static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) +static int ice_rxq_pp_create(struct ice_rx_ring *rq) { - void *ctx_ptr = &ring->pkt_ctx; - struct xsk_cb_desc desc = {}; - - XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff); - desc.src = &ctx_ptr; - desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) - - sizeof(struct xdp_buff); - desc.bytes = sizeof(ctx_ptr); - xsk_pool_fill_cb(ring->xsk_pool, &desc); -} + struct libeth_fq fq = { + .count = rq->count, + .nid = NUMA_NO_NODE, + .hsplit = rq->vsi->hsplit, + .xdp = ice_is_xdp_ena_vsi(rq->vsi), + .buf_len = LIBIE_MAX_RX_BUF_LEN, + }; + int err; -/** - * ice_get_frame_sz - calculate xdp_buff::frame_sz - * @rx_ring: the ring being configured - * - * Return frame size based on underlying PAGE_SIZE - */ -static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) -{ - unsigned int frame_sz; + err = libeth_rx_fq_create(&fq, &rq->q_vector->napi); + if (err) + return err; + + rq->pp = fq.pp; + rq->rx_fqes = fq.fqes; + rq->truesize = fq.truesize; + rq->rx_buf_len = fq.buf_len; -#if (PAGE_SIZE >= 8192) - frame_sz = rx_ring->rx_buf_len; -#else - frame_sz = ice_rx_pg_size(rx_ring) / 2; -#endif + if (!fq.hsplit) + return 0; + + fq = (struct libeth_fq){ + .count = rq->count, + .type = LIBETH_FQE_HDR, + .nid = NUMA_NO_NODE, + .xdp = ice_is_xdp_ena_vsi(rq->vsi), + }; - return frame_sz; + err = libeth_rx_fq_create(&fq, &rq->q_vector->napi); + if (err) 
+ goto destroy; + + rq->hdr_pp = fq.pp; + rq->hdr_fqes = fq.fqes; + rq->hdr_truesize = fq.truesize; + rq->rx_hdr_len = fq.buf_len; + + return 0; + +destroy: + ice_rxq_pp_destroy(rq); + + return err; } /** @@ -642,7 +658,8 @@ static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) { struct device *dev = ice_pf_to_dev(ring->vsi->back); - u32 num_bufs = ICE_RX_DESC_UNUSED(ring); + u32 num_bufs = ICE_DESC_UNUSED(ring); + u32 rx_buf_len; int err; if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) { @@ -656,15 +673,19 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) } ice_rx_xsk_pool(ring); + err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool); + if (err) + return err; + if (ring->xsk_pool) { xdp_rxq_info_unreg(&ring->xdp_rxq); - ring->rx_buf_len = + rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, - ring->rx_buf_len); + rx_buf_len); if (err) return err; err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -673,36 +694,33 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); - ice_xsk_pool_fill_cb(ring); dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", ring->q_index); } else { + err = ice_rxq_pp_create(ring); + if (err) + return err; + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, ring->rx_buf_len); if (err) - return err; + goto err_destroy_fq; } - - err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, - NULL); - if (err) - return err; + xdp_rxq_info_attach_page_pool(&ring->xdp_rxq, + ring->pp); } } - xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq); ring->xdp.data = NULL; - ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); if (err) { dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n", ring->q_index, err); - return err; + goto err_destroy_fq; } if (ring->xsk_pool) { @@ -730,9 +748,17 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (ring->vsi->type == ICE_VSI_CTRL) ice_init_ctrl_rx_descs(ring, num_bufs); else - ice_alloc_rx_bufs(ring, num_bufs); + err = ice_alloc_rx_bufs(ring, num_bufs); + + if (err) + goto err_destroy_fq; return 0; + +err_destroy_fq: + ice_rxq_pp_destroy(ring); + + return err; } int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) @@ -753,18 +779,10 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) */ static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring) { - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - ring->max_frame = ICE_MAX_FRAME_LEGACY_RX; - ring->rx_buf_len = ICE_RXBUF_1664; -#if (PAGE_SIZE < 8192) - } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && - (vsi->netdev->mtu <= ETH_DATA_LEN)) { - ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; -#endif + if (!vsi->netdev) { + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; } else { - ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - ring->rx_buf_len = ICE_RXBUF_3072; + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; } } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index b097cc8b175c..83f5217bce9f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ 
b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3392,6 +3392,7 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) case ICE_DEV_ID_E822L_SGMII: case ICE_DEV_ID_E823L_1GBE: case ICE_DEV_ID_E823C_SGMII: + case ICE_DEV_ID_E825C_SGMII: return true; default: return false; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index cb34d4675a78..a1d9abee97e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -10,6 +10,7 @@ #include "ice_lib.h" #include "ice_dcb_lib.h" #include <net/dcbnl.h> +#include <net/libeth/rx.h> struct ice_stats { char stat_string[ETH_GSTRING_LEN]; @@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = { ICE_FLAG_VF_TRUE_PROMISC_ENA), ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF), ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING), - ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX), }; #define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) @@ -1231,8 +1231,9 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size) */ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) { - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_buf; int valid_frames, i; + struct page *page; u8 *received_buf; valid_frames = 0; @@ -1247,8 +1248,10 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring) cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))))) continue; - rx_buf = &rx_ring->rx_buf[i]; - received_buf = page_address(rx_buf->page) + rx_buf->page_offset; + rx_buf = &rx_ring->rx_fqes[i]; + page = __netmem_to_page(rx_buf->netmem); + received_buf = page_address(page) + rx_buf->offset + + page->pp->p.offset; if (ice_lbtest_check_frame(received_buf)) valid_frames++; @@ -1856,10 +1859,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ice_nway_reset(netdev); } } - if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) { - /* down and up VSI so that changes of Rx cfg are reflected. */ - ice_down_up(vsi); - } /* don't allow modification of this flag when a single VF is in * promiscuous mode because it's not supported */ @@ -3152,6 +3151,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, ring->rx_jumbo_max_pending = 0; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; + + kernel_ring->tcp_data_split = vsi->hsplit ? 
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED : + ETHTOOL_TCP_DATA_SPLIT_DISABLED; } static int @@ -3168,6 +3171,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, int i, timeout = 50, err = 0; struct ice_hw *hw = &pf->hw; u16 new_rx_cnt, new_tx_cnt; + bool hsplit; if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) || ring->tx_pending < ICE_MIN_NUM_DESC || @@ -3193,9 +3197,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", new_rx_cnt); + hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED; + /* if nothing to do return success */ if (new_tx_cnt == vsi->tx_rings[0]->count && - new_rx_cnt == vsi->rx_rings[0]->count) { + new_rx_cnt == vsi->rx_rings[0]->count && + hsplit == vsi->hsplit) { netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); return 0; } @@ -3225,6 +3232,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, vsi->xdp_rings[i]->count = new_tx_cnt; vsi->num_tx_desc = (u16)new_tx_cnt; vsi->num_rx_desc = (u16)new_rx_cnt; + vsi->hsplit = hsplit; + netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n"); goto done; } @@ -3308,7 +3317,8 @@ process_rx: rx_rings[i].count = new_rx_cnt; rx_rings[i].cached_phctime = pf->ptp.cached_phc_time; rx_rings[i].desc = NULL; - rx_rings[i].rx_buf = NULL; + rx_rings[i].xdp_buf = NULL; + /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ @@ -3317,10 +3327,6 @@ process_rx: err = ice_setup_rx_ring(&rx_rings[i]); if (err) goto rx_unwind; - - /* allocate Rx buffers */ - err = ice_alloc_rx_bufs(&rx_rings[i], - ICE_RX_DESC_UNUSED(&rx_rings[i])); rx_unwind: if (err) { while (i) { @@ -3334,6 +3340,8 @@ rx_unwind: } process_link: + vsi->hsplit = hsplit; + /* Bring interface down, copy in the new ring info, then restore the * interface. 
if VSI is up, bring it down and then back up */ @@ -4815,6 +4823,7 @@ static const struct ethtool_ops ice_ethtool_ops = { ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, .supported_input_xfrm = RXH_XFRM_SYM_XOR, + .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, .get_fec_stats = ice_get_fec_stats, diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 10c312d49e05..185672c7e17d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -342,6 +342,9 @@ enum ice_flg64_bits { /* for ice_32byte_rx_flex_desc.pkt_length member */ #define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ +/* ice_32byte_rx_flex_desc::hdr_len_sph_flex_flags1 */ +#define ICE_RX_FLEX_DESC_HDR_LEN_M GENMASK(10, 0) + enum ice_rx_flex_desc_status_error_0_bits { /* Note: These are predefined bit offsets */ ICE_RX_FLEX_DESC_STATUS0_DD_S = 0, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 69cb0381c460..15621707fbf8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1427,7 +1427,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->reg_idx = vsi->rxq_map[i]; ring->vsi = vsi; ring->netdev = vsi->netdev; - ring->dev = dev; ring->count = vsi->num_rx_desc; ring->cached_phctime = pf->ptp.cached_phc_time; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 1de3da7b3907..2533876f1a2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -37,6 +37,8 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS("LIBETH"); +MODULE_IMPORT_NS("LIBETH_XDP"); MODULE_IMPORT_NS("LIBIE"); MODULE_IMPORT_NS("LIBIE_ADMINQ"); MODULE_IMPORT_NS("LIBIE_FWLOG"); @@ -2957,10 +2959,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) */ static int ice_max_xdp_frame_size(struct ice_vsi *vsi) { - if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) - return ICE_RXBUF_1664; - else - return ICE_RXBUF_3072; + return ICE_RXBUF_3072; } /** @@ -3018,19 +3017,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } } xdp_features_set_redirect_target(vsi->netdev, true); - /* reallocate Rx queues that are used for zero-copy */ - xdp_ring_err = ice_realloc_zc_buf(vsi, true); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); - /* reallocate Rx queues that were used for zero-copy */ - xdp_ring_err = ice_realloc_zc_buf(vsi, false); - if (xdp_ring_err) - NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); } resume_if: @@ -7864,12 +7855,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu) frame_size - ICE_ETH_PKT_HDR_PAD); return -EINVAL; } - } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { - if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) { - netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n", - ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); - return -EINVAL; - 
} } /* if a reset is in progress, wait for some time for it to complete */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 73f08d02f9c7..ad76768a4232 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -7,6 +7,8 @@ #include <linux/netdevice.h> #include <linux/prefetch.h> #include <linux/bpf_trace.h> +#include <linux/net/intel/libie/rx.h> +#include <net/libeth/xdp.h> #include <net/dsfield.h> #include <net/mpls.h> #include <net/xdp.h> @@ -111,7 +113,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc, static void ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) { - if (dma_unmap_len(tx_buf, len)) + if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len)) dma_unmap_page(ring->dev, dma_unmap_addr(tx_buf, dma), dma_unmap_len(tx_buf, len), @@ -125,7 +127,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf) dev_kfree_skb_any(tx_buf->skb); break; case ICE_TX_BUF_XDP_TX: - page_frag_free(tx_buf->raw_buf); + libeth_xdp_return_va(tx_buf->raw_buf, false); break; case ICE_TX_BUF_XDP_XMIT: xdp_return_frame(tx_buf->xdpf); @@ -506,61 +508,67 @@ err: return -ENOMEM; } +void ice_rxq_pp_destroy(struct ice_rx_ring *rq) +{ + struct libeth_fq fq = { + .fqes = rq->rx_fqes, + .pp = rq->pp, + }; + + libeth_rx_fq_destroy(&fq); + rq->rx_fqes = NULL; + rq->pp = NULL; + + if (!rq->hdr_pp) + return; + + fq.fqes = rq->hdr_fqes; + fq.pp = rq->hdr_pp; + + libeth_rx_fq_destroy(&fq); + rq->hdr_fqes = NULL; + rq->hdr_pp = NULL; +} + /** * ice_clean_rx_ring - Free Rx buffers * @rx_ring: ring to be cleaned */ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) { - struct xdp_buff *xdp = &rx_ring->xdp; - struct device *dev = rx_ring->dev; u32 size; - u16 i; - - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_buf) - return; if (rx_ring->xsk_pool) { ice_xsk_clean_rx_ring(rx_ring); goto rx_skip_free; } - if (xdp->data) { - xdp_return_buff(xdp); - xdp->data = NULL; - } + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_fqes) + return; + + libeth_xdp_return_stash(&rx_ring->xdp); /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; + for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { + libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem); - if (!rx_buf->page) - continue; + if (rx_ring->hdr_pp) + libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem); - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. 
- */ - dma_sync_single_range_for_cpu(dev, rx_buf->dma, - rx_buf->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); - - rx_buf->page = NULL; - rx_buf->page_offset = 0; + if (unlikely(++i == rx_ring->count)) + i = 0; } -rx_skip_free: - if (rx_ring->xsk_pool) - memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); - else - memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); + if (rx_ring->vsi->type == ICE_VSI_PF && + xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) { + xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq); + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + } + ice_rxq_pp_destroy(rx_ring); + +rx_skip_free: /* Zero out the descriptor ring */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); @@ -568,7 +576,6 @@ rx_skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; - rx_ring->first_desc = 0; rx_ring->next_to_use = 0; } @@ -580,26 +587,20 @@ rx_skip_free: */ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) { + struct device *dev = ice_pf_to_dev(rx_ring->vsi->back); u32 size; ice_clean_rx_ring(rx_ring); - if (rx_ring->vsi->type == ICE_VSI_PF) - if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); WRITE_ONCE(rx_ring->xdp_prog, NULL); if (rx_ring->xsk_pool) { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; - } else { - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; } if (rx_ring->desc) { size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); - dmam_free_coherent(rx_ring->dev, size, - rx_ring->desc, rx_ring->dma); + dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } } @@ -612,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) */ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) { - struct device *dev = rx_ring->dev; + struct device *dev = ice_pf_to_dev(rx_ring->vsi->back); u32 size; - if (!dev) - return -ENOMEM; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_buf); - rx_ring->rx_buf = - kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); - if (!rx_ring->rx_buf) - return -ENOMEM; - /* round up to nearest page */ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), PAGE_SIZE); @@ -633,22 +624,16 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) if (!rx_ring->desc) { dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", size); - goto err; + return -ENOMEM; } rx_ring->next_to_use = 0; rx_ring->next_to_clean = 0; - rx_ring->first_desc = 0; if (ice_is_xdp_ena_vsi(rx_ring->vsi)) WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); return 0; - -err: - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; - return -ENOMEM; } /** @@ -662,7 +647,7 @@ err: * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static u32 -ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, +ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, union ice_32b_rx_flex_desc *eop_desc) { @@ -672,23 +657,23 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, if (!xdp_prog) goto exit; - ice_xdp_meta_set_desc(xdp, eop_desc); + xdp->desc = eop_desc; - act = bpf_prog_run_xdp(xdp_prog, xdp); + act = bpf_prog_run_xdp(xdp_prog, &xdp->base); switch 
(act) { case XDP_PASS: break; case XDP_TX: if (static_branch_unlikely(&ice_xdp_locking_key)) spin_lock(&xdp_ring->tx_lock); - ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false); + ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false); if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); if (ret == ICE_XDP_CONSUMED) goto out_failure; break; case XDP_REDIRECT: - if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) + if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog)) goto out_failure; ret = ICE_XDP_REDIR; break; @@ -700,8 +685,10 @@ out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); fallthrough; case XDP_DROP: + libeth_xdp_return_buff(xdp); ret = ICE_XDP_CONSUMED; } + exit: return ret; } @@ -790,53 +777,6 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } /** - * ice_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buf struct to modify - * - * Returns true if the page was successfully allocated or - * reused. - */ -static bool -ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) - return true; - - /* alloc new page for storage */ - page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->ring_stats->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, ICE_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, ice_rx_pg_order(rx_ring)); - rx_ring->ring_stats->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = rx_ring->rx_offset; - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; - - return true; -} - -/** * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi. 
* @rx_ring: ring to init descriptors on * @count: number of descriptors to initialize @@ -882,9 +822,20 @@ void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count) */ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) { + const struct libeth_fq_fp hdr_fq = { + .pp = rx_ring->hdr_pp, + .fqes = rx_ring->hdr_fqes, + .truesize = rx_ring->hdr_truesize, + .count = rx_ring->count, + }; + const struct libeth_fq_fp fq = { + .pp = rx_ring->pp, + .fqes = rx_ring->rx_fqes, + .truesize = rx_ring->truesize, + .count = rx_ring->count, + }; union ice_32b_rx_flex_desc *rx_desc; u16 ntu = rx_ring->next_to_use; - struct ice_rx_buf *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) @@ -892,30 +843,39 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) /* get the Rx descriptor and buffer based on next_to_use */ rx_desc = ICE_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_buf[ntu]; do { - /* if we fail here, we have work remaining */ - if (!ice_alloc_mapped_page(rx_ring, bi)) - break; + dma_addr_t addr; - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); + addr = libeth_rx_alloc(&fq, ntu); + if (addr == DMA_MAPPING_ERROR) { + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + break; + } /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(addr); + + if (!hdr_fq.pp) + goto next; + + addr = libeth_rx_alloc(&hdr_fq, ntu); + if (addr == DMA_MAPPING_ERROR) { + rx_ring->ring_stats->rx_stats.alloc_page_failed++; + + libeth_rx_recycle_slow(fq.fqes[ntu].netmem); + break; + } + + rx_desc->read.hdr_addr = cpu_to_le64(addr); +next: rx_desc++; - bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = ICE_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_buf; ntu = 0; } @@ -932,402 +892,6 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) } /** - * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse - * @rx_buf: Rx buffer to adjust - * @size: Size of adjustment - * - * Update the offset within page so that Rx buf will be ready to be reused. 
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset - * so the second half of page assigned to Rx buffer will be used, otherwise - * the offset is moved by "size" bytes - */ -static void -ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) -{ -#if (PAGE_SIZE < 8192) - /* flip page offset to other buffer */ - rx_buf->page_offset ^= size; -#else - /* move offset up to the next cache line */ - rx_buf->page_offset += size; -#endif -} - -/** - * ice_can_reuse_rx_page - Determine if page can be reused for another Rx - * @rx_buf: buffer containing the page - * - * If page is reusable, we have a green light for calling ice_reuse_rx_page, - * which will assign the current buffer to the buffer that next_to_alloc is - * pointing to; otherwise, the DMA mapping needs to be destroyed and - * page freed - */ -static bool -ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) -{ - unsigned int pagecnt_bias = rx_buf->pagecnt_bias; - struct page *page = rx_buf->page; - - /* avoid re-using remote and pfmemalloc pages */ - if (!dev_page_is_reusable(page)) - return false; - - /* if we are only owner of page we can reuse it */ - if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) - return false; -#if (PAGE_SIZE >= 8192) -#define ICE_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) - if (rx_buf->page_offset > ICE_LAST_OFFSET) - return false; -#endif /* PAGE_SIZE >= 8192) */ - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buf->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp buff to place the data into - * @rx_buf: buffer containing page to add - * @size: packet length from rx_desc - * - * This function will add the data contained in rx_buf->page to the xdp buf. - * It will just attach the page as a frag. - */ -static int -ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct ice_rx_buf *rx_buf, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); - - if (!size) - return 0; - - if (!xdp_buff_has_frags(xdp)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(xdp); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) - return -ENOMEM; - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, - rx_buf->page_offset, size); - sinfo->xdp_frags_size += size; - - if (page_is_pfmemalloc(rx_buf->page)) - xdp_buff_set_frag_pfmemalloc(xdp); - - return 0; -} - -/** - * ice_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: Rx descriptor ring to store buffers on - * @old_buf: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - */ -static void -ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) -{ - u16 nta = rx_ring->next_to_alloc; - struct ice_rx_buf *new_buf; - - new_buf = &rx_ring->rx_buf[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* Transfer page from old buffer to new buffer. - * Move each member individually to avoid possible store - * forwarding stalls and unnecessary copy of skb. 
- */ - new_buf->dma = old_buf->dma; - new_buf->page = old_buf->page; - new_buf->page_offset = old_buf->page_offset; - new_buf->pagecnt_bias = old_buf->pagecnt_bias; -} - -/** - * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use - * @rx_ring: Rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * @ntc: index of next to clean element - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct ice_rx_buf * -ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, - const unsigned int ntc) -{ - struct ice_rx_buf *rx_buf; - - rx_buf = &rx_ring->rx_buf[ntc]; - prefetchw(rx_buf->page); - - if (!size) - return rx_buf; - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, - rx_buf->page_offset, size, - DMA_FROM_DEVICE); - - /* We have pulled a buffer for use, so decrement pagecnt_bias */ - rx_buf->pagecnt_bias--; - - return rx_buf; -} - -/** - * ice_get_pgcnts - grab page_count() for gathered fragments - * @rx_ring: Rx descriptor ring to store the page counts on - * @ntc: the next to clean element (not included in this frame!) - * - * This function is intended to be called right before running XDP - * program so that the page recycling mechanism will be able to take - * a correct decision regarding underlying pages; this is done in such - * way as XDP program can change the refcount of page - */ -static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc) -{ - u32 idx = rx_ring->first_desc; - struct ice_rx_buf *rx_buf; - u32 cnt = rx_ring->count; - - while (idx != ntc) { - rx_buf = &rx_ring->rx_buf[idx]; - rx_buf->pgcnt = page_count(rx_buf->page); - - if (++idx == cnt) - idx = 0; - } -} - -/** - * ice_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp_buff pointing to the data - * - * This function builds an skb around an existing XDP buffer, taking care - * to set up the skb correctly and avoid any memcpy overhead. Driver has - * already combined frags (if any) to skb_shared_info. - */ -static struct sk_buff * -ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - u8 metasize = xdp->data - xdp->data_meta; - struct skb_shared_info *sinfo = NULL; - unsigned int nr_frags; - struct sk_buff *skb; - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - - /* Prefetch first cache line of first page. If xdp->data_meta - * is unused, this points exactly as xdp->data, otherwise we - * likely have a consumer accessing first few bytes of meta - * data, and then actual data. 
- */ - net_prefetch(xdp->data_meta); - /* build an skb around the page buffer */ - skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); - if (unlikely(!skb)) - return NULL; - - /* must to record Rx queue, otherwise OS features such as - * symmetric queue won't work - */ - skb_record_rx_queue(skb, rx_ring->q_index); - - /* update pointers within the skb to store the data */ - skb_reserve(skb, xdp->data - xdp->data_hard_start); - __skb_put(skb, xdp->data_end - xdp->data); - if (metasize) - skb_metadata_set(skb, metasize); - - if (unlikely(xdp_buff_has_frags(xdp))) - xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size, - nr_frags * xdp->frame_sz, - xdp_buff_get_skb_flags(xdp)); - - return skb; -} - -/** - * ice_construct_skb - Allocate skb and populate it - * @rx_ring: Rx descriptor ring to transact packets on - * @xdp: xdp_buff pointing to the data - * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. - */ -static struct sk_buff * -ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - unsigned int size = xdp->data_end - xdp->data; - struct skb_shared_info *sinfo = NULL; - struct ice_rx_buf *rx_buf; - unsigned int nr_frags = 0; - unsigned int headlen; - struct sk_buff *skb; - - /* prefetch first cache line of first page */ - net_prefetch(xdp->data); - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; - skb_record_rx_queue(skb, rx_ring->q_index); - /* Determine available headroom for copy */ - headlen = size; - if (headlen > ICE_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, - sizeof(long))); - - /* if we exhaust the linear part then add what is left as a frag */ - size -= headlen; - if (size) { - /* besides adding here a partial frag, we are going to add - * frags from xdp_buff, make sure there is enough space for - * them - */ - if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) { - dev_kfree_skb(skb); - return NULL; - } - skb_add_rx_frag(skb, 0, rx_buf->page, - rx_buf->page_offset + headlen, size, - xdp->frame_sz); - } else { - /* buffer is unused, restore biased page count in Rx buffer; - * data was copied onto skb's linear part so there's no - * need for adjusting page offset and we can reuse this buffer - * as-is - */ - rx_buf->pagecnt_bias++; - } - - if (unlikely(xdp_buff_has_frags(xdp))) { - struct skb_shared_info *skinfo = skb_shinfo(skb); - - memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], - sizeof(skb_frag_t) * nr_frags); - - xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags, - sinfo->xdp_frags_size, - nr_frags * xdp->frame_sz, - xdp_buff_get_skb_flags(xdp)); - } - - return skb; -} - -/** - * ice_put_rx_buf - Clean up used buffer and either recycle or free - * @rx_ring: Rx descriptor ring to transact packets on - * @rx_buf: Rx buffer to pull data from - * - * This function will clean up the contents of the rx_buf. It will either - * recycle the buffer or unmap it and free the associated resources. 
- */ -static void -ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) -{ - if (!rx_buf) - return; - - if (ice_can_reuse_rx_page(rx_buf)) { - /* hand second half of page back to the ring */ - ice_reuse_rx_page(rx_ring, rx_buf); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, - ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, - ICE_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buf->page = NULL; -} - -/** - * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame - * @rx_ring: Rx ring with all the auxiliary data - * @xdp: XDP buffer carrying linear + frags part - * @ntc: the next to clean element (not included in this frame!) - * @verdict: return code from XDP program execution - * - * Called after XDP program is completed, or on error with verdict set to - * ICE_XDP_CONSUMED. - * - * Walk through buffers from first_desc to the end of the frame, releasing - * buffers and satisfying internal page recycle mechanism. The action depends - * on verdict from XDP program. - */ -static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - u32 ntc, u32 verdict) -{ - u32 idx = rx_ring->first_desc; - u32 cnt = rx_ring->count; - struct ice_rx_buf *buf; - u32 xdp_frags = 0; - int i = 0; - - if (unlikely(xdp_buff_has_frags(xdp))) - xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - - while (idx != ntc) { - buf = &rx_ring->rx_buf[idx]; - if (++idx == cnt) - idx = 0; - - /* An XDP program could release fragments from the end of the - * buffer. For these, we need to keep the pagecnt_bias as-is. - * To do this, only adjust pagecnt_bias for fragments up to - * the total remaining after the XDP program has run. 
- */ - if (verdict != ICE_XDP_CONSUMED) - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - else if (i++ <= xdp_frags) - buf->pagecnt_bias++; - - ice_put_rx_buf(rx_ring, buf); - } - - xdp->data = NULL; - rx_ring->first_desc = ntc; -} - -/** * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on * @@ -1361,9 +925,8 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) total_rx_pkts++; } - rx_ring->first_desc = ntc; rx_ring->next_to_clean = ntc; - ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); + ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring)); } /** @@ -1381,16 +944,17 @@ void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; - unsigned int offset = rx_ring->rx_offset; - struct xdp_buff *xdp = &rx_ring->xdp; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; + LIBETH_XDP_ONSTACK_BUFF(xdp); u32 cached_ntu, xdp_verdict; u32 cnt = rx_ring->count; u32 xdp_xmit = 0; bool failure; + libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (xdp_prog) { xdp_ring = rx_ring->xdp_ring; @@ -1400,19 +964,21 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) /* start the loop to process Rx packets bounded by 'budget' */ while (likely(total_rx_pkts < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_buf; struct sk_buff *skb; unsigned int size; u16 stat_err_bits; u16 vlan_tci; + bool rxe; /* get the Rx desc from Rx ring based on 'next_to_clean' */ rx_desc = ICE_RX_DESC(rx_ring, ntc); - /* status_error_len will always be zero for unused descriptors - * because it's cleared in cleanup, and overlaps with hdr_addr - * which is always zero because packet split isn't used, if the - * hardware wrote DD then it will be non-zero + /* + * The DD bit will always be zero for unused descriptors + * because it's cleared in cleanup or when setting the DMA + * address of the header buffer, which never uses the DD bit. + * If the hardware wrote the descriptor, it will be non-zero. 
*/ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits)) @@ -1426,71 +992,65 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) ice_trace(clean_rx_irq, rx_ring, rx_desc); + stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) | + BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); + rxe = ice_test_staterr(rx_desc->wb.status_error0, + stat_err_bits); + + if (!rx_ring->hdr_pp) + goto payload; + + size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1, + ICE_RX_FLEX_DESC_HDR_LEN_M); + if (unlikely(rxe)) + size = 0; + + rx_buf = &rx_ring->hdr_fqes[ntc]; + libeth_xdp_process_buff(xdp, rx_buf, size); + rx_buf->netmem = 0; + +payload: size = le16_to_cpu(rx_desc->wb.pkt_len) & ICE_RX_FLX_DESC_PKT_LEN_M; + if (unlikely(rxe)) + size = 0; /* retrieve a buffer from the ring */ - rx_buf = ice_get_rx_buf(rx_ring, size, ntc); + rx_buf = &rx_ring->rx_fqes[ntc]; + libeth_xdp_process_buff(xdp, rx_buf, size); - /* Increment ntc before calls to ice_put_rx_mbuf() */ if (++ntc == cnt) ntc = 0; - if (!xdp->data) { - void *hard_start; - - hard_start = page_address(rx_buf->page) + rx_buf->page_offset - - offset; - xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); - xdp_buff_clear_frags_flag(xdp); - } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { - ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED); - break; - } - /* skip if it is NOP desc */ - if (ice_is_non_eop(rx_ring, rx_desc)) + if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data)) continue; - ice_get_pgcnts(rx_ring, ntc); xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; - total_rx_bytes += xdp_get_buff_len(xdp); - total_rx_pkts++; - ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); - xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR); + if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) + xdp_xmit |= xdp_verdict; + total_rx_bytes += xdp_get_buff_len(&xdp->base); + total_rx_pkts++; + xdp->data = NULL; continue; + construct_skb: - if (likely(ice_ring_uses_build_skb(rx_ring))) - skb = ice_build_skb(rx_ring, xdp); - else - skb = ice_construct_skb(rx_ring, xdp); + skb = xdp_build_skb_from_buff(&xdp->base); + xdp->data = NULL; + /* exit if we failed to retrieve a buffer */ if (!skb) { + libeth_xdp_return_buff_slow(xdp); rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - xdp_verdict = ICE_XDP_CONSUMED; - } - ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict); - - if (!skb) - break; - - stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); - if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, - stat_err_bits))) { - dev_kfree_skb_any(skb); continue; } vlan_tci = ice_get_vlan_tci(rx_desc); - /* pad the skb if needed, to make a valid ethernet frame */ - if (eth_skb_pad(skb)) - continue; - /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1507,11 +1067,13 @@ construct_skb: rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ - failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); + failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring)); if (xdp_xmit) ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu); + libeth_xdp_save_buff(&rx_ring->xdp, xdp); + if (rx_ring->ring_stats) ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 841a07bfba54..e440c55d9e9f 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -4,6 +4,8 @@ #ifndef _ICE_TXRX_H_ #define _ICE_TXRX_H_ +#include <net/libeth/types.h> + #include "ice_type.h" #define ICE_DFLT_IRQ_WORK 256 @@ -27,72 +29,6 @@ #define ICE_MAX_TXQ_PER_TXQG 128 -/* Attempt to maximize the headroom available for incoming frames. We use a 2K - * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame. - * This leaves us with 512 bytes of room. From that we need to deduct the - * space needed for the shared info and the padding needed to IP align the - * frame. - * - * Note: For cache line sizes 256 or larger this value is going to end - * up negative. In these cases we should fall back to the legacy - * receive path. - */ -#if (PAGE_SIZE < 8192) -#define ICE_2K_TOO_SMALL_WITH_PADDING \ - ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \ - SKB_WITH_OVERHEAD(ICE_RXBUF_2048)) - -/** - * ice_compute_pad - compute the padding - * @rx_buf_len: buffer length - * - * Figure out the size of half page based on given buffer length and - * then subtract the skb_shared_info followed by subtraction of the - * actual buffer length; this in turn results in the actual space that - * is left for padding usage - */ -static inline int ice_compute_pad(int rx_buf_len) -{ - int half_page_size; - - half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); - return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len; -} - -/** - * ice_skb_pad - determine the padding that we can supply - * - * Figure out the right Rx buffer size and based on that calculate the - * padding - */ -static inline int ice_skb_pad(void) -{ - int rx_buf_len; - - /* If a 2K buffer cannot handle a standard Ethernet frame then - * optimize padding for a 3K buffer instead of a 1.5K buffer. - * - * For a 3K buffer we need to add enough padding to allow for - * tailroom due to NET_IP_ALIGN possibly shifting us out of - * cache-line alignment. - */ - if (ICE_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); - else - rx_buf_len = ICE_RXBUF_1536; - - /* if needed make room for NET_IP_ALIGN */ - rx_buf_len -= NET_IP_ALIGN; - - return ice_compute_pad(rx_buf_len); -} - -#define ICE_SKB_PAD ice_skb_pad() -#else -#define ICE_2K_TOO_SMALL_WITH_PADDING false -#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#endif - /* We are assuming that the cache line is always 64 Bytes here for ice. * In order to make sure that is a correct assumption there is a check in probe * to print a warning if the read from GLPCI_CNF2 tells us that the cache line @@ -112,10 +48,6 @@ static inline int ice_skb_pad(void) (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) -#define ICE_RX_DESC_UNUSED(R) \ - ((((R)->first_desc > (R)->next_to_use) ? 
0 : (R)->count) + \ - (R)->first_desc - (R)->next_to_use - 1) - #define ICE_RING_QUARTER(R) ((R)->count >> 2) #define ICE_TX_FLAGS_TSO BIT(0) @@ -197,14 +129,6 @@ struct ice_tx_offload_params { u8 header_len; }; -struct ice_rx_buf { - dma_addr_t dma; - struct page *page; - unsigned int page_offset; - unsigned int pgcnt; - unsigned int pagecnt_bias; -}; - struct ice_q_stats { u64 pkts; u64 bytes; @@ -262,15 +186,6 @@ struct ice_pkt_ctx { __be16 vlan_proto; }; -struct ice_xdp_buff { - struct xdp_buff xdp_buff; - const union ice_32b_rx_flex_desc *eop_desc; - const struct ice_pkt_ctx *pkt_ctx; -}; - -/* Required for compatibility with xdp_buffs from xsk_pool */ -static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0); - /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 @@ -323,7 +238,7 @@ struct ice_tstamp_ring { struct ice_rx_ring { /* CL1 - 1st cacheline starts here */ void *desc; /* Descriptor ring memory */ - struct device *dev; /* Used for DMA mapping */ + struct page_pool *pp; struct net_device *netdev; /* netdev ring maps to */ struct ice_vsi *vsi; /* Backreference to associated VSI */ struct ice_q_vector *q_vector; /* Backreference to associated vector */ @@ -335,14 +250,19 @@ struct ice_rx_ring { u16 next_to_alloc; union { - struct ice_rx_buf *rx_buf; + struct libeth_fqe *rx_fqes; struct xdp_buff **xdp_buf; }; + /* CL2 - 2nd cacheline starts here */ + struct libeth_fqe *hdr_fqes; + struct page_pool *hdr_pp; + union { - struct ice_xdp_buff xdp_ext; - struct xdp_buff xdp; + struct libeth_xdp_buff_stash xdp; + struct libeth_xdp_buff *xsk; }; + /* CL3 - 3rd cacheline starts here */ union { struct ice_pkt_ctx pkt_ctx; @@ -352,12 +272,13 @@ struct ice_rx_ring { }; }; struct bpf_prog *xdp_prog; - u16 rx_offset; /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; - u16 first_desc; + + u32 hdr_truesize; + u32 truesize; /* stats structs */ struct ice_ring_stats *ring_stats; @@ -368,12 +289,11 @@ struct ice_rx_ring { struct ice_tx_ring *xdp_ring; struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; - u16 max_frame; + u16 rx_hdr_len; u16 rx_buf_len; dma_addr_t dma; /* physical address of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; -#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) #define ICE_RX_FLAGS_MULTIDEV BIT(3) #define ICE_RX_FLAGS_RING_GCS BIT(4) @@ -422,21 +342,6 @@ struct ice_tx_ring { u16 quanta_prof_id; } ____cacheline_internodealigned_in_smp; -static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) -{ - return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB); -} - -static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring) -{ - ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB; -} - -static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring) -{ - ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB; -} - static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring) { return !!ring->ch; @@ -491,18 +396,13 @@ struct ice_coalesce_stored { static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring) { -#if (PAGE_SIZE < 8192) - if (ring->rx_buf_len > (PAGE_SIZE / 2)) - return 1; -#endif return 0; } -#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring)) - union ice_32b_rx_flex_desc; void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs); +void ice_rxq_pp_destroy(struct ice_rx_ring *rq); bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count); 
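The ICE_DESC_UNUSED() macro retained above now serves the Rx rings as well, replacing the removed first_desc-based ICE_RX_DESC_UNUSED(). A self-contained sketch of its circular-ring arithmetic follows; the driver types are swapped for a local struct and the index values are made up for illustration.

/*
 * Standalone check of the ring accounting: one slot is always kept
 * unused so that next_to_use can never catch up with next_to_clean.
 */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned short count;		/* total descriptors */
	unsigned short next_to_use;	/* producer index */
	unsigned short next_to_clean;	/* consumer index */
};

#define ICE_DESC_UNUSED(R) \
	(unsigned short)((((R)->next_to_clean > (R)->next_to_use) ? \
		0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)

int main(void)
{
	struct ring r = { .count = 512, .next_to_use = 10, .next_to_clean = 20 };

	/* next_to_clean numerically ahead: 20 - 10 - 1 = 9 slots free */
	assert(ICE_DESC_UNUSED(&r) == 9);

	/* next_to_use wrapped past the end: 512 + 4 - 10 - 1 = 505 slots free */
	r.next_to_clean = 4;
	assert(ICE_DESC_UNUSED(&r) == 505);

	puts("ring accounting behaves as expected");
	return 0;
}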
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); u16 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 45cfaabc41cb..956da38d63b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -3,6 +3,7 @@ #include <linux/filter.h> #include <linux/net/intel/libie/rx.h> +#include <net/libeth/xdp.h> #include "ice_txrx_lib.h" #include "ice_eswitch.h" @@ -230,9 +231,12 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, if (ice_is_port_repr_netdev(netdev)) ice_repr_inc_rx_stats(netdev, skb->len); + + /* __skb_push() is needed because xdp_build_skb_from_buff() + * calls eth_type_trans() + */ + __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, netdev); - } else { - skb->protocol = eth_type_trans(skb, rx_ring->netdev); } ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -270,19 +274,18 @@ static void ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf, struct xdp_frame_bulk *bq) { - dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), - dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); - dma_unmap_len_set(tx_buf, len, 0); - switch (tx_buf->type) { case ICE_TX_BUF_XDP_TX: - page_frag_free(tx_buf->raw_buf); + libeth_xdp_return_va(tx_buf->raw_buf, true); break; case ICE_TX_BUF_XDP_XMIT: + dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); xdp_return_frame_bulk(tx_buf->xdpf, bq); break; } + dma_unmap_len_set(tx_buf, len, 0); tx_buf->type = ICE_TX_BUF_EMPTY; } @@ -377,9 +380,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf; u32 cnt = xdp_ring->count; void *data = xdp->data; + struct page *page; u32 nr_frags = 0; u32 free_space; u32 frag = 0; + u32 offset; free_space = ICE_DESC_UNUSED(xdp_ring); if (free_space < ICE_RING_QUARTER(xdp_ring)) @@ -399,24 +404,28 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, tx_head = &xdp_ring->tx_buf[ntu]; tx_buf = tx_head; + page = virt_to_page(data); + offset = offset_in_page(xdp->data); + for (;;) { dma_addr_t dma; - dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) - goto dma_unmap; - - /* record length, and DMA address */ - dma_unmap_len_set(tx_buf, len, size); - dma_unmap_addr_set(tx_buf, dma, dma); - if (frame) { + dma = dma_map_single(dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + goto dma_unmap; tx_buf->type = ICE_TX_BUF_FRAG; } else { + dma = page_pool_get_dma_addr(page) + offset; + dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL); tx_buf->type = ICE_TX_BUF_XDP_TX; tx_buf->raw_buf = data; } + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, size); + dma_unmap_addr_set(tx_buf, dma, dma); + tx_desc->buf_addr = cpu_to_le64(dma); tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0); @@ -430,6 +439,8 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, tx_desc = ICE_TX_DESC(xdp_ring, ntu); tx_buf = &xdp_ring->tx_buf[ntu]; + page = skb_frag_page(&sinfo->frags[frag]); + offset = skb_frag_off(&sinfo->frags[frag]); data = skb_frag_address(&sinfo->frags[frag]); size = skb_frag_size(&sinfo->frags[frag]); frag++; @@ -514,10 +525,13 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, */ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct 
libeth_xdp_buff *xdp_ext = (void *)ctx; + struct ice_rx_ring *rx_ring; - *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc, - xdp_ext->pkt_ctx); + rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq); + + *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->desc, + &rx_ring->pkt_ctx); if (!*ts_ns) return -ENODATA; @@ -545,10 +559,10 @@ ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, enum xdp_rss_hash_type *rss_type) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct libeth_xdp_buff *xdp_ext = (void *)ctx; - *hash = ice_get_rx_hash(xdp_ext->eop_desc); - *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc); + *hash = ice_get_rx_hash(xdp_ext->desc); + *rss_type = ice_xdp_rx_hash_type(xdp_ext->desc); if (!likely(*hash)) return -ENODATA; @@ -567,13 +581,16 @@ static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash, static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto, u16 *vlan_tci) { - const struct ice_xdp_buff *xdp_ext = (void *)ctx; + const struct libeth_xdp_buff *xdp_ext = (void *)ctx; + struct ice_rx_ring *rx_ring; + + rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq); - *vlan_proto = xdp_ext->pkt_ctx->vlan_proto; + *vlan_proto = rx_ring->pkt_ctx.vlan_proto; if (!*vlan_proto) return -ENODATA; - *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc); + *vlan_tci = ice_get_vlan_tci(xdp_ext->desc); if (!*vlan_tci) return -ENODATA; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index 99717730f21a..6a3f10f7a53f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -135,13 +135,4 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, void ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci); -static inline void -ice_xdp_meta_set_desc(struct xdp_buff *xdp, - union ice_32b_rx_flex_desc *eop_desc) -{ - struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff, - xdp_buff); - - xdp_ext->eop_desc = eop_desc; -} #endif /* !_ICE_TXRX_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 575fd48f485f..989ff1fd9110 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -3,6 +3,7 @@ #include <linux/bpf_trace.h> #include <linux/unroll.h> +#include <net/libeth/xdp.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ice.h" @@ -169,50 +170,18 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) * If allocation was successful, substitute buffer with allocated one. * Returns 0 on success, negative on failure */ -static int +int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) { - size_t elem_size = pool_present ? 
sizeof(*rx_ring->xdp_buf) : - sizeof(*rx_ring->rx_buf); - void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); - - if (!sw_ring) - return -ENOMEM; - if (pool_present) { - kfree(rx_ring->rx_buf); - rx_ring->rx_buf = NULL; - rx_ring->xdp_buf = sw_ring; + rx_ring->xdp_buf = kcalloc(rx_ring->count, + sizeof(*rx_ring->xdp_buf), + GFP_KERNEL); + if (!rx_ring->xdp_buf) + return -ENOMEM; } else { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; - rx_ring->rx_buf = sw_ring; - } - - return 0; -} - -/** - * ice_realloc_zc_buf - reallocate XDP ZC queue pairs - * @vsi: Current VSI - * @zc: is zero copy set - * - * Reallocate buffer for rx_rings that might be used by XSK. - * XDP requires more memory, than rx_buf provides. - * Returns 0 on success, negative on failure - */ -int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) -{ - struct ice_rx_ring *rx_ring; - uint i; - - ice_for_each_rxq(vsi, i) { - rx_ring = vsi->rx_rings[i]; - if (!rx_ring->xsk_pool) - continue; - - if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) - return -ENOMEM; } return 0; @@ -228,6 +197,7 @@ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) */ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { + struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; bool if_running, pool_present = !!pool; int ret = 0, pool_failure = 0; @@ -241,8 +211,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) ice_is_xdp_ena_vsi(vsi); if (if_running) { - struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; - ret = ice_qp_dis(vsi, qid); if (ret) { netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret); @@ -303,11 +271,6 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, rx_desc->read.pkt_addr = cpu_to_le64(dma); rx_desc->wb.status_error0 = 0; - /* Put private info that changes on a per-packet basis - * into xdp_buff_xsk->cb. - */ - ice_xdp_meta_set_desc(*xdp, rx_desc); - rx_desc++; xdp++; } @@ -393,69 +356,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, } /** - * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer - * @rx_ring: Rx ring - * @xdp: Pointer to XDP buffer - * - * This function allocates a new skb from a zero-copy Rx buffer. - * - * Returns the skb on success, NULL on failure. 
- */ -static struct sk_buff * -ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) -{ - unsigned int totalsize = xdp->data_end - xdp->data_meta; - unsigned int metasize = xdp->data - xdp->data_meta; - struct skb_shared_info *sinfo = NULL; - struct sk_buff *skb; - u32 nr_frags = 0; - - if (unlikely(xdp_buff_has_frags(xdp))) { - sinfo = xdp_get_shared_info_from_buff(xdp); - nr_frags = sinfo->nr_frags; - } - net_prefetch(xdp->data_meta); - - skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); - if (unlikely(!skb)) - return NULL; - - memcpy(__skb_put(skb, totalsize), xdp->data_meta, - ALIGN(totalsize, sizeof(long))); - - if (metasize) { - skb_metadata_set(skb, metasize); - __skb_pull(skb, metasize); - } - - if (likely(!xdp_buff_has_frags(xdp))) - goto out; - - for (int i = 0; i < nr_frags; i++) { - struct skb_shared_info *skinfo = skb_shinfo(skb); - skb_frag_t *frag = &sinfo->frags[i]; - struct page *page; - void *addr; - - page = dev_alloc_page(); - if (!page) { - dev_kfree_skb(skb); - return NULL; - } - addr = page_to_virt(page); - - memcpy(addr, skb_frag_page(frag), skb_frag_size(frag)); - - __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++, - addr, 0, skb_frag_size(frag)); - } - -out: - xsk_buff_free(xdp); - return skb; -} - -/** * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ * @xdp_ring: XDP Tx ring * @xsk_pool: AF_XDP buffer pool pointer @@ -669,10 +569,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, struct xsk_buff_pool *xsk_pool, int budget) { + struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk; unsigned int total_rx_bytes = 0, total_rx_packets = 0; u32 ntc = rx_ring->next_to_clean; u32 ntu = rx_ring->next_to_use; - struct xdp_buff *first = NULL; struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; @@ -686,9 +586,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, xdp_prog = READ_ONCE(rx_ring->xdp_prog); xdp_ring = rx_ring->xdp_ring; - if (ntc != rx_ring->first_desc) - first = *ice_xdp_buf(rx_ring, rx_ring->first_desc); - while (likely(total_rx_packets < (unsigned int)budget)) { union ice_32b_rx_flex_desc *rx_desc; unsigned int size, xdp_res = 0; @@ -724,15 +621,17 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, first = xdp; } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) { xsk_buff_free(first); - break; + first = NULL; } if (++ntc == cnt) ntc = 0; - if (ice_is_non_eop(rx_ring, rx_desc)) + if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first)) continue; + ((struct libeth_xdp_buff *)first)->desc = rx_desc; + xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring, xsk_pool); if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { @@ -740,7 +639,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, } else if (xdp_res == ICE_XDP_EXIT) { failure = true; first = NULL; - rx_ring->first_desc = ntc; break; } else if (xdp_res == ICE_XDP_CONSUMED) { xsk_buff_free(first); @@ -752,24 +650,20 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, total_rx_packets++; first = NULL; - rx_ring->first_desc = ntc; continue; construct_skb: /* XDP_PASS path */ - skb = ice_construct_skb_zc(rx_ring, first); + skb = xdp_build_skb_from_zc(first); if (!skb) { + xsk_buff_free(first); + first = NULL; + rx_ring->ring_stats->rx_stats.alloc_buf_failed++; - break; + continue; } first = NULL; - rx_ring->first_desc = ntc; - - if (eth_skb_pad(skb)) { - skb = NULL; - continue; - } total_rx_bytes += skb->len; total_rx_packets++; @@ -781,7 +675,9 @@ construct_skb: } rx_ring->next_to_clean = ntc; - 
entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring); + rx_ring->xsk = (struct libeth_xdp_buff *)first; + + entries_to_alloc = ICE_DESC_UNUSED(rx_ring); if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, entries_to_alloc); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 600cbeeaa203..5275fcedc9e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -22,7 +22,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool); -int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); +int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present); void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid); void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector, @@ -77,8 +77,8 @@ static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { } static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { } static inline int -ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi, - bool __always_unused zc) +ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, + bool __always_unused pool_present) { return 0; } diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c index 370f6ec2a374..7928f4e8e788 100644 --- a/drivers/net/ethernet/intel/ice/virt/queues.c +++ b/drivers/net/ethernet/intel/ice/virt/queues.c @@ -842,18 +842,17 @@ int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) goto error_param; - ring->rx_buf_len = qpi->rxq.databuffer_size; if (qpi->rxq.max_pkt_size > max_frame_size || qpi->rxq.max_pkt_size < 64) goto error_param; - ring->max_frame = qpi->rxq.max_pkt_size; + vsi->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is * not expected to account for it in the MTU * calculation */ if (ice_vf_is_port_vlan_ena(vf)) - ring->max_frame += VLAN_HLEN; + vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index ca4da0c89979..50fa7be0c00d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -735,12 +735,10 @@ static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter) #define IDPF_CAP_RSS (\ VIRTCHNL2_FLOW_IPV4_TCP |\ - VIRTCHNL2_FLOW_IPV4_TCP |\ VIRTCHNL2_FLOW_IPV4_UDP |\ VIRTCHNL2_FLOW_IPV4_SCTP |\ VIRTCHNL2_FLOW_IPV4_OTHER |\ VIRTCHNL2_FLOW_IPV6_TCP |\ - VIRTCHNL2_FLOW_IPV6_TCP |\ VIRTCHNL2_FLOW_IPV6_UDP |\ VIRTCHNL2_FLOW_IPV6_SCTP |\ VIRTCHNL2_FLOW_IPV6_OTHER) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 61dfcd8cb370..ac57212ab02b 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1235,7 +1235,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, spin_lock_bh(&hw->mbx_lock); if (hw->mac.ops.set_vfta(hw, vid, true)) { - dev_warn(&adapter->pdev->dev, "Vlan id %d\n is not added", vid); + dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid); 
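Stepping back to the zero-copy receive path at the top of this hunk: the refill condition entries_to_alloc > ICE_RING_QUARTER(rx_ring) batches buffer allocation so the ring is only replenished once at least a quarter of it has drained. The descriptor arithmetic is the standard one-slot-open ring formula; a self-contained sketch follows (plain C, names local to this example rather than the driver's macros):

#include <stdio.h>

/* Free slots in a ring with producer next_to_use (ntu) and consumer
 * next_to_clean (ntc); one slot stays empty so that a full ring and
 * an empty ring remain distinguishable.
 */
static unsigned int ring_unused(unsigned int count, unsigned int ntc,
				unsigned int ntu)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	unsigned int count = 512, ntc = 100, ntu = 490;
	unsigned int unused = ring_unused(count, ntc, ntu);

	/* 512 + 100 - 490 - 1 = 121 free slots; 121 <= 512/4 = 128,
	 * so a quarter-threshold refill would not trigger yet.
	 */
	printf("unused=%u refill=%s\n", unused,
	       unused > count / 4 ? "yes" : "no");
	return 0;
}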
spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index d5b1b974b4a3..3069b583fd81 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -198,7 +198,7 @@ static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, * @hw: pointer to hardware structure * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by - * previous proc_autoc_read_82599. + * previous prot_autoc_read_82599. * * This part (82599) may need to hold a the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. @@ -1622,7 +1622,7 @@ int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, break; } - /* store source and destination IP masks (big-enian) */ + /* store source and destination IP masks (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 170a29d162c6..a1d04914fbbc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -318,7 +318,7 @@ static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. Also assign queues based on DCB * priorities and map accordingly.. * @@ -492,7 +492,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices * @adapter: board private structure to initialize * - * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues * and VM pools where appropriate. If RSS is available, then also try and * enable RSS and map accordingly. 
* diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index f521af575e9b..c866cfd144c7 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -458,6 +458,5 @@ void wil_pm_runtime_put(struct wil6210_priv *wil) { struct device *dev = wil_to_dev(wil); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index b264ed0af923..65d0f805459c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -24,6 +24,7 @@ #include <linux/crc-ccitt.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/nvmem-consumer.h> #include <linux/slab.h> #include "rt2x00.h" @@ -10962,6 +10963,36 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) } EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); +int rt2800_read_eeprom_nvmem(struct rt2x00_dev *rt2x00dev) +{ + struct device_node *np = rt2x00dev->dev->of_node; + unsigned int len = rt2x00dev->ops->eeprom_size; + struct nvmem_cell *cell; + const void *data; + size_t retlen; + + cell = of_nvmem_cell_get(np, "eeprom"); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + data = nvmem_cell_read(cell, &retlen); + nvmem_cell_put(cell); + + if (IS_ERR(data)) + return PTR_ERR(data); + + if (retlen != len) { + dev_err(rt2x00dev->dev, "invalid eeprom size, required: 0x%04x\n", len); + kfree(data); + return -EINVAL; + } + + memcpy(rt2x00dev->eeprom, data, len); + kfree(data); + return 0; +} +EXPORT_SYMBOL_GPL(rt2800_read_eeprom_nvmem); + static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev) { u16 word; @@ -11011,7 +11042,9 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) * Start validation of the data that has been read. 
*/ mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); - rt2x00lib_set_mac_address(rt2x00dev, mac); + retval = rt2x00lib_set_mac_address(rt2x00dev, mac); + if (retval) + return retval; word = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); if (word == 0xffff) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 620a3d9872ce..a3c3a751f57e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -248,6 +248,8 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev); int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); +int rt2800_read_eeprom_nvmem(struct rt2x00_dev *rt2x00dev); + int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev); void rt2800_get_key_seq(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 14c45aba836f..4fa14bb573ad 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -278,6 +278,9 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) { int retval; + if (!rt2800_read_eeprom_nvmem(rt2x00dev)) + return 0; + if (rt2800pci_efuse_detect(rt2x00dev)) retval = rt2800pci_read_eeprom_efuse(rt2x00dev); else diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index 8f510a84e7f1..5c29201b34c8 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c @@ -92,8 +92,12 @@ static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev, static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev) { - void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); + void __iomem *base_addr; + if (!rt2800_read_eeprom_nvmem(rt2x00dev)) + return 0; + + base_addr = ioremap(0x1F040000, EEPROM_SIZE); if (!base_addr) return -ENOMEM; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 09b9d1f9f793..665887e9b118 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1427,7 +1427,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, */ u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif); -void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr); +int rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr); /* * Interrupt context handlers. 
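One pattern above is worth pausing on: both rt2800pci_read_eeprom() and rt2800soc_read_eeprom() now try rt2800_read_eeprom_nvmem() first and fall back to efuse or a raw ioremap() only when no "eeprom" cell is described in the device tree, in which case of_nvmem_cell_get() returns an error such as -ENOENT. The consumer side generalizes to any driver that wants DT-provided calibration blobs; a hedged sketch, where the cell name and the exact-length check are per-binding choices rather than anything the nvmem API mandates:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Copy a fixed-size blob from an "eeprom" nvmem cell into buf.
 * Returns 0 on success, -EINVAL on a size mismatch, or the error
 * from the nvmem layer so callers can fall back to other sources.
 */
static int read_eeprom_cell(struct device *dev, void *buf, size_t len)
{
	struct nvmem_cell *cell;
	size_t retlen;
	void *data;

	cell = of_nvmem_cell_get(dev->of_node, "eeprom");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	data = nvmem_cell_read(cell, &retlen);
	nvmem_cell_put(cell);
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (retlen != len) {
		kfree(data);
		return -EINVAL;
	}

	memcpy(buf, data, len);
	kfree(data);	/* nvmem_cell_read() returns a kmalloc'd copy */
	return 0;
}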
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index f8a6f9c968a1..778a478ab53a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -988,14 +988,20 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry, entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; } -void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) +int rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) { - of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); + int ret; + + ret = of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); + if (ret == -EPROBE_DEFER) + return ret; if (!is_valid_ether_addr(eeprom_mac_addr)) { eth_random_addr(eeprom_mac_addr); rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr); } + + return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address); diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 80fbf740fe6d..ac756318e8ea 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -272,7 +272,6 @@ static ssize_t radar_detection_write(struct file *file, if (ret < 0) count = ret; - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -312,7 +311,6 @@ static ssize_t dynamic_fw_traces_write(struct file *file, if (ret < 0) count = ret; - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -374,7 +372,6 @@ static ssize_t radar_debug_mode_write(struct file *file, wl->radar_debug_mode, 0); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index fa3a3f71dd15..9d73ba933a16 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -213,7 +213,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl, } while (!event); out: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); free_vector: kfree(events_vector); diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index eb3d3f0e0b4d..bbfd2725215b 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -63,7 +63,6 @@ void wl1271_debugfs_update_stats(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -113,7 +112,6 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value, chip_op = arg; chip_op(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); } @@ -287,7 +285,6 @@ static ssize_t dynamic_ps_timeout_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -357,7 +354,6 @@ static ssize_t forced_ps_write(struct file *file, wl1271_ps_set_mode(wl, wlvif, ps_mode); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -830,7 +826,6 @@ static ssize_t rx_streaming_interval_write(struct file *file, wl1271_recalc_rx_streaming(wl, wlvif); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -886,7 +881,6 @@ static ssize_t rx_streaming_always_write(struct file 
*file, wl1271_recalc_rx_streaming(wl, wlvif); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -934,7 +928,6 @@ static ssize_t beacon_filtering_write(struct file *file, ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -1015,7 +1008,6 @@ static ssize_t sleep_auth_write(struct file *file, goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -1090,7 +1082,6 @@ read_err: goto part_err; part_err: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); skip_read: @@ -1172,7 +1163,6 @@ write_err: goto part_err; part_err: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); skip_write: @@ -1247,7 +1237,6 @@ static ssize_t fw_logger_write(struct file *file, ret = wl12xx_cmd_config_fwlog(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 6116a8522d96..12f0167d7380 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -154,7 +154,6 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work) jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -181,7 +180,6 @@ static void wl1271_rx_streaming_disable_work(struct work_struct *work) goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -234,7 +232,6 @@ static void wlcore_rc_update_work(struct work_struct *work) } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -711,7 +708,6 @@ static int wlcore_irq_locked(struct wl1271 *wl) } err_ret: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -1047,7 +1043,6 @@ static void wl1271_recovery_work(struct work_struct *work) } wlcore_op_stop_locked(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); ieee80211_restart_hw(wl->hw); @@ -1943,7 +1938,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -2131,7 +2125,6 @@ static void wlcore_channel_switch_work(struct work_struct *work) wl12xx_cmd_stop_channel_switch(wl, wlvif); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -2201,7 +2194,6 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work) /* cancel the ROC if active */ wlcore_update_inconn_sta(wl, wlvif, NULL, false); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -2694,7 +2686,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, else wl->sta_count++; out: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out_unlock: mutex_unlock(&wl->mutex); @@ -2774,7 +2765,6 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, } } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); } deinit: @@ -3200,7 +3190,6 @@ static int wl1271_op_config(struct ieee80211_hw *hw, int 
radio_idx, u32 changed) } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -3315,7 +3304,6 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, */ out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -3531,7 +3519,6 @@ static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out_wake_queues: @@ -3695,7 +3682,6 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out_unlock: @@ -3724,7 +3710,6 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3772,7 +3757,6 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, ret = wlcore_scan(hw->priv, vif, ssid, len, req); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3823,7 +3807,6 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, ieee80211_scan_completed(wl->hw, &info); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3860,7 +3843,6 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, wl->sched_vif = wlvif; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3887,7 +3869,6 @@ static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, wl->ops->sched_scan_stop(wl, wlvif); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -3916,7 +3897,6 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, if (ret < 0) wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -3948,7 +3928,6 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, if (ret < 0) wl1271_warning("set rts threshold failed: %d", ret); } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -4714,7 +4693,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, else wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -4779,7 +4757,6 @@ static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, } } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4828,7 +4805,6 @@ static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = true; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4871,7 +4847,6 @@ static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, wlvif->radar_enabled = false; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4941,7 +4916,6 @@ wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, goto out_sleep; } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -4995,7 +4969,6 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 0, 0); out_sleep: - 
pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5029,7 +5002,6 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, goto out_sleep; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5342,7 +5314,6 @@ static int wl12xx_op_sta_state(struct ieee80211_hw *hw, ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5467,7 +5438,6 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5511,7 +5481,6 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); ret = wl1271_acx_sta_rate_policies(wl, wlvif); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); } out: @@ -5566,7 +5535,6 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: @@ -5645,7 +5613,6 @@ static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5699,7 +5666,6 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, msecs_to_jiffies(duration)); out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5748,7 +5714,6 @@ static int wlcore_roc_completed(struct wl1271 *wl) ret = __wlcore_roc_completed(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -5839,7 +5804,6 @@ static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, sinfo->signal = rssi_dbm; out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index b414305acc32..f6dc54c1dbad 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -69,7 +69,6 @@ void wl1271_scan_complete_work(struct work_struct *work) wlcore_cmd_regdomain_config_locked(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); ieee80211_scan_completed(wl->hw, &info); diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index 65ca5dc569a0..5ab6c1683675 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -58,7 +58,6 @@ static ssize_t bt_coex_state_store(struct device *dev, goto out; wl1271_acx_sg_enable(wl, wl->sg_enabled); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c index fc8ea58bc165..7c0cb1b7fef0 100644 --- a/drivers/net/wireless/ti/wlcore/testmode.c +++ b/drivers/net/wireless/ti/wlcore/testmode.c @@ -127,7 +127,6 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) } out_sleep: - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -192,7 +191,6 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) out_free: kfree(cmd); out_sleep: - 
pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index 464587d16ab2..f76087be2f75 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -863,7 +863,6 @@ void wl1271_tx_work(struct work_struct *work) goto out; } - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index e4269e2b0098..5bb9eb300f97 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -60,7 +60,6 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, ret = wlcore_smart_config_start(wl, nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -92,7 +91,6 @@ wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy, ret = wlcore_smart_config_stop(wl); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); @@ -140,7 +138,6 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy, nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]), nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY])); - pm_runtime_mark_last_busy(wl->dev); pm_runtime_put_autosuspend(wl->dev); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 9f856042a67a..cd84dfd5b47e 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -5793,6 +5793,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, NO_AUTO_VIF); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PUNCT); for (i = 0; i < ARRAY_SIZE(data->link_data); i++) { hrtimer_setup(&data->link_data[i].beacon_timer, mac80211_hwsim_beacon, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 781624f5913a..53490eb04e87 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1015,6 +1015,7 @@ const struct cfg80211_chan_def * cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1, const struct cfg80211_chan_def *chandef2); + /** * nl80211_chan_width_to_mhz - get the channel width in MHz * @chan_width: the channel width from &enum nl80211_chan_width @@ -5683,9 +5684,13 @@ struct wiphy_iftype_akm_suites { * * @rts_threshold: RTS threshold (dot11RTSThreshold); * -1 (default) = RTS/CTS disabled + * @radio_debugfsdir: Pointer to debugfs directory containing the radio- + * specific parameters. 
+ * NULL (default) = Debugfs directory not created */ struct wiphy_radio_cfg { u32 rts_threshold; + struct dentry *radio_debugfsdir; }; /** @@ -6883,6 +6888,19 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan) } /** + * ieee80211_radio_freq_range_valid - Check if the radio supports the + * specified frequency range + * + * @radio: wiphy radio + * @freq: the frequency (in KHz) to be queried + * @width: the bandwidth (in KHz) to be queried + * + * Return: whether or not the given frequency range is valid for the given radio + */ +bool ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, + u32 freq, u32 width); + +/** * cfg80211_radio_chandef_valid - Check if the radio supports the chandef * * @radio: wiphy radio diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 813e163ce27c..c60867e7e43c 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2017 Intel Deutschland GmbH - * Copyright (c) 2018-2019, 2021-2022 Intel Corporation + * Copyright (c) 2018-2019, 2021-2022, 2025 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -202,6 +202,24 @@ enum ieee80211_radiotap_vht_coding { IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08, }; +enum ieee80211_radiotap_vht_bandwidth { + /* Note: more values are defined but can't really be used */ + IEEE80211_RADIOTAP_VHT_BW_20 = 0, + IEEE80211_RADIOTAP_VHT_BW_40 = 1, + IEEE80211_RADIOTAP_VHT_BW_80 = 4, + IEEE80211_RADIOTAP_VHT_BW_160 = 11, +}; + +struct ieee80211_radiotap_vht { + __le16 known; + u8 flags; + u8 bandwidth; + u8 mcs_nss[4]; + u8 coding; + u8 group_id; + __le16 partial_aid; +} __packed; + /* for IEEE80211_RADIOTAP_TIMESTAMP */ enum ieee80211_radiotap_timestamp_unit_spos { IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a55085cf4ec4..c326243e1f01 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1529,6 +1529,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * known the frame shouldn't be reported. * @RX_FLAG_8023: the frame has an 802.3 header (decap offload performed by * hardware or driver) + * @RX_FLAG_RADIOTAP_VHT: VHT radiotap data is present */ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -1564,6 +1565,7 @@ enum mac80211_rx_flags { RX_FLAG_RADIOTAP_LSIG = BIT(28), RX_FLAG_NO_PSDU = BIT(29), RX_FLAG_8023 = BIT(30), + RX_FLAG_RADIOTAP_VHT = BIT(31), }; /** diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h index fc752a50f91b..6ceb12baec24 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -24,5 +24,7 @@ struct netns_smc { int sysctl_rmem; int sysctl_max_links_per_lgr; int sysctl_max_conns_per_lgr; + unsigned int sysctl_smcr_max_send_wr; + unsigned int sysctl_smcr_max_recv_wr; }; #endif diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3b5827ea438e..025210d50405 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2508,6 +2508,16 @@ static void ieee80211_csa_switch_work(struct wiphy *wiphy, link->u.mgd.csa.waiting_bcn = true; + /* + * The next beacon really should always be different, so this should + * have no effect whatsoever. However, some APs (we observed this in + * an Asus AXE11000), the beacon after the CSA might be identical to + * the last beacon on the old channel - in this case we'd ignore it. 
+ * Resetting the CRC will lead us to handle it better (albeit with a + * disconnect, but clearly the AP is broken.) + */ + link->u.mgd.beacon_crc_valid = false; + /* apply new TPE restrictions immediately on the new channel */ if (link->u.mgd.csa.ap_chandef.chan->band == NL80211_BAND_6GHZ && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) { @@ -6610,8 +6620,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_link_data *link, * Response frame shall be set to the broadcast address [..]" * So, on 6GHz band we should also accept broadcast responses. */ - channel = ieee80211_get_channel(sdata->local->hw.wiphy, - rx_status->freq); + channel = ieee80211_get_channel_khz(sdata->local->hw.wiphy, + ieee80211_rx_status_to_khz(rx_status)); if (!channel) return; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6af43dfefdd6..80067ed1da2f 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -59,7 +59,8 @@ static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END | RX_FLAG_RADIOTAP_LSIG | RX_FLAG_RADIOTAP_HE_MU | - RX_FLAG_RADIOTAP_HE); + RX_FLAG_RADIOTAP_HE | + RX_FLAG_RADIOTAP_VHT); hdr = (void *)skb->data; fc = hdr->frame_control; @@ -151,8 +152,10 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, } if (status->encoding == RX_ENC_VHT) { + /* Included even if RX_FLAG_RADIOTAP_VHT is not set */ len = ALIGN(len, 2); len += 12; + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_vht) != 12); } if (local->hw.radiotap_timestamp.units_pos >= 0) { @@ -195,6 +198,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, * The position to look at depends on the existence (or non- * existence) of other elements, so take that into account... */ + if (status->flag & RX_FLAG_RADIOTAP_VHT) + tlv_offset += + sizeof(struct ieee80211_radiotap_vht); if (status->flag & RX_FLAG_RADIOTAP_HE) tlv_offset += sizeof(struct ieee80211_radiotap_he); @@ -319,10 +325,17 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, u32 tlvs_len = 0; int mpdulen, chain; unsigned long chains = status->chains; + struct ieee80211_radiotap_vht vht = {}; struct ieee80211_radiotap_he he = {}; struct ieee80211_radiotap_he_mu he_mu = {}; struct ieee80211_radiotap_lsig lsig = {}; + if (status->flag & RX_FLAG_RADIOTAP_VHT) { + vht = *(struct ieee80211_radiotap_vht *)skb->data; + skb_pull(skb, sizeof(vht)); + WARN_ON_ONCE(status->encoding != RX_ENC_VHT); + } + if (status->flag & RX_FLAG_RADIOTAP_HE) { he = *(struct ieee80211_radiotap_he *)skb->data; skb_pull(skb, sizeof(he)); @@ -530,45 +543,61 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } if (status->encoding == RX_ENC_VHT) { - u16 known = local->hw.radiotap_vht_details; + u16 fill = local->hw.radiotap_vht_details; - rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); - put_unaligned_le16(known, pos); - pos += 2; - /* flags */ - if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) - *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; + /* Leave driver filled fields alone */ + fill &= ~le16_to_cpu(vht.known); + vht.known |= cpu_to_le16(fill); + + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_GI && + status->enc_flags & RX_ENC_FLAG_SHORT_GI) + vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; /* in VHT, STBC is binary */ - if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) - *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; - if (status->enc_flags & RX_ENC_FLAG_BF) + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_STBC && + status->enc_flags & RX_ENC_FLAG_STBC_MASK) + vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 
+ if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED && + status->enc_flags & RX_ENC_FLAG_BF) *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; - pos++; - /* bandwidth */ - switch (status->bw) { - case RATE_INFO_BW_80: - *pos++ = 4; - break; - case RATE_INFO_BW_160: - *pos++ = 11; - break; - case RATE_INFO_BW_40: - *pos++ = 1; - break; - default: - *pos++ = 0; + + if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { + switch (status->bw) { + case RATE_INFO_BW_40: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_40; + break; + case RATE_INFO_BW_80: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_80; + break; + case RATE_INFO_BW_160: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_160; + break; + default: + vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_20; + break; + } } - /* MCS/NSS */ - *pos = (status->rate_idx << 4) | status->nss; - pos += 4; - /* coding field */ - if (status->enc_flags & RX_ENC_FLAG_LDPC) - *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; - pos++; - /* group ID */ - pos++; - /* partial_aid */ - pos += 2; + + /* + * If the driver filled in mcs_nss[0], then do not touch it. + * + * Otherwise, put some information about MCS/NSS into the + * user 0 field. Note that this is not technically correct for + * an MU frame as we might have decoded a different user. + */ + if (!vht.mcs_nss[0]) { + vht.mcs_nss[0] = (status->rate_idx << 4) | status->nss; + + /* coding field */ + if (status->enc_flags & RX_ENC_FLAG_LDPC) + vht.coding |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; + } + + /* ensure 2 byte alignment */ + while ((pos - (u8 *)rthdr) & 1) + pos++; + rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); + memcpy(pos, &vht, sizeof(vht)); + pos += sizeof(vht); } if (local->hw.radiotap_timestamp.units_pos >= 0) { @@ -763,6 +792,51 @@ ieee80211_make_monitor_skb(struct ieee80211_local *local, return skb; } +static bool +ieee80211_validate_monitor_radio(struct ieee80211_sub_if_data *sdata, + struct ieee80211_local *local, + struct ieee80211_rx_status *status) +{ + struct wiphy *wiphy = local->hw.wiphy; + int i, freq, bw; + + if (!wiphy->n_radio) + return true; + + switch (status->bw) { + case RATE_INFO_BW_20: + bw = 20000; + break; + case RATE_INFO_BW_40: + bw = 40000; + break; + case RATE_INFO_BW_80: + bw = 80000; + break; + case RATE_INFO_BW_160: + bw = 160000; + break; + case RATE_INFO_BW_320: + bw = 320000; + break; + default: + return false; + } + + freq = MHZ_TO_KHZ(status->freq); + + for (i = 0; i < wiphy->n_radio; i++) { + if (!(sdata->wdev.radio_mask & BIT(i))) + continue; + + if (!ieee80211_radio_freq_range_valid(&wiphy->radio[i], freq, bw)) + continue; + + return true; + } + return false; +} + /* * This function copies a received frame to all monitor interfaces and * returns a cleaned-up SKB that no longer includes the FCS nor the @@ -789,6 +863,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, return NULL; } + if (status->flag & RX_FLAG_RADIOTAP_VHT) + rtap_space += sizeof(struct ieee80211_radiotap_vht); + if (status->flag & RX_FLAG_RADIOTAP_HE) rtap_space += sizeof(struct ieee80211_radiotap_he); @@ -855,6 +932,10 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, chandef->chan->center_freq != status->freq) continue; + if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) && + !ieee80211_validate_monitor_radio(sdata, local, status)) + continue; + if (!prev_sdata) { prev_sdata = sdata; continue; @@ -3521,8 +3602,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.category) { case WLAN_CATEGORY_HT: - 
/* reject HT action frames from stations not supporting HT */ - if (!rx->link_sta->pub->ht_cap.ht_supported) + /* reject HT action frames from stations not supporting HT * or not HE Capable */ + if (!rx->link_sta->pub->ht_cap.ht_supported && + !rx->link_sta->pub->he_cap.has_he) goto invalid; if (sdata->vif.type != NL80211_IFTYPE_STATION && @@ -4903,6 +4987,11 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, /* after this point, don't punt to the slowpath! */ + if (fast_rx->uses_rss) + stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); + else + stats = &rx->link_sta->rx_stats; + if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && pskb_trim(skb, skb->len - fast_rx->icv_len)) goto drop; @@ -4937,6 +5026,8 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); switch (res) { case RX_QUEUED: + stats->last_rx = jiffies; + stats->last_rate = sta_stats_encode_rate(status); return true; case RX_CONTINUE: break; @@ -4950,11 +5041,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, drop: dev_kfree_skb(skb); - if (fast_rx->uses_rss) - stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); - else - stats = &rx->link_sta->rx_stats; - stats->dropped++; return true; } diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index be0c2da83d2b..e4eabc83719e 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -810,6 +810,8 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, lnk->clearing = 0; lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu; lnk->link_id = smcr_next_link_id(lgr); + lnk->max_send_wr = lgr->max_send_wr; + lnk->max_recv_wr = lgr->max_recv_wr; lnk->lgr = lgr; smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */ lnk->link_idx = link_idx; @@ -836,27 +838,39 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, rc = smc_llc_link_init(lnk); if (rc) goto out; - rc = smc_wr_alloc_link_mem(lnk); - if (rc) - goto clear_llc_lnk; rc = smc_ib_create_protection_domain(lnk); if (rc) - goto free_link_mem; - rc = smc_ib_create_queue_pair(lnk); - if (rc) - goto dealloc_pd; + goto clear_llc_lnk; + do { + rc = smc_ib_create_queue_pair(lnk); + if (rc) + goto dealloc_pd; + rc = smc_wr_alloc_link_mem(lnk); + if (!rc) + break; + else if (rc != -ENOMEM) /* give up */ + goto destroy_qp; + /* retry with smaller ... */ + lnk->max_send_wr /= 2; + lnk->max_recv_wr /= 2; + /* ... unless dropping below the old hard-coded defaults */ + if (lnk->max_send_wr < 16 || lnk->max_recv_wr < 48) + goto destroy_qp; + smc_ib_destroy_queue_pair(lnk); + } while (1); + rc = smc_wr_create_link(lnk); if (rc) - goto destroy_qp; + goto free_link_mem; lnk->state = SMC_LNK_ACTIVATING; return 0; +free_link_mem: + smc_wr_free_link_mem(lnk); destroy_qp: smc_ib_destroy_queue_pair(lnk); dealloc_pd: smc_ib_dealloc_protection_domain(lnk); -free_link_mem: - smc_wr_free_link_mem(lnk); clear_llc_lnk: smc_llc_link_clear(lnk, false); out: diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index a5a78cbff341..5c18f08a4c8a 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -34,6 +34,8 @@ * distributions may modify it to a value between * 16-255 as needed.
*/ +#define SMCR_MAX_SEND_WR_DEF 16 /* Default number of work requests per send queue */ +#define SMCR_MAX_RECV_WR_DEF 48 /* Default number of work requests per recv queue */ struct smc_lgr_list { /* list of link group definition */ struct list_head list; @@ -173,6 +175,8 @@ struct smc_link { struct completion llc_testlink_resp; /* wait for rx of testlink */ int llc_testlink_time; /* testlink interval */ atomic_t conn_cnt; /* connections on this link */ + u16 max_send_wr; + u16 max_recv_wr; }; /* For now we just allow one parallel link per link group. The SMC protocol @@ -366,6 +370,10 @@ struct smc_link_group { /* max conn can be assigned to lgr */ u8 max_links; /* max links can be added in lgr */ + u16 max_send_wr; + /* number of WR buffers on send */ + u16 max_recv_wr; + /* number of WR buffers on recv */ }; struct { /* SMC-D */ struct smcd_gid peer_gid; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 0052f02756eb..1154907c5c05 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -669,11 +669,6 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) .recv_cq = lnk->smcibdev->roce_cq_recv, .srq = NULL, .cap = { - /* include unsolicited rdma_writes as well, - * there are max. 2 RDMA_WRITE per 1 WR_SEND - */ - .max_send_wr = SMC_WR_BUF_CNT * 3, - .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, .max_recv_sge = lnk->wr_rx_sge_cnt, .max_inline_data = 0, @@ -683,6 +678,11 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) }; int rc; + /* include unsolicited rdma_writes as well, + * there are max. 2 RDMA_WRITE per 1 WR_SEND + */ + qp_attr.cap.max_send_wr = 3 * lnk->lgr->max_send_wr; + qp_attr.cap.max_recv_wr = lnk->lgr->max_recv_wr; lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr); rc = PTR_ERR_OR_ZERO(lnk->roce_qp); if (IS_ERR(lnk->roce_qp)) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index f865c58c3aa7..f5d5eb617526 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -2157,6 +2157,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) init_waitqueue_head(&lgr->llc_msg_waiter); init_rwsem(&lgr->llc_conf_mutex); lgr->llc_testlink_time = READ_ONCE(net->smc.sysctl_smcr_testlink_time); + lgr->max_send_wr = (u16)(READ_ONCE(net->smc.sysctl_smcr_max_send_wr)); + lgr->max_recv_wr = (u16)(READ_ONCE(net->smc.sysctl_smcr_max_recv_wr)); } /* called after lgr was removed from lgr_list */ diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c index 2fab6456f765..7b2471904d04 100644 --- a/net/smc/smc_sysctl.c +++ b/net/smc/smc_sysctl.c @@ -29,6 +29,8 @@ static int links_per_lgr_min = SMC_LINKS_ADD_LNK_MIN; static int links_per_lgr_max = SMC_LINKS_ADD_LNK_MAX; static int conns_per_lgr_min = SMC_CONN_PER_LGR_MIN; static int conns_per_lgr_max = SMC_CONN_PER_LGR_MAX; +static unsigned int smcr_max_wr_min = 2; +static unsigned int smcr_max_wr_max = 2048; static struct ctl_table smc_table[] = { { @@ -99,6 +101,24 @@ static struct ctl_table smc_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "smcr_max_send_wr", + .data = &init_net.smc.sysctl_smcr_max_send_wr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &smcr_max_wr_min, + .extra2 = &smcr_max_wr_max, + }, + { + .procname = "smcr_max_recv_wr", + .data = &init_net.smc.sysctl_smcr_max_recv_wr, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &smcr_max_wr_min, + .extra2 = &smcr_max_wr_max, + }, }; int __net_init smc_sysctl_net_init(struct net *net) @@ -130,6 +150,8 @@ int 
__net_init smc_sysctl_net_init(struct net *net) WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init); net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER; net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER; + net->smc.sysctl_smcr_max_send_wr = SMCR_MAX_SEND_WR_DEF; + net->smc.sysctl_smcr_max_recv_wr = SMCR_MAX_RECV_WR_DEF; /* disable handshake limitation by default */ net->smc.limit_smc_hs = 0; diff --git a/net/smc/smc_sysctl.h b/net/smc/smc_sysctl.h index eb2465ae1e15..8538915af7af 100644 --- a/net/smc/smc_sysctl.h +++ b/net/smc/smc_sysctl.h @@ -25,6 +25,8 @@ static inline int smc_sysctl_net_init(struct net *net) net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE; net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER; net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER; + net->smc.sysctl_smcr_max_send_wr = SMCR_MAX_SEND_WR_DEF; + net->smc.sysctl_smcr_max_recv_wr = SMCR_MAX_RECV_WR_DEF; return 0; } diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index b04a21b8c511..5feafa98ab1a 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -547,9 +547,9 @@ void smc_wr_remember_qp_attr(struct smc_link *lnk) IB_QP_DEST_QPN, &init_attr); - lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT, + lnk->wr_tx_cnt = min_t(size_t, lnk->max_send_wr, lnk->qp_attr.cap.max_send_wr); - lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3, + lnk->wr_rx_cnt = min_t(size_t, lnk->max_recv_wr, lnk->qp_attr.cap.max_recv_wr); } @@ -741,50 +741,51 @@ int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr) int smc_wr_alloc_link_mem(struct smc_link *link) { /* allocate link related memory */ - link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); + link->wr_tx_bufs = kcalloc(link->max_send_wr, + SMC_WR_BUF_SIZE, GFP_KERNEL); if (!link->wr_tx_bufs) goto no_mem; - link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, link->wr_rx_buflen, + link->wr_rx_bufs = kcalloc(link->max_recv_wr, link->wr_rx_buflen, GFP_KERNEL); if (!link->wr_rx_bufs) goto no_mem_wr_tx_bufs; - link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]), - GFP_KERNEL); + link->wr_tx_ibs = kcalloc(link->max_send_wr, + sizeof(link->wr_tx_ibs[0]), GFP_KERNEL); if (!link->wr_tx_ibs) goto no_mem_wr_rx_bufs; - link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3, + link->wr_rx_ibs = kcalloc(link->max_recv_wr, sizeof(link->wr_rx_ibs[0]), GFP_KERNEL); if (!link->wr_rx_ibs) goto no_mem_wr_tx_ibs; - link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_rdmas = kcalloc(link->max_send_wr, sizeof(link->wr_tx_rdmas[0]), GFP_KERNEL); if (!link->wr_tx_rdmas) goto no_mem_wr_rx_ibs; - link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_rdma_sges = kcalloc(link->max_send_wr, sizeof(link->wr_tx_rdma_sges[0]), GFP_KERNEL); if (!link->wr_tx_rdma_sges) goto no_mem_wr_tx_rdmas; - link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), + link->wr_tx_sges = kcalloc(link->max_send_wr, sizeof(link->wr_tx_sges[0]), GFP_KERNEL); if (!link->wr_tx_sges) goto no_mem_wr_tx_rdma_sges; - link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, + link->wr_rx_sges = kcalloc(link->max_recv_wr, sizeof(link->wr_rx_sges[0]) * link->wr_rx_sge_cnt, GFP_KERNEL); if (!link->wr_rx_sges) goto no_mem_wr_tx_sges; - link->wr_tx_mask = bitmap_zalloc(SMC_WR_BUF_CNT, GFP_KERNEL); + link->wr_tx_mask = bitmap_zalloc(link->max_send_wr, GFP_KERNEL); if (!link->wr_tx_mask) goto no_mem_wr_rx_sges; - link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT, + link->wr_tx_pends = kcalloc(link->max_send_wr, sizeof(link->wr_tx_pends[0]), GFP_KERNEL); if 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 797f9f2004a6..f3568eb5e592 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -34,6 +34,9 @@
 /* name for sysfs, %d is appended */
 #define PHY_NAME "phy"
 
+/* maximum length of radio debugfs directory name */
+#define RADIO_DEBUGFSDIR_MAX_LEN 8
+
 MODULE_AUTHOR("Johannes Berg");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("wireless configuration support");
@@ -1042,6 +1045,18 @@ int wiphy_register(struct wiphy *wiphy)
 	/* add to debugfs */
 	rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy),
 						    ieee80211_debugfs_dir);
+	if (wiphy->n_radio > 0) {
+		int idx;
+		char radio_name[RADIO_DEBUGFSDIR_MAX_LEN];
+
+		for (idx = 0; idx < wiphy->n_radio; idx++) {
+			scnprintf(radio_name, sizeof(radio_name), "radio%d",
+				  idx);
+			wiphy->radio_cfg[idx].radio_debugfsdir =
+				debugfs_create_dir(radio_name,
+						   rdev->wiphy.debugfsdir);
+		}
+	}
 	cfg80211_debugfs_rdev_add(rdev);
 	nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
 
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 40e49074e2ee..f9e7fff1ef25 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -29,6 +29,24 @@ static const struct file_operations name## _ops = {	\
 	.llseek = generic_file_llseek,					\
 }
 
+#define DEBUGFS_RADIO_READONLY_FILE(name, buflen, fmt, value...)	\
+static ssize_t name## _read(struct file *file, char __user *userbuf,	\
+			    size_t count, loff_t *ppos)			\
+{									\
+	struct wiphy_radio_cfg *radio_cfg = file->private_data;	\
+	char buf[buflen];						\
+	int res;							\
+									\
+	res = scnprintf(buf, buflen, fmt "\n", ##value);		\
+	return simple_read_from_buffer(userbuf, count, ppos, buf, res);	\
+}									\
+									\
+static const struct file_operations name## _ops = {			\
+	.read = name## _read,						\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
 DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
 		      wiphy->rts_threshold);
 DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
@@ -38,6 +56,9 @@ DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
 DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
 		      wiphy->retry_long);
 
+DEBUGFS_RADIO_READONLY_FILE(radio_rts_threshold, 20, "%d",
+			    radio_cfg->rts_threshold);
+
 static int ht_print_chan(struct ieee80211_channel *chan,
 			 char *buf, int buf_size, int offset)
 {
@@ -100,15 +121,27 @@ static const struct file_operations ht40allow_map_ops = {
 #define DEBUGFS_ADD(name)						\
 	debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops)
 
+#define DEBUGFS_RADIO_ADD(name, radio_idx)				\
+	debugfs_create_file(#name, 0444, radiod,			\
+			    &rdev->wiphy.radio_cfg[radio_idx],		\
+			    &name## _ops)
+
 void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
 {
 	struct dentry *phyd = rdev->wiphy.debugfsdir;
+	struct dentry *radiod;
+	u8 i;
 
 	DEBUGFS_ADD(rts_threshold);
 	DEBUGFS_ADD(fragmentation_threshold);
 	DEBUGFS_ADD(short_retry_limit);
 	DEBUGFS_ADD(long_retry_limit);
 	DEBUGFS_ADD(ht40allow_map);
+
+	for (i = 0; i < rdev->wiphy.n_radio; i++) {
+		radiod = rdev->wiphy.radio_cfg[i].radio_debugfsdir;
+		DEBUGFS_RADIO_ADD(radio_rts_threshold, i);
+	}
 }
 
 struct debugfs_read_work {
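After the two hunks above, each radio of a multi-radio wiphy gets a radio%d subdirectory holding its own radio_rts_threshold file. A sketch for reading them back from userspace; the phy name and the /sys/kernel/debug mount point are assumptions, not something this patch guarantees:

    from pathlib import Path

    def radio_rts_thresholds(phy: str = "phy0"):
        """Yield (radio directory name, radio_rts_threshold contents)."""
        base = Path("/sys/kernel/debug/ieee80211") / phy
        for d in sorted(base.glob("radio*")):
            yield d.name, (d / "radio_rts_threshold").read_text().strip()

    for name, val in radio_rts_thresholds():
        print(name, val)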
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 346dfd2bd987..ceca47cd9e25 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3544,6 +3544,9 @@ static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 		return -EINVAL;
 	}
 
+	if (cfg80211_chandef_is_s1g(chandef))
+		chandef->width = NL80211_CHAN_WIDTH_1;
+
 	if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
 		enum nl80211_channel_type chantype;
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 56724b33af04..97f40c6d1e9d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2942,9 +2942,8 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type)
 }
 EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa);
 
-static bool
-ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio,
-				 u32 freq, u32 width)
+bool ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio,
+				      u32 freq, u32 width)
 {
 	const struct wiphy_radio_freq_range *r;
 	int i;
@@ -2958,6 +2957,7 @@ ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio,
 
 	return false;
 }
+EXPORT_SYMBOL(ieee80211_radio_freq_range_valid);
 
 bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
 				  const struct cfg80211_chan_def *chandef)
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index 6e41635bd55a..68e0bb603a9d 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -22,6 +22,7 @@ TEST_PROGS := \
 	ping.py \
 	psp.py \
 	queues.py \
+	ring_reconfig.py \
 	shaper.py \
 	stats.py \
 	xdp.py \
diff --git a/tools/testing/selftests/drivers/net/netdevsim/Makefile b/tools/testing/selftests/drivers/net/netdevsim/Makefile
index daf51113c827..833abd8e6fdc 100644
--- a/tools/testing/selftests/drivers/net/netdevsim/Makefile
+++ b/tools/testing/selftests/drivers/net/netdevsim/Makefile
@@ -8,7 +8,6 @@ TEST_PROGS := \
 	ethtool-features.sh \
 	ethtool-fec.sh \
 	ethtool-pause.sh \
-	ethtool-ring.sh \
 	fib.sh \
 	fib_notifications.sh \
 	hw_stats_l3.sh \
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh
deleted file mode 100755
index c969559ffa7a..000000000000
--- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0-only
-
-source ethtool-common.sh
-
-function get_value {
-    local query="${SETTINGS_MAP[$1]}"
-
-    echo $(ethtool -g $NSIM_NETDEV | \
-        tail -n +$CURR_SETT_LINE | \
-        awk -F':' -v pattern="$query:" '$0 ~ pattern {gsub(/[\t ]/, "", $2); print $2}')
-}
-
-function update_current_settings {
-    for key in ${!SETTINGS_MAP[@]}; do
-        CURRENT_SETTINGS[$key]=$(get_value $key)
-    done
-    echo ${CURRENT_SETTINGS[@]}
-}
-
-if ! ethtool -h | grep -q set-ring >/dev/null; then
-    echo "SKIP: No --set-ring support in ethtool"
-    exit 4
-fi
-
-NSIM_NETDEV=$(make_netdev)
-
-set -o pipefail
-
-declare -A SETTINGS_MAP=(
-    ["rx"]="RX"
-    ["rx-mini"]="RX Mini"
-    ["rx-jumbo"]="RX Jumbo"
-    ["tx"]="TX"
-)
-
-declare -A EXPECTED_SETTINGS=(
-    ["rx"]=""
-    ["rx-mini"]=""
-    ["rx-jumbo"]=""
-    ["tx"]=""
-)
-
-declare -A CURRENT_SETTINGS=(
-    ["rx"]=""
-    ["rx-mini"]=""
-    ["rx-jumbo"]=""
-    ["tx"]=""
-)
-
-MAX_VALUE=$((RANDOM % $((2**32-1))))
-RING_MAX_LIST=$(ls $NSIM_DEV_DFS/ethtool/ring/)
-
-for ring_max_entry in $RING_MAX_LIST; do
-    echo $MAX_VALUE > $NSIM_DEV_DFS/ethtool/ring/$ring_max_entry
-done
-
-CURR_SETT_LINE=$(ethtool -g $NSIM_NETDEV | grep -i -m1 -n 'Current hardware settings' | cut -f1 -d:)
-
-# populate the expected settings map
-for key in ${!SETTINGS_MAP[@]}; do
-    EXPECTED_SETTINGS[$key]=$(get_value $key)
-done
-
-# test
-for key in ${!SETTINGS_MAP[@]}; do
-    value=$((RANDOM % $MAX_VALUE))
-
-    ethtool -G $NSIM_NETDEV "$key" "$value"
-
-    EXPECTED_SETTINGS[$key]="$value"
-    expected=${EXPECTED_SETTINGS[@]}
-    current=$(update_current_settings)
-
-    check $? "$current" "$expected"
-    set +x
-done
-
-if [ $num_errors -eq 0 ]; then
-    echo "PASSED all $((num_passes)) checks"
-    exit 0
-else
-    echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
-    exit 1
-fi
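The deleted script above exercised ring sizing through the ethtool CLI against netdevsim only; its replacement below drives the ethtool-netlink family directly and works on any driver. The heart of what it automates is repeated reconfiguration, roughly equivalent to this CLI-based sketch (interface name illustrative):

    import subprocess

    def set_ring(dev: str, param: str, val: int) -> bool:
        """Return True if 'ethtool -G' accepted the new ring size."""
        res = subprocess.run(["ethtool", "-G", dev, param, str(val)],
                             capture_output=True)
        return res.returncode == 0

    # Halve until the driver refuses, mirroring ringparam() in the new test
    val = 4096
    while val and set_ring("eth0", "rx", val // 2):
        val //= 2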
diff --git a/tools/testing/selftests/drivers/net/ring_reconfig.py b/tools/testing/selftests/drivers/net/ring_reconfig.py
new file mode 100755
index 000000000000..f9530a8b0856
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ring_reconfig.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Test channel and ring size configuration via ethtool (-L / -G).
+"""
+
+from lib.py import ksft_run, ksft_exit, ksft_pr
+from lib.py import ksft_eq
+from lib.py import NetDrvEpEnv, EthtoolFamily, GenerateTraffic
+from lib.py import defer, NlError
+
+
+def channels(cfg) -> None:
+    """
+    Twiddle channel counts in various combinations of parameters.
+    We only check that the driver adheres to the requested config
+    when a config is accepted, and that nothing crashes.
+    """
+    ehdr = {'header':{'dev-index': cfg.ifindex}}
+    chans = cfg.eth.channels_get(ehdr)
+
+    all_keys = ["rx", "tx", "combined"]
+    mixes = [{"combined"}, {"rx", "tx"}, {"rx", "combined"}, {"tx", "combined"},
+             {"rx", "tx", "combined"},]
+
+    # Get the set of keys that device actually supports
+    restore = {}
+    supported = set()
+    for key in all_keys:
+        if key + "-max" in chans:
+            supported.add(key)
+            restore |= {key + "-count": chans[key + "-count"]}
+
+    defer(cfg.eth.channels_set, ehdr | restore)
+
+    def test_config(config):
+        try:
+            cfg.eth.channels_set(ehdr | config)
+            get = cfg.eth.channels_get(ehdr)
+            for k, v in config.items():
+                ksft_eq(get.get(k, 0), v)
+        except NlError as e:
+            failed.append(mix)
+            ksft_pr("Can't set", config, e)
+        else:
+            ksft_pr("Okay", config)
+
+    failed = []
+    for mix in mixes:
+        if not mix.issubset(supported):
+            continue
+
+        # Set all the values in the mix to 1, other supported to 0
+        config = {}
+        for key in all_keys:
+            config[key + "-count"] = 1 if key in mix else 0
+        test_config(config)
+
+    for mix in mixes:
+        if not mix.issubset(supported):
+            continue
+        if mix in failed:
+            continue
+
+        # Set all the values in the mix to max, other supported to 0
+        config = {}
+        for key in all_keys:
+            config[key + "-count"] = chans[key + '-max'] if key in mix else 0
+        test_config(config)
+
+
+def _configure_min_ring_cnt(cfg) -> None:
+    """ Try to configure a single Rx/Tx ring. """
+    ehdr = {'header':{'dev-index': cfg.ifindex}}
+    chans = cfg.eth.channels_get(ehdr)
+
+    all_keys = ["rx-count", "tx-count", "combined-count"]
+    restore = {}
+    config = {}
+    for key in all_keys:
+        if key in chans:
+            restore[key] = chans[key]
+            config[key] = 0
+
+    if chans.get('combined-count', 0) > 1:
+        config['combined-count'] = 1
+    elif chans.get('rx-count', 0) > 1 and chans.get('tx-count', 0) > 1:
+        config['tx-count'] = 1
+        config['rx-count'] = 1
+    else:
+        # looks like we're already on 1 channel
+        return
+
+    cfg.eth.channels_set(ehdr | config)
+    defer(cfg.eth.channels_set, ehdr | restore)
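For reference, the requests channels() and _configure_min_ring_cnt() issue through EthtoolFamily are plain dictionaries keyed by ethtool-netlink attribute names. An accepted channels-set payload looks roughly like this (ifindex and counts illustrative):

    config = {
        "header": {"dev-index": 3},  # ifindex of the device under test
        "rx-count": 0,
        "tx-count": 0,
        "combined-count": 4,
    }
    # cfg.eth.channels_set(config) applies it; channels_get() with just the
    # header returns the current counts plus the "-max" limits the test uses
    # to decide which keys a device supports.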
+
+
+def ringparam(cfg) -> None:
+    """
+    Tweak the ringparam configuration. Try to run some traffic over min
+    ring size to make sure it actually functions.
+    """
+    ehdr = {'header':{'dev-index': cfg.ifindex}}
+    rings = cfg.eth.rings_get(ehdr)
+
+    restore = {}
+    maxes = {}
+    params = set()
+    for key in rings.keys():
+        if 'max' in key:
+            param = key[:-4]
+            maxes[param] = rings[key]
+            params.add(param)
+            restore[param] = rings[param]
+
+    defer(cfg.eth.rings_set, ehdr | restore)
+
+    # Speed up the reconfig by configuring just one ring
+    _configure_min_ring_cnt(cfg)
+
+    # Try to reach min on all settings
+    for param in params:
+        val = rings[param]
+        while True:
+            try:
+                cfg.eth.rings_set({'header':{'dev-index': cfg.ifindex},
+                                   param: val // 2})
+                if val == 0:
+                    break
+                val //= 2
+            except NlError:
+                break
+
+        get = cfg.eth.rings_get(ehdr)
+        ksft_eq(get[param], val)
+
+        ksft_pr(f"Reached min for '{param}' at {val} (max {maxes[param]})")
+
+    GenerateTraffic(cfg).wait_pkts_and_stop(10000)
+
+    # Try max across all params; if the driver supports large rings
+    # this may OOM, so we ignore errors
+    try:
+        ksft_pr("Applying max settings")
+        config = {p: maxes[p] for p in params}
+        cfg.eth.rings_set(ehdr | config)
+    except NlError as e:
+        ksft_pr("Can't set max params", config, e)
+    else:
+        GenerateTraffic(cfg).wait_pkts_and_stop(10000)
+
+
+def main() -> None:
+    """ Ksft boilerplate main """
+
+    with NetDrvEpEnv(__file__) as cfg:
+        cfg.eth = EthtoolFamily()
+
+        ksft_run([channels,
+                  ringparam],
+                 args=(cfg, ))
+        ksft_exit()
+
+
+if __name__ == "__main__":
+    main()
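A note on cost: because the min-search in ringparam() halves on every accepted value, it issues at most about log2(initial size) rings_set calls per parameter, so even very deep rings converge in a handful of netlink round trips. A small sanity check of that bound (illustrative):

    def max_set_calls(initial: int) -> int:
        """Upper bound on rings_set attempts for one parameter (initial >= 1)."""
        return initial.bit_length()

    print(max_set_calls(4096))   # 13 attempts: 2048, 1024, ..., 1, 0
    print(max_set_calls(16384))  # 15

Since the Makefile hunk earlier adds ring_reconfig.py to TEST_PROGS, the test runs as part of the drivers/net selftest target alongside the other Python tests.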
