Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h           |  179
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c       |   18
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c   |   93
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c       |  225
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ptp.c       |   17
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c      |  716
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h      |   44
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c    |   21
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c  | 1096
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h  |   88
-rw-r--r--  drivers/net/ethernet/intel/idpf/xdp.c            |   48
-rw-r--r--  drivers/net/ethernet/intel/idpf/xdp.h            |    6
-rw-r--r--  drivers/net/ethernet/intel/idpf/xsk.c            |   12
13 files changed, 1417 insertions(+), 1146 deletions(-)
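
For orientation before the per-file hunks: this series moves the per-vport queue and vector bookkeeping out of struct idpf_vport into the new struct idpf_q_vec_rsrc (instantiated as vport->dflt_qv_rsrc) and threads an explicit resource pointer through the helpers. The sketch below is illustrative only and not part of the patch; it reuses field and helper names from the hunks that follow (the wrapper function name is made up) to show the resulting access pattern.

/* Illustrative sketch (not part of the patch): queue/vector state is now
 * read from vport->dflt_qv_rsrc rather than from struct idpf_vport itself,
 * and helpers receive the resource struct explicitly.
 */
static void example_walk_rx_groups(struct idpf_vport *vport)
{
	struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
	bool split = idpf_is_queue_model_split(rsrc->rxq_model);

	for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
		struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
		u16 num_rxq;

		/* Queue model and group layout are read from the resource
		 * struct, no longer from struct idpf_vport.
		 */
		num_rxq = split ? rx_qgrp->splitq.num_rxq_sets :
				  rx_qgrp->singleq.num_rxq;

		for (u16 j = 0; j < num_rxq; j++) {
			/* per-queue work, e.g. stats collection, goes here */
		}
	}
}
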
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 1bf7934d4e28..b206fba092c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -8,6 +8,8 @@
struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_q_vec_rsrc;
+struct idpf_rss_data;
#include <net/pkt_sched.h>
#include <linux/aer.h>
@@ -201,7 +203,8 @@ struct idpf_vport_max_q {
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_adapter *adapter,
struct idpf_ctlq_create_info *cq);
- int (*intr_reg_init)(struct idpf_vport *vport);
+ int (*intr_reg_init)(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
@@ -288,54 +291,88 @@ struct idpf_fsteer_fltr {
};
/**
- * struct idpf_vport - Handle for netdevices and queue resources
- * @num_txq: Number of allocated TX queues
- * @num_complq: Number of allocated completion queues
+ * struct idpf_q_vec_rsrc - handle for queue and vector resources
+ * @dev: device pointer for DMA mapping
+ * @q_vectors: array of queue vectors
+ * @q_vector_idxs: starting index of queue vectors
+ * @num_q_vectors: number of IRQ vectors allocated
+ * @noirq_v_idx: ID of the NOIRQ vector
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @txq_grps: array of TX queue groups
* @txq_desc_count: TX queue descriptor count
- * @complq_desc_count: Completion queue descriptor count
- * @compln_clean_budget: Work budget for completion clean
- * @num_txq_grp: Number of TX queue groups
- * @txq_grps: Array of TX queue groups
- * @txq_model: Split queue or single queue queuing model
- * @txqs: Used only in hotpath to get to the right queue very fast
- * @crc_enable: Enable CRC insertion offload
- * @xdpsq_share: whether XDPSQ sharing is enabled
- * @num_xdp_txq: number of XDPSQs
+ * @complq_desc_count: completion queue descriptor count
+ * @txq_model: split queue or single queue queuing model
+ * @num_txq: number of allocated TX queues
+ * @num_complq: number of allocated completion queues
+ * @num_txq_grp: number of TX queue groups
* @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
- * @xdp_prog: installed XDP program
- * @num_rxq: Number of allocated RX queues
- * @num_bufq: Number of allocated buffer queues
+ * @num_rxq_grp: number of RX queues in a group
+ * @rxq_model: splitq queue or single queue queuing model
+ * @rxq_grps: total number of RX groups. Number of groups * number of RX per
+ * group will yield total number of RX queues.
+ * @num_rxq: number of allocated RX queues
+ * @num_bufq: number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
* to complete all buffer descriptors for all buffer queues in
* the worst case.
- * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
- * @bufq_desc_count: Buffer queue descriptor count
- * @num_rxq_grp: Number of RX queues in a group
- * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
- * group will yield total number of RX queues.
- * @rxq_model: Splitq queue or single queue queuing model
- * @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @bufq_desc_count: buffer queue descriptor count
+ * @num_bufqs_per_qgrp: buffer queues per RX queue in a given grouping
+ * @base_rxd: true if the driver should use base descriptors instead of flex
+ */
+struct idpf_q_vec_rsrc {
+ struct device *dev;
+ struct idpf_q_vector *q_vectors;
+ u16 *q_vector_idxs;
+ u16 num_q_vectors;
+ u16 noirq_v_idx;
+ u32 noirq_dyn_ctl_ena;
+ void __iomem *noirq_dyn_ctl;
+
+ struct idpf_txq_group *txq_grps;
+ u32 txq_desc_count;
+ u32 complq_desc_count;
+ u32 txq_model;
+ u16 num_txq;
+ u16 num_complq;
+ u16 num_txq_grp;
+ u16 xdp_txq_offset;
+
+ u16 num_rxq_grp;
+ u32 rxq_model;
+ struct idpf_rxq_group *rxq_grps;
+ u16 num_rxq;
+ u16 num_bufq;
+ u32 rxq_desc_count;
+ u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u8 num_bufqs_per_qgrp;
+ bool base_rxd;
+};
+
+/**
+ * struct idpf_vport - Handle for netdevices and queue resources
+ * @dflt_qv_rsrc: contains default queue and vector resources
+ * @txqs: Used only in hotpath to get to the right queue very fast
+ * @num_txq: Number of allocated TX queues
+ * @num_xdp_txq: number of XDPSQs
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @xdp_prog: installed XDP program
* @vdev_info: IDC vport device info pointer
* @adapter: back pointer to associated adapter
* @netdev: Associated net_device. Each vport should have one and only one
* associated netdev.
* @flags: See enum idpf_vport_flags
- * @vport_type: Default SRIOV, SIOV, etc.
+ * @compln_clean_budget: Work budget for completion clean
* @vport_id: Device given vport identifier
+ * @vport_type: Default SRIOV, SIOV, etc.
* @idx: Software index in adapter vports struct
- * @default_vport: Use this vport if one isn't specified
- * @base_rxd: True if the driver should use base descriptors instead of flex
- * @num_q_vectors: Number of IRQ vectors allocated
- * @q_vectors: Array of queue vectors
- * @q_vector_idxs: Starting index of queue vectors
- * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
- * @noirq_dyn_ctl_ena: value to write to the above to enable it
- * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
+ * @default_vport: Use this vport if one isn't specified
+ * @crc_enable: Enable CRC insertion offload
* @link_up: True if link is up
* @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
* @tstamp_config: The Tx tstamp config
@@ -343,57 +380,31 @@ struct idpf_fsteer_fltr {
* @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
- u16 num_txq;
- u16 num_complq;
- u32 txq_desc_count;
- u32 complq_desc_count;
- u32 compln_clean_budget;
- u16 num_txq_grp;
- struct idpf_txq_group *txq_grps;
- u32 txq_model;
+ struct idpf_q_vec_rsrc dflt_qv_rsrc;
struct idpf_tx_queue **txqs;
- bool crc_enable;
-
- bool xdpsq_share;
+ u16 num_txq;
u16 num_xdp_txq;
- u16 xdp_txq_offset;
+ bool xdpsq_share;
struct bpf_prog *xdp_prog;
- u16 num_rxq;
- u16 num_bufq;
- u32 rxq_desc_count;
- u8 num_bufqs_per_qgrp;
- u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u16 num_rxq_grp;
- struct idpf_rxq_group *rxq_grps;
- u32 rxq_model;
- struct libeth_rx_pt *rx_ptype_lkup;
-
struct iidc_rdma_vport_dev_info *vdev_info;
struct idpf_adapter *adapter;
struct net_device *netdev;
DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
- u16 vport_type;
+ u32 compln_clean_budget;
u32 vport_id;
+ u16 vport_type;
u16 idx;
- bool default_vport;
- bool base_rxd;
-
- u16 num_q_vectors;
- struct idpf_q_vector *q_vectors;
- u16 *q_vector_idxs;
-
- void __iomem *noirq_dyn_ctl;
- u32 noirq_dyn_ctl_ena;
- u16 noirq_v_idx;
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
- struct idpf_port_stats port_stats;
+ struct idpf_port_stats port_stats;
+ bool default_vport;
+ bool crc_enable;
bool link_up;
struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
@@ -550,10 +561,37 @@ struct idpf_vector_lifo {
};
/**
+ * struct idpf_queue_id_reg_chunk - individual queue ID and register chunk
+ * @qtail_reg_start: queue tail register offset
+ * @qtail_reg_spacing: queue tail register spacing
+ * @type: queue type of the queues in the chunk
+ * @start_queue_id: starting queue ID in the chunk
+ * @num_queues: number of queues in the chunk
+ */
+struct idpf_queue_id_reg_chunk {
+ u64 qtail_reg_start;
+ u32 qtail_reg_spacing;
+ u32 type;
+ u32 start_queue_id;
+ u32 num_queues;
+};
+
+/**
+ * struct idpf_queue_id_reg_info - queue ID and register chunk info received
+ * over the mailbox
+ * @num_chunks: number of chunks
+ * @queue_chunks: array of chunks
+ */
+struct idpf_queue_id_reg_info {
+ u16 num_chunks;
+ struct idpf_queue_id_reg_chunk *queue_chunks;
+};
+
+/**
* struct idpf_vport_config - Vport configuration data
* @user_config: see struct idpf_vport_user_config_data
* @max_q: Maximum possible queues
- * @req_qs_chunks: Queue chunk data for requested queues
+ * @qid_reg_info: Struct to store the queue ID and register info
* @mac_filter_list_lock: Lock to protect mac filters
* @flow_steer_list_lock: Lock to protect fsteer filters
* @flags: See enum idpf_vport_config_flags
@@ -561,7 +599,7 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- struct virtchnl2_add_queues *req_qs_chunks;
+ struct idpf_queue_id_reg_info qid_reg_info;
spinlock_t mac_filter_list_lock;
spinlock_t flow_steer_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
@@ -603,6 +641,8 @@ struct idpf_vc_xn_manager;
* @vport_params_reqd: Vport params requested
* @vport_params_recvd: Vport params received
* @vport_ids: Array of device given vport identifiers
+ * @singleq_pt_lkup: Lookup table for singleq RX ptypes
+ * @splitq_pt_lkup: Lookup table for splitq RX ptypes
* @vport_config: Vport config parameters
* @max_vports: Maximum vports that can be allocated
* @num_alloc_vports: Current number of vports allocated
@@ -661,6 +701,9 @@ struct idpf_adapter {
struct virtchnl2_create_vport **vport_params_recvd;
u32 *vport_ids;
+ struct libeth_rx_pt *singleq_pt_lkup;
+ struct libeth_rx_pt *splitq_pt_lkup;
+
struct idpf_vport_config **vport_config;
u16 max_vports;
u16 num_alloc_vports;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3a04a6bd0d7c..a4625638cf3f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -70,11 +70,13 @@ static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter)
/**
* idpf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
@@ -86,15 +88,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ num_regs = idpf_get_reg_intr_vecs(adapter, reg_vals);
if (num_regs < num_vecs) {
err = -EINVAL;
goto free_reg_vals;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
@@ -123,12 +125,12 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 2efa3c08aba5..1d78a621d65b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -18,7 +18,7 @@ static u32 idpf_get_rx_ring_count(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- num_rxq = vport->num_rxq;
+ num_rxq = vport->dflt_qv_rsrc.num_rxq;
idpf_vport_ctrl_unlock(netdev);
return num_rxq;
@@ -503,7 +503,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
}
if (test_bit(IDPF_VPORT_UP, np->state))
- err = idpf_config_rss(vport);
+ err = idpf_config_rss(vport, rss_data);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -644,8 +644,8 @@ static void idpf_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
- ring->rx_pending = vport->rxq_desc_count;
- ring->tx_pending = vport->txq_desc_count;
+ ring->rx_pending = vport->dflt_qv_rsrc.rxq_desc_count;
+ ring->tx_pending = vport->dflt_qv_rsrc.txq_desc_count;
kring->tcp_data_split = idpf_vport_get_hsplit(vport);
@@ -669,8 +669,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
{
struct idpf_vport_user_config_data *config_data;
u32 new_rx_count, new_tx_count;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
u16 idx;
idpf_vport_ctrl_lock(netdev);
@@ -704,8 +705,9 @@ static int idpf_set_ringparam(struct net_device *netdev,
netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
new_tx_count);
- if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count &&
+ rsrc = &vport->dflt_qv_rsrc;
+ if (new_tx_count == rsrc->txq_desc_count &&
+ new_rx_count == rsrc->rxq_desc_count &&
kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
@@ -724,10 +726,10 @@ static int idpf_set_ringparam(struct net_device *netdev,
/* Since we adjusted the RX completion queue count, the RX buffer queue
* descriptor count needs to be adjusted as well
*/
- for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
- vport->bufq_desc_count[i] =
+ for (unsigned int i = 0; i < rsrc->num_bufqs_per_qgrp; i++)
+ rsrc->bufq_desc_count[i] =
IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
- vport->num_bufqs_per_qgrp);
+ rsrc->num_bufqs_per_qgrp);
err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
@@ -1104,7 +1106,7 @@ static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
static void idpf_collect_queue_stats(struct idpf_vport *vport)
{
struct idpf_port_stats *pstats = &vport->port_stats;
- int i, j;
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
/* zero out port stats since they're actually tracked in per
* queue stats; this is only for reporting
@@ -1120,22 +1122,22 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
u64_stats_set(&pstats->tx_dma_map_errs, 0);
u64_stats_update_end(&pstats->stats_sync);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rxq_grp->splitq.num_rxq_sets;
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
struct idpf_rx_queue *rxq;
unsigned int start;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
else
rxq = rxq_grp->singleq.rxqs[j];
@@ -1162,10 +1164,10 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
}
}
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
@@ -1208,9 +1210,9 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int total = 0;
- unsigned int i, j;
bool is_splitq;
u16 qtype;
@@ -1228,12 +1230,13 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_collect_queue_stats(vport);
idpf_add_port_stats(vport, &data);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
- for (j = 0; j < txq_grp->num_txq; j++, total++) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++, total++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
@@ -1253,10 +1256,10 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
total = 0;
- is_splitq = idpf_is_queue_model_split(vport->rxq_model);
+ is_splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &rsrc->rxq_grps[i];
u16 num_rxq;
qtype = VIRTCHNL2_QUEUE_TYPE_RX;
@@ -1266,7 +1269,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
else
num_rxq = rxq_grp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, total++) {
+ for (unsigned int j = 0; j < num_rxq; j++, total++) {
struct idpf_rx_queue *rxq;
if (is_splitq)
@@ -1298,15 +1301,16 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp, q_idx;
- if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
+ return rsrc->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
+ return rsrc->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
/**
@@ -1319,14 +1323,15 @@ struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
u32 q_num)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
int q_grp;
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq->q_vector;
+ return rsrc->txq_grps[q_grp].complq->q_vector;
}
/**
@@ -1363,7 +1368,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
u32 q_num)
{
const struct idpf_netdev_priv *np = netdev_priv(netdev);
- const struct idpf_vport *vport;
+ struct idpf_q_vec_rsrc *rsrc;
+ struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1372,16 +1378,17 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
+ rsrc = &vport->dflt_qv_rsrc;
+ if (q_num >= rsrc->num_rxq && q_num >= rsrc->num_txq) {
err = -EINVAL;
goto unlock_mutex;
}
- if (q_num < vport->num_rxq)
+ if (q_num < rsrc->num_rxq)
__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_RX);
- if (q_num < vport->num_txq)
+ if (q_num < rsrc->num_txq)
__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
VIRTCHNL2_QUEUE_TYPE_TX);
@@ -1549,8 +1556,9 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_user_config_data *user_config;
struct idpf_q_coalesce *q_coal;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
- int i, err = 0;
+ int err = 0;
user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
@@ -1560,14 +1568,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto unlock_mutex;
- for (i = 0; i < vport->num_txq; i++) {
+ rsrc = &vport->dflt_qv_rsrc;
+ for (unsigned int i = 0; i < rsrc->num_txq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
- for (i = 0; i < vport->num_rxq; i++) {
+ for (unsigned int i = 0; i < rsrc->num_rxq; i++) {
q_coal = &user_config->q_coalesce[i];
err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
@@ -1748,6 +1757,7 @@ static void idpf_get_ts_stats(struct net_device *netdev,
struct ethtool_ts_stats *ts_stats)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
unsigned int start;
@@ -1763,8 +1773,9 @@ static void idpf_get_ts_stats(struct net_device *netdev,
if (!test_bit(IDPF_VPORT_UP, np->state))
goto exit;
- for (u16 i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ rsrc = &vport->dflt_qv_rsrc;
+ for (u16 i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
for (u16 j = 0; j < txq_grp->num_txq; j++) {
struct idpf_tx_queue *txq = txq_grp->txqs[j];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index f5a1ede23dbf..94da5fbd56f1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -545,7 +545,9 @@ static int idpf_del_mac_filter(struct idpf_vport *vport,
if (test_bit(IDPF_VPORT_UP, np->state)) {
int err;
- err = idpf_add_del_mac_filters(vport, np, false, async);
+ err = idpf_add_del_mac_filters(np->adapter, vport_config,
+ vport->default_mac_addr,
+ np->vport_id, false, async);
if (err)
return err;
}
@@ -614,7 +616,9 @@ static int idpf_add_mac_filter(struct idpf_vport *vport,
return err;
if (test_bit(IDPF_VPORT_UP, np->state))
- err = idpf_add_del_mac_filters(vport, np, true, async);
+ err = idpf_add_del_mac_filters(np->adapter, vport_config,
+ vport->default_mac_addr,
+ np->vport_id, true, async);
return err;
}
@@ -662,7 +666,8 @@ static void idpf_restore_mac_filters(struct idpf_vport *vport)
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ idpf_add_del_mac_filters(vport->adapter, vport_config,
+ vport->default_mac_addr, vport->vport_id,
true, false);
}
@@ -686,7 +691,8 @@ static void idpf_remove_mac_filters(struct idpf_vport *vport)
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ idpf_add_del_mac_filters(vport->adapter, vport_config,
+ vport->default_mac_addr, vport->vport_id,
false, false);
}
@@ -975,6 +981,10 @@ static void idpf_remove_features(struct idpf_vport *vport)
static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue_id_reg_info *chunks;
+ u32 vport_id = vport->vport_id;
if (!test_bit(IDPF_VPORT_UP, np->state))
return;
@@ -985,24 +995,26 @@ static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
- idpf_send_disable_vport_msg(vport);
+ chunks = &adapter->vport_config[vport->idx]->qid_reg_info;
+
+ idpf_send_disable_vport_msg(adapter, vport_id);
idpf_send_disable_queues_msg(vport);
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
/* Normally we ask for queues in create_vport, but if the number of
* initially requested queues have changed, for example via ethtool
* set channels, we do delete queues and then add the queues back
* instead of deleting and reallocating the vport.
*/
if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(adapter, chunks, vport_id);
idpf_remove_features(vport);
vport->link_up = false;
- idpf_vport_intr_deinit(vport);
- idpf_xdp_rxq_info_deinit_all(vport);
- idpf_vport_queues_rel(vport);
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
+ idpf_vport_queues_rel(vport, rsrc);
+ idpf_vport_intr_rel(rsrc);
clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
@@ -1046,9 +1058,6 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
struct idpf_adapter *adapter = vport->adapter;
u16 idx = vport->idx;
- kfree(vport->rx_ptype_lkup);
- vport->rx_ptype_lkup = NULL;
-
if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
adapter->vport_config[idx]->flags)) {
unregister_netdev(vport->netdev);
@@ -1065,6 +1074,7 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
*/
static void idpf_vport_rel(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vector_info vec_info;
@@ -1073,12 +1083,12 @@ static void idpf_vport_rel(struct idpf_vport *vport)
u16 idx = vport->idx;
vport_config = adapter->vport_config[vport->idx];
- idpf_deinit_rss_lut(vport);
rss_data = &vport_config->user_config.rss_data;
+ idpf_deinit_rss_lut(rss_data);
kfree(rss_data->rss_key);
rss_data->rss_key = NULL;
- idpf_send_destroy_vport_msg(vport);
+ idpf_send_destroy_vport_msg(adapter, vport->vport_id);
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
@@ -1089,24 +1099,21 @@ static void idpf_vport_rel(struct idpf_vport *vport)
/* Release all the allocated vectors on the stack */
vec_info.num_req_vecs = 0;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
vec_info.default_vport = vport->default_vport;
- idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
+ idpf_req_rel_vector_indexes(adapter, rsrc->q_vector_idxs, &vec_info);
+
+ kfree(rsrc->q_vector_idxs);
+ rsrc->q_vector_idxs = NULL;
- kfree(vport->q_vector_idxs);
- vport->q_vector_idxs = NULL;
+ idpf_vport_deinit_queue_reg_chunks(vport_config);
kfree(adapter->vport_params_recvd[idx]);
adapter->vport_params_recvd[idx] = NULL;
kfree(adapter->vport_params_reqd[idx]);
adapter->vport_params_reqd[idx] = NULL;
- if (adapter->vport_config[idx]) {
- kfree(adapter->vport_config[idx]->req_qs_chunks);
- adapter->vport_config[idx]->req_qs_chunks = NULL;
- }
- kfree(vport->rx_ptype_lkup);
- vport->rx_ptype_lkup = NULL;
+
kfree(vport);
adapter->num_alloc_vports--;
}
@@ -1155,7 +1162,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
*/
static bool idpf_is_hsplit_supported(const struct idpf_vport *vport)
{
- return idpf_is_queue_model_split(vport->rxq_model) &&
+ return idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model) &&
idpf_is_cap_ena_all(vport->adapter, IDPF_HSPLIT_CAPS,
IDPF_CAP_HSPLIT);
}
@@ -1224,6 +1231,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
{
struct idpf_rss_data *rss_data;
u16 idx = adapter->next_vport;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
u16 num_max_q;
int err;
@@ -1271,11 +1279,15 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs)
+ rsrc = &vport->dflt_qv_rsrc;
+ rsrc->dev = &adapter->pdev->dev;
+ rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ if (!rsrc->q_vector_idxs)
goto free_vport;
- idpf_vport_init(vport, max_q);
+ err = idpf_vport_init(vport, max_q);
+ if (err)
+ goto free_vector_idxs;
/* LUT and key are both initialized here. Key is not strictly dependent
* on how many queues we have. If we change number of queues and soft
@@ -1286,13 +1298,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
if (!rss_data->rss_key)
- goto free_vector_idxs;
+ goto free_qreg_chunks;
- /* Initialize default rss key */
+ /* Initialize default RSS key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
- /* Initialize default rss LUT */
- err = idpf_init_rss_lut(vport);
+ /* Initialize default RSS LUT */
+ err = idpf_init_rss_lut(vport, rss_data);
if (err)
goto free_rss_key;
@@ -1308,8 +1320,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
free_rss_key:
kfree(rss_data->rss_key);
+free_qreg_chunks:
+ idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
- kfree(vport->q_vector_idxs);
+ kfree(rsrc->q_vector_idxs);
free_vport:
kfree(vport);
@@ -1346,7 +1360,8 @@ void idpf_statistics_task(struct work_struct *work)
struct idpf_vport *vport = adapter->vports[i];
if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
- idpf_send_get_stats_msg(vport);
+ idpf_send_get_stats_msg(netdev_priv(vport->netdev),
+ &vport->port_stats);
}
queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
@@ -1369,7 +1384,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
usecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter);
+ idpf_recv_mb_msg(adapter, adapter->hw.arq);
}
/**
@@ -1417,9 +1432,10 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err, txq = vport->num_txq - vport->num_xdp_txq;
+ int err, txq = vport->dflt_qv_rsrc.num_txq - vport->num_xdp_txq;
- err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
+ err = netif_set_real_num_rx_queues(vport->netdev,
+ vport->dflt_qv_rsrc.num_rxq);
if (err)
return err;
@@ -1444,24 +1460,22 @@ static void idpf_up_complete(struct idpf_vport *vport)
/**
* idpf_rx_init_buf_tail - Write initial buffer ring tail value
- * @vport: virtual port struct
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
+static void idpf_rx_init_buf_tail(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
- for (j = 0; j < grp->singleq.num_rxq; j++) {
+ for (unsigned int j = 0; j < grp->singleq.num_rxq; j++) {
const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
@@ -1479,7 +1493,12 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_queue_id_reg_info *chunks;
+ struct idpf_rss_data *rss_data;
+ u32 vport_id = vport->vport_id;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1491,48 +1510,51 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
- err = idpf_vport_intr_alloc(vport);
+ err = idpf_vport_intr_alloc(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
goto err_rtnl_unlock;
}
- err = idpf_vport_queues_alloc(vport);
+ err = idpf_vport_queues_alloc(vport, rsrc);
if (err)
goto intr_rel;
- err = idpf_vport_queue_ids_init(vport);
+ vport_config = adapter->vport_config[vport->idx];
+ chunks = &vport_config->qid_reg_info;
+
+ err = idpf_vport_queue_ids_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_vport_intr_init(vport);
+ err = idpf_vport_intr_init(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_queue_reg_init(vport, rsrc, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_rx_bufs_init_all(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- idpf_rx_init_buf_tail(vport);
+ idpf_rx_init_buf_tail(rsrc);
- err = idpf_xdp_rxq_info_init_all(vport);
+ err = idpf_xdp_rxq_info_init_all(rsrc);
if (err) {
netdev_err(vport->netdev,
"Failed to initialize XDP RxQ info for vport %u: %pe\n",
@@ -1540,16 +1562,17 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto intr_deinit;
}
- idpf_vport_intr_ena(vport);
+ idpf_vport_intr_ena(vport, rsrc);
- err = idpf_send_config_queues_msg(vport);
+ err = idpf_send_config_queues_msg(adapter, rsrc, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
goto rxq_deinit;
}
- err = idpf_send_map_unmap_queue_vector_msg(vport, true);
+ err = idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id,
+ true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
@@ -1563,7 +1586,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
goto unmap_queue_vectors;
}
- err = idpf_send_enable_vport_msg(vport);
+ err = idpf_send_enable_vport_msg(adapter, vport_id);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
vport->vport_id, err);
@@ -1573,7 +1596,8 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
idpf_restore_features(vport);
- err = idpf_config_rss(vport);
+ rss_data = &vport_config->user_config.rss_data;
+ err = idpf_config_rss(vport, rss_data);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
vport->vport_id, err);
@@ -1588,19 +1612,19 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
return 0;
disable_vport:
- idpf_send_disable_vport_msg(vport);
+ idpf_send_disable_vport_msg(adapter, vport_id);
disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
- idpf_send_map_unmap_queue_vector_msg(vport, false);
+ idpf_send_map_unmap_queue_vector_msg(adapter, rsrc, vport_id, false);
rxq_deinit:
- idpf_xdp_rxq_info_deinit_all(vport);
+ idpf_xdp_rxq_info_deinit_all(rsrc);
intr_deinit:
- idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
intr_rel:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
err_rtnl_unlock:
if (rtnl)
@@ -1658,10 +1682,6 @@ void idpf_init_task(struct work_struct *work)
goto unwind_vports;
}
- err = idpf_send_get_rx_ptype_msg(vport);
- if (err)
- goto unwind_vports;
-
index = vport->idx;
vport_config = adapter->vport_config[index];
@@ -1987,9 +2007,13 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_q_vec_rsrc *new_rsrc;
+ u32 vport_id = vport->vport_id;
struct idpf_vport *new_vport;
- int err;
+ int err, tmp_err = 0;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -2014,16 +2038,18 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
+ new_rsrc = &new_vport->dflt_qv_rsrc;
+
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
case IDPF_SR_Q_CHANGE:
- err = idpf_vport_adjust_qs(new_vport);
+ err = idpf_vport_adjust_qs(new_vport, new_rsrc);
if (err)
goto free_vport;
break;
case IDPF_SR_Q_DESC_CHANGE:
/* Update queue parameters before allocating resources */
- idpf_vport_calc_num_q_desc(new_vport);
+ idpf_vport_calc_num_q_desc(new_vport, new_rsrc);
break;
case IDPF_SR_MTU_CHANGE:
idpf_idc_vdev_mtu_event(vport->vdev_info,
@@ -2037,41 +2063,40 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
+ vport_config = adapter->vport_config[vport->idx];
+
if (!vport_is_up) {
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(adapter, &vport_config->qid_reg_info,
+ vport_id);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
idpf_vport_stop(vport, false);
}
- /* We're passing in vport here because we need its wait_queue
- * to send a message and it should be getting all the vport
- * config data out of the adapter but we need to be careful not
- * to add code to add_queues to change the vport config within
- * vport itself as it will be wiped with a memcpy later.
- */
- err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
- new_vport->num_complq,
- new_vport->num_rxq,
- new_vport->num_bufq);
+ err = idpf_send_add_queues_msg(adapter, vport_config, new_rsrc,
+ vport_id);
if (err)
goto err_reset;
- /* Same comment as above regarding avoiding copying the wait_queues and
- * mutexes applies here. We do not want to mess with those if possible.
+ /* Avoid copying the wait_queues and mutexes. We do not want to mess
+ * with those if possible.
*/
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_alloc_vec_indexes(vport, &vport->dflt_qv_rsrc);
err = idpf_set_real_num_queues(vport);
if (err)
goto err_open;
if (reset_cause == IDPF_SR_Q_CHANGE &&
- !netif_is_rxfh_configured(vport->netdev))
- idpf_fill_dflt_rss_lut(vport);
+ !netif_is_rxfh_configured(vport->netdev)) {
+ struct idpf_rss_data *rss_data;
+
+ rss_data = &vport_config->user_config.rss_data;
+ idpf_fill_dflt_rss_lut(vport, rss_data);
+ }
if (vport_is_up)
err = idpf_vport_open(vport, false);
@@ -2079,11 +2104,11 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
err_reset:
- idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
- vport->num_rxq, vport->num_bufq);
+ tmp_err = idpf_send_add_queues_msg(adapter, vport_config, rsrc,
+ vport_id);
err_open:
- if (vport_is_up)
+ if (!tmp_err && vport_is_up)
idpf_vport_open(vport, false);
free_vport:
@@ -2249,7 +2274,12 @@ static int idpf_set_features(struct net_device *netdev,
* the HW when the interface is brought up.
*/
if (test_bit(IDPF_VPORT_UP, np->state)) {
- err = idpf_config_rss(vport);
+ struct idpf_vport_config *vport_config;
+ struct idpf_rss_data *rss_data;
+
+ vport_config = adapter->vport_config[vport->idx];
+ rss_data = &vport_config->user_config.rss_data;
+ err = idpf_config_rss(vport, rss_data);
if (err)
goto unlock_mutex;
}
@@ -2263,8 +2293,13 @@ static int idpf_set_features(struct net_device *netdev,
}
if (changed & NETIF_F_LOOPBACK) {
+ bool loopback_ena;
+
netdev->features ^= NETIF_F_LOOPBACK;
- err = idpf_send_ena_dis_loopback_msg(vport);
+ loopback_ena = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+
+ err = idpf_send_ena_dis_loopback_msg(adapter, vport->vport_id,
+ loopback_ena);
}
unlock_mutex:
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 0a8b50350b86..4a805a9541f0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -384,15 +384,17 @@ static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter)
WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies);
idpf_for_each_vport(adapter, vport) {
+ struct idpf_q_vec_rsrc *rsrc;
bool split;
- if (!vport || !vport->rxq_grps)
+ if (!vport || !vport->dflt_qv_rsrc.rxq_grps)
continue;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
idpf_ptp_update_phctime_rxq_grp(grp, split, systime);
}
@@ -681,9 +683,10 @@ int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
*/
static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
bool enable = true, splitq;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(rsrc->rxq_model);
if (rx_filter == HWTSTAMP_FILTER_NONE) {
enable = false;
@@ -692,8 +695,8 @@ static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- for (u16 i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u16 i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
struct idpf_rx_queue *rx_queue;
u16 j, num_rxq;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 66ba645e8b90..376050308b06 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -148,24 +148,22 @@ static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
- * @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_tx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- int i, j;
-
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++)
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++)
idpf_tx_desc_rel(txq_grp->txqs[j]);
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -265,7 +263,7 @@ err_alloc:
/**
* idpf_compl_desc_alloc - allocate completion descriptors
- * @vport: vport to allocate resources for
+ * @vport: virtual port private structure
* @complq: completion queue to set up
*
* Return: 0 on success, -errno on failure.
@@ -298,20 +296,21 @@ static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_tx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err = 0;
- int i, j;
/* Setup buffer queues. In single queue model buffer queues and
* completion queues will be same
*/
- for (i = 0; i < vport->num_txq_grp; i++) {
- for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ for (unsigned int j = 0; j < rsrc->txq_grps[i].num_txq; j++) {
+ struct idpf_tx_queue *txq = rsrc->txq_grps[i].txqs[j];
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -322,11 +321,11 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
continue;
/* Setup completion queues */
- err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
+ err = idpf_compl_desc_alloc(vport, rsrc->txq_grps[i].complq);
if (err) {
pci_err(vport->adapter->pdev,
"Allocation for Tx Completion Queue %u failed\n",
@@ -337,7 +336,7 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
err_out:
if (err)
- idpf_tx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
return err;
}
@@ -490,38 +489,38 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Free all rx queues resources
*/
-static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
+static void idpf_rx_desc_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct device *dev = &vport->adapter->pdev->dev;
+ struct device *dev = rsrc->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
- int i, j;
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ for (unsigned int j = 0; j < rx_qgrp->singleq.num_rxq; j++)
idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++)
+ for (unsigned int j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
@@ -781,26 +780,28 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
/**
* idpf_rx_bufs_init_all - Initialize all RX bufs
- * @vport: virtual port struct
+ * @vport: pointer to vport struct
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, negative on failure
*/
-int idpf_rx_bufs_init_all(struct idpf_vport *vport)
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- bool split = idpf_is_queue_model_split(vport->rxq_model);
- int i, j, err;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
+ int err;
- idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, vport->xdp_prog);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
q = rx_qgrp->singleq.rxqs[j];
@@ -813,7 +814,7 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
}
/* Otherwise, allocate bufs for the buffer queues */
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
enum libeth_fqe_type type;
struct idpf_buf_queue *q;
@@ -899,26 +900,28 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
/**
* idpf_rx_desc_alloc_all - allocate all RX queues resources
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
+static int idpf_rx_desc_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_rxq_group *rx_qgrp;
- int i, j, err;
u16 num_rxq;
+ int err;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ rx_qgrp = &rsrc->rxq_grps[i];
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -932,10 +935,10 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
}
}
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
@@ -953,18 +956,18 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
return 0;
err_out:
- idpf_rx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(rsrc);
return err;
}
-static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+static int idpf_init_queue_set(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
bool splitq;
int err;
- splitq = idpf_is_queue_model_split(vport->rxq_model);
+ splitq = idpf_is_queue_model_split(qs->qv_rsrc->rxq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -1034,19 +1037,18 @@ static int idpf_init_queue_set(const struct idpf_queue_set *qs)
static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
- struct device *dev = vport->netdev->dev.parent;
+ const struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
switch (q->type) {
case VIRTCHNL2_QUEUE_TYPE_RX:
- idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
- idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
+ idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- idpf_rx_desc_rel_bufq(q->bufq, dev);
+ idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev);
break;
case VIRTCHNL2_QUEUE_TYPE_TX:
idpf_tx_desc_rel(q->txq);
@@ -1113,7 +1115,8 @@ static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
static struct idpf_queue_set *
idpf_vector_to_queue_set(struct idpf_q_vector *qv)
{
- bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ u32 xdp_txq_offset = qv->vport->dflt_qv_rsrc.xdp_txq_offset;
+ bool xdp = xdp_txq_offset && !qv->num_xsksq;
struct idpf_vport *vport = qv->vport;
struct idpf_queue_set *qs;
u32 num;
@@ -1123,7 +1126,8 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
if (!num)
return NULL;
- qs = idpf_alloc_queue_set(vport, num);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, num);
if (!qs)
return NULL;
@@ -1149,12 +1153,12 @@ idpf_vector_to_queue_set(struct idpf_q_vector *qv)
qs->qs[num++].complq = qv->complq[i];
}
- if (!vport->xdp_txq_offset)
+ if (!xdp_txq_offset)
goto finalize;
if (xdp) {
for (u32 i = 0; i < qv->num_rxq; i++) {
- u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+ u32 idx = xdp_txq_offset + qv->rx[i]->idx;
qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[num++].txq = vport->txqs[idx];
@@ -1181,26 +1185,27 @@ finalize:
return qs;
}
-static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_enable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_q_vector *q_vector;
int err;
q_vector = idpf_find_rxq_vec(vport, qid);
- err = idpf_init_queue_set(qs);
+ err = idpf_init_queue_set(vport, qs);
if (err) {
netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
qid, ERR_PTR(err));
return err;
}
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
goto config;
- q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors),
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors),
sizeof(*q_vector->xsksq), GFP_KERNEL);
if (!q_vector->xsksq)
return -ENOMEM;
@@ -1243,9 +1248,9 @@ config:
return 0;
}
-static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_disable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
struct idpf_q_vector *q_vector;
int err;
@@ -1290,30 +1295,28 @@ int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
if (!qs)
return -ENOMEM;
- return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+ return en ? idpf_qp_enable(vport, qs, qid) :
+ idpf_qp_disable(vport, qs, qid);
}
/**
* idpf_txq_group_rel - Release all resources for txq groups
- * @vport: vport to release txq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_txq_group_rel(struct idpf_vport *vport)
+static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- bool split, flow_sch_en;
- int i, j;
+ bool split;
- if (!vport->txq_grps)
+ if (!rsrc->txq_grps)
return;
- split = idpf_is_queue_model_split(vport->txq_model);
- flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
- VIRTCHNL2_CAP_SPLITQ_QSCHED);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i];
- for (j = 0; j < txq_grp->num_txq; j++) {
- if (flow_sch_en) {
+ for (unsigned int j = 0; j < txq_grp->num_txq; j++) {
+ if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) {
kfree(txq_grp->txqs[j]->refillq);
txq_grp->txqs[j]->refillq = NULL;
}
@@ -1328,8 +1331,8 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
}
- kfree(vport->txq_grps);
- vport->txq_grps = NULL;
+ kfree(rsrc->txq_grps);
+ rsrc->txq_grps = NULL;
}
/**
@@ -1338,12 +1341,10 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
*/
static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
{
- int i, j;
-
- for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
+ for (unsigned int i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) {
struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
- for (j = 0; j < bufq_set->num_refillqs; j++) {
+ for (unsigned int j = 0; j < bufq_set->num_refillqs; j++) {
kfree(bufq_set->refillqs[j].ring);
bufq_set->refillqs[j].ring = NULL;
}
@@ -1354,23 +1355,20 @@ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
/**
* idpf_rxq_group_rel - Release all resources for rxq groups
- * @vport: vport to release rxq groups on
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_rxq_group_rel(struct idpf_vport *vport)
+static void idpf_rxq_group_rel(struct idpf_q_vec_rsrc *rsrc)
{
- int i;
-
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- int j;
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_rxq = rx_qgrp->splitq.num_rxq_sets;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->splitq.rxq_sets[j]);
rx_qgrp->splitq.rxq_sets[j] = NULL;
}
@@ -1380,41 +1378,44 @@ static void idpf_rxq_group_rel(struct idpf_vport *vport)
rx_qgrp->splitq.bufq_sets = NULL;
} else {
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
kfree(rx_qgrp->singleq.rxqs[j]);
rx_qgrp->singleq.rxqs[j] = NULL;
}
}
}
- kfree(vport->rxq_grps);
- vport->rxq_grps = NULL;
+ kfree(rsrc->rxq_grps);
+ rsrc->rxq_grps = NULL;
}
/**
* idpf_vport_queue_grp_rel_all - Release all queue groups
* @vport: vport to release queue groups for
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+static void idpf_vport_queue_grp_rel_all(struct idpf_q_vec_rsrc *rsrc)
{
- idpf_txq_group_rel(vport);
- idpf_rxq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
+ idpf_rxq_group_rel(rsrc);
}
/**
* idpf_vport_queues_rel - Free memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for queues associated to a vport
*/
-void idpf_vport_queues_rel(struct idpf_vport *vport)
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_xdp_copy_prog_to_rqs(vport, NULL);
+ idpf_xdp_copy_prog_to_rqs(rsrc, NULL);
- idpf_tx_desc_rel_all(vport);
- idpf_rx_desc_rel_all(vport);
+ idpf_tx_desc_rel_all(rsrc);
+ idpf_rx_desc_rel_all(rsrc);
idpf_xdpsqs_put(vport);
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
kfree(vport->txqs);
vport->txqs = NULL;
@@ -1423,6 +1424,7 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
/**
* idpf_vport_init_fast_path_txqs - Initialize fast path txq array
* @vport: vport to init txqs on
+ * @rsrc: pointer to queue and vector resources
*
* We get a queue index from skb->queue_mapping and we need a fast way to
* dereference the queue from queue groups. This allows us to quickly pull a
@@ -1430,22 +1432,23 @@ void idpf_vport_queues_rel(struct idpf_vport *vport)
*
* Return: 0 on success, negative on failure
*/
-static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps;
struct work_struct *tstamp_task = &vport->tstamp_task;
- int i, j, k = 0;
+ int k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
+ vport->txqs = kcalloc(rsrc->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
-
if (!vport->txqs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+ vport->num_txq = rsrc->num_txq;
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &rsrc->txq_grps[i];
- for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ for (unsigned int j = 0; j < tx_grp->num_txq; j++, k++) {
vport->txqs[k] = tx_grp->txqs[j];
vport->txqs[k]->idx = k;
@@ -1464,16 +1467,18 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
* idpf_vport_init_num_qs - Initialize number of queues
* @vport: vport to initialize queues
* @vport_msg: data to be filled into vport
+ * @rsrc: pointer to queue and vector resources
*/
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg)
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
u16 idx = vport->idx;
config_data = &vport->adapter->vport_config[idx]->user_config;
- vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
- vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ rsrc->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ rsrc->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
/* number of txqs and rxqs in config data will be zeros only in the
* driver load path and we don't update them thereafter
*/
@@ -1482,74 +1487,75 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
}
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
vport->xdp_prog = config_data->xdp_prog;
if (idpf_xdp_enabled(vport)) {
- vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ rsrc->xdp_txq_offset = config_data->num_req_tx_qs;
vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
- vport->xdp_txq_offset;
+ rsrc->xdp_txq_offset;
vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
} else {
- vport->xdp_txq_offset = 0;
+ rsrc->xdp_txq_offset = 0;
vport->num_xdp_txq = 0;
vport->xdpsq_share = false;
}
/* Adjust number of buffer queues per Rx queue group. */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- vport->num_bufqs_per_qgrp = 0;
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
+ rsrc->num_bufqs_per_qgrp = 0;
return;
}
- vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ rsrc->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
}
/**
* idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
* @vport: vport to calculate queue descriptors for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vport_user_config_data *config_data;
- int num_bufqs = vport->num_bufqs_per_qgrp;
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
u32 num_req_txq_desc, num_req_rxq_desc;
u16 idx = vport->idx;
- int i;
config_data = &vport->adapter->vport_config[idx]->user_config;
num_req_txq_desc = config_data->num_req_txq_desc;
num_req_rxq_desc = config_data->num_req_rxq_desc;
- vport->complq_desc_count = 0;
+ rsrc->complq_desc_count = 0;
if (num_req_txq_desc) {
- vport->txq_desc_count = num_req_txq_desc;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vport->complq_desc_count = num_req_txq_desc;
- if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
- vport->complq_desc_count =
+ rsrc->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
+ rsrc->complq_desc_count = num_req_txq_desc;
+ if (rsrc->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ rsrc->complq_desc_count =
IDPF_MIN_TXQ_COMPLQ_DESC;
}
} else {
- vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->complq_desc_count =
+ rsrc->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->complq_desc_count =
IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
}
if (num_req_rxq_desc)
- vport->rxq_desc_count = num_req_rxq_desc;
+ rsrc->rxq_desc_count = num_req_rxq_desc;
else
- vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+ rsrc->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
- for (i = 0; i < num_bufqs; i++) {
- if (!vport->bufq_desc_count[i])
- vport->bufq_desc_count[i] =
- IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ for (unsigned int i = 0; i < num_bufqs; i++) {
+ if (!rsrc->bufq_desc_count[i])
+ rsrc->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(rsrc->rxq_desc_count,
num_bufqs);
}
}
@@ -1638,54 +1644,54 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
/**
* idpf_vport_calc_num_q_groups - Calculate number of queue groups
- * @vport: vport to calculate q groups for
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc)
{
- if (idpf_is_queue_model_split(vport->txq_model))
- vport->num_txq_grp = vport->num_txq;
+ if (idpf_is_queue_model_split(rsrc->txq_model))
+ rsrc->num_txq_grp = rsrc->num_txq;
else
- vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ rsrc->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
- if (idpf_is_queue_model_split(vport->rxq_model))
- vport->num_rxq_grp = vport->num_rxq;
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
+ rsrc->num_rxq_grp = rsrc->num_rxq;
else
- vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+ rsrc->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
}
/**
* idpf_vport_calc_numq_per_grp - Calculate number of queues per group
- * @vport: vport to calculate queues for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: return parameter for number of TX queues
* @num_rxq: return parameter for number of RX queues
*/
-static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
+static void idpf_vport_calc_numq_per_grp(struct idpf_q_vec_rsrc *rsrc,
u16 *num_txq, u16 *num_rxq)
{
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
else
- *num_txq = vport->num_txq;
+ *num_txq = rsrc->num_txq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
else
- *num_rxq = vport->num_rxq;
+ *num_rxq = rsrc->num_rxq;
}
/**
* idpf_rxq_set_descids - set the descids supported by this queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+static void idpf_rxq_set_descids(struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
return;
- if (vport->base_rxd)
+ if (rsrc->base_rxd)
q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
else
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
@@ -1694,44 +1700,45 @@ static void idpf_rxq_set_descids(const struct idpf_vport *vport,
/**
* idpf_txq_group_alloc - Allocate all txq group resources
* @vport: vport to allocate txq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_txq: number of txqs to allocate for each group
*
* Return: 0 on success, negative on failure
*/
-static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
+static int idpf_txq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_txq)
{
bool split, flow_sch_en;
- int i;
- vport->txq_grps = kcalloc(vport->num_txq_grp,
- sizeof(*vport->txq_grps), GFP_KERNEL);
- if (!vport->txq_grps)
+ rsrc->txq_grps = kcalloc(rsrc->num_txq_grp,
+ sizeof(*rsrc->txq_grps), GFP_KERNEL);
+ if (!rsrc->txq_grps)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (unsigned int i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- int j;
tx_qgrp->vport = vport;
tx_qgrp->num_txq = num_txq;
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
if (!tx_qgrp->txqs[j])
goto err_alloc;
}
- for (j = 0; j < tx_qgrp->num_txq; j++) {
+ for (unsigned int j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
- q->desc_count = vport->txq_desc_count;
+ q->desc_count = rsrc->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
@@ -1766,7 +1773,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!tx_qgrp->complq)
goto err_alloc;
- tx_qgrp->complq->desc_count = vport->complq_desc_count;
+ tx_qgrp->complq->desc_count = rsrc->complq_desc_count;
tx_qgrp->complq->txq_grp = tx_qgrp;
tx_qgrp->complq->netdev = vport->netdev;
tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
@@ -1778,7 +1785,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
return 0;
err_alloc:
- idpf_txq_group_rel(vport);
+ idpf_txq_group_rel(rsrc);
return -ENOMEM;
}
@@ -1786,30 +1793,34 @@ err_alloc:
/**
* idpf_rxq_group_alloc - Allocate all rxq group resources
* @vport: vport to allocate rxq groups for
+ * @rsrc: pointer to queue and vector resources
* @num_rxq: number of rxqs to allocate for each group
*
* Return: 0 on success, negative on failure
*/
-static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
+static int idpf_rxq_group_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u16 num_rxq)
{
- int i, k, err = 0;
- bool hs;
+ struct idpf_adapter *adapter = vport->adapter;
+ bool hs, rsc;
+ int err = 0;
- vport->rxq_grps = kcalloc(vport->num_rxq_grp,
- sizeof(struct idpf_rxq_group), GFP_KERNEL);
- if (!vport->rxq_grps)
+ rsrc->rxq_grps = kcalloc(rsrc->num_rxq_grp,
+ sizeof(struct idpf_rxq_group), GFP_KERNEL);
+ if (!rsrc->rxq_grps)
return -ENOMEM;
hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ rsc = idpf_is_feature_ena(vport, NETIF_F_GRO_HW);
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- int j;
+ for (unsigned int i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
rx_qgrp->vport = vport;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
rx_qgrp->singleq.num_rxq = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
rx_qgrp->singleq.rxqs[j] =
kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
GFP_KERNEL);
@@ -1822,7 +1833,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
rx_qgrp->splitq.num_rxq_sets = num_rxq;
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
rx_qgrp->splitq.rxq_sets[j] =
kzalloc(sizeof(struct idpf_rxq_set),
GFP_KERNEL);
@@ -1832,25 +1843,27 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
}
- rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
+ rx_qgrp->splitq.bufq_sets = kcalloc(rsrc->num_bufqs_per_qgrp,
sizeof(struct idpf_bufq_set),
GFP_KERNEL);
if (!rx_qgrp->splitq.bufq_sets) {
err = -ENOMEM;
goto err_alloc;
}
+ rx_qgrp->splitq.num_bufq_sets = rsrc->num_bufqs_per_qgrp;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (unsigned int j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->desc_count = vport->bufq_desc_count[j];
+ q->desc_count = rsrc->bufq_desc_count[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1859,12 +1872,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
err = -ENOMEM;
goto err_alloc;
}
- for (k = 0; k < bufq_set->num_refillqs; k++) {
+ for (unsigned int k = 0; k < bufq_set->num_refillqs; k++) {
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
refillq->desc_count =
- vport->bufq_desc_count[j];
+ rsrc->bufq_desc_count[j];
idpf_queue_set(GEN_CHK, refillq);
idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
@@ -1878,37 +1891,39 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
}
skip_splitq_rx_init:
- for (j = 0; j < num_rxq; j++) {
+ for (unsigned int j = 0; j < num_rxq; j++) {
struct idpf_rx_queue *q;
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
+ q->rx_ptype_lkup = adapter->singleq_pt_lkup;
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
idpf_queue_assign(HSPLIT_EN, q, hs);
+ idpf_queue_assign(RSC_EN, q, rsc);
+ q->rx_ptype_lkup = adapter->splitq_pt_lkup;
setup_rxq:
- q->desc_count = vport->rxq_desc_count;
- q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->desc_count = rsrc->rxq_desc_count;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
LIBETH_RX_LL_LEN;
- idpf_rxq_set_descids(vport, q);
+ idpf_rxq_set_descids(rsrc, q);
}
}
err_alloc:
if (err)
- idpf_rxq_group_rel(vport);
+ idpf_rxq_group_rel(rsrc);
return err;
}
@@ -1916,28 +1931,30 @@ err_alloc:
/**
* idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
* @vport: vport with qgrps to allocate
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, negative on failure
*/
-static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
+static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 num_txq, num_rxq;
int err;
- idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
+ idpf_vport_calc_numq_per_grp(rsrc, &num_txq, &num_rxq);
- err = idpf_txq_group_alloc(vport, num_txq);
+ err = idpf_txq_group_alloc(vport, rsrc, num_txq);
if (err)
goto err_out;
- err = idpf_rxq_group_alloc(vport, num_rxq);
+ err = idpf_rxq_group_alloc(vport, rsrc, num_rxq);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queue_grp_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(rsrc);
return err;
}
@@ -1945,20 +1962,22 @@ err_out:
/**
* idpf_vport_queues_alloc - Allocate memory for all queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Allocate memory for queues associated with a vport.
*
* Return: 0 on success, negative on failure.
*/
-int idpf_vport_queues_alloc(struct idpf_vport *vport)
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_queue_grp_alloc_all(vport);
+ err = idpf_vport_queue_grp_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_vport_init_fast_path_txqs(vport, rsrc);
if (err)
goto err_out;
@@ -1966,18 +1985,18 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_tx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_rx_desc_alloc_all(vport, rsrc);
if (err)
goto err_out;
return 0;
err_out:
- idpf_vport_queues_rel(vport);
+ idpf_vport_queues_rel(vport, rsrc);
return err;
}
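For illustration of the reworked calling convention: a caller now resolves the per-vport resource group once and threads it through both the allocation and teardown paths. A minimal sketch, assuming the default group vport->dflt_qv_rsrc and the persistent qid_reg_info seen elsewhere in this diff; the wrapper name and error handling are illustrative, not call sites from this patch.

	static int example_vport_queues_setup(struct idpf_vport *vport)
	{
		struct idpf_vport_config *cfg = vport->adapter->vport_config[vport->idx];
		struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
		int err;

		/* allocate queue groups, descriptors and the fast-path txq array */
		err = idpf_vport_queues_alloc(vport, rsrc);
		if (err)
			return err;

		/* program tail registers from the cached queue register chunks */
		err = idpf_queue_reg_init(vport, rsrc, &cfg->qid_reg_info);
		if (err)
			idpf_vport_queues_rel(vport, rsrc);

		return err;
	}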
@@ -3154,7 +3173,7 @@ netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
return idpf_tx_splitq_frame(skb, tx_q);
else
return idpf_tx_singleq_frame(skb, tx_q);
@@ -3780,39 +3799,34 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
/**
* idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
- * @vport: virtual port structure
- *
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc *rsrc)
{
- u16 v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- netif_napi_del(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ netif_napi_del(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc *rsrc)
{
- int v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- napi_disable(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ napi_disable(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_rel - Free memory allocated for interrupt vectors
- * @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for interrupt vectors associated to a vport
*/
-void idpf_vport_intr_rel(struct idpf_vport *vport)
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc)
{
- for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
kfree(q_vector->xsksq);
q_vector->xsksq = NULL;
@@ -3826,8 +3840,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->rx = NULL;
}
- kfree(vport->q_vectors);
- vport->q_vectors = NULL;
+ kfree(rsrc->q_vectors);
+ rsrc->q_vectors = NULL;
}
static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
@@ -3847,21 +3861,22 @@ static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
+static void idpf_vport_intr_rel_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int vector;
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (u16 vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
int irq_num, vidx;
/* free only the irqs that were actually requested */
if (!q_vector)
continue;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
idpf_q_vector_set_napi(q_vector, false);
@@ -3871,16 +3886,15 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
/**
* idpf_vport_intr_dis_irq_all - Disable all interrupt
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct idpf_q_vector *q_vector = vport->q_vectors;
- int q_idx;
+ struct idpf_q_vector *q_vector = rsrc->q_vectors;
- writel(0, vport->noirq_dyn_ctl);
+ writel(0, rsrc->noirq_dyn_ctl);
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
@@ -4026,10 +4040,12 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/**
* idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
const char *drv_name, *if_name, *vec_name;
@@ -4038,11 +4054,11 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
drv_name = dev_driver_string(&adapter->pdev->dev);
if_name = netdev_name(vport->netdev);
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
char *name;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
if (q_vector->num_rxq && q_vector->num_txq)
@@ -4072,9 +4088,9 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
free_q_irqs:
while (--vector >= 0) {
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- kfree(free_irq(irq_num, &vport->q_vectors[vector]));
+ kfree(free_irq(irq_num, &rsrc->q_vectors[vector]));
}
return err;
@@ -4103,15 +4119,16 @@ void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
/**
* idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
bool dynamic;
- int q_idx;
u16 itr;
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *qv = &rsrc->q_vectors[q_idx];
/* Set the initial ITR values */
if (qv->num_txq) {
@@ -4134,19 +4151,21 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
idpf_vport_intr_update_itr_ena_irq(qv);
}
- writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
+ writel(rsrc->noirq_dyn_ctl_ena, rsrc->noirq_dyn_ctl);
}
/**
* idpf_vport_intr_deinit - Release all vector associations for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_intr_deinit(struct idpf_vport *vport)
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_dis_irq_all(vport);
- idpf_vport_intr_napi_dis_all(vport);
- idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_rel_irq(vport);
+ idpf_vport_intr_dis_irq_all(rsrc);
+ idpf_vport_intr_napi_dis_all(rsrc);
+ idpf_vport_intr_napi_del_all(rsrc);
+ idpf_vport_intr_rel_irq(vport, rsrc);
}
/**
@@ -4218,14 +4237,12 @@ static void idpf_init_dim(struct idpf_q_vector *qv)
/**
* idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc *rsrc)
{
- int q_idx;
-
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[q_idx];
idpf_init_dim(q_vector);
napi_enable(&q_vector->napi);
@@ -4355,24 +4372,26 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
/**
* idpf_vport_intr_map_vector_to_qs - Map vectors to queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Mapping for vectors to queues
*/
-static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
+static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ u16 num_txq_grp = rsrc->num_txq_grp - vport->num_xdp_txq;
+ bool split = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- u32 i, qv_idx, q_index;
+ u32 q_index;
- for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
+ for (unsigned int i = 0, qv_idx = 0; i < rsrc->num_rxq_grp; i++) {
u16 num_rxq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- rx_qgrp = &vport->rxq_grps[i];
+ rx_qgrp = &rsrc->rxq_grps[i];
if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
@@ -4385,7 +4404,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
@@ -4395,11 +4414,11 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
}
if (split) {
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
struct idpf_buf_queue *bufq;
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[qv_idx];
+ bufq->q_vector = &rsrc->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
@@ -4409,40 +4428,40 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
qv_idx++;
}
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
+ for (unsigned int i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
- tx_qgrp = &vport->txq_grps[i];
+ tx_qgrp = &rsrc->txq_grps[i];
num_txq = tx_qgrp->num_txq;
for (u32 j = 0; j < num_txq; j++) {
struct idpf_tx_queue *q;
q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->tx[q->q_vector->num_txq++] = q;
}
if (split) {
struct idpf_compl_queue *q = tx_qgrp->complq;
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->complq[q->q_vector->num_complq++] = q;
}
qv_idx++;
}
- for (i = 0; i < vport->num_xdp_txq; i++) {
+ for (unsigned int i = 0; i < vport->num_xdp_txq; i++) {
struct idpf_tx_queue *xdpsq;
struct idpf_q_vector *qv;
- xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ xdpsq = vport->txqs[rsrc->xdp_txq_offset + i];
if (!idpf_queue_has(XSK, xdpsq))
continue;
@@ -4457,12 +4476,14 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
/**
* idpf_vport_intr_init_vec_idx - Initialize the vector indexes
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Initialize vector indexes with values returned over mailbox.
*
* Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
+static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_alloc_vectors *ac;
@@ -4471,10 +4492,10 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
ac = adapter->req_vec_chunks;
if (!ac) {
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = rsrc->q_vector_idxs[i];
- vport->noirq_v_idx = vport->q_vector_idxs[i];
+ rsrc->noirq_v_idx = rsrc->q_vector_idxs[i];
return 0;
}
@@ -4486,10 +4507,10 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = vecids[rsrc->q_vector_idxs[i]];
- vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+ rsrc->noirq_v_idx = vecids[rsrc->q_vector_idxs[i]];
kfree(vecids);
@@ -4499,21 +4520,24 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
/**
* idpf_vport_intr_napi_add_all- Register napi handler for all qvectors
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx, qv_idx;
int irq_num;
+ u16 qv_idx;
- if (idpf_is_queue_model_split(vport->txq_model))
+ if (idpf_is_queue_model_split(rsrc->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
else
napi_poll = idpf_vport_singleq_napi_poll;
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
- qv_idx = vport->q_vector_idxs[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
+
+ qv_idx = rsrc->q_vector_idxs[v_idx];
irq_num = vport->adapter->msix_entries[qv_idx].vector;
netif_napi_add_config(vport->netdev, &q_vector->napi,
@@ -4525,38 +4549,41 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_alloc - Allocate memory for interrupt vectors
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Allocate one q_vector per queue interrupt.
*
* Return: 0 on success, if allocation fails we return -ENOMEM.
*/
-int idpf_vport_intr_alloc(struct idpf_vport *vport)
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
struct idpf_q_coalesce *q_coal;
- u32 complqs_per_vector, v_idx;
+ u32 complqs_per_vector;
u16 idx = vport->idx;
user_config = &vport->adapter->vport_config[idx]->user_config;
- vport->q_vectors = kcalloc(vport->num_q_vectors,
- sizeof(struct idpf_q_vector), GFP_KERNEL);
- if (!vport->q_vectors)
+
+ rsrc->q_vectors = kcalloc(rsrc->num_q_vectors,
+ sizeof(struct idpf_q_vector), GFP_KERNEL);
+ if (!rsrc->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
- bufqs_per_vector = vport->num_bufqs_per_qgrp *
- DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
- complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- q_vector = &vport->q_vectors[v_idx];
+ txqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
+ rsrc->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors);
+ bufqs_per_vector = rsrc->num_bufqs_per_qgrp *
+ DIV_ROUND_UP(rsrc->num_rxq_grp,
+ rsrc->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(rsrc->num_txq_grp,
+ rsrc->num_q_vectors);
+
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ q_vector = &rsrc->q_vectors[v_idx];
q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
@@ -4578,7 +4605,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
if (!q_vector->rx)
goto error;
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
@@ -4593,7 +4620,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
if (!q_vector->complq)
goto error;
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
continue;
q_vector->xsksq = kcalloc(rxqs_per_vector,
@@ -4606,7 +4633,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
return 0;
error:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
return -ENOMEM;
}
@@ -4614,72 +4641,74 @@ error:
/**
* idpf_vport_intr_init - Setup all vectors for the given vport
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success or negative on failure
*/
-int idpf_vport_intr_init(struct idpf_vport *vport)
+int idpf_vport_intr_init(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_intr_init_vec_idx(vport);
+ err = idpf_vport_intr_init_vec_idx(vport, rsrc);
if (err)
return err;
- idpf_vport_intr_map_vector_to_qs(vport);
- idpf_vport_intr_napi_add_all(vport);
+ idpf_vport_intr_map_vector_to_qs(vport, rsrc);
+ idpf_vport_intr_napi_add_all(vport, rsrc);
- err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
- err = idpf_vport_intr_req_irq(vport);
+ err = idpf_vport_intr_req_irq(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_del_all(vport);
+ idpf_vport_intr_napi_del_all(rsrc);
return err;
}
-void idpf_vport_intr_ena(struct idpf_vport *vport)
+void idpf_vport_intr_ena(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_napi_ena_all(vport);
- idpf_vport_intr_ena_irq_all(vport);
+ idpf_vport_intr_napi_ena_all(rsrc);
+ idpf_vport_intr_ena_irq_all(vport, rsrc);
}
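The interrupt path follows the same pattern: the vector state now lives in the resource group, so allocation, init, enable and release all take it explicitly. A minimal sketch under the same assumption (default group vport->dflt_qv_rsrc; the wrapper name is illustrative only):

	static int example_vport_intr_setup(struct idpf_vport *vport)
	{
		struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
		int err;

		/* allocate q_vectors and their per-vector queue arrays */
		err = idpf_vport_intr_alloc(vport, rsrc);
		if (err)
			return err;

		/* map vectors to queues, register NAPI and request IRQs */
		err = idpf_vport_intr_init(vport, rsrc);
		if (err)
			goto free_vectors;

		/* enable NAPI and write the dynamic control registers */
		idpf_vport_intr_ena(vport, rsrc);

		return 0;

	free_vectors:
		idpf_vport_intr_rel(rsrc);

		return err;
	}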
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*
* Return: 0 on success, negative on failure
*/
-int idpf_config_rss(struct idpf_vport *vport)
+int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
{
+ struct idpf_adapter *adapter = vport->adapter;
+ u32 vport_id = vport->vport_id;
int err;
- err = idpf_send_get_set_rss_key_msg(vport, false);
+ err = idpf_send_get_set_rss_key_msg(adapter, rss_data, vport_id, false);
if (err)
return err;
- return idpf_send_get_set_rss_lut_msg(vport, false);
+ return idpf_send_get_set_rss_lut_msg(adapter, rss_data, vport_id, false);
}
/**
* idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
* @vport: virtual port structure
+ * @rss_data: pointer to RSS key and lut info
*/
-void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
+ struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- u16 num_active_rxq = vport->num_rxq;
- struct idpf_rss_data *rss_data;
+ u16 num_active_rxq = vport->dflt_qv_rsrc.num_rxq;
int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
-
for (i = 0; i < rss_data->rss_lut_size; i++)
rss_data->rss_lut[i] = i % num_active_rxq;
}
@@ -4687,15 +4716,12 @@ void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
/**
* idpf_init_rss_lut - Allocate and initialize RSS LUT
* @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*
* Return: 0 on success, negative on failure
*/
-int idpf_init_rss_lut(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_rss_data *rss_data;
-
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
if (!rss_data->rss_lut) {
u32 lut_size;
@@ -4706,21 +4732,17 @@ int idpf_init_rss_lut(struct idpf_vport *vport)
}
/* Fill the default RSS lut values */
- idpf_fill_dflt_rss_lut(vport);
+ idpf_fill_dflt_rss_lut(vport, rss_data);
return 0;
}
/**
* idpf_deinit_rss_lut - Release RSS LUT
- * @vport: virtual port
+ * @rss_data: pointer to RSS key and lut info
*/
-void idpf_deinit_rss_lut(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_rss_data *rss_data;
-
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
kfree(rss_data->rss_lut);
rss_data->rss_lut = NULL;
}
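With the RSS helpers no longer re-deriving the persistent state internally, callers look up struct idpf_rss_data from the vport config and pass it in. A minimal sketch, assuming the user_config.rss_data location shown in the removed lines above; the wrapper name is illustrative only:

	static int example_vport_rss_setup(struct idpf_vport *vport)
	{
		struct idpf_adapter *adapter = vport->adapter;
		struct idpf_rss_data *rss_data;
		int err;

		rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;

		/* allocate the LUT (if needed) and fill it with default values */
		err = idpf_init_rss_lut(vport, rss_data);
		if (err)
			return err;

		/* push key and LUT to the device over virtchnl */
		err = idpf_config_rss(vport, rss_data);
		if (err)
			idpf_deinit_rss_lut(rss_data);

		return err;
	}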
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 423cc9486dce..4be5b3b6d3ed 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -283,6 +283,7 @@ struct idpf_ptype_state {
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_RSC_EN: enable Receive Side Coalescing on Rx (splitq)
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
* queue
@@ -297,6 +298,7 @@ enum idpf_queue_flags_t {
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_CRC_EN,
+ __IDPF_Q_RSC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
__IDPF_Q_NOIRQ,
@@ -925,6 +927,7 @@ struct idpf_bufq_set {
* @singleq.rxqs: Array of RX queue pointers
* @splitq: Struct with split queue related members
* @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.num_bufq_sets: Number of buffer queue sets
* @splitq.rxq_sets: Array of RX queue sets
* @splitq.bufq_sets: Buffer queue set pointer
*
@@ -942,6 +945,7 @@ struct idpf_rxq_group {
} singleq;
struct {
u16 num_rxq_sets;
+ u16 num_bufq_sets;
struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
struct idpf_bufq_set *bufq_sets;
} splitq;
@@ -1072,25 +1076,35 @@ static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
- struct virtchnl2_create_vport *vport_msg);
-void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
struct virtchnl2_create_vport *vport_msg,
struct idpf_vport_max_q *max_q);
-void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
-int idpf_vport_queues_alloc(struct idpf_vport *vport);
-void idpf_vport_queues_rel(struct idpf_vport *vport);
-void idpf_vport_intr_rel(struct idpf_vport *vport);
-int idpf_vport_intr_alloc(struct idpf_vport *vport);
+void idpf_vport_calc_num_q_groups(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_queues_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_queues_rel(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
-void idpf_vport_intr_deinit(struct idpf_vport *vport);
-int idpf_vport_intr_init(struct idpf_vport *vport);
-void idpf_vport_intr_ena(struct idpf_vport *vport);
-void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
-int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss_lut(struct idpf_vport *vport);
-void idpf_deinit_rss_lut(struct idpf_vport *vport);
-int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_ena(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport,
+ struct idpf_rss_data *rss_data);
+int idpf_config_rss(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
+int idpf_init_rss_lut(struct idpf_vport *vport, struct idpf_rss_data *rss_data);
+void idpf_deinit_rss_lut(struct idpf_rss_data *rss_data);
+int idpf_rx_bufs_init_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
u32 q_num);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 4cc58c83688c..7527b967e2e7 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -69,11 +69,13 @@ static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter)
/**
* idpf_vf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_vf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
@@ -85,15 +87,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ num_regs = idpf_get_reg_intr_vecs(adapter, reg_vals);
if (num_regs < num_vecs) {
err = -EINVAL;
goto free_reg_vals;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
@@ -122,12 +124,12 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
@@ -156,7 +158,8 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
+ idpf_send_mb_msg(adapter, adapter->hw.asq,
+ VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index cb702eac86c8..d46affaf7185 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -117,13 +117,15 @@ static void idpf_recv_event_msg(struct idpf_adapter *adapter,
/**
* idpf_mb_clean - Reclaim the send mailbox queue entries
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @asq: send control queue info
*
* Reclaim the send mailbox queue entries to be used to send further messages
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-static int idpf_mb_clean(struct idpf_adapter *adapter)
+static int idpf_mb_clean(struct idpf_adapter *adapter,
+ struct idpf_ctlq_info *asq)
{
u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
struct idpf_ctlq_msg **q_msg;
@@ -134,7 +136,7 @@ static int idpf_mb_clean(struct idpf_adapter *adapter)
if (!q_msg)
return -ENOMEM;
- err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
+ err = idpf_ctlq_clean_sq(asq, &num_q_msg, q_msg);
if (err)
goto err_kfree;
@@ -206,7 +208,8 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
/**
* idpf_send_mb_msg - Send message over mailbox
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @asq: control queue to send message to
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
@@ -214,10 +217,10 @@ static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op,
*
* Will prepare the control queue message and initiate the send API
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg, u16 cookie)
+int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
+ u32 op, u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -231,7 +234,7 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
if (idpf_is_reset_detected(adapter))
return 0;
- err = idpf_mb_clean(adapter);
+ err = idpf_mb_clean(adapter, asq);
if (err)
return err;
@@ -267,7 +270,7 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
ctlq_msg->ctx.indirect.payload = dma_mem;
ctlq_msg->ctx.sw_cookie.data = cookie;
- err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
+ err = idpf_ctlq_send(&adapter->hw, asq, 1, ctlq_msg);
if (err)
goto send_error;
@@ -463,7 +466,7 @@ ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- retval = idpf_send_mb_msg(adapter, params->vc_op,
+ retval = idpf_send_mb_msg(adapter, adapter->hw.asq, params->vc_op,
send_buf->iov_len, send_buf->iov_base,
cookie);
if (retval) {
@@ -662,12 +665,14 @@ out_unlock:
/**
* idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
+ * @adapter: driver specific private structure
+ * @arq: control queue to receive message from
+ *
+ * Will receive the control queue message and post the receive buffer.
*
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * Return: 0 on success and negative on failure.
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq)
{
struct idpf_ctlq_msg ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -679,7 +684,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
* actually received on num_recv.
*/
num_recv = 1;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ err = idpf_ctlq_recv(arq, &num_recv, &ctlq_msg);
if (err || !num_recv)
break;
@@ -695,8 +700,7 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
else
err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
- post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
- adapter->hw.arq,
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, arq,
&num_recv, &dma_mem);
/* If post failed clear the only buffer we supplied */
@@ -717,9 +721,8 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
}
struct idpf_chunked_msg_params {
- u32 (*prepare_msg)(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num);
+ u32 (*prepare_msg)(u32 vport_id, void *buf,
+ const void *pos, u32 num);
const void *chunks;
u32 num_chunks;
@@ -728,9 +731,12 @@ struct idpf_chunked_msg_params {
u32 config_sz;
u32 vc_op;
+ u32 vport_id;
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *qv_rsrc,
+ u32 vport_id, u32 num)
{
struct idpf_queue_set *qp;
@@ -738,7 +744,9 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
if (!qp)
return NULL;
- qp->vport = vport;
+ qp->adapter = adapter;
+ qp->qv_rsrc = qv_rsrc;
+ qp->vport_id = vport_id;
qp->num = num;
return qp;
@@ -746,7 +754,7 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
/**
* idpf_send_chunked_msg - send VC message consisting of chunks
- * @vport: virtual port data structure
+ * @adapter: Driver specific private structure
* @params: message params
*
* Helper function for preparing a message describing queues to be enabled
@@ -754,7 +762,7 @@ struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
*
* Return: the total size of the prepared message.
*/
-static int idpf_send_chunked_msg(struct idpf_vport *vport,
+static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
const struct idpf_chunked_msg_params *params)
{
struct idpf_vc_xn_params xn_params = {
@@ -765,6 +773,7 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
u32 num_chunks, num_msgs, buf_sz;
void *buf __free(kfree) = NULL;
u32 totqs = params->num_chunks;
+ u32 vid = params->vport_id;
num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
params->chunk_sz), totqs);
@@ -783,10 +792,10 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
memset(buf, 0, buf_sz);
xn_params.send_buf.iov_len = buf_sz;
- if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
return -EINVAL;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -809,6 +818,7 @@ static int idpf_send_chunked_msg(struct idpf_vport *vport,
*/
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
+ struct net_device *netdev;
struct idpf_tx_queue *txq;
bool markers_rcvd = true;
@@ -817,6 +827,8 @@ static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
case VIRTCHNL2_QUEUE_TYPE_TX:
txq = qs->qs[i].txq;
+ netdev = txq->netdev;
+
idpf_queue_set(SW_MARKER, txq);
idpf_wait_for_sw_marker_completion(txq);
markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
@@ -827,7 +839,7 @@ static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
}
if (!markers_rcvd) {
- netdev_warn(qs->vport->netdev,
+ netdev_warn(netdev,
"Failed to receive marker packets\n");
return -ETIMEDOUT;
}
@@ -845,7 +857,8 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, vport->num_txq);
if (!qs)
return -ENOMEM;
@@ -1263,13 +1276,52 @@ static void idpf_init_avail_queues(struct idpf_adapter *adapter)
}
/**
+ * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
+ * @vport_config: persistent vport structure to store the queue register info
+ * @schunks: source chunks to copy data from
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int
+idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
+ struct virtchnl2_queue_reg_chunks *schunks)
+{
+ struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
+ u16 num_chunks = le16_to_cpu(schunks->num_chunks);
+
+ kfree(q_info->queue_chunks);
+
+ q_info->queue_chunks = kcalloc(num_chunks, sizeof(*q_info->queue_chunks),
+ GFP_KERNEL);
+ if (!q_info->queue_chunks) {
+ q_info->num_chunks = 0;
+ return -ENOMEM;
+ }
+
+ q_info->num_chunks = num_chunks;
+
+ for (u16 i = 0; i < num_chunks; i++) {
+ struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
+ struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];
+
+ dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
+ dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
+ dchunk->type = le32_to_cpu(schunk->type);
+ dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
+ dchunk->num_queues = le32_to_cpu(schunk->num_queues);
+ }
+
+ return 0;
+}
+
+/**
* idpf_get_reg_intr_vecs - Get vector queue register offset
- * @vport: virtual port structure
+ * @adapter: adapter structure to get the vector chunks
* @reg_vals: Register offsets to store in
*
- * Returns number of registers that got populated
+ * Return: number of registers that got populated
*/
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
struct idpf_vec_regs *reg_vals)
{
struct virtchnl2_vector_chunks *chunks;
@@ -1277,7 +1329,7 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
u16 num_vchunks, num_vec;
int num_regs = 0, i, j;
- chunks = &vport->adapter->req_vec_chunks->vchunks;
+ chunks = &adapter->req_vec_chunks->vchunks;
num_vchunks = le16_to_cpu(chunks->num_vchunks);
for (j = 0; j < num_vchunks; j++) {
@@ -1322,25 +1374,25 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
* are filled.
*/
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
int reg_filled = 0, i;
u32 reg_val;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
u16 num_q;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- reg_val = le64_to_cpu(chunk->qtail_reg_start);
+ num_q = chunk->num_queues;
+ reg_val = chunk->qtail_reg_start;
for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
reg_vals[reg_filled++] = reg_val;
- reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
+ reg_val += chunk->qtail_reg_spacing;
}
}
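idpf_vport_get_q_reg() above expands each matching chunk's (qtail_reg_start, qtail_reg_spacing, num_queues) triple into one tail register offset per queue, capped by the caller's buffer. A standalone sketch of that expansion with assumed example values:

#include <stdint.h>
#include <stdio.h>

static int fill_tail_regs(uint32_t *regs, int max_regs,
			  uint64_t start, uint32_t spacing, uint32_t count)
{
	int filled = 0;

	/* one tail register per queue, spaced at a fixed stride */
	for (uint32_t i = 0; i < count && filled < max_regs; i++)
		regs[filled++] = start + i * spacing;

	return filled;
}

int main(void)
{
	uint32_t regs[8];
	int n = fill_tail_regs(regs, 8, 0x2000, 0x1000, 4);

	for (int i = 0; i < n; i++)
		printf("queue %d tail reg 0x%x\n", i, regs[i]);
	return 0;
}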
@@ -1350,13 +1402,15 @@ static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
/**
* __idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
* @reg_vals: registers we are initializing
* @num_regs: how many registers there are in total
* @q_type: queue model
*
* Return number of queues that are initialized
*/
-static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
+static int __idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
@@ -1364,8 +1418,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
tx_qgrp->txqs[j]->tail =
@@ -1373,8 +1427,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
@@ -1387,9 +1441,9 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
struct idpf_buf_queue *q;
@@ -1410,15 +1464,15 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
/**
* idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
+ * @chunks: queue registers received over mailbox
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_queue_reg_init(struct idpf_vport *vport)
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_regs, ret = 0;
u32 *reg_vals;
@@ -1427,28 +1481,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
if (!reg_vals)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
/* Initialize Tx queue tail register address */
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_regs < vport->num_txq) {
+ if (num_regs < rsrc->num_txq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1456,18 +1500,18 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
/* Initialize Rx/buffer queue tail register address based on Rx queue
* model
*/
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
chunks);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- if (num_regs < vport->num_bufq) {
+ if (num_regs < rsrc->num_bufq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1475,14 +1519,14 @@ int idpf_queue_reg_init(struct idpf_vport *vport)
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
- num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ num_regs = __idpf_queue_reg_init(vport, rsrc, reg_vals, num_regs,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_regs < vport->num_rxq) {
+ if (num_regs < rsrc->num_rxq) {
ret = -EINVAL;
goto free_reg_vals;
}
@@ -1581,6 +1625,7 @@ free_vport_params:
*/
int idpf_check_supported_desc_ids(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
u64 rx_desc_ids, tx_desc_ids;
@@ -1597,17 +1642,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (idpf_is_queue_model_split(vport->rxq_model)) {
+ if (idpf_is_queue_model_split(rsrc->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
}
} else {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
- vport->base_rxd = true;
+ rsrc->base_rxd = true;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1620,96 +1665,96 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
/**
* idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send virtchnl destroy vport message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
+int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_enable_vport_msg - Send virtchnl enable vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send enable vport virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_enable_vport_msg(struct idpf_vport *vport)
+int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_disable_vport_msg - Send virtchnl disable vport message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Send disable vport virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_disable_vport_msg(struct idpf_vport *vport)
+int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
ssize_t reply_sz;
- v_id.vport_id = cpu_to_le32(vport->vport_id);
+ v_id.vport_id = cpu_to_le32(vport_id);
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Tx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_txq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_tx_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
- if (!idpf_is_queue_model_split(vport->txq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->txq_model)) {
qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
return;
}
@@ -1731,18 +1776,18 @@ static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_complq_config_chunk - fill chunk describing the completion queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: completion queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_complq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_compl_queue *q,
struct virtchnl2_txq_info *qi)
{
u32 val;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->txq_model);
+ qi->model = cpu_to_le16(rsrc->txq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1757,7 +1802,7 @@ static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the tx queue
* @num_chunks: number of chunks in the message
@@ -1767,13 +1812,12 @@ static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_tx_queues *ctq = buf;
- ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->vport_id = cpu_to_le32(vport_id);
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
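The prepare callback above only writes the fixed header (vport ID and chunk count) and copies the chunk slice it is handed; the generic chunked-message sender decides how many chunks fit in one mailbox buffer and calls it repeatedly. A standalone sketch of that split-and-prepare pattern, with simplified stand-in types and an assumed 40-byte buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BUF_LEN	40	/* assumed mailbox buffer size for the sketch */

struct q_chunk { uint32_t queue_id; uint32_t ring_len; };

struct cfg_msg {		/* header followed by a flexible chunk array */
	uint32_t vport_id;
	uint16_t num_qinfo;
	struct q_chunk qinfo[];
};

static uint32_t prepare_cfg_msg(uint32_t vport_id, void *buf,
				const struct q_chunk *pos, uint32_t num)
{
	struct cfg_msg *msg = buf;

	msg->vport_id = vport_id;
	msg->num_qinfo = num;
	memcpy(msg->qinfo, pos, num * sizeof(*msg->qinfo));

	return sizeof(*msg) + num * sizeof(*msg->qinfo);
}

int main(void)
{
	union {
		struct cfg_msg msg;
		uint8_t raw[MAX_BUF_LEN];
	} buf;
	struct q_chunk chunks[10];
	uint32_t per_msg = (MAX_BUF_LEN - sizeof(struct cfg_msg)) /
			   sizeof(struct q_chunk);

	for (uint32_t i = 0; i < 10; i++)
		chunks[i] = (struct q_chunk){ .queue_id = i, .ring_len = 512 };

	for (uint32_t sent = 0; sent < 10; sent += per_msg) {
		uint32_t n = 10 - sent < per_msg ? 10 - sent : per_msg;
		uint32_t len = prepare_cfg_msg(1, &buf.msg, &chunks[sent], n);

		printf("message with %u chunks, %u bytes\n", n, len);
		/* a real driver would post the buffer to the mailbox here */
	}
	return 0;
}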
@@ -1794,6 +1838,7 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
.prepare_msg = idpf_prepare_cfg_txqs_msg,
.config_sz = sizeof(struct virtchnl2_config_tx_queues),
@@ -1808,43 +1853,47 @@ static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
- idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ idpf_fill_txq_config_chunk(qs->qv_rsrc, qs->qs[i].txq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
- idpf_fill_complq_config_chunk(qs->vport,
+ idpf_fill_complq_config_chunk(qs->qv_rsrc,
qs->qs[i].complq,
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_tx_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_txq + vport->num_complq;
+ u32 totqs = rsrc->num_txq + rsrc->num_complq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[k++].txq = tx_qgrp->txqs[j];
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (idpf_is_queue_model_split(rsrc->txq_model)) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
qs->qs[k++].complq = tx_qgrp->complq;
}
@@ -1859,28 +1908,28 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
/**
* idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: Rx queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_rxq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
struct idpf_rx_queue *q,
struct virtchnl2_rxq_info *qi)
{
const struct idpf_bufq_set *sets;
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!idpf_is_queue_model_split(rsrc->rxq_model)) {
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->desc_ids = cpu_to_le64(q->rxdids);
@@ -1897,7 +1946,7 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ if (rsrc->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi->bufq2_ena = IDPF_BUFQ2_ENA;
qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
}
@@ -1914,16 +1963,16 @@ static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
- * @vport: virtual port data structure
+ * @rsrc: pointer to queue and vector resources
* @q: buffer queue to be inserted into VC chunk
* @qi: pointer to the buffer containing the VC chunk
*/
-static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+static void idpf_fill_bufq_config_chunk(const struct idpf_q_vec_rsrc *rsrc,
const struct idpf_buf_queue *q,
struct virtchnl2_rxq_info *qi)
{
qi->queue_id = cpu_to_le32(q->q_id);
- qi->model = cpu_to_le16(vport->rxq_model);
+ qi->model = cpu_to_le16(rsrc->rxq_model);
qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi->ring_len = cpu_to_le16(q->desc_count);
qi->dma_ring_addr = cpu_to_le64(q->dma);
@@ -1931,7 +1980,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ if (idpf_queue_has(RSC_EN, q))
qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
if (idpf_queue_has(HSPLIT_EN, q)) {
@@ -1942,7 +1991,7 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
/**
* idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the rx queue
* @num_chunks: number of chunks in the message
@@ -1952,13 +2001,12 @@ static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_rx_queues *crq = buf;
- crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->vport_id = cpu_to_le32(vport_id);
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
@@ -1979,6 +2027,7 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
.prepare_msg = idpf_prepare_cfg_rxqs_msg,
.config_sz = sizeof(struct virtchnl2_config_rx_queues),
@@ -1993,36 +2042,40 @@ static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
for (u32 i = 0; i < qs->num; i++) {
if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
- idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ idpf_fill_rxq_config_chunk(qs->qv_rsrc, qs->qs[i].rxq,
&qi[params.num_chunks++]);
else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
- idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ idpf_fill_bufq_config_chunk(qs->qv_rsrc, qs->qs[i].bufq,
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static int idpf_send_config_rx_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, totqs);
if (!qs)
return -ENOMEM;
/* Populate the queue info buffer with all queue context info */
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (!splitq) {
@@ -2030,7 +2083,7 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto rxq;
}
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2059,7 +2112,7 @@ rxq:
/**
* idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
* queues
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the queue
* @num_chunks: number of chunks in the message
@@ -2069,13 +2122,12 @@ rxq:
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_del_ena_dis_queues *eq = buf;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
memcpy(eq->chunks.chunks, pos,
num_chunks * sizeof(*eq->chunks.chunks));
@@ -2100,6 +2152,7 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
{
struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
VIRTCHNL2_OP_DISABLE_QUEUES,
.prepare_msg = idpf_prepare_ena_dis_qs_msg,
@@ -2141,34 +2194,38 @@ static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
qc[i].start_queue_id = cpu_to_le32(qid);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
* message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @en: whether to enable or disable queues
*
* Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
+static int idpf_send_ena_dis_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, bool en)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
u32 num_txq, num_q, k = 0;
bool split;
- num_txq = vport->num_txq + vport->num_complq;
- num_q = num_txq + vport->num_rxq + vport->num_bufq;
+ num_txq = rsrc->num_txq + rsrc->num_complq;
+ num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
if (!qs)
return -ENOMEM;
- split = idpf_is_queue_model_split(vport->txq_model);
+ split = idpf_is_queue_model_split(rsrc->txq_model);
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2185,10 +2242,10 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (k != num_txq)
return -EINVAL;
- split = idpf_is_queue_model_split(vport->rxq_model);
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (split)
@@ -2209,7 +2266,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
if (!split)
continue;
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
@@ -2224,7 +2281,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
/**
* idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
* queue set to the interrupt vector
- * @vport: virtual port data structure
+ * @vport_id: ID of virtual port queues are associated with
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the vector mapping
* @num_chunks: number of chunks in the message
@@ -2235,13 +2292,12 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
* Return: the total size of the prepared message.
*/
static u32
-idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num_chunks)
+idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf,
+ const void *pos, u32 num_chunks)
{
struct virtchnl2_queue_vector_maps *vqvm = buf;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->vport_id = cpu_to_le32(vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
@@ -2262,6 +2318,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
{
struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
.prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
@@ -2277,7 +2334,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
params.chunks = vqv;
- split = idpf_is_queue_model_split(qs->vport->txq_model);
+ split = idpf_is_queue_model_split(qs->qv_rsrc->txq_model);
for (u32 i = 0; i < qs->num; i++) {
const struct idpf_queue_ptr *q = &qs->qs[i];
@@ -2299,7 +2356,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
v_idx = vec->v_idx;
itr_idx = vec->rx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_0;
}
break;
@@ -2319,7 +2376,7 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
v_idx = vec->v_idx;
itr_idx = vec->tx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_1;
}
break;
@@ -2332,29 +2389,33 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
* idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
* vector message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
* @map: true for map and false for unmap
*
* Return: 0 on success, -errno on failure.
*/
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, bool map)
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 num_q = rsrc->num_txq + rsrc->num_rxq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(adapter, rsrc, vport_id, num_q);
if (!qs)
return -ENOMEM;
- for (u32 i = 0; i < vport->num_txq_grp; i++) {
- const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (u32 i = 0; i < rsrc->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
@@ -2362,14 +2423,14 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (k != vport->num_txq)
+ if (k != rsrc->num_txq)
return -EINVAL;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -2377,7 +2438,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
for (u32 j = 0; j < num_rxq; j++) {
qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
qs->qs[k++].rxq =
&rx_qgrp->splitq.rxq_sets[j]->rxq;
else
@@ -2453,7 +2514,9 @@ int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, true);
+ return idpf_send_ena_dis_queues_msg(vport->adapter,
+ &vport->dflt_qv_rsrc,
+ vport->vport_id, true);
}
/**
@@ -2467,7 +2530,9 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err;
- err = idpf_send_ena_dis_queues_msg(vport, false);
+ err = idpf_send_ena_dis_queues_msg(vport->adapter,
+ &vport->dflt_qv_rsrc,
+ vport->vport_id, false);
if (err)
return err;
@@ -2482,104 +2547,96 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* @num_chunks: number of chunks to copy
*/
static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
- struct virtchnl2_queue_reg_chunk *schunks,
+ struct idpf_queue_id_reg_chunk *schunks,
u16 num_chunks)
{
u16 i;
for (i = 0; i < num_chunks; i++) {
- dchunks[i].type = schunks[i].type;
- dchunks[i].start_queue_id = schunks[i].start_queue_id;
- dchunks[i].num_queues = schunks[i].num_queues;
+ dchunks[i].type = cpu_to_le32(schunks[i].type);
+ dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id);
+ dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues);
}
}
/**
* idpf_send_delete_queues_msg - send delete queues virtchnl message
- * @vport: Virtual port private data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @chunks: queue ids received over mailbox
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Will send delete queues virtchnl message. Return 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_delete_queues_msg(struct idpf_vport *vport)
+int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_queue_id_reg_info *chunks,
+ u32 vport_id)
{
struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
u16 num_chunks;
int buf_size;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- chunks = &vport_config->req_qs_chunks->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
- num_chunks = le16_to_cpu(chunks->num_chunks);
+ num_chunks = chunks->num_chunks;
buf_size = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_size, GFP_KERNEL);
if (!eq)
return -ENOMEM;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
+ idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
* idpf_send_config_queues_msg - Send config queues virtchnl message
- * @vport: Virtual port private data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Will send config queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_config_queues_msg(struct idpf_vport *vport)
+int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
int err;
- err = idpf_send_config_tx_queues_msg(vport);
+ err = idpf_send_config_tx_queues_msg(adapter, rsrc, vport_id);
if (err)
return err;
- return idpf_send_config_rx_queues_msg(vport);
+ return idpf_send_config_rx_queues_msg(adapter, rsrc, vport_id);
}
/**
* idpf_send_add_queues_msg - Send virtchnl add queues message
- * @vport: Virtual port private data structure
- * @num_tx_q: number of transmit queues
- * @num_complq: number of transmit completion queues
- * @num_rx_q: number of receive queues
- * @num_rx_bufq: number of receive buffer queues
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_config: persistent vport structure to store the queue chunk info
+ * @rsrc: pointer to queue and vector resources
+ * @vport_id: vport identifier used while preparing the virtchnl message
*
- * Returns 0 on success, negative on failure. vport _MUST_ be const here as
- * we should not change any fields within vport itself in this function.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
+int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id)
{
struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
struct virtchnl2_add_queues aq = {};
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
int size;
@@ -2587,15 +2644,11 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
if (!vc_msg)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
-
- aq.vport_id = cpu_to_le32(vport->vport_id);
- aq.num_tx_q = cpu_to_le16(num_tx_q);
- aq.num_tx_complq = cpu_to_le16(num_complq);
- aq.num_rx_q = cpu_to_le16(num_rx_q);
- aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
+ aq.vport_id = cpu_to_le32(vport_id);
+ aq.num_tx_q = cpu_to_le16(rsrc->num_txq);
+ aq.num_tx_complq = cpu_to_le16(rsrc->num_complq);
+ aq.num_rx_q = cpu_to_le16(rsrc->num_rxq);
+ aq.num_rx_bufq = cpu_to_le16(rsrc->num_bufq);
xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
@@ -2603,15 +2656,15 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
xn_params.send_buf.iov_len = sizeof(aq);
xn_params.recv_buf.iov_base = vc_msg;
xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
/* compare vc_msg num queues with vport num queues */
- if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
- le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
- le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ if (le16_to_cpu(vc_msg->num_tx_q) != rsrc->num_txq ||
+ le16_to_cpu(vc_msg->num_rx_q) != rsrc->num_rxq ||
+ le16_to_cpu(vc_msg->num_tx_complq) != rsrc->num_complq ||
+ le16_to_cpu(vc_msg->num_rx_bufq) != rsrc->num_bufq)
return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
@@ -2619,11 +2672,7 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
if (reply_sz < size)
return -EIO;
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks)
- return -ENOMEM;
-
- return 0;
+ return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);
}
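The add-queues path above trusts the reply only after checking that the granted queue counts match the request and that the reply is large enough to hold the advertised chunk array. A standalone sketch of that validation, assuming simplified stand-in types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reply_chunk { uint32_t start_qid; uint32_t num_queues; };

struct add_q_reply {
	uint16_t num_tx_q;
	uint16_t num_rx_q;
	uint16_t num_chunks;
	struct reply_chunk chunks[];
};

static int check_reply(const struct add_q_reply *r, size_t reply_sz,
		       uint16_t want_tx, uint16_t want_rx)
{
	size_t need;

	if (reply_sz < sizeof(*r))
		return -1;		/* truncated header */
	if (r->num_tx_q != want_tx || r->num_rx_q != want_rx)
		return -1;		/* device granted something else */

	need = sizeof(*r) + (size_t)r->num_chunks * sizeof(r->chunks[0]);
	if (reply_sz < need)
		return -1;		/* chunk array truncated */

	return 0;
}

int main(void)
{
	size_t sz = sizeof(struct add_q_reply) + 2 * sizeof(struct reply_chunk);
	struct add_q_reply *r = calloc(1, sz);

	if (!r)
		return 1;
	r->num_tx_q = 4;
	r->num_rx_q = 4;
	r->num_chunks = 2;
	r->chunks[0] = (struct reply_chunk){ 64, 4 };
	r->chunks[1] = (struct reply_chunk){ 128, 4 };

	printf("reply %s\n", check_reply(r, sz, 4, 4) ? "rejected" : "ok");
	free(r);
	return 0;
}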
/**
@@ -2746,13 +2795,14 @@ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
/**
* idpf_send_get_stats_msg - Send virtchnl get statistics message
- * @vport: vport to get stats for
+ * @np: netdev private structure
+ * @port_stats: structure to store the vport statistics
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_stats_msg(struct idpf_vport *vport)
+int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
+ struct idpf_port_stats *port_stats)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
struct virtchnl2_vport_stats stats_msg = {};
struct idpf_vc_xn_params xn_params = {};
@@ -2763,7 +2813,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
if (!test_bit(IDPF_VPORT_UP, np->state))
return 0;
- stats_msg.vport_id = cpu_to_le32(vport->vport_id);
+ stats_msg.vport_id = cpu_to_le32(np->vport_id);
xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
xn_params.send_buf.iov_base = &stats_msg;
@@ -2771,7 +2821,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
xn_params.recv_buf = xn_params.send_buf;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(np->adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (reply_sz < sizeof(stats_msg))
@@ -2792,7 +2842,7 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
- vport->port_stats.vport_stats = stats_msg;
+ port_stats->vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
@@ -2800,36 +2850,43 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
}
/**
- * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
- * @vport: virtual port data structure
- * @get: flag to set or get rss look up table
+ * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set RSS lut message
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rss_data: pointer to RSS key and lut info
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @get: flag to set or get RSS look up table
*
* When rxhash is disabled, RSS LUT will be configured with zeros. If rxhash
* is enabled, the LUT values stored in the driver's soft copy will be used to
* set up the HW.
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
+int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get)
{
struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_rss_data *rss_data;
int buf_size, lut_buf_size;
+ struct idpf_vport *vport;
ssize_t reply_sz;
bool rxhash_ena;
int i;
- rss_data =
- &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+ vport = idpf_vid_to_vport(adapter, vport_id);
+ if (!vport)
+ return -EINVAL;
+
rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
- rl->vport_id = cpu_to_le32(vport->vport_id);
+ rl->vport_id = cpu_to_le32(vport_id);
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = rl;
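A standalone sketch of the LUT policy described in the comment above (zeros when rxhash is off, the driver's soft copy otherwise), assuming a simple round-robin soft copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LUT_SIZE	16	/* assumed LUT size for the sketch */

static void fill_lut_msg(uint32_t *msg_lut, const uint32_t *soft_lut,
			 unsigned int lut_size, int rxhash_ena)
{
	if (rxhash_ena)
		memcpy(msg_lut, soft_lut, lut_size * sizeof(*msg_lut));
	else
		memset(msg_lut, 0, lut_size * sizeof(*msg_lut));
}

int main(void)
{
	uint32_t soft[LUT_SIZE], msg[LUT_SIZE];
	unsigned int num_rxq = 4;

	for (unsigned int i = 0; i < LUT_SIZE; i++)
		soft[i] = i % num_rxq;		/* driver's soft copy */

	fill_lut_msg(msg, soft, LUT_SIZE, 1);
	printf("lut[5] with rxhash on: %u\n", msg[5]);

	fill_lut_msg(msg, soft, LUT_SIZE, 0);
	printf("lut[5] with rxhash off: %u\n", msg[5]);
	return 0;
}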
@@ -2850,7 +2907,7 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (!get)
@@ -2882,30 +2939,31 @@ do_memcpy:
}
/**
- * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
- * @vport: virtual port data structure
- * @get: flag to set or get rss look up table
+ * idpf_send_get_set_rss_key_msg - Send virtchnl get or set RSS key message
+ * @adapter: adapter pointer used to send virtchnl message
+ * @rss_data: pointer to RSS key and lut info
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @get: flag to set or get the RSS key
*
- * Returns 0 on success, negative on failure
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
+int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get)
{
struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
struct virtchnl2_rss_key *rk __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_rss_data *rss_data;
ssize_t reply_sz;
int i, buf_size;
u16 key_size;
- rss_data =
- &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
- rk->vport_id = cpu_to_le32(vport->vport_id);
+ rk->vport_id = cpu_to_le32(vport_id);
xn_params.send_buf.iov_base = rk;
xn_params.send_buf.iov_len = buf_size;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
@@ -2925,7 +2983,7 @@ int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
if (!get)
@@ -3011,33 +3069,142 @@ static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
}
/**
+ * idpf_parse_protocol_ids - parse protocol IDs for a given packet type
+ * @ptype: packet type to parse
+ * @rx_pt: store the parsed packet type info into
+ */
+static void idpf_parse_protocol_ids(struct virtchnl2_ptype *ptype,
+ struct libeth_rx_pt *rx_pt)
+{
+ struct idpf_ptype_state pstate = {};
+
+ for (u32 j = 0; j < ptype->proto_id_count; j++) {
+ u16 id = le16_to_cpu(ptype->proto_id[j]);
+
+ switch (id) {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) {
+ rx_pt->tunnel_type =
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ rx_pt->outer_ip = LIBETH_RX_PT_OUTER_L2;
+ if (pstate.tunnel_state == IDPF_TUN_IP_GRE) {
+ rx_pt->tunnel_type =
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, true, false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, false, false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, true, true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ idpf_fill_ptype_lookup(rx_pt, &pstate, false, true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ rx_pt->inner_prot = LIBETH_RX_PT_INNER_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ rx_pt->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
- * @vport: virtual port data structure
+ * @adapter: driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
+static int idpf_send_get_rx_ptype_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
- int max_ptype, ptypes_recvd = 0, ptype_offset;
- struct idpf_adapter *adapter = vport->adapter;
+ struct libeth_rx_pt *singleq_pt_lkup __free(kfree) = NULL;
+ struct libeth_rx_pt *splitq_pt_lkup __free(kfree) = NULL;
struct idpf_vc_xn_params xn_params = {};
+ int ptypes_recvd = 0, ptype_offset;
+ u32 max_ptype = IDPF_RX_MAX_PTYPE;
u16 next_ptype_id = 0;
ssize_t reply_sz;
- int i, j, k;
- if (vport->rx_ptype_lkup)
- return 0;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- max_ptype = IDPF_RX_MAX_PTYPE;
- else
- max_ptype = IDPF_RX_MAX_BASE_PTYPE;
+ singleq_pt_lkup = kcalloc(IDPF_RX_MAX_BASE_PTYPE,
+ sizeof(*singleq_pt_lkup), GFP_KERNEL);
+ if (!singleq_pt_lkup)
+ return -ENOMEM;
- ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
- if (!ptype_lkup)
+ splitq_pt_lkup = kcalloc(max_ptype, sizeof(*splitq_pt_lkup), GFP_KERNEL);
+ if (!splitq_pt_lkup)
return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
@@ -3078,175 +3245,85 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
- for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
- struct idpf_ptype_state pstate = { };
+ for (u16 i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
+ struct libeth_rx_pt rx_pt = {};
struct virtchnl2_ptype *ptype;
- u16 id;
+ u16 pt_10, pt_8;
ptype = (struct virtchnl2_ptype *)
((u8 *)ptype_info + ptype_offset);
+ pt_10 = le16_to_cpu(ptype->ptype_id_10);
+ pt_8 = ptype->ptype_id_8;
+
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
/* 0xFFFF indicates end of ptypes */
- if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID)
+ if (pt_10 == IDPF_INVALID_PTYPE_ID)
goto out;
+ if (pt_10 >= max_ptype)
+ return -EINVAL;
- if (idpf_is_queue_model_split(vport->rxq_model))
- k = le16_to_cpu(ptype->ptype_id_10);
- else
- k = ptype->ptype_id_8;
-
- for (j = 0; j < ptype->proto_id_count; j++) {
- id = le16_to_cpu(ptype->proto_id[j]);
- switch (id) {
- case VIRTCHNL2_PROTO_HDR_GRE:
- if (pstate.tunnel_state ==
- IDPF_PTYPE_TUNNEL_IP) {
- ptype_lkup[k].tunnel_type =
- LIBETH_RX_PT_TUNNEL_IP_GRENAT;
- pstate.tunnel_state |=
- IDPF_PTYPE_TUNNEL_IP_GRENAT;
- }
- break;
- case VIRTCHNL2_PROTO_HDR_MAC:
- ptype_lkup[k].outer_ip =
- LIBETH_RX_PT_OUTER_L2;
- if (pstate.tunnel_state ==
- IDPF_TUN_IP_GRE) {
- ptype_lkup[k].tunnel_type =
- LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
- pstate.tunnel_state |=
- IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
- }
- break;
- case VIRTCHNL2_PROTO_HDR_IPV4:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, true,
- false);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV6:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, false,
- false);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, true,
- true);
- break;
- case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
- idpf_fill_ptype_lookup(&ptype_lkup[k],
- &pstate, false,
- true);
- break;
- case VIRTCHNL2_PROTO_HDR_UDP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_UDP;
- break;
- case VIRTCHNL2_PROTO_HDR_TCP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_TCP;
- break;
- case VIRTCHNL2_PROTO_HDR_SCTP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_SCTP;
- break;
- case VIRTCHNL2_PROTO_HDR_ICMP:
- ptype_lkup[k].inner_prot =
- LIBETH_RX_PT_INNER_ICMP;
- break;
- case VIRTCHNL2_PROTO_HDR_PAY:
- ptype_lkup[k].payload_layer =
- LIBETH_RX_PT_PAYLOAD_L2;
- break;
- case VIRTCHNL2_PROTO_HDR_ICMPV6:
- case VIRTCHNL2_PROTO_HDR_IPV6_EH:
- case VIRTCHNL2_PROTO_HDR_PRE_MAC:
- case VIRTCHNL2_PROTO_HDR_POST_MAC:
- case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
- case VIRTCHNL2_PROTO_HDR_SVLAN:
- case VIRTCHNL2_PROTO_HDR_CVLAN:
- case VIRTCHNL2_PROTO_HDR_MPLS:
- case VIRTCHNL2_PROTO_HDR_MMPLS:
- case VIRTCHNL2_PROTO_HDR_PTP:
- case VIRTCHNL2_PROTO_HDR_CTRL:
- case VIRTCHNL2_PROTO_HDR_LLDP:
- case VIRTCHNL2_PROTO_HDR_ARP:
- case VIRTCHNL2_PROTO_HDR_ECP:
- case VIRTCHNL2_PROTO_HDR_EAPOL:
- case VIRTCHNL2_PROTO_HDR_PPPOD:
- case VIRTCHNL2_PROTO_HDR_PPPOE:
- case VIRTCHNL2_PROTO_HDR_IGMP:
- case VIRTCHNL2_PROTO_HDR_AH:
- case VIRTCHNL2_PROTO_HDR_ESP:
- case VIRTCHNL2_PROTO_HDR_IKE:
- case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
- case VIRTCHNL2_PROTO_HDR_L2TPV2:
- case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
- case VIRTCHNL2_PROTO_HDR_L2TPV3:
- case VIRTCHNL2_PROTO_HDR_GTP:
- case VIRTCHNL2_PROTO_HDR_GTP_EH:
- case VIRTCHNL2_PROTO_HDR_GTPCV2:
- case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
- case VIRTCHNL2_PROTO_HDR_GTPU:
- case VIRTCHNL2_PROTO_HDR_GTPU_UL:
- case VIRTCHNL2_PROTO_HDR_GTPU_DL:
- case VIRTCHNL2_PROTO_HDR_ECPRI:
- case VIRTCHNL2_PROTO_HDR_VRRP:
- case VIRTCHNL2_PROTO_HDR_OSPF:
- case VIRTCHNL2_PROTO_HDR_TUN:
- case VIRTCHNL2_PROTO_HDR_NVGRE:
- case VIRTCHNL2_PROTO_HDR_VXLAN:
- case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
- case VIRTCHNL2_PROTO_HDR_GENEVE:
- case VIRTCHNL2_PROTO_HDR_NSH:
- case VIRTCHNL2_PROTO_HDR_QUIC:
- case VIRTCHNL2_PROTO_HDR_PFCP:
- case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
- case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
- case VIRTCHNL2_PROTO_HDR_RTP:
- case VIRTCHNL2_PROTO_HDR_NO_PROTO:
- break;
- default:
- break;
- }
- }
-
- idpf_finalize_ptype_lookup(&ptype_lkup[k]);
+ idpf_parse_protocol_ids(ptype, &rx_pt);
+ idpf_finalize_ptype_lookup(&rx_pt);
+
+ /* For a given protocol ID stack, the ptype value might
+ * vary between ptype_id_10 and ptype_id_8. So store
+ * them separately for splitq and singleq. Also skip
+ * the repeated ptypes in case of singleq.
+ */
+ splitq_pt_lkup[pt_10] = rx_pt;
+ if (!singleq_pt_lkup[pt_8].outer_ip)
+ singleq_pt_lkup[pt_8] = rx_pt;
}
}
out:
- vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+ adapter->splitq_pt_lkup = no_free_ptr(splitq_pt_lkup);
+ adapter->singleq_pt_lkup = no_free_ptr(singleq_pt_lkup);
return 0;
}
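A standalone sketch of the dual lookup tables built above, assuming simplified entry types: splitq descriptors report a 10-bit ptype and singleq descriptors an 8-bit one, so each parsed entry lands in both tables and an already-populated 8-bit slot is not overwritten:

#include <stdint.h>
#include <stdio.h>

#define MAX_PTYPE_10	1024
#define MAX_PTYPE_8	256

struct rx_pt { uint8_t outer_ip; uint8_t inner_prot; };

static struct rx_pt splitq_lkup[MAX_PTYPE_10];
static struct rx_pt singleq_lkup[MAX_PTYPE_8];

static void store_ptype(uint16_t pt_10, uint8_t pt_8, struct rx_pt pt)
{
	splitq_lkup[pt_10] = pt;
	/* several 10-bit ptypes can alias one 8-bit slot; keep the first */
	if (!singleq_lkup[pt_8].outer_ip)
		singleq_lkup[pt_8] = pt;
}

int main(void)
{
	store_ptype(200, 24, (struct rx_pt){ .outer_ip = 1, .inner_prot = 2 });
	store_ptype(300, 24, (struct rx_pt){ .outer_ip = 2, .inner_prot = 3 });

	printf("splitq[300].outer_ip=%u singleq[24].outer_ip=%u\n",
	       splitq_lkup[300].outer_ip, singleq_lkup[24].outer_ip);
	return 0;
}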
/**
+ * idpf_rel_rx_pt_lkup - release RX ptype lookup table
+ * @adapter: adapter pointer to get the lookup table
+ */
+static void idpf_rel_rx_pt_lkup(struct idpf_adapter *adapter)
+{
+ kfree(adapter->splitq_pt_lkup);
+ adapter->splitq_pt_lkup = NULL;
+
+ kfree(adapter->singleq_pt_lkup);
+ adapter->singleq_pt_lkup = NULL;
+}
+
+/**
* idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
* message
- * @vport: virtual port data structure
+ * @adapter: adapter pointer used to send virtchnl message
+ * @vport_id: vport identifier used while preparing the virtchnl message
+ * @loopback_ena: flag to enable or disable loopback
*
- * Returns 0 on success, negative on failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
+int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
+ bool loopback_ena)
{
struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
ssize_t reply_sz;
- loopback.vport_id = cpu_to_le32(vport->vport_id);
- loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+ loopback.vport_id = cpu_to_le32(vport_id);
+ loopback.enable = loopback_ena;
xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = &loopback;
xn_params.send_buf.iov_len = sizeof(loopback);
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
@@ -3325,7 +3402,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
{
if (adapter->hw.arq && adapter->hw.asq) {
- idpf_mb_clean(adapter);
+ idpf_mb_clean(adapter, adapter->hw.asq);
idpf_ctlq_deinit(&adapter->hw);
}
adapter->hw.arq = NULL;
@@ -3520,6 +3597,13 @@ restart:
goto err_intr_req;
}
+ err = idpf_send_get_rx_ptype_msg(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to get RX ptypes: %d\n",
+ err);
+ goto intr_rel;
+ }
+
err = idpf_ptp_init(adapter);
if (err)
pci_err(adapter->pdev, "PTP init failed, err=%pe\n",
@@ -3537,6 +3621,8 @@ restart:
return 0;
+intr_rel:
+ idpf_intr_rel(adapter);
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
@@ -3591,6 +3677,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
idpf_ptp_release(adapter);
idpf_deinit_task(adapter);
idpf_idc_deinit_core_aux_device(adapter->cdev_info);
+ idpf_rel_rx_pt_lkup(adapter);
idpf_intr_rel(adapter);
if (remove_in_prog)
@@ -3613,25 +3700,27 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
/**
* idpf_vport_alloc_vec_indexes - Get relative vector indexes
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* This function requests the vector information required for the vport and
* stores the vector indexes received from the 'global vector distribution'
* in the vport's queue vectors array.
*
- * Return 0 on success, error on failure
+ * Return: 0 on success, error on failure
*/
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
u32 req;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
if (vec_info.num_curr_vecs)
vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
/* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
- req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ req = max(rsrc->num_txq - vport->num_xdp_txq, rsrc->num_rxq) +
IDPF_RESERVED_VECS;
vec_info.num_req_vecs = req;
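A worked example of the vector request above, with assumed queue counts: XDP SQs ride on the reserved NOIRQ vector, so only the larger of the regular TX and RX queue counts needs dedicated vectors, plus the reserved ones.

#include <stdio.h>

#define RESERVED_VECS	1	/* assumed value for the sketch */

int main(void)
{
	unsigned int num_txq = 16, num_xdp_txq = 8, num_rxq = 12;
	unsigned int regular_txq = num_txq - num_xdp_txq;
	unsigned int req = (regular_txq > num_rxq ? regular_txq : num_rxq) +
			   RESERVED_VECS;

	printf("requesting %u vectors\n", req);	/* max(8, 12) + 1 = 13 */
	return 0;
}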
@@ -3639,7 +3728,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
vec_info.index = vport->idx;
num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
- vport->q_vector_idxs,
+ rsrc->q_vector_idxs,
&vec_info);
if (num_alloc_vecs <= 0) {
dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
@@ -3647,7 +3736,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
+ rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
@@ -3658,9 +3747,12 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
* @max_q: vport max queue info
*
* Will initialize vport with the info received through MB earlier
+ *
+ * Return: 0 on success, negative on failure.
*/
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
struct idpf_vport_config *vport_config;
@@ -3674,13 +3766,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data = &vport_config->user_config.rss_data;
vport_msg = adapter->vport_params_recvd[idx];
+ err = idpf_vport_init_queue_reg_chunks(vport_config,
+ &vport_msg->chunks);
+ if (err)
+ return err;
+
vport_config->max_q.max_txq = max_q->max_txq;
vport_config->max_q.max_rxq = max_q->max_rxq;
vport_config->max_q.max_complq = max_q->max_complq;
vport_config->max_q.max_bufq = max_q->max_bufq;
- vport->txq_model = le16_to_cpu(vport_msg->txq_model);
- vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
+ rsrc->txq_model = le16_to_cpu(vport_msg->txq_model);
+ rsrc->rxq_model = le16_to_cpu(vport_msg->rxq_model);
vport->vport_type = le16_to_cpu(vport_msg->vport_type);
vport->vport_id = le32_to_cpu(vport_msg->vport_id);
@@ -3697,24 +3794,27 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED);
- idpf_vport_init_num_qs(vport, vport_msg);
- idpf_vport_calc_num_q_desc(vport);
- idpf_vport_calc_num_q_groups(vport);
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_init_num_qs(vport, vport_msg, rsrc);
+ idpf_vport_calc_num_q_desc(vport, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
+ idpf_vport_alloc_vec_indexes(vport, rsrc);
vport->crc_enable = adapter->crc_enable;
if (!(vport_msg->vport_flags &
cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
- return;
+ return 0;
err = idpf_ptp_get_vport_tstamps_caps(vport);
if (err) {
+ /* Do not fail vport init when Tx timestamp caps are unavailable */
pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
- return;
+ return 0;
}
INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
+
+ return 0;
}
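
Since idpf_vport_init() now returns an error (e.g. when copying the queue register chunks fails), callers are expected to check it; a minimal hedged sketch, with idpf_vport_init_example() being hypothetical:

static int idpf_vport_init_example(struct idpf_vport *vport,
				   struct idpf_vport_max_q *max_q)
{
	int err;

	err = idpf_vport_init(vport, max_q);
	if (err)
		dev_err(&vport->adapter->pdev->dev,
			"failed to initialize vport: %d\n", err);

	return err;
}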
/**
@@ -3773,21 +3873,21 @@ int idpf_get_vec_ids(struct idpf_adapter *adapter,
* Returns number of ids filled
*/
static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
u32 num_q_id_filled = 0, i;
u32 start_q_id, num_q;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- start_q_id = le32_to_cpu(chunk->start_queue_id);
+ num_q = chunk->num_queues;
+ start_q_id = chunk->start_queue_id;
for (i = 0; i < num_q; i++) {
if ((num_q_id_filled + i) < num_qids) {
@@ -3806,6 +3906,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
/**
* __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
* @qids: queue ids
* @num_qids: number of queue ids
* @q_type: type of queue
@@ -3814,6 +3915,7 @@ static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
* parameters. Returns number of queue ids initialized.
*/
static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
const u32 *qids,
int num_qids,
u32 q_type)
@@ -3822,19 +3924,19 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
switch (q_type) {
case VIRTCHNL2_QUEUE_TYPE_TX:
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u16 num_rxq;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
@@ -3842,7 +3944,7 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (idpf_is_queue_model_split(rsrc->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3851,16 +3953,16 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
}
break;
case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
- for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ for (i = 0; i < rsrc->num_txq_grp && k < num_qids; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &rsrc->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u8 num_bufqs = vport->num_bufqs_per_qgrp;
+ for (i = 0; i < rsrc->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
+ u8 num_bufqs = rsrc->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
struct idpf_buf_queue *q;
@@ -3880,30 +3982,21 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
/**
* idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @rsrc: pointer to queue and vector resources
+ * @chunks: queue ids received over the mailbox
*
* Will initialize all queue ids with ids received as mailbox parameters.
- * Returns 0 on success, negative if all the queues are not initialized.
+ *
+ * Return: 0 on success, negative if all the queues are not initialized.
*/
-int idpf_vport_queue_ids_init(struct idpf_vport *vport)
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_ids, err = 0;
u16 q_type;
u32 *qids;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
if (!qids)
return -ENOMEM;
@@ -3911,13 +4004,13 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport)
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_TX,
chunks);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_TX);
- if (num_ids < vport->num_txq) {
+ if (num_ids < rsrc->num_txq) {
err = -EINVAL;
goto mem_rel;
}
@@ -3925,44 +4018,46 @@ int idpf_vport_queue_ids_init(struct idpf_vport *vport)
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
VIRTCHNL2_QUEUE_TYPE_RX,
chunks);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids, num_ids,
VIRTCHNL2_QUEUE_TYPE_RX);
- if (num_ids < vport->num_rxq) {
+ if (num_ids < rsrc->num_rxq) {
err = -EINVAL;
goto mem_rel;
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(rsrc->txq_model))
goto check_rxq;
q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_complq) {
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_complq) {
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_complq) {
err = -EINVAL;
goto mem_rel;
}
check_rxq:
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(rsrc->rxq_model))
goto mem_rel;
q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
- if (num_ids < vport->num_bufq) {
+ if (num_ids < rsrc->num_bufq) {
err = -EINVAL;
goto mem_rel;
}
- num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
- if (num_ids < vport->num_bufq)
+ num_ids = __idpf_vport_queue_ids_init(vport, rsrc, qids,
+ num_ids, q_type);
+ if (num_ids < rsrc->num_bufq)
err = -EINVAL;
mem_rel:
@@ -3974,23 +4069,24 @@ mem_rel:
/**
* idpf_vport_adjust_qs - Adjust to new requested queues
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* Renegotiate queues. Returns 0 on success, negative on failure.
*/
-int idpf_vport_adjust_qs(struct idpf_vport *vport)
+int idpf_vport_adjust_qs(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
struct virtchnl2_create_vport vport_msg;
int err;
- vport_msg.txq_model = cpu_to_le16(vport->txq_model);
- vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
+ vport_msg.txq_model = cpu_to_le16(rsrc->txq_model);
+ vport_msg.rxq_model = cpu_to_le16(rsrc->rxq_model);
err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
NULL);
if (err)
return err;
- idpf_vport_init_num_qs(vport, &vport_msg);
- idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_init_num_qs(vport, &vport_msg, rsrc);
+ idpf_vport_calc_num_q_groups(rsrc);
return 0;
}
@@ -4112,12 +4208,12 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
return le32_to_cpu(vport_msg->vport_id);
}
-static void idpf_set_mac_type(struct idpf_vport *vport,
+static void idpf_set_mac_type(const u8 *default_mac_addr,
struct virtchnl2_mac_addr *mac_addr)
{
bool is_primary;
- is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
+ is_primary = ether_addr_equal(default_mac_addr, mac_addr->addr);
mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
VIRTCHNL2_MAC_ADDR_EXTRA;
}
@@ -4193,22 +4289,23 @@ invalid_payload:
/**
* idpf_add_del_mac_filters - Add/del mac filters
- * @vport: Virtual port data structure
- * @np: Netdev private structure
+ * @adapter: adapter pointer used to send the virtchnl message
+ * @vport_config: persistent vport structure holding the MAC filter list
+ * @default_mac_addr: default MAC address to compare against
+ * @vport_id: vport identifier used when preparing the virtchnl message
* @add: Add or delete flag
* @async: Don't wait for return message
*
- * Returns 0 on success, error on failure.
+ * Return: 0 on success, error on failure.
**/
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
+int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ const u8 *default_mac_addr, u32 vport_id,
bool add, bool async)
{
struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
- struct idpf_adapter *adapter = np->adapter;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
u32 num_msgs, total_filters = 0;
struct idpf_mac_filter *f;
ssize_t reply_sz;
@@ -4220,7 +4317,6 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
xn_params.async = async;
xn_params.async_handler = idpf_mac_filter_async_handler;
- vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
/* Find the number of newly added filters */
@@ -4251,7 +4347,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
- idpf_set_mac_type(vport, &mac_addr[i]);
+ idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
i++;
f->add = false;
if (i == total_filters)
@@ -4259,7 +4355,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
}
if (!add && f->remove) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
- idpf_set_mac_type(vport, &mac_addr[i]);
+ idpf_set_mac_type(default_mac_addr, &mac_addr[i]);
i++;
f->remove = false;
if (i == total_filters)
@@ -4291,7 +4387,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
memset(ma_list, 0, buf_size);
}
- ma_list->vport_id = cpu_to_le32(np->vport_id);
+ ma_list->vport_id = cpu_to_le32(vport_id);
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
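
A hedged sketch of how a netdev-level caller adapts to the new signature (idpf_mac_filters_example() is hypothetical; the config lookup mirrors the code removed above and assumes np->vport is valid at the call site):

static int idpf_mac_filters_example(struct idpf_netdev_priv *np, bool add)
{
	struct idpf_adapter *adapter = np->adapter;
	struct idpf_vport_config *vport_config =
		adapter->vport_config[np->vport_idx];

	return idpf_add_del_mac_filters(adapter, vport_config,
					np->vport->default_mac_addr,
					np->vport_id, add, false);
}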
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index eac3d15daa42..fe065911ad5a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -92,6 +92,7 @@ struct idpf_netdev_priv;
struct idpf_vec_regs;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_vport_config;
struct idpf_vport_user_config_data;
ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
@@ -101,10 +102,20 @@ void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
int idpf_vc_core_init(struct idpf_adapter *adapter);
void idpf_vc_core_deinit(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+int idpf_get_reg_intr_vecs(struct idpf_adapter *adapter,
struct idpf_vec_regs *reg_vals);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ struct idpf_queue_id_reg_info *chunks);
+static inline void
+idpf_vport_deinit_queue_reg_chunks(struct idpf_vport_config *vport_cfg)
+{
+ kfree(vport_cfg->qid_reg_info.queue_chunks);
+ vport_cfg->qid_reg_info.queue_chunks = NULL;
+}
bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
@@ -112,9 +123,9 @@ bool idpf_sideband_action_ena(struct idpf_vport *vport,
struct ethtool_rx_flow_spec *fsp);
unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg, u16 cookie);
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *arq);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, struct idpf_ctlq_info *asq,
+ u32 op, u16 msg_size, u8 *msg, u16 cookie);
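
A hedged usage sketch for the reworked mailbox signatures: the default path passes the ASQ stored in the HW structure, mirroring idpf_mb_clean(adapter, adapter->hw.asq) earlier in this patch (the wrapper is hypothetical):

static int idpf_dflt_mb_send_example(struct idpf_adapter *adapter, u32 op,
				     u16 msg_size, u8 *msg, u16 cookie)
{
	/* Send on the default mailbox send queue */
	return idpf_send_mb_msg(adapter, adapter->hw.asq, op, msg_size, msg,
				cookie);
}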
struct idpf_queue_ptr {
enum virtchnl2_queue_type type;
@@ -127,60 +138,81 @@ struct idpf_queue_ptr {
};
struct idpf_queue_set {
- struct idpf_vport *vport;
+ struct idpf_adapter *adapter;
+ struct idpf_q_vec_rsrc *qv_rsrc;
+ u32 vport_id;
u32 num;
struct idpf_queue_ptr qs[] __counted_by(num);
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id, u32 num);
int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+int idpf_send_destroy_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
+int idpf_send_enable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
+int idpf_send_disable_vport_msg(struct idpf_adapter *adapter, u32 vport_id);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_adjust_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_send_add_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id);
+int idpf_send_delete_queues_msg(struct idpf_adapter *adapter,
+ struct idpf_queue_id_reg_info *chunks,
+ u32 vport_id);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
u16 *vecids, int num_vecids,
struct virtchnl2_vector_chunks *chunks);
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_adapter *adapter,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 vport_id,
+ bool map);
+
+int idpf_add_del_mac_filters(struct idpf_adapter *adapter,
+ struct idpf_vport_config *vport_config,
+ const u8 *default_mac_addr, u32 vport_id,
bool add, bool async);
int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id);
int idpf_check_supported_desc_ids(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_adapter *adapter, u32 vport_id,
+ bool loopback_ena);
+int idpf_send_get_stats_msg(struct idpf_netdev_priv *np,
+ struct idpf_port_stats *port_stats);
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_key_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_adapter *adapter,
+ struct idpf_rss_data *rss_data,
+ u32 vport_id, bool get);
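
A hedged caller sketch for the RSS accessors (idpf_rss_get_example() is hypothetical); the rss_data pointer follows the user_config lookup shown in idpf_vport_init() above:

static int idpf_rss_get_example(struct idpf_vport *vport)
{
	struct idpf_vport_config *vport_config =
		vport->adapter->vport_config[vport->idx];
	struct idpf_rss_data *rss_data = &vport_config->user_config.rss_data;
	int err;

	err = idpf_send_get_set_rss_key_msg(vport->adapter, rss_data,
					    vport->vport_id, true);
	if (err)
		return err;

	return idpf_send_get_set_rss_lut_msg(vport->adapter, rss_data,
					     vport->vport_id, true);
}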
void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr);
int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
u8 *send_msg, u16 msg_size,
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 958d16f87424..0fe435fdbb6c 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -6,17 +6,17 @@
#include "xdp.h"
#include "xsk.h"
-static int idpf_rxq_for_each(const struct idpf_vport *vport,
+static int idpf_rxq_for_each(const struct idpf_q_vec_rsrc *rsrc,
int (*fn)(struct idpf_rx_queue *rxq, void *arg),
void *arg)
{
- bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ bool splitq = idpf_is_queue_model_split(rsrc->rxq_model);
- if (!vport->rxq_grps)
+ if (!rsrc->rxq_grps)
return -ENETDOWN;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &rsrc->rxq_grps[i];
u32 num_rxq;
if (splitq)
@@ -45,7 +45,8 @@ static int idpf_rxq_for_each(const struct idpf_vport *vport,
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
const struct idpf_vport *vport = rxq->q_vector->vport;
- bool split = idpf_is_queue_model_split(vport->rxq_model);
+ const struct idpf_q_vec_rsrc *rsrc;
+ bool split;
int err;
err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
@@ -54,6 +55,9 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (err)
return err;
+ rsrc = &vport->dflt_qv_rsrc;
+ split = idpf_is_queue_model_split(rsrc->rxq_model);
+
if (idpf_queue_has(XSK, rxq)) {
err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
@@ -70,7 +74,7 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
if (!split)
return 0;
- rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->xdpsqs = &vport->txqs[rsrc->xdp_txq_offset];
rxq->num_xdp_txq = vport->num_xdp_txq;
return 0;
@@ -86,9 +90,9 @@ int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
return __idpf_xdp_rxq_info_init(rxq, NULL);
}
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc)
{
- return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+ return idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_init, NULL);
}
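
A hedged usage sketch: configuration paths now hand in the resource container, e.g. the default one embedded in the vport (the wrapper is hypothetical):

static int idpf_xdp_rxq_init_example(const struct idpf_vport *vport)
{
	return idpf_xdp_rxq_info_init_all(&vport->dflt_qv_rsrc);
}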
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
@@ -111,10 +115,10 @@ void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
__idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
}
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc)
{
- idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
- (void *)(size_t)vport->rxq_model);
+ idpf_rxq_for_each(rsrc, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)rsrc->rxq_model);
}
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
@@ -132,10 +136,10 @@ static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
return 0;
}
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog)
{
- idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+ idpf_rxq_for_each(rsrc, idpf_xdp_rxq_assign_prog, xdp_prog);
}
static void idpf_xdp_tx_timer(struct work_struct *work);
@@ -165,7 +169,7 @@ int idpf_xdpsqs_get(const struct idpf_vport *vport)
}
dev = vport->netdev;
- sqs = vport->xdp_txq_offset;
+ sqs = vport->dflt_qv_rsrc.xdp_txq_offset;
for (u32 i = sqs; i < vport->num_txq; i++) {
struct idpf_tx_queue *xdpsq = vport->txqs[i];
@@ -202,7 +206,7 @@ void idpf_xdpsqs_put(const struct idpf_vport *vport)
return;
dev = vport->netdev;
- sqs = vport->xdp_txq_offset;
+ sqs = vport->dflt_qv_rsrc.xdp_txq_offset;
for (u32 i = sqs; i < vport->num_txq; i++) {
struct idpf_tx_queue *xdpsq = vport->txqs[i];
@@ -358,12 +362,15 @@ int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
{
const struct idpf_netdev_priv *np = netdev_priv(dev);
const struct idpf_vport *vport = np->vport;
+ u32 xdp_txq_offset;
if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
return -ENETDOWN;
+ xdp_txq_offset = vport->dflt_qv_rsrc.xdp_txq_offset;
+
return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
- &vport->txqs[vport->xdp_txq_offset],
+ &vport->txqs[xdp_txq_offset],
vport->num_xdp_txq,
idpf_xdp_xmit_flush_bulk,
idpf_xdp_tx_finalize);
@@ -397,7 +404,7 @@ static const struct xdp_metadata_ops idpf_xdpmo = {
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
- if (!idpf_is_queue_model_split(vport->rxq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.rxq_model))
return;
libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
@@ -409,6 +416,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
const struct netdev_bpf *xdp)
{
const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct bpf_prog *old, *prog = xdp->prog;
struct idpf_vport_config *cfg;
int ret;
@@ -419,7 +427,7 @@ static int idpf_xdp_setup_prog(struct idpf_vport *vport,
!test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
!!vport->xdp_prog == !!prog) {
if (test_bit(IDPF_VPORT_UP, np->state))
- idpf_xdp_copy_prog_to_rqs(vport, prog);
+ idpf_xdp_copy_prog_to_rqs(rsrc, prog);
old = xchg(&vport->xdp_prog, prog);
if (old)
@@ -464,7 +472,7 @@ int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
idpf_vport_ctrl_lock(dev);
vport = idpf_netdev_to_vport(dev);
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!idpf_is_queue_model_split(vport->dflt_qv_rsrc.txq_model))
goto notsupp;
switch (xdp->command) {
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index 479f5ef3c604..7ffc6955dfae 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -9,10 +9,10 @@
#include "idpf_txrx.h"
int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
-int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+int idpf_xdp_rxq_info_init_all(const struct idpf_q_vec_rsrc *rsrc);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
-void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
-void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_q_vec_rsrc *rsrc);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_q_vec_rsrc *rsrc,
struct bpf_prog *xdp_prog);
int idpf_xdpsqs_get(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index fd2cc43ab43c..676cbd80774d 100644
--- a/drivers/net/ethernet/intel/idpf/xsk.c
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -26,13 +26,14 @@ static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
struct idpf_buf_queue *bufq)
{
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct xsk_buff_pool *pool;
u32 qid = U32_MAX;
- for (u32 i = 0; i < vport->num_rxq_grp; i++) {
- const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+ for (u32 i = 0; i < rsrc->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &rsrc->rxq_grps[i];
- for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ for (u32 j = 0; j < rsrc->num_bufqs_per_qgrp; j++) {
if (&grp->splitq.bufq_sets[j].bufq == bufq) {
qid = grp->splitq.rxq_sets[0]->rxq.idx;
goto setup;
@@ -61,7 +62,7 @@ static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
if (!idpf_queue_has(XDP, txq))
return;
- qid = txq->idx - vport->xdp_txq_offset;
+ qid = txq->idx - vport->dflt_qv_rsrc.xdp_txq_offset;
pool = xsk_get_pool_from_qid(vport->netdev, qid);
if (!pool || !pool->dev)
@@ -86,7 +87,8 @@ static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
if (!idpf_queue_has(XDP, complq))
return;
- qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+ qid = complq->txq_grp->txqs[0]->idx -
+ vport->dflt_qv_rsrc.xdp_txq_offset;
pool = xsk_get_pool_from_qid(vport->netdev, qid);
if (!pool || !pool->dev)