author     Joshua Hay <joshua.a.hay@intel.com>  2025-11-03 13:20:36 -0800
committer  Tony Nguyen <anthony.l.nguyen@intel.com>  2026-01-06 15:42:10 -0800
commit     086efe0a1ecc36cffe46640ce12649a4cd3ff171 (patch)
tree       ccce37b60c6be69ff3442cc4cf679f27b7c938f3
parent     87b8ee64685bc096a087af833d4594b2332bfdb1 (diff)
idpf: cap maximum Rx buffer size
The HW only supports a maximum Rx buffer size of 16K - 128. On systems
using large pages, the libeth logic can configure the buffer size to be
larger than this: the upper bound is PAGE_SIZE, while the lower bound is
the MTU rounded up to the nearest power of 2. For example, an ARM system
with a 64K page size and an MTU of 9000 will set the Rx buffer size to
16K, which causes the config Rx queues message to fail.

Initialize the bufq/fill queue buf_len field to the maximum supported
size. This triggers the libeth logic to cap the maximum Rx buffer size
by reducing the upper bound.

Fixes: 74d1412ac8f37 ("idpf: use libeth Rx buffer management for payload buffer")
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Acked-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
Reviewed-by: David Decotigny <ddecotig@google.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
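[Editorial note] To make the sizing rule above concrete, here is a small
standalone C model of it. The helper pick_rx_buf_len() and its bounds
logic are illustrative assumptions, not the in-kernel libeth code; only
the constants (the 16K - 128 HW cap, 64K pages, MTU 9000) come from the
commit message.

/*
 * Standalone model (userspace C) of the buffer sizing described above.
 * pick_rx_buf_len() is a hypothetical helper, not the libeth code.
 */
#include <stdint.h>
#include <stdio.h>

#define IDPF_RX_MAX_BUF_SZ	(16384 - 128)	/* HW cap from the patch */

/* Round v up to the nearest power of two (v > 0). */
static uint32_t roundup_pow_of_two(uint32_t v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

/*
 * Lower bound: MTU rounded up to a power of two.
 * Upper bound: PAGE_SIZE, reduced by a caller-seeded buf_len cap.
 */
static uint32_t pick_rx_buf_len(uint32_t mtu, uint32_t page_size,
				uint32_t buf_len_cap)
{
	uint32_t upper = page_size;
	uint32_t lower = roundup_pow_of_two(mtu);

	if (buf_len_cap && buf_len_cap < upper)
		upper = buf_len_cap;

	return lower < upper ? lower : upper;
}

int main(void)
{
	/* 64K pages, MTU 9000: 16384 > 16K - 128, queue config fails */
	printf("uncapped: %u\n",
	       (unsigned)pick_rx_buf_len(9000, 65536, 0));
	/* seeding the cap clamps the result to 16256, which HW accepts */
	printf("capped:   %u\n",
	       (unsigned)pick_rx_buf_len(9000, 65536, IDPF_RX_MAX_BUF_SZ));
	return 0;
}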
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c  8
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h  1
2 files changed, 6 insertions, 3 deletions
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index f51d52297e1e..7f3933ca9edc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -695,9 +695,10 @@ err:
 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
 {
 	struct libeth_fq fq = {
-		.count	= rxq->desc_count,
-		.type	= LIBETH_FQE_MTU,
-		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
+		.count		= rxq->desc_count,
+		.type		= LIBETH_FQE_MTU,
+		.buf_len	= IDPF_RX_MAX_BUF_SZ,
+		.nid		= idpf_q_vector_to_mem(rxq->q_vector),
 	};
 	int ret;
 
@@ -754,6 +755,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
 		.truesize	= bufq->truesize,
 		.count		= bufq->desc_count,
 		.type		= type,
+		.buf_len	= IDPF_RX_MAX_BUF_SZ,
 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
 		.xdp		= idpf_xdp_enabled(bufq->q_vector->vport),
 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 0472698ca192..423cc9486dce 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -101,6 +101,7 @@ do {						\
 	idx = 0;					\
 } while (0)
 
+#define IDPF_RX_MAX_BUF_SZ		(16384 - 128)
 #define IDPF_RX_BUF_STRIDE		32
 #define IDPF_RX_BUF_POST_STRIDE		16
 #define IDPF_LOW_WATERMARK		64
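
[Editorial note] For context, a sketch of how the seeded field is
consumed. In both functions touched above, the fq descriptor is passed
to libeth_rx_fq_create(); the error path and the read-back below are
illustrative and abridged, not quoted from the driver, and assume that
libeth writes the final, capped length back into fq.buf_len.

/*
 * Abridged sketch of the call site in idpf_rx_bufs_init_singleq().
 * The seed in .buf_len acts as a ceiling on the size libeth picks.
 */
struct libeth_fq fq = {
	.count		= rxq->desc_count,
	.type		= LIBETH_FQE_MTU,
	.buf_len	= IDPF_RX_MAX_BUF_SZ,	/* seed = HW ceiling */
	.nid		= idpf_q_vector_to_mem(rxq->q_vector),
};
int ret;

ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
if (ret)
	return ret;

/* fq.buf_len now holds a size no larger than 16K - 128 */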