author    Shiraz Saleem <shiraz.saleem@intel.com>  2026-03-16 13:39:47 -0500
committer Leon Romanovsky <leonro@nvidia.com>      2026-03-18 06:20:53 -0400
commit    e37afcb56ae070477741fe2d6e61fc0c542cce2d (patch)
tree      bf9add2502b85183086248b456d7efde0a4c74d9
parent    7221f581eefa79ead06e171044f393fb7ee22f87 (diff)
RDMA/irdma: Harden depth calculation functions
An issue was exposed where the OS can pass in U32_MAX for the SQ/RQ/SRQ size. This can cause integer overflow and truncation of the SQ/RQ/SRQ depth, returning success when the call should have failed. Harden the functions to do all depth calculations and boundary checking in u64 sizes.

Fixes: 563e1feb5f6e ("RDMA/irdma: Add SRQ support")
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c | 39
1 file changed, 22 insertions(+), 17 deletions(-)
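The truncation is easiest to see in isolation. The sketch below is a user-space analogue, not driver code: the shift amount, the reserved-quanta constant, and the next-power-of-two helpers (modeled on irdma_round_up_wq()) are all illustrative stand-ins. Rounded up in 32 bits, a U32_MAX request wraps to 0, which would then be silently clamped to the minimum depth and reported as success; widened to 64 bits first, it produces a value the max-quanta check can reject.

#include <stdint.h>
#include <stdio.h>

/* Next-power-of-two round-up in 32 bits, modeled on the old
 * irdma_round_up_wq(); wraps to 0 for inputs near U32_MAX. */
static uint32_t round_up_u32(uint32_t d)
{
	int s;

	for (d--, s = 1; s <= 16; s <<= 1)
		d |= d >> s;
	return d + 1;
}

/* The same round-up carried out in 64 bits, so nothing is lost. */
static uint64_t round_up_u64(uint64_t d)
{
	int s;

	for (d--, s = 1; s <= 32; s <<= 1)
		d |= d >> s;
	return d + 1;
}

int main(void)
{
	uint32_t sq_size = UINT32_MAX;	/* hostile size from the OS */
	uint8_t shift = 2;		/* illustrative quanta shift */
	uint32_t rsvd = 1;		/* stand-in for IRDMA_SQ_RSVD */

	/* Old math: the shift and the round-up both wrap in 32 bits. */
	printf("u32 depth: %u\n", round_up_u32((sq_size << shift) + rsvd));

	/* New math: widen before shifting; the result (2^34) is far above
	 * any u32 max_hw_wq_quanta, so the caller can reject it. */
	printf("u64 depth: %llu\n", (unsigned long long)
	       round_up_u64(((uint64_t)sq_size << shift) + rsvd));
	return 0;
}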
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index ac3721a5747a..4718acf6c6fd 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -1438,7 +1438,7 @@ exit:
* irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
-static int irdma_round_up_wq(u32 wqdepth)
+static u64 irdma_round_up_wq(u64 wqdepth)
{
int scount = 1;
@@ -1491,15 +1491,16 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *sqdepth)
{
- u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)sq_size << shift) + IRDMA_SQ_RSVD);
- *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
-
- if (*sqdepth < min_size)
- *sqdepth = min_size;
- else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_wq_quanta)
return -EINVAL;
+ *sqdepth = hw_quanta;
return 0;
}
@@ -1513,15 +1514,16 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *rqdepth)
{
- u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
-
- *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < min_size)
- *rqdepth = min_size;
- else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
+ *rqdepth = hw_quanta;
return 0;
}
@@ -1535,13 +1537,16 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
u32 *srqdepth)
{
- *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)srq_size << shift) + IRDMA_RQ_RSVD);
- if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
- *srqdepth = uk_attrs->min_hw_wq_size << shift;
- else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_srq_quanta)
return -EINVAL;
+ *srqdepth = hw_quanta;
return 0;
}
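For completeness, here is the hardened pattern from the patch transplanted into a self-contained user-space sketch. The limit and reserved constants are made-up values, not the hardware's, and get_sqdepth() is a hypothetical stand-in for irdma_get_sqdepth(): all arithmetic stays in u64 until the value has passed the bounds check, and only then is it narrowed into the u32 output.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_HW_WQ_SIZE		8U	/* illustrative, not the hw value */
#define MAX_HW_WQ_QUANTA	32768U	/* illustrative, not the hw value */
#define SQ_RSVD			1U	/* stand-in for IRDMA_SQ_RSVD */

/* Generic 64-bit next-power-of-two round-up. */
static uint64_t round_up_u64(uint64_t d)
{
	int s;

	for (d--, s = 1; s <= 32; s <<= 1)
		d |= d >> s;
	return d + 1;
}

/* Mirrors the hardened irdma_get_sqdepth(): compute in u64,
 * bounds-check, and only then narrow to the u32 out-parameter. */
static int get_sqdepth(uint32_t sq_size, uint8_t shift, uint32_t *sqdepth)
{
	uint32_t min_hw_quanta = MIN_HW_WQ_SIZE << shift;
	uint64_t hw_quanta =
		round_up_u64(((uint64_t)sq_size << shift) + SQ_RSVD);

	if (hw_quanta < min_hw_quanta)
		hw_quanta = min_hw_quanta;
	else if (hw_quanta > MAX_HW_WQ_QUANTA)
		return -EINVAL;

	*sqdepth = hw_quanta;	/* safe: already checked against a u32 limit */
	return 0;
}

int main(void)
{
	uint32_t depth = 0;
	int rc;

	/* A sane request still succeeds... */
	rc = get_sqdepth(128, 2, &depth);
	printf("size 128:     rc=%d depth=%u\n", rc, depth);

	/* ...while a U32_MAX request now fails instead of wrapping to a
	 * tiny depth and reporting success. */
	rc = get_sqdepth(UINT32_MAX, 2, &depth);
	printf("size U32_MAX: rc=%d\n", rc);
	return 0;
}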