author    Stefan Roscher <stefan.roscher@de.ibm.com>  2007-08-31 16:02:59 +0200
committer Roland Dreier <rolandd@cisco.com>  2007-08-31 13:56:42 -0700
commit    fecea0ab3415bfab9a1964690e53b10c5d8f2e46 (patch)
tree      45c8a33154fad54c00dd86d9b5b989d6b3bd04d1 /drivers/infiniband/hw/ehca
parent    18115f45374d19ada218fc013aa5308baf5d283e (diff)
IB/ehca: Fix Small QP regressions
The new Small QP code had a few bugs that would also make it trigger for
non-Small QPs. Fix them.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c   | 10
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.c |  2
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b178cba96345..84d435a5ee11 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -600,10 +600,12 @@ static struct ehca_qp *internal_create_qp(
if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
&& !(context && udata)) { /* no small QP support in userspace ATM */
- ehca_determine_small_queue(
- &parms.squeue, max_send_sge, is_llqp);
- ehca_determine_small_queue(
- &parms.rqueue, max_recv_sge, is_llqp);
+ if (HAS_SQ(my_qp))
+ ehca_determine_small_queue(
+ &parms.squeue, max_send_sge, is_llqp);
+ if (HAS_RQ(my_qp))
+ ehca_determine_small_queue(
+ &parms.rqueue, max_recv_sge, is_llqp);
parms.qp_storage =
(parms.squeue.is_small || parms.rqueue.is_small);
}
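The hunk above guards the small-queue sizing with HAS_SQ()/HAS_RQ(). The sketch below uses hypothetical names, not the driver's API, and assumes that a queue which does not exist at all reports a size of zero: an unguarded "does it fit in a small chunk?" test then marks the absent queue as small and the whole QP takes the small-QP path by mistake, which is the regression the guard removes.

/*
 * Illustrative sketch only -- determine_small() and queue_parms are
 * hypothetical stand-ins for the driver's small-queue sizing logic.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue_parms {
	unsigned long size;	/* bytes needed for the queue, 0 if absent */
	bool is_small;
};

/* hypothetical stand-in for ehca_determine_small_queue() */
static void determine_small(struct queue_parms *q)
{
	q->is_small = q->size <= 512;	/* fits in one small-queue chunk */
}

int main(void)
{
	struct queue_parms squeue = { .size = 0 };	/* QP has no send queue */
	struct queue_parms rqueue = { .size = 8192 };
	bool has_sq = squeue.size != 0;

	/* unguarded: the absent send queue looks "small" and taints the QP */
	determine_small(&squeue);
	determine_small(&rqueue);
	printf("unguarded: qp_storage small? %d\n",
	       squeue.is_small || rqueue.is_small);

	/* guarded, as in the fix: only size queues that actually exist */
	squeue.is_small = false;
	if (has_sq)
		determine_small(&squeue);
	printf("guarded:   qp_storage small? %d\n",
	       squeue.is_small || rqueue.is_small);
	return 0;
}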
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index a090c679c397..29bd476fbd54 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -172,7 +172,7 @@ static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
unsigned long bit;
int free_page = 0;
- bit = ((unsigned long)queue->queue_pages[0] & PAGE_MASK)
+ bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
>> (order + 9);
mutex_lock(&pd->lock);
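The second hunk flips PAGE_MASK to ~PAGE_MASK. In the kernel headers PAGE_MASK is ~(PAGE_SIZE - 1), so addr & PAGE_MASK yields the page-aligned base address while addr & ~PAGE_MASK yields the byte offset inside the page; only the latter, shifted right by (order + 9), is a sensible bit index for a 512 << order byte chunk within the page. The standalone sketch below (hypothetical address, not driver code) shows the difference in the arithmetic.

/*
 * Minimal userspace sketch of the mask fix.  PAGE_SIZE/PAGE_MASK are
 * defined locally to mirror the kernel's 4 KiB-page definitions.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xc0001a00UL;	/* hypothetical queue start */
	int order = 0;				/* 512-byte small queue */

	/* old code: page-aligned address shifted -- a huge, bogus bit index */
	unsigned long wrong = (addr & PAGE_MASK) >> (order + 9);

	/* fixed code: offset within the page, a valid bitmap bit (here 5) */
	unsigned long right = (addr & ~PAGE_MASK) >> (order + 9);

	printf("wrong bit = %lu, right bit = %lu\n", wrong, right);
	return 0;
}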