author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-07 17:08:02 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-07 17:08:02 -0700
commit | 3cc08fc35db75b059118626c30b60b0f56583802 (patch) |
tree | 704d71199c8be8d5b822ca424675291e8cec7bde /drivers/infiniband/hw/qib/qib_rc.c |
parent | faa38b5e0e092914764cdba9f83d31a3f794d182 (diff) |
parent | 03b37ecdb3975f09832747600853d3818a50eda3 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
IB/qib: Add missing <linux/slab.h> include
IB/ehca: Drop unnecessary NULL test
RDMA/nes: Fix confusing if statement indentation
IB/ehca: Init irq tasklet before irq can happen
RDMA/nes: Fix misindented code
RDMA/nes: Fix showing wqm_quanta
RDMA/nes: Get rid of "set but not used" variables
RDMA/nes: Read firmware version from correct place
IB/srp: Export req_lim via sysfs
IB/srp: Make receive buffer handling more robust
IB/srp: Use print_hex_dump()
IB: Rename RAW_ETY to RAW_ETHERTYPE
RDMA/nes: Fix two sparse warnings
RDMA/cxgb3: Make needlessly global iwch_l2t_send() static
IB/iser: Make needlessly global iser_alloc_rx_descriptors() static
RDMA/cxgb4: Add timeouts when waiting for FW responses
IB/qib: Fix race between qib_error_qp() and receive packet processing
IB/qib: Limit the number of packets processed per interrupt
IB/qib: Allow writes to the diag_counters to be able to clear them
IB/qib: Set cfgctxts to number of CPUs by default
...
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_rc.c')
-rw-r--r-- | drivers/infiniband/hw/qib/qib_rc.c | 47 |
1 file changed, 13 insertions(+), 34 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a373719c..a0931119bd78 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@ done:
 
 /*
  * Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  */
 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 {
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg)
 	struct qib_ibport *ibp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->r_lock, flags);
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg)
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
 	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->r_lock, flags);
 }
 
 /*
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto ack_done;
-
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
 		goto ack_done;
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 	ibp->n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto unlock_done;
 
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	psn = be32_to_cpu(ohdr->bth[2]);
 	opcode >>= 24;
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/*
 	 * Process responses (ACKs) before anything else. Note that the
 	 * packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
 				hdrsize, pmtu, rcd);
-		goto runlock;
+		return;
 	}
 
 	/* Compute 24 bits worth of difference. */
 	diff = qib_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
-			goto runlock;
+			return;
 		goto send_ack;
 	}
 
@@ -2090,9 +2082,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@ send_last:
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@ send_last:
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@ send_last:
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	default:
@@ -2227,7 +2213,7 @@ send_last:
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31))
 		goto send_ack;
-	goto runlock;
+	return;
 
 rnr_nak:
 	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@ rnr_nak:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@ nack_op_err:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@ nack_inv:
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@ nack_acc:
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
 	qib_send_rc_ack(qp);
-runlock:
-	spin_unlock(&qp->r_lock);
-	return;
-
-srunlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock:
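The structural effect of the change above is a lock-ordering one: qib_rc_rcv() no longer takes r_lock itself (per the updated comment on qib_restart_rc(), r_lock is expected to be held already, presumably by a caller outside this diff), while rc_timeout() now nests s_lock inside r_lock so the retry timer cannot run concurrently with receive packet processing. Below is a minimal sketch of that r_lock-then-s_lock nesting, under the assumption stated in the comments; example_qp and example_timeout are hypothetical names, not the driver's real ones (those are struct qib_qp and rc_timeout() in the diff).

/*
 * Sketch only: illustrates the lock nesting rc_timeout() adopts in the
 * patch above.  "example_qp" and "example_timeout" are hypothetical
 * stand-ins; only r_lock/s_lock mirror the real fields.
 */
#include <linux/spinlock.h>

struct example_qp {
	spinlock_t r_lock;	/* serializes receive-side processing */
	spinlock_t s_lock;	/* serializes send-side state */
};

static void example_timeout(struct example_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);	/* outer lock, IRQs off */
	spin_lock(&qp->s_lock);			/* inner lock, same order as the receive path */

	/* ... requester restart / send scheduling would happen here ... */

	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

Presumably, once the timeout and the receive path are serialized on r_lock in this fixed order, the repeated ib_qib_state_ops[qp->state] re-checks after acquiring s_lock become redundant, which would explain why the diff removes them along with the runlock/srunlock exit labels.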