From 4b281faec3ad00f7fb00080078321e4d819795eb Mon Sep 17 00:00:00 2001
From: Don Wood
Date: Sat, 5 Sep 2009 20:36:38 -0700
Subject: RDMA/nes: Use flush mechanism to set status for wqe in error

When an asynchronous event occurs that requires a terminate, it is
sometimes possible to identify the wqe in error.  This change uses
flush to get this information to the poll routine.  The flush
operation puts the status into the cqe.  If this information is not
available, it continues to use the more generic flush code as before.

Signed-off-by: Don Wood
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/nes/nes_hw.c    | 54 +++++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/nes/nes_hw.h    | 12 ++++++++
 drivers/infiniband/hw/nes/nes_verbs.h |  2 ++
 3 files changed, 68 insertions(+)

(limited to 'drivers')

diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 297026f0c138..63a1a8e1e8a3 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2944,6 +2944,7 @@ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 a
 	u16 ddp_seg_len;
 	int copy_len = 0;
 	u8 is_tagged = 0;
+	u8 flush_code = 0;
 	struct nes_terminate_hdr *termhdr;
 
 	termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
@@ -2983,19 +2984,23 @@ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 a
 	case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
 		switch (iwarp_opcode(nesqp, aeq_info)) {
 		case IWARP_OPCODE_WRITE:
+			flush_code = IB_WC_LOC_PROT_ERR;
 			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
 			termhdr->error_code = DDP_TAGGED_INV_STAG;
 			break;
 		default:
+			flush_code = IB_WC_REM_ACCESS_ERR;
 			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 			termhdr->error_code = RDMAP_INV_STAG;
 		}
 		break;
 	case NES_AEQE_AEID_AMP_INVALID_STAG:
+		flush_code = IB_WC_REM_ACCESS_ERR;
 		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 		termhdr->error_code = RDMAP_INV_STAG;
 		break;
 	case NES_AEQE_AEID_AMP_BAD_QP:
+		flush_code = IB_WC_LOC_QP_OP_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
 		termhdr->error_code = DDP_UNTAGGED_INV_QN;
 		break;
@@ -3004,19 +3009,23 @@ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 a
 		switch (iwarp_opcode(nesqp, aeq_info)) {
 		case IWARP_OPCODE_SEND_INV:
 		case IWARP_OPCODE_SEND_SE_INV:
+			flush_code = IB_WC_REM_OP_ERR;
 			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
 			termhdr->error_code = RDMAP_CANT_INV_STAG;
 			break;
 		default:
+			flush_code = IB_WC_REM_ACCESS_ERR;
 			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 			termhdr->error_code = RDMAP_INV_STAG;
 		}
 		break;
 	case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
 		if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
+			flush_code = IB_WC_LOC_PROT_ERR;
 			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
 			termhdr->error_code = DDP_TAGGED_BOUNDS;
 		} else {
+			flush_code = IB_WC_REM_ACCESS_ERR;
 			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 			termhdr->error_code = RDMAP_INV_BOUNDS;
 		}
@@ -3024,57 +3033,69 @@ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 a
 	case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
 	case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
 	case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+		flush_code = IB_WC_REM_ACCESS_ERR;
 		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 		termhdr->error_code = RDMAP_ACCESS;
 		break;
 	case NES_AEQE_AEID_AMP_TO_WRAP:
+		flush_code = IB_WC_REM_ACCESS_ERR;
 		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 		termhdr->error_code = RDMAP_TO_WRAP;
 		break;
 	case NES_AEQE_AEID_AMP_BAD_PD:
 		switch (iwarp_opcode(nesqp, aeq_info)) {
 		case IWARP_OPCODE_WRITE:
+			flush_code = IB_WC_LOC_PROT_ERR;
 			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
 			termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
 			break;
 		case IWARP_OPCODE_SEND_INV:
 		case IWARP_OPCODE_SEND_SE_INV:
+			flush_code = IB_WC_REM_ACCESS_ERR;
 			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 			termhdr->error_code = RDMAP_CANT_INV_STAG;
 			break;
 		default:
+			flush_code = IB_WC_REM_ACCESS_ERR;
 			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
 			termhdr->error_code = RDMAP_UNASSOC_STAG;
 		}
 		break;
 	case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+		flush_code = IB_WC_LOC_LEN_ERR;
 		termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
 		termhdr->error_code = MPA_MARKER;
 		break;
 	case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+		flush_code = IB_WC_GENERAL_ERR;
 		termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
 		termhdr->error_code = MPA_CRC;
 		break;
 	case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
 	case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+		flush_code = IB_WC_LOC_LEN_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
 		termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
 		break;
 	case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
 	case NES_AEQE_AEID_DDP_NO_L_BIT:
+		flush_code = IB_WC_FATAL_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
 		termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
 		break;
 	case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
 	case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+		flush_code = IB_WC_GENERAL_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
 		termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
 		break;
 	case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+		flush_code = IB_WC_LOC_LEN_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
 		termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
 		break;
 	case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+		flush_code = IB_WC_GENERAL_ERR;
 		if (is_tagged) {
 			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
 			termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
@@ -3084,26 +3105,32 @@ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 a
 		}
 		break;
 	case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+		flush_code = IB_WC_GENERAL_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
 		termhdr->error_code = DDP_UNTAGGED_INV_MO;
 		break;
 	case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+		flush_code = IB_WC_REM_OP_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
 		termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
 		break;
 	case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+		flush_code = IB_WC_GENERAL_ERR;
 		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
 		termhdr->error_code = DDP_UNTAGGED_INV_QN;
 		break;
 	case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+		flush_code = IB_WC_GENERAL_ERR;
 		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
 		termhdr->error_code = RDMAP_INV_RDMAP_VER;
 		break;
 	case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+		flush_code = IB_WC_LOC_QP_OP_ERR;
 		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
 		termhdr->error_code = RDMAP_UNEXPECTED_OP;
 		break;
 	default:
+		flush_code = IB_WC_FATAL_ERR;
 		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
 		termhdr->error_code = RDMAP_UNSPECIFIED;
 		break;
@@ -3112,6 +3139,13 @@ static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 a
 	if (copy_len)
 		memcpy(termhdr + 1, pkt, copy_len);
 
+	if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
+		if (aeq_info & NES_AEQE_SQ)
+			nesqp->term_sq_flush_code = flush_code;
+		else
+			nesqp->term_rq_flush_code = flush_code;
+	}
+
 	return sizeof(struct nes_terminate_hdr) + copy_len;
 }
 
@@ -3646,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
 {
 	struct nes_cqp_request *cqp_request;
 	struct nes_hw_cqp_wqe *cqp_wqe;
+	u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
+	u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
 	int ret;
 
 	cqp_request = nes_get_cqp_request(nesdev);
@@ -3662,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
 	cqp_wqe = &cqp_request->cqp_wqe;
 	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
 
+	/* If wqe in error was identified, set code to be put into cqe */
+	if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
+		which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+		sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
+		nesqp->term_sq_flush_code = 0;
+	}
+
+	if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
+		which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+		rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
+		nesqp->term_rq_flush_code = 0;
+	}
+
+	if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
+		cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
+		cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
+	}
+
 	cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
 	cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
 
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 4a0bfcd5a628..f28a41ba9fa1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -274,6 +274,8 @@ enum nes_cqp_qp_bits {
 enum nes_cqp_qp_wqe_word_idx {
 	NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
 	NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+	NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
+	NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
 	NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
 };
 
@@ -364,6 +366,7 @@ enum nes_cqp_arp_bits {
 enum nes_cqp_flush_bits {
 	NES_CQP_FLUSH_SQ = (1<<30),
 	NES_CQP_FLUSH_RQ = (1<<31),
+	NES_CQP_FLUSH_MAJ_MIN = (1<<28),
 };
 
 enum nes_cqe_opcode_bits {
@@ -757,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
 	NES_IWARP_SQ_OP_NOP = 12,
 };
 
+enum nes_iwarp_cqe_major_code {
+	NES_IWARP_CQE_MAJOR_FLUSH = 1,
+	NES_IWARP_CQE_MAJOR_DRV = 0x8000
+};
+
+enum nes_iwarp_cqe_minor_code {
+	NES_IWARP_CQE_MINOR_FLUSH = 1
+};
+
 #define NES_EEPROM_READ_REQUEST (1<<16)
 #define NES_MAC_ADDR_VALID (1<<20)
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index d92b1ef4653b..89822d75f82e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -168,6 +168,8 @@ struct nes_qp {
 	wait_queue_head_t     kick_waitq;
 	u16                   in_disconnect;
 	u16                   private_data_len;
+	u16                   term_sq_flush_code;
+	u16                   term_rq_flush_code;
 	u8                    active_conn;
 	u8                    skip_lsmm;
 	u8                    user_mode;
-- 
cgit v1.2.3
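
Note on the encoding used by this patch (commentary, not part of the patch): flush_wqes() now builds a 32-bit CQE code word as (major << 16) | minor. When nes_bld_terminate_hdr() could identify the wqe in error, the major code is the driver-generated value 0x8000 (CQE_MAJOR_DRV) and the low 16 bits carry the specific ib_wc_status; otherwise the generic FLUSH major/minor pair is used, as before. The standalone C sketch below only illustrates that encoding and a possible decode step on the poll side; nes_decode_flush_code() is a hypothetical helper, not a driver API, and the IB_WC_* numeric values are assumed for illustration rather than taken from the patch.

/*
 * Illustrative user-space sketch of the CQE code word used by flush_wqes()
 * above.  The NES_IWARP_CQE_* values are copied from the nes_hw.h hunk;
 * nes_decode_flush_code() and the IB_WC_* values are assumptions made for
 * this example only.
 */
#include <stdint.h>
#include <stdio.h>

#define NES_IWARP_CQE_MAJOR_FLUSH 1       /* generic flush major code */
#define NES_IWARP_CQE_MAJOR_DRV   0x8000  /* driver supplied a specific status */
#define NES_IWARP_CQE_MINOR_FLUSH 1       /* generic flush minor code */

#define IB_WC_LOC_LEN_ERR  1              /* assumed ib_wc_status value */
#define IB_WC_WR_FLUSH_ERR 5              /* assumed ib_wc_status value */

/* Pack the code word the same way flush_wqes() does: major high, minor low. */
static uint32_t encode_code(uint16_t major, uint16_t minor_or_status)
{
	return ((uint32_t)major << 16) | minor_or_status;
}

/*
 * Hypothetical decode step a poll routine could perform on a flushed CQE:
 * if the driver-generated major code is present, the low 16 bits already
 * hold the specific ib_wc_status; otherwise report a generic flush error.
 */
static unsigned int nes_decode_flush_code(uint32_t code)
{
	uint16_t major = code >> 16;

	if (major == NES_IWARP_CQE_MAJOR_DRV)
		return code & 0xffff;          /* specific status from terminate path */
	return IB_WC_WR_FLUSH_ERR;             /* generic "flushed" status, as before */
}

int main(void)
{
	/* wqe in error was identified; a specific status rides in the minor field. */
	uint32_t specific = encode_code(NES_IWARP_CQE_MAJOR_DRV, IB_WC_LOC_LEN_ERR);
	/* No specific information available: generic flush code, as before the patch. */
	uint32_t generic = encode_code(NES_IWARP_CQE_MAJOR_FLUSH, NES_IWARP_CQE_MINOR_FLUSH);

	printf("specific wqe error -> wc status %u\n", nes_decode_flush_code(specific));
	printf("generic flush      -> wc status %u\n", nes_decode_flush_code(generic));
	return 0;
}

The patch itself only covers the producer side of this split: it stores the per-wqe status in nesqp->term_sq_flush_code / term_rq_flush_code and writes the packed code into the flush WQE words, and the decode shown here is merely meant to make the (major, minor) layout concrete.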