Diffstat (limited to 'drivers/infiniband/hw')
25 files changed, 638 insertions, 319 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 57176ddd4c50..bb95a6c0477b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
            sizeof(ep->com.mapped_remote_addr));
 }
 
+static int get_remote_addr(struct c4iw_ep *ep)
+{
+    int ret;
+
+    print_addr(&ep->com, __func__, "get_remote_addr");
+
+    ret = iwpm_get_remote_info(&ep->com.mapped_local_addr,
+                               &ep->com.mapped_remote_addr,
+                               &ep->com.remote_addr, RDMA_NL_C4IW);
+    if (ret)
+        pr_info(MOD "Unable to find remote peer addr info - err %d\n",
+                ret);
+
+    return ret;
+}
+
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
                      unsigned int *idx, int use_ts, int ipv6)
 {
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
     if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
         opt2 |= T5_OPT_2_VALID_F;
         opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-        opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+        opt2 |= T5_ISS_F;
     }
     t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
          status, status2errno(status));
 
     if (is_neg_adv(status)) {
-        dev_warn(&dev->rdev.lldi.pdev->dev,
-             "Connection problems for atid %u status %u (%s)\n",
-             atid, status, neg_adv_str(status));
+        PDBG("%s Connection problems for atid %u status %u (%s)\n",
+             __func__, atid, status, neg_adv_str(status));
+        ep->stats.connect_neg_adv++;
+        mutex_lock(&dev->rdev.stats.lock);
+        dev->rdev.stats.neg_adv++;
+        mutex_unlock(&dev->rdev.stats.lock);
         return 0;
     }
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
         u32 isn = (prandom_u32() & ~7UL) - 1;
         opt2 |= T5_OPT_2_VALID_F;
         opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-        opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+        opt2 |= T5_ISS_F;
         rpl5 = (void *)rpl;
         memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
         if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
     state_set(&child_ep->com, CONNECTING);
     child_ep->com.dev = dev;
     child_ep->com.cm_id = NULL;
+
+    /*
+     * The mapped_local and mapped_remote addresses get setup with
+     * the actual 4-tuple.  The local address will be based on the
+     * actual local address of the connection, but on the port number
+     * of the parent listening endpoint.  The remote address is
+     * setup based on a query to the IWPM since we don't know what it
+     * originally was before mapping.  If no mapping was done, then
+     * mapped_remote == remote, and mapped_local == local.
+     */
     if (iptype == 4) {
         struct sockaddr_in *sin = (struct sockaddr_in *)
-            &child_ep->com.local_addr;
+            &child_ep->com.mapped_local_addr;
+
         sin->sin_family = PF_INET;
         sin->sin_port = local_port;
         sin->sin_addr.s_addr = *(__be32 *)local_ip;
-        sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+        sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+        sin->sin_family = PF_INET;
+        sin->sin_port = ((struct sockaddr_in *)
+                         &parent_ep->com.local_addr)->sin_port;
+        sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+        sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
         sin->sin_family = PF_INET;
         sin->sin_port = peer_port;
         sin->sin_addr.s_addr = *(__be32 *)peer_ip;
     } else {
         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
-            &child_ep->com.local_addr;
+            &child_ep->com.mapped_local_addr;
+
         sin6->sin6_family = PF_INET6;
         sin6->sin6_port = local_port;
         memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
-        sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+        sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+        sin6->sin6_family = PF_INET6;
+        sin6->sin6_port = ((struct sockaddr_in6 *)
+                           &parent_ep->com.local_addr)->sin6_port;
+        memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+        sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
         sin6->sin6_family = PF_INET6;
         sin6->sin6_port = peer_port;
         memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
     }
+    memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+           sizeof(child_ep->com.remote_addr));
+    get_remote_addr(child_ep);
+
     c4iw_get_ep(&parent_ep->com);
     child_ep->parent_ep = parent_ep;
     child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
     ep = lookup_tid(t, tid);
     if (is_neg_adv(req->status)) {
-        dev_warn(&dev->rdev.lldi.pdev->dev,
-             "Negative advice on abort - tid %u status %d (%s)\n",
-             ep->hwtid, req->status, neg_adv_str(req->status));
+        PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+             __func__, ep->hwtid, req->status,
+             neg_adv_str(req->status));
+        ep->stats.abort_neg_adv++;
+        mutex_lock(&dev->rdev.stats.lock);
+        dev->rdev.stats.neg_adv++;
+        mutex_unlock(&dev->rdev.stats.lock);
         return 0;
     }
     PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
      * TP will ignore any value > 0 for MSS index.
      */
     req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
-    req->cookie = (unsigned long)skb;
+    req->cookie = (uintptr_t)skb;
 
     set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
     ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
         return 0;
     }
     if (is_neg_adv(req->status)) {
-        dev_warn(&dev->rdev.lldi.pdev->dev,
-             "Negative advice on abort - tid %u status %d (%s)\n",
-             ep->hwtid, req->status, neg_adv_str(req->status));
+        PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+             __func__, ep->hwtid, req->status,
+             neg_adv_str(req->status));
+        ep->stats.abort_neg_adv++;
+        dev->rdev.stats.neg_adv++;
         kfree_skb(skb);
         return 0;
     }
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ab7692ac2044..68ddb3710215 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
             FW_RI_RES_WR_NRES_V(1) |
             FW_WR_COMPL_F);
     res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-    res_wr->cookie = (unsigned long) &wr_wait;
+    res_wr->cookie = (uintptr_t)&wr_wait;
     res = res_wr->res;
     res->u.cq.restype = FW_RI_RES_TYPE_CQ;
     res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
             FW_RI_RES_WR_NRES_V(1) |
             FW_WR_COMPL_F);
     res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-    res_wr->cookie = (unsigned long) &wr_wait;
+    res_wr->cookie = (uintptr_t)&wr_wait;
     res = res_wr->res;
     res->u.cq.restype = FW_RI_RES_TYPE_CQ;
     res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         goto err4;
 
     cq->gen = 1;
-    cq->gts = rdev->lldi.gts_reg;
     cq->rdev = rdev;
 
     if (user) {
-        cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-            (cq->cqid << rdev->cqshift);
-        cq->ugts &= PAGE_MASK;
+        u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+        cq->ugts = (u64)rdev->bar2_pa + off;
+    } else if (is_t4(rdev->lldi.adapter_type)) {
+        cq->gts = rdev->lldi.gts_reg;
+        cq->qid_mask = -1U;
+    } else {
+        u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+        cq->gts = rdev->bar2_kva + off;
+        cq->qid_mask = rdev->qpmask;
     }
     return 0;
err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
     }
     PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
          __func__, chp->cq.cqid, chp, chp->cq.size,
-         chp->cq.memsize,
-         (unsigned long long) chp->cq.dma_addr);
+         chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
     return &chp->ibcq;
err5:
     kfree(mm2);
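A side note on the (uintptr_t) conversions above: uintptr_t is the integer type guaranteed to round-trip a pointer value, while unsigned long merely happens to be pointer-sized on the architectures this driver supports, so the new casts are the portable spelling. A minimal sketch of the pattern (hypothetical names, not the driver's code):

    #include <stdint.h>

    struct wr_wait { int done; };

    /* stash a pointer in a 64-bit hardware cookie field... */
    static uint64_t make_cookie(struct wr_wait *w)
    {
        return (uintptr_t)w;    /* uintptr_t round-trips the pointer */
    }

    /* ...and recover it when the completion arrives */
    static struct wr_wait *cookie_to_wait(uint64_t cookie)
    {
        return (struct wr_wait *)(uintptr_t)cookie;
    }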
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8fb295e4a9ab..cf54d6922dc4 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
     [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
     [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
     [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+    [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
     [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
     [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
 };
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
     int prev_ts_set = 0;
     int idx, end;
 
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
 
     idx = atomic_read(&dev->rdev.wr_log_idx) & (dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
                dev->rdev.stats.act_ofld_conn_fails);
     seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
                dev->rdev.stats.pas_ofld_conn_fails);
+    seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
     seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
     return 0;
 }
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
         cc = snprintf(epd->buf + epd->pos, space,
                       "ep %p cm_id %p qp %p state %d flags 0x%lx "
                       "history 0x%lx hwtid %d atid %d "
+                      "conn_na %u abort_na %u "
                       "%pI4:%d/%d <-> %pI4:%d/%d\n",
                       ep, ep->com.cm_id, ep->com.qp,
                       (int)ep->com.state, ep->com.flags,
                       ep->com.history, ep->hwtid, ep->atid,
+                      ep->stats.connect_neg_adv,
+                      ep->stats.abort_neg_adv,
                       &lsin->sin_addr, ntohs(lsin->sin_port),
                       ntohs(mapped_lsin->sin_port),
                       &rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
         cc = snprintf(epd->buf + epd->pos, space,
                       "ep %p cm_id %p qp %p state %d flags 0x%lx "
                       "history 0x%lx hwtid %d atid %d "
+                      "conn_na %u abort_na %u "
                       "%pI6:%d/%d <-> %pI6:%d/%d\n",
                       ep, ep->com.cm_id, ep->com.qp,
                       (int)ep->com.state, ep->com.flags,
                       ep->com.history, ep->hwtid, ep->atid,
+                      ep->stats.connect_neg_adv,
+                      ep->stats.abort_neg_adv,
                       &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
                       ntohs(mapped_lsin6->sin6_port),
                       &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
     c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
     /*
+     * This implementation assumes udb_density == ucq_density!  Eventually
+     * we might need to support this but for now fail the open. Also the
+     * cqid and qpid range must match for now.
+     */
+    if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
+        pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+               pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
+               rdev->lldi.ucq_density);
+        err = -EINVAL;
+        goto err1;
+    }
+    if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
+        rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
+        pr_err(MOD "%s: unsupported qp and cq id ranges "
+               "qp start %u size %u cq start %u size %u\n",
+               pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
+               rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
+               rdev->lldi.vr->cq.size);
+        err = -EINVAL;
+        goto err1;
+    }
+
+    /*
      * qpshift is the number of bits to shift the qpid left in order
      * to get the correct address of the doorbell for that qp.
      */
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
          rdev->lldi.vr->qp.size,
          rdev->lldi.vr->cq.start,
          rdev->lldi.vr->cq.size);
-    PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
+    PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
          "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
          (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-         (u64)pci_resource_start(rdev->lldi.pdev, 2),
+         (void *)pci_resource_start(rdev->lldi.pdev, 2),
          rdev->lldi.db_reg,
          rdev->lldi.gts_reg,
          rdev->qpshift, rdev->qpmask,
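For orientation, the qpshift/cqshift values this function derives are used to locate a queue's doorbell page inside BAR2: the queue id shifted left by the density-derived shift, masked down to a page boundary (the same arithmetic appears in create_cq() above). A rough sketch of the computation with illustrative values, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_MASK (~((uint32_t)4096 - 1))   /* 4 KiB pages assumed */

    int main(void)
    {
        uint32_t cqshift = 6;      /* example shift derived from ucq_density */
        uint32_t cqid = 0x123;
        /* page-aligned offset of this CQ's doorbell within BAR2 */
        uint32_t off = (cqid << cqshift) & MY_PAGE_MASK;
        printf("doorbell page offset: 0x%x\n", off);
        return 0;
    }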
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d87e1650f643..97bb5550a6cf 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -137,6 +137,7 @@ struct c4iw_stats {
     u64  tcam_full;
     u64  act_ofld_conn_fails;
     u64  pas_ofld_conn_fails;
+    u64  neg_adv;
 };
 
 struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
     int backlog;
 };
 
+struct c4iw_ep_stats {
+    unsigned connect_neg_adv;
+    unsigned abort_neg_adv;
+};
+
 struct c4iw_ep {
     struct c4iw_ep_common com;
     struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
     unsigned int retry_count;
     int snd_win;
     int rcv_win;
+    struct c4iw_ep_stats stats;
 };
 
 static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 3ef0cf9f5c44..cff815b91707 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
         if (i == (num_wqe-1)) {
             req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
                                         FW_WR_COMPL_F);
-            req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+            req->wr.wr_lo = (__force __be64)&wr_wait;
         } else
             req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
         req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
     mhp->attr.zbva = 0;
     mhp->attr.va_fbo = 0;
     mhp->attr.page_size = 0;
-    mhp->attr.len = ~0UL;
+    mhp->attr.len = ~0ULL;
     mhp->attr.pbl_size = 0;
 
     ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
                           FW_RI_STAG_NSMR, mhp->attr.perms,
-                          mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+                          mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
     if (ret)
         goto err1;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 15cae5a31018..389ced335bc5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
             FW_RI_RES_WR_NRES_V(2) |
             FW_WR_COMPL_F);
     res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-    res_wr->cookie = (unsigned long) &wr_wait;
+    res_wr->cookie = (uintptr_t)&wr_wait;
     res = res_wr->res;
     res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
     res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
     wqe->flowid_len16 = cpu_to_be32(
         FW_WR_FLOWID_V(ep->hwtid) |
         FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
-    wqe->cookie = (unsigned long) &ep->com.wr_wait;
+    wqe->cookie = (uintptr_t)&ep->com.wr_wait;
 
     wqe->u.fini.type = FW_RI_TYPE_FINI;
     ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
         FW_WR_FLOWID_V(qhp->ep->hwtid) |
         FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
-    wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
+    wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
 
     wqe->u.init.type = FW_RI_TYPE_INIT;
     wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
         mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
         insert_mmap(ucontext, mm2);
         mm3->key = uresp.sq_db_gts_key;
-        mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
+        mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
         mm3->len = PAGE_SIZE;
         insert_mmap(ucontext, mm3);
         mm4->key = uresp.rq_db_gts_key;
-        mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
+        mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
         mm4->len = PAGE_SIZE;
         insert_mmap(ucontext, mm4);
         if (mm5) {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 871cdcac7be2..7f2a6c244d25 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -539,6 +539,7 @@ struct t4_cq {
     size_t memsize;
     __be64 bits_type_ts;
     u32 cqid;
+    u32 qid_mask;
     int vector;
     u16 size; /* including status page */
     u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
     set_bit(CQ_ARMED, &cq->flags);
     while (cq->cidx_inc > CIDXINC_M) {
         val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
-              INGRESSQID_V(cq->cqid);
+              INGRESSQID_V(cq->cqid & cq->qid_mask);
         writel(val, cq->gts);
         cq->cidx_inc -= CIDXINC_M;
     }
     val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
-          INGRESSQID_V(cq->cqid);
+          INGRESSQID_V(cq->cqid & cq->qid_mask);
     writel(val, cq->gts);
     cq->cidx_inc = 0;
     return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
         u32 val;
 
         val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
-              INGRESSQID_V(cq->cqid);
+              INGRESSQID_V(cq->cqid & cq->qid_mask);
         writel(val, cq->gts);
         cq->cidx_inc = 0;
     }
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5e53327fc647..343e8daf2270 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */
 #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
 #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
 
-#define CONG_CNTRL_VALID   (1 << 18)
+#define T5_ISS_S    18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F    T5_ISS_V(1U)
 
 #endif /* _T4FW_RI_API_H_ */
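The T5_ISS_S/T5_ISS_V/T5_ISS_F trio added above follows the Chelsio shift/value/flag macro convention used throughout these headers, which is what the cm.c hunks switch to in place of the ad-hoc CONG_CNTRL_VALID define. A generic illustration of the pattern, with a hypothetical FOO field:

    /* _S = bit position, _V(x) = place a value at that position,
     * _F = the single-bit flag form */
    #define FOO_S    18
    #define FOO_V(x) ((x) << FOO_S)
    #define FOO_F    FOO_V(1U)

    /* usage: opt2 |= FOO_F sets bit 18, exactly like opt2 |= T5_ISS_F */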
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 33c45dfcbd88..1ca8e32a9592 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -82,14 +82,14 @@ static int create_file(const char *name, umode_t mode,
 {
     int error;
 
-    mutex_lock(&parent->d_inode->i_mutex);
+    mutex_lock(&d_inode(parent)->i_mutex);
     *dentry = lookup_one_len(name, parent, strlen(name));
     if (!IS_ERR(*dentry))
-        error = ipathfs_mknod(parent->d_inode, *dentry,
+        error = ipathfs_mknod(d_inode(parent), *dentry,
                               mode, fops, data);
     else
         error = PTR_ERR(*dentry);
-    mutex_unlock(&parent->d_inode->i_mutex);
+    mutex_unlock(&d_inode(parent)->i_mutex);
     return error;
 }
 
@@ -277,11 +277,11 @@ static int remove_file(struct dentry *parent, char *name)
     }
 
     spin_lock(&tmp->d_lock);
-    if (!d_unhashed(tmp) && tmp->d_inode) {
+    if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
         dget_dlock(tmp);
         __d_drop(tmp);
         spin_unlock(&tmp->d_lock);
-        simple_unlink(parent->d_inode, tmp);
+        simple_unlink(d_inode(parent), tmp);
     } else
         spin_unlock(&tmp->d_lock);
 
@@ -302,7 +302,7 @@ static int remove_device_files(struct super_block *sb,
     int ret;
 
     root = dget(sb->s_root);
-    mutex_lock(&root->d_inode->i_mutex);
+    mutex_lock(&d_inode(root)->i_mutex);
     snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
     dir = lookup_one_len(unit, root, strlen(unit));
 
@@ -315,10 +315,10 @@ static int remove_device_files(struct super_block *sb,
     remove_file(dir, "flash");
     remove_file(dir, "atomic_counters");
     d_delete(dir);
-    ret = simple_rmdir(root->d_inode, dir);
+    ret = simple_rmdir(d_inode(root), dir);
 
bail:
-    mutex_unlock(&root->d_inode->i_mutex);
+    mutex_unlock(&d_inode(root)->i_mutex);
     dput(root);
     return ret;
 }
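These hunks (and the matching qib_fs.c ones further down) replace direct dentry->d_inode dereferences with the VFS accessor helpers d_inode() and d_really_is_positive(). A minimal sketch of the same idiom in an out-of-tree pseudo-filesystem, assuming the i_mutex-era locking shown here:

    static int example_unlink_child(struct dentry *parent, struct dentry *child)
    {
        int err = 0;

        mutex_lock(&d_inode(parent)->i_mutex);
        /* d_really_is_positive() replaces open-coded "dentry->d_inode != NULL" */
        if (!d_unhashed(child) && d_really_is_positive(child))
            err = simple_unlink(d_inode(parent), child);
        mutex_unlock(&d_inode(parent)->i_mutex);
        return err;
    }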
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index a31e031afd87..0f00204d2ece 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
     int query_id;
     struct list_head list;
     int block_num;
+    ib_sa_comp_mask guid_indexes;
+    u8 method;
 };
 
 struct mlx4_next_alias_guid_work {
     u8 port;
     u8 block_num;
+    u8 method;
     struct mlx4_sriov_alias_guid_info_rec_det rec_det;
 };
 
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+                                     int *resched_delay_sec);
+
 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
                                          u8 port_num, u8 *p_data)
 {
@@ -118,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
     return IB_SA_COMP_MASK(4 + index);
 }
 
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+                                    int port, int slave_init)
+{
+    __be64 curr_guid, required_guid;
+    int record_num = slave / 8;
+    int index = slave % 8;
+    int port_index = port - 1;
+    unsigned long flags;
+    int do_work = 0;
+
+    spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+    if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+        GUID_STATE_NEED_PORT_INIT)
+        goto unlock;
+    if (!slave_init) {
+        curr_guid = *(__be64 *)&dev->sriov.
+            alias_guid.ports_guid[port_index].
+            all_rec_per_port[record_num].
+            all_recs[GUID_REC_SIZE * index];
+        if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+            !curr_guid)
+            goto unlock;
+        required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+    } else {
+        required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+        if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+            goto unlock;
+    }
+    *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+        all_rec_per_port[record_num].
+        all_recs[GUID_REC_SIZE * index] = required_guid;
+    dev->sriov.alias_guid.ports_guid[port_index].
+        all_rec_per_port[record_num].guid_indexes
+        |= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+    dev->sriov.alias_guid.ports_guid[port_index].
+        all_rec_per_port[record_num].status
+        = MLX4_GUID_INFO_STATUS_IDLE;
+    /* set to run immediately */
+    dev->sriov.alias_guid.ports_guid[port_index].
+        all_rec_per_port[record_num].time_to_run = 0;
+    dev->sriov.alias_guid.ports_guid[port_index].
+        all_rec_per_port[record_num].
+        guids_retry_schedule[index] = 0;
+    do_work = 1;
+unlock:
+    spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+    if (do_work)
+        mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
 /*
  * Whenever new GUID is set/unset (guid table change) create event and
  * notify the relevant slave (master also should be notified).
@@ -138,10 +194,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
     enum slave_port_state prev_state;
     __be64 tmp_cur_ag, form_cache_ag;
     enum slave_port_gen_event gen_event;
+    struct mlx4_sriov_alias_guid_info_rec_det *rec;
+    unsigned long flags;
+    __be64 required_value;
 
     if (!mlx4_is_master(dev->dev))
         return;
 
+    rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+        all_rec_per_port[block_num];
     guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
                                ports_guid[port_num - 1].
                                all_rec_per_port[block_num].guid_indexes);
@@ -166,8 +227,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
          */
         if (tmp_cur_ag != form_cache_ag)
             continue;
-        mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+
+        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+        required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+        if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+            required_value = 0;
+
+        if (tmp_cur_ag == required_value) {
+            rec->guid_indexes = rec->guid_indexes &
+                ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+        } else {
+            /* may notify port down if value is 0 */
+            if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+                spin_unlock_irqrestore(&dev->sriov.
+                    alias_guid.ag_work_lock, flags);
+                continue;
+            }
+        }
+        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+                               flags);
+        mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
         /*2 cases: Valid GUID, and Invalid Guid*/
 
         if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -188,10 +268,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
             set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
                                           MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
                                           &gen_event);
-            pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
-                     slave_id, port_num);
-            mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
-                                           MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+            if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+                pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+                         slave_id, port_num);
+                mlx4_gen_port_state_change_eqe(dev->dev,
+                                               slave_id,
+                                               port_num,
+                                               MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+            }
         }
     }
 }
@@ -206,6 +290,9 @@ static void aliasguid_query_handler(int status,
     int i;
     struct mlx4_sriov_alias_guid_info_rec_det *rec;
     unsigned long flags, flags1;
+    ib_sa_comp_mask declined_guid_indexes = 0;
+    ib_sa_comp_mask applied_guid_indexes = 0;
+    unsigned int resched_delay_sec = 0;
 
     if (!context)
         return;
@@ -216,9 +303,9 @@ static void aliasguid_query_handler(int status,
             all_rec_per_port[cb_ctx->block_num];
 
     if (status) {
-        rec->status = MLX4_GUID_INFO_STATUS_IDLE;
         pr_debug("(port: %d) failed: status = %d\n",
                  cb_ctx->port, status);
+        rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
         goto out;
     }
@@ -235,57 +322,101 @@ static void aliasguid_query_handler(int status,
     rec = &dev->sriov.alias_guid.ports_guid[port_index].
         all_rec_per_port[guid_rec->block_num];
 
-    rec->status = MLX4_GUID_INFO_STATUS_SET;
-    rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+    spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
     for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
-        __be64 tmp_cur_ag;
-        tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+        __be64 sm_response, required_val;
+
+        if (!(cb_ctx->guid_indexes &
+            mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+            continue;
+        sm_response = *(__be64 *)&guid_rec->guid_info_list
+                [i * GUID_REC_SIZE];
+        required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+        if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+            if (required_val ==
+                cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+                goto next_entry;
+
+            /* A new value was set till we got the response */
+            pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+                     be64_to_cpu(required_val),
+                     i, guid_rec->block_num);
+            goto entry_declined;
+        }
+
         /* check if the SM didn't assign one of the records.
-         * if it didn't, if it was not sysadmin request:
-         * ask the SM to give a new GUID, (instead of the driver request).
+         * if it didn't, re-ask for.
          */
-        if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
-            mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
-                         "block_num: %d was declined by SM, "
-                         "ownership by %d (0 = driver, 1=sysAdmin,"
-                         " 2=None)\n", __func__, i,
-                         guid_rec->block_num, rec->ownership);
-            if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
-                /* if it is driver assign, asks for new GUID from SM*/
-                *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
-                    MLX4_NOT_SET_GUID;
-
-                /* Mark the record as not assigned, and let it
-                 * be sent again in the next work sched.*/
-                rec->status = MLX4_GUID_INFO_STATUS_IDLE;
-                rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
-            }
+        if (sm_response == MLX4_NOT_SET_GUID) {
+            if (rec->guids_retry_schedule[i] == 0)
+                mlx4_ib_warn(&dev->ib_dev,
+                             "%s:Record num %d in block_num: %d was declined by SM\n",
+                             __func__, i,
+                             guid_rec->block_num);
+            goto entry_declined;
         } else {
            /* properly assigned record. */
           /* We save the GUID we just got from the SM in the
            * admin_guid in order to be persistent, and in the
            * request from the sm the process will ask for the same GUID */
-            if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
-                tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
-                /* the sysadmin assignment failed.*/
-                mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
-                             " admin guid after SysAdmin "
-                             "configuration. "
-                             "Record num %d in block_num:%d "
-                             "was declined by SM, "
-                             "new val(0x%llx) was kept\n",
-                             __func__, i,
-                             guid_rec->block_num,
-                             be64_to_cpu(*(__be64 *) &
                                          rec->all_recs[i * GUID_REC_SIZE]));
+            if (required_val &&
+                sm_response != required_val) {
+                /* Warn only on first retry */
+                if (rec->guids_retry_schedule[i] == 0)
+                    mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+                                 " admin guid after SysAdmin "
+                                 "configuration. "
+                                 "Record num %d in block_num:%d "
+                                 "was declined by SM, "
+                                 "new val(0x%llx) was kept, SM returned (0x%llx)\n",
+                                 __func__, i,
+                                 guid_rec->block_num,
+                                 be64_to_cpu(required_val),
+                                 be64_to_cpu(sm_response));
+                goto entry_declined;
             } else {
-                memcpy(&rec->all_recs[i * GUID_REC_SIZE],
-                       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
-                       GUID_REC_SIZE);
+                *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+                    sm_response;
+                if (required_val == 0)
+                    mlx4_set_admin_guid(dev->dev,
+                                        sm_response,
+                                        (guid_rec->block_num
+                                        * NUM_ALIAS_GUID_IN_REC) + i,
+                                        cb_ctx->port);
+                goto next_entry;
             }
         }
+entry_declined:
+        declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+        rec->guids_retry_schedule[i] =
+            (rec->guids_retry_schedule[i] == 0) ? 1 :
+            min((unsigned int)60,
+                rec->guids_retry_schedule[i] * 2);
+        /* using the minimum value among all entries in that record */
+        resched_delay_sec = (resched_delay_sec == 0) ?
                rec->guids_retry_schedule[i] :
                min(resched_delay_sec,
                    rec->guids_retry_schedule[i]);
+        continue;
+
+next_entry:
+        rec->guids_retry_schedule[i] = 0;
     }
+
+    applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
+    if (declined_guid_indexes ||
+        rec->guid_indexes & ~(applied_guid_indexes)) {
+        pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+                 guid_rec->block_num,
+                 be64_to_cpu((__force __be64)rec->guid_indexes),
+                 be64_to_cpu((__force __be64)applied_guid_indexes),
+                 be64_to_cpu((__force __be64)declined_guid_indexes));
+        rec->time_to_run = ktime_get_real_ns() +
+            resched_delay_sec * NSEC_PER_SEC;
+    } else {
+        rec->status = MLX4_GUID_INFO_STATUS_SET;
+    }
+    spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
     /*
     The func is call here to close the cases when the
     sm doesn't send smp, so in the sa response the driver
@@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status,
out:
     spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
     spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
-    if (!dev->sriov.is_going_down)
+    if (!dev->sriov.is_going_down) {
+        get_low_record_time_index(dev, port_index, &resched_delay_sec);
         queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
                            &dev->sriov.alias_guid.ports_guid[port_index].
-                           alias_guid_work, 0);
+                           alias_guid_work,
+                           msecs_to_jiffies(resched_delay_sec * 1000));
+    }
     if (cb_ctx->sa_query) {
         list_del(&cb_ctx->list);
         kfree(cb_ctx);
@@ -317,9 +451,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
     ib_sa_comp_mask comp_mask = 0;
 
     dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
-        = MLX4_GUID_INFO_STATUS_IDLE;
-    dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
-        = MLX4_GUID_INFO_RECORD_SET;
+        = MLX4_GUID_INFO_STATUS_SET;
 
     /* calculate the comp_mask for that record.*/
     for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -333,19 +465,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
           need to assign GUIDs, then don't put it up for assignment.
         */
         if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
-            (!index && !i) ||
-            MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
-                ports_guid[port - 1].all_rec_per_port[index].ownership)
+            (!index && !i))
             continue;
         comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
     }
     dev->sriov.alias_guid.ports_guid[port - 1].
-        all_rec_per_port[index].guid_indexes = comp_mask;
+        all_rec_per_port[index].guid_indexes |= comp_mask;
+    if (dev->sriov.alias_guid.ports_guid[port - 1].
+        all_rec_per_port[index].guid_indexes)
+        dev->sriov.alias_guid.ports_guid[port - 1].
+        all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
 }
 
 static int set_guid_rec(struct ib_device *ibdev,
-                        u8 port, int index,
-                        struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+                        struct mlx4_next_alias_guid_work *rec)
 {
     int err;
     struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -354,6 +488,9 @@ static int set_guid_rec(struct ib_device *ibdev,
     struct ib_port_attr attr;
     struct mlx4_alias_guid_work_context *callback_context;
     unsigned long resched_delay, flags, flags1;
+    u8 port = rec->port + 1;
+    int index = rec->block_num;
+    struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
     struct list_head *head =
        &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
@@ -380,6 +517,8 @@ static int set_guid_rec(struct ib_device *ibdev,
     callback_context->port = port;
     callback_context->dev = dev;
     callback_context->block_num = index;
+    callback_context->guid_indexes = rec_det->guid_indexes;
+    callback_context->method = rec->method;
 
     memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
@@ -399,7 +538,7 @@ static int set_guid_rec(struct ib_device *ibdev,
     callback_context->query_id =
        ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
                                  ibdev, port, &guid_info_rec,
-                                 comp_mask, rec_det->method, 1000,
+                                 comp_mask, rec->method, 1000,
                                  GFP_KERNEL, aliasguid_query_handler,
                                  callback_context,
                                  &callback_context->sa_query);
@@ -434,6 +573,30 @@ out:
     return err;
 }
 
+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+    int j, k, entry;
+    __be64 guid;
+
+    /*Check if the SM doesn't need to assign the GUIDs*/
+    for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+        for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+            entry = j * NUM_ALIAS_GUID_IN_REC + k;
+            /* no request for the 0 entry (hw guid) */
+            if (!entry || entry > dev->dev->persist->num_vfs ||
+                !mlx4_is_slave_active(dev->dev, entry))
+                continue;
+            guid = mlx4_get_admin_guid(dev->dev, entry, port);
+            *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+                all_rec_per_port[j].all_recs
+                [GUID_REC_SIZE * k] = guid;
+            pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+                     entry,
+                     be64_to_cpu(guid),
+                     port);
+        }
+    }
+}
+
 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 {
     int i;
@@ -443,6 +606,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 
     spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
     spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+    if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+        GUID_STATE_NEED_PORT_INIT) {
+        mlx4_ib_guid_port_init(dev, port);
+        dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+            (~GUID_STATE_NEED_PORT_INIT);
+    }
     for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
         invalidate_guid_record(dev, port, i);
@@ -462,60 +632,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
     spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
 }
 
-/* The function returns the next record that was
- * not configured (or failed to be configured) */
-static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
-                                     struct mlx4_next_alias_guid_work *rec)
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+                                struct mlx4_next_alias_guid_work *next_rec,
+                                int record_index)
 {
-    int j;
-    unsigned long flags;
+    int i;
+    int lowset_time_entry = -1;
+    int lowest_time = 0;
+    ib_sa_comp_mask delete_guid_indexes = 0;
+    ib_sa_comp_mask set_guid_indexes = 0;
+    struct mlx4_sriov_alias_guid_info_rec_det *rec =
+            &dev->sriov.alias_guid.ports_guid[port].
+            all_rec_per_port[record_index];
 
-    for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-        spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
-        if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
-            MLX4_GUID_INFO_STATUS_IDLE) {
-            memcpy(&rec->rec_det,
-                   &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
-                   sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
-            rec->port = port;
-            rec->block_num = j;
-            dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
-                MLX4_GUID_INFO_STATUS_PENDING;
-            spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-            return 0;
+    for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+        if (!(rec->guid_indexes &
+            mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+            continue;
+
+        if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+            cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+            delete_guid_indexes |=
+                mlx4_ib_get_aguid_comp_mask_from_ix(i);
+        else
+            set_guid_indexes |=
+                mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+        if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+            lowest_time) {
+            lowset_time_entry = i;
+            lowest_time = rec->guids_retry_schedule[i];
         }
-        spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
     }
-    return -ENOENT;
+
+    memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+    next_rec->port = port;
+    next_rec->block_num = record_index;
+
+    if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+        cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+        next_rec->rec_det.guid_indexes = delete_guid_indexes;
+        next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+    } else {
+        next_rec->rec_det.guid_indexes = set_guid_indexes;
+        next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+    }
 }
 
-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
-                                             int rec_index,
-                                             struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+                                     int *resched_delay_sec)
 {
-    dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
-        rec_det->guid_indexes;
-    memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
-           rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-    dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
-        rec_det->status;
+    int record_index = -1;
+    u64 low_record_time = 0;
+    struct mlx4_sriov_alias_guid_info_rec_det rec;
+    int j;
+
+    for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+        rec = dev->sriov.alias_guid.ports_guid[port].
+            all_rec_per_port[j];
+        if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+            rec.guid_indexes) {
+            if (record_index == -1 ||
+                rec.time_to_run < low_record_time) {
+                record_index = j;
+                low_record_time = rec.time_to_run;
+            }
+        }
+    }
+    if (resched_delay_sec) {
+        u64 curr_time = ktime_get_real_ns();
+
+        *resched_delay_sec = (low_record_time < curr_time) ? 0 :
            div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+    }
+
+    return record_index;
 }
 
-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+                                     struct mlx4_next_alias_guid_work *rec)
 {
-    int j;
-    struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
-    for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
-        memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-        rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
-            IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
-            IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
-            IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
-            IB_SA_GUIDINFO_REC_GID7;
-        rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
-        set_administratively_guid_record(dev, port, j, &rec_det);
+    unsigned long flags;
+    int record_index;
+    int ret = 0;
+
+    spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+    record_index = get_low_record_time_index(dev, port, NULL);
+
+    if (record_index < 0) {
+        ret = -ENOENT;
+        goto out;
     }
+
+    set_required_record(dev, port, rec, record_index);
+out:
+    spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+    return ret;
 }
 
 static void alias_guid_work(struct work_struct *work)
@@ -545,9 +762,7 @@ static void alias_guid_work(struct work_struct *work)
         goto out;
     }
 
-    set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
-                 &rec->rec_det);
-
+    set_guid_rec(&dev->ib_dev, rec);
out:
     kfree(rec);
 }
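The next hunk relies on a standard workqueue idiom: queue_delayed_work() is a no-op while the same struct delayed_work is still pending, so running it earlier than its scheduled time requires cancelling the pending timer first. A sketch of the pattern with made-up my_wq/my_dwork names:

    /* reschedule an already-pending delayed work to run immediately */
    static void kick_now(struct workqueue_struct *my_wq,
                         struct delayed_work *my_dwork)
    {
        cancel_delayed_work(my_dwork);          /* drop the pending timer, if any */
        queue_delayed_work(my_wq, my_dwork, 0); /* run as soon as possible */
    }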
@@ -562,6 +777,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
     spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
     spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
     if (!dev->sriov.is_going_down) {
+        /* If there is pending one should cancell then run, otherwise
+         * won't run till previous one is ended as same work
+         * struct is used.
+         */
+        cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+                            alias_guid_work);
         queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
                            &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
     }
@@ -609,7 +830,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 {
     char alias_wq_name[15];
     int ret = 0;
-    int i, j, k;
+    int i, j;
     union ib_gid gid;
 
     if (!mlx4_is_master(dev->dev))
@@ -633,33 +854,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
     for (i = 0 ; i < dev->num_ports; i++) {
         memset(&dev->sriov.alias_guid.ports_guid[i], 0,
                sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
-        /*Check if the SM doesn't need to assign the GUIDs*/
+        dev->sriov.alias_guid.ports_guid[i].state_flags |=
+                GUID_STATE_NEED_PORT_INIT;
         for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-            if (mlx4_ib_sm_guid_assign) {
-                dev->sriov.alias_guid.ports_guid[i].
-                    all_rec_per_port[j].
-                    ownership = MLX4_GUID_DRIVER_ASSIGN;
-                continue;
-            }
-            dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
-                    ownership = MLX4_GUID_NONE_ASSIGN;
-            /*mark each val as it was deleted,
-              till the sysAdmin will give it valid val*/
-            for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
-                *(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
-                    all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
-                        cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
-            }
+            /* mark each val as it was deleted */
+            memset(dev->sriov.alias_guid.ports_guid[i].
+                all_rec_per_port[j].all_recs, 0xFF,
+                sizeof(dev->sriov.alias_guid.ports_guid[i].
+                all_rec_per_port[j].all_recs));
         }
         INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
         /*prepare the records, set them to be allocated by sm*/
+        if (mlx4_ib_sm_guid_assign)
+            for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+                mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
         for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
             invalidate_guid_record(dev, i + 1, j);
 
         dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
         dev->sriov.alias_guid.ports_guid[i].port  = i;
-        if (mlx4_ib_sm_guid_assign)
-            set_all_slaves_guids(dev, i);
 
         snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
         dev->sriov.alias_guid.ports_guid[i].wq =
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 59040265e361..9cd2b002d7ae 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
                                        tun_qp->ring[i].addr,
                                        rx_buf_size,
                                        DMA_FROM_DEVICE);
+        if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
+            kfree(tun_qp->ring[i].addr);
+            goto err;
+        }
     }
 
     for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
                                        tun_qp->tx_ring[i].buf.addr,
                                        tx_buf_size,
                                        DMA_TO_DEVICE);
+        if (ib_dma_mapping_error(ctx->ib_dev,
+                                 tun_qp->tx_ring[i].buf.map)) {
+            kfree(tun_qp->tx_ring[i].buf.addr);
+            goto tx_err;
+        }
         tun_qp->tx_ring[i].ah = NULL;
     }
     spin_lock_init(&tun_qp->tx_lock);
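The two mad.c hunks above (and the alloc_proxy_bufs hunk in mlx4/qp.c below) add the check the DMA API requires: every ib_dma_map_single() must be followed by ib_dma_mapping_error() before the handle is used. The general shape, with a hypothetical struct my_buf standing in for the driver's ring entries:

    /* illustrative wrapper only; 'struct my_buf' is not driver code */
    struct my_buf { void *addr; u64 map; };

    static int map_rx_buf(struct ib_device *ibdev, struct my_buf *buf,
                          size_t size)
    {
        buf->map = ib_dma_map_single(ibdev, buf->addr, size, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(ibdev, buf->map)) {
            kfree(buf->addr);   /* unwind the allocation, as the hunks do */
            return -ENOMEM;
        }
        return 0;
    }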
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 976bea794b5f..57070c529dfb 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -66,9 +66,9 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-int mlx4_ib_sm_guid_assign = 1;
+int mlx4_ib_sm_guid_assign = 0;
 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
-MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
 
 static const char mlx4_ib_version[] =
    DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
@@ -2791,9 +2791,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
     case MLX4_DEV_EVENT_SLAVE_INIT:
         /* here, p is the slave id */
         do_slave_init(ibdev, p, 1);
+        if (mlx4_is_master(dev)) {
+            int i;
+
+            for (i = 1; i <= ibdev->num_ports; i++) {
+                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+                    == IB_LINK_LAYER_INFINIBAND)
+                    mlx4_ib_slave_alias_guid_event(ibdev,
+                                                   p, i,
+                                                   1);
+            }
+        }
         return;
 
     case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+        if (mlx4_is_master(dev)) {
+            int i;
+
+            for (i = 1; i <= ibdev->num_ports; i++) {
+                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+                    == IB_LINK_LAYER_INFINIBAND)
+                    mlx4_ib_slave_alias_guid_event(ibdev,
+                                                   p, i,
+                                                   0);
+            }
+        }
         /* here, p is the slave id */
         do_slave_init(ibdev, p, 0);
         return;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f829fd935b79..fce3934372a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -342,14 +342,9 @@ struct mlx4_ib_ah {
 enum mlx4_guid_alias_rec_status {
     MLX4_GUID_INFO_STATUS_IDLE,
     MLX4_GUID_INFO_STATUS_SET,
-    MLX4_GUID_INFO_STATUS_PENDING,
 };
 
-enum mlx4_guid_alias_rec_ownership {
-    MLX4_GUID_DRIVER_ASSIGN,
-    MLX4_GUID_SYSADMIN_ASSIGN,
-    MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
-};
+#define GUID_STATE_NEED_PORT_INIT 0x01
 
 enum mlx4_guid_alias_rec_method {
     MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
@@ -360,8 +355,8 @@ struct mlx4_sriov_alias_guid_info_rec_det {
     u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
     ib_sa_comp_mask guid_indexes; /*indicates what from the 8 records are valid*/
     enum mlx4_guid_alias_rec_status status; /*indicates the administraively status of the record.*/
-    u8 method; /*set or delete*/
-    enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+    unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
+    u64 time_to_run;
 };
 
 struct mlx4_sriov_alias_guid_port_rec_det {
@@ -369,6 +364,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
     struct workqueue_struct *wq;
     struct delayed_work alias_guid_work;
     u8 port;
+    u32 state_flags;
     struct mlx4_sriov_alias_guid *parent;
     struct list_head cb_list;
 };
@@ -802,6 +798,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
                              struct attribute *attr);
 ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+                                    int port, int slave_init);
 
 int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ed2bd6701f9b..02fc91c68027 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -566,6 +566,10 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
            ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
                              sizeof (struct mlx4_ib_proxy_sqp_hdr),
                              DMA_FROM_DEVICE);
+        if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
+            kfree(qp->sqp_proxy_rcv[i].addr);
+            goto err;
+        }
     }
     return 0;
 
@@ -2605,8 +2609,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
 
     memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
 
-    *lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
-                               wr->wr.ud.hlen);
+    *lso_hdr_sz  = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
     *lso_seg_len = halign;
     return 0;
 }
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index d10c2b8a5dad..6797108ce873 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -46,21 +46,17 @@ static ssize_t show_admin_alias_guid(struct device *dev,
                                      struct device_attribute *attr, char *buf)
 {
-    int record_num;/*0-15*/
-    int guid_index_in_rec; /*0 - 7*/
     struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
        container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
     struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
     struct mlx4_ib_dev *mdev = port->dev;
+    __be64 sysadmin_ag_val;
 
-    record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
-    guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+    sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
+                                          mlx4_ib_iov_dentry->entry_num,
+                                          port->num);
 
-    return sprintf(buf, "%llx\n",
-                   be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
-                               ports_guid[port->num - 1].
-                               all_rec_per_port[record_num].
-                               all_recs[8 * guid_index_in_rec]));
+    return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
 }
 
 /* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -80,6 +76,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
     struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
     struct mlx4_ib_dev *mdev = port->dev;
     u64 sysadmin_ag_val;
+    unsigned long flags;
 
     record_num = mlx4_ib_iov_dentry->entry_num / 8;
     guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
@@ -87,6 +84,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
         pr_err("GUID 0 block 0 is RO\n");
         return count;
     }
+    spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
     sscanf(buf, "%llx", &sysadmin_ag_val);
     *(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
        all_rec_per_port[record_num].
@@ -96,33 +94,15 @@ static ssize_t store_admin_alias_guid(struct device *dev,
     /* Change the state to be pending for update */
     mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
        = MLX4_GUID_INFO_STATUS_IDLE ;
-
-    mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-        = MLX4_GUID_INFO_RECORD_SET;
-
-    switch (sysadmin_ag_val) {
-    case MLX4_GUID_FOR_DELETE_VAL:
-        mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-            = MLX4_GUID_INFO_RECORD_DELETE;
-        mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-            = MLX4_GUID_SYSADMIN_ASSIGN;
-        break;
-    /* The sysadmin requests the SM to re-assign */
-    case MLX4_NOT_SET_GUID:
-        mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-            = MLX4_GUID_DRIVER_ASSIGN;
-        break;
-    /* The sysadmin requests a specific value.*/
-    default:
-        mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-            = MLX4_GUID_SYSADMIN_ASSIGN;
-        break;
-    }
+    mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
+                        mlx4_ib_iov_dentry->entry_num,
+                        port->num);
 
     /* set the record index */
     mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
-        = mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+        |= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
 
+    spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
     mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
 
     return count;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b2a6dc8ea99..9f9d5c563a61 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
     [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
     [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
     [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+    [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
     [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
     [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
     [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6f09a72e78d7..72b43417cbe3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
     memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
 }
 
+static void record_sockaddr_info(struct sockaddr_storage *addr_info,
+                                 nes_addr_t *ip_addr, u16 *port_num)
+{
+    struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
+
+    if (in_addr->sin_family == AF_INET) {
+        *ip_addr = ntohl(in_addr->sin_addr.s_addr);
+        *port_num = ntohs(in_addr->sin_port);
+    }
+}
+
 /*
  * nes_record_pm_msg - Save the received mapping info
  */
 static void nes_record_pm_msg(struct nes_cm_info *cm_info,
                               struct iwpm_sa_data *pm_msg)
 {
-    struct sockaddr_in *mapped_loc_addr =
-            (struct sockaddr_in *)&pm_msg->mapped_loc_addr;
-    struct sockaddr_in *mapped_rem_addr =
-            (struct sockaddr_in *)&pm_msg->mapped_rem_addr;
-
-    if (mapped_loc_addr->sin_family == AF_INET) {
-        cm_info->mapped_loc_addr =
-            ntohl(mapped_loc_addr->sin_addr.s_addr);
-        cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);
-    }
-    if (mapped_rem_addr->sin_family == AF_INET) {
-        cm_info->mapped_rem_addr =
-            ntohl(mapped_rem_addr->sin_addr.s_addr);
-        cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);
-    }
+    record_sockaddr_info(&pm_msg->mapped_loc_addr,
+                         &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
+
+    record_sockaddr_info(&pm_msg->mapped_rem_addr,
+                         &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
+}
+
+/*
+ * nes_get_reminfo - Get the address info of the remote connecting peer
+ */
+static int nes_get_remote_addr(struct nes_cm_node *cm_node)
+{
+    struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
+    struct sockaddr_storage remote_addr;
+    int ret;
+
+    nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
+                        htons(cm_node->mapped_loc_port), &mapped_loc_addr);
+    nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
+                        htons(cm_node->mapped_rem_port), &mapped_rem_addr);
+
+    ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
+                               &remote_addr, RDMA_NL_NES);
+    if (ret)
+        nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
+    else
+        record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
+                             &cm_node->rem_port);
+    return ret;
 }
 
 /**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
         return NULL;
 
     /* set our node specific transport info */
-    cm_node->loc_addr = cm_info->loc_addr;
+    if (listener) {
+        cm_node->loc_addr = listener->loc_addr;
+        cm_node->loc_port = listener->loc_port;
+    } else {
+        cm_node->loc_addr = cm_info->loc_addr;
+        cm_node->loc_port = cm_info->loc_port;
+    }
     cm_node->rem_addr = cm_info->rem_addr;
-    cm_node->loc_port = cm_info->loc_port;
     cm_node->rem_port = cm_info->rem_port;
 
     cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
         cm_node->state = NES_CM_STATE_ESTABLISHED;
         if (datasize) {
             cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+            nes_get_remote_addr(cm_node);
             handle_rcv_mpa(cm_node, skb);
         } else { /* rcvd ACK only */
             dev_kfree_skb_any(skb);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ffd48bfc4923..ba5173e24973 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
 extern u32 qib_cpulist_count;
 extern unsigned long *qib_cpulist;
 
-extern unsigned qib_wc_pat;
 extern unsigned qib_cc_table_size;
 int qib_init(struct qib_devdata *, int);
 int init_chip_wc_pat(struct qib_devdata *dd, u32);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 9ea6c440a00c..725881890c4a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
     vma->vm_flags &= ~VM_MAYREAD;
     vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
-    if (qib_wc_pat)
+    /* We used PAT if wc_cookie == 0 */
+    if (!dd->wc_cookie)
         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
     ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
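mmap_piobufs() above shows the usual way a driver exposes a write-combined PIO aperture to userspace: mark the vma's page protection WC, then remap the physical range into the process. A condensed sketch with hypothetical phys/len parameters and error handling trimmed:

    static int my_mmap_wc(struct vm_area_struct *vma, unsigned long phys,
                          unsigned long len)
    {
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
        /* request write-combining for this mapping */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                                  len, vma->vm_page_prot);
    }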
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 650897a8591e..bdd5d3857203 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -89,14 +89,14 @@ static int create_file(const char *name, umode_t mode,
 {
     int error;
 
-    mutex_lock(&parent->d_inode->i_mutex);
+    mutex_lock(&d_inode(parent)->i_mutex);
     *dentry = lookup_one_len(name, parent, strlen(name));
     if (!IS_ERR(*dentry))
-        error = qibfs_mknod(parent->d_inode, *dentry,
+        error = qibfs_mknod(d_inode(parent), *dentry,
                             mode, fops, data);
     else
         error = PTR_ERR(*dentry);
-    mutex_unlock(&parent->d_inode->i_mutex);
+    mutex_unlock(&d_inode(parent)->i_mutex);
     return error;
 }
 
@@ -455,10 +455,10 @@ static int remove_file(struct dentry *parent, char *name)
     }
 
     spin_lock(&tmp->d_lock);
-    if (!d_unhashed(tmp) && tmp->d_inode) {
+    if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
         __d_drop(tmp);
         spin_unlock(&tmp->d_lock);
-        simple_unlink(parent->d_inode, tmp);
+        simple_unlink(d_inode(parent), tmp);
     } else {
         spin_unlock(&tmp->d_lock);
     }
@@ -481,7 +481,7 @@ static int remove_device_files(struct super_block *sb,
     int ret, i;
 
     root = dget(sb->s_root);
-    mutex_lock(&root->d_inode->i_mutex);
+    mutex_lock(&d_inode(root)->i_mutex);
     snprintf(unit, sizeof(unit), "%u", dd->unit);
     dir = lookup_one_len(unit, root, strlen(unit));
 
@@ -491,7 +491,7 @@ static int remove_device_files(struct super_block *sb,
         goto bail;
     }
 
-    mutex_lock(&dir->d_inode->i_mutex);
+    mutex_lock(&d_inode(dir)->i_mutex);
     remove_file(dir, "counters");
     remove_file(dir, "counter_names");
     remove_file(dir, "portcounter_names");
@@ -506,13 +506,13 @@ static int remove_device_files(struct super_block *sb,
         }
     }
     remove_file(dir, "flash");
-    mutex_unlock(&dir->d_inode->i_mutex);
-    ret = simple_rmdir(root->d_inode, dir);
+    mutex_unlock(&d_inode(dir)->i_mutex);
+    ret = simple_rmdir(d_inode(root), dir);
     d_delete(dir);
     dput(dir);
 
bail:
-    mutex_unlock(&root->d_inode->i_mutex);
+    mutex_unlock(&d_inode(root)->i_mutex);
     dput(root);
     return ret;
 }
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 0d2ba59af30a..4b927809d1a1 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
     qib_6120_config_ctxts(dd);
     qib_set_ctxtcnt(dd);
 
-    if (qib_wc_pat) {
-        ret = init_chip_wc_pat(dd, 0);
-        if (ret)
-            goto bail;
-    }
+    ret = init_chip_wc_pat(dd, 0);
+    if (ret)
+        goto bail;
     set_6120_baseaddrs(dd); /* set chip access pointers now */
 
     ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 22affda8af88..00b2af211157 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
     qib_7220_config_ctxts(dd);
     qib_set_ctxtcnt(dd);  /* needed for PAT setup */
 
-    if (qib_wc_pat) {
-        ret = init_chip_wc_pat(dd, 0);
-        if (ret)
-            goto bail;
-    }
+    ret = init_chip_wc_pat(dd, 0);
+    if (ret)
+        goto bail;
     set_7220_baseaddrs(dd); /* set chip access pointers now */
 
     ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ef97b71c8f7d..f32b4628e991 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
     unsigned features, pidx, sbufcnt;
     int ret, mtu;
     u32 sbufs, updthresh;
+    resource_size_t vl15off;
 
     /* pport structs are contiguous, allocated after devdata */
     ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
     qib_7322_config_ctxts(dd);
     qib_set_ctxtcnt(dd);
 
-    if (qib_wc_pat) {
-        resource_size_t vl15off;
-        /*
-         * We do not set WC on the VL15 buffers to avoid
-         * a rare problem with unaligned writes from
-         * interrupt-flushed store buffers, so we need
-         * to map those separately here.  We can't solve
-         * this for the rarely used mtrr case.
-         */
-        ret = init_chip_wc_pat(dd, 0);
-        if (ret)
-            goto bail;
+    /*
+     * We do not set WC on the VL15 buffers to avoid
+     * a rare problem with unaligned writes from
+     * interrupt-flushed store buffers, so we need
+     * to map those separately here.  We can't solve
+     * this for the rarely used mtrr case.
+     */
+    ret = init_chip_wc_pat(dd, 0);
+    if (ret)
+        goto bail;
 
-        /* vl15 buffers start just after the 4k buffers */
-        vl15off = dd->physaddr + (dd->piobufbase >> 32) +
-                  dd->piobcnt4k * dd->align4k;
-        dd->piovl15base = ioremap_nocache(vl15off,
-                                          NUM_VL15_BUFS * dd->align4k);
-        if (!dd->piovl15base) {
-            ret = -ENOMEM;
-            goto bail;
-        }
+    /* vl15 buffers start just after the 4k buffers */
+    vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+              dd->piobcnt4k * dd->align4k;
+    dd->piovl15base = ioremap_nocache(vl15off,
+                                      NUM_VL15_BUFS * dd->align4k);
+    if (!dd->piovl15base) {
+        ret = -ENOMEM;
+        goto bail;
     }
+
     qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
     ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 2ee36953e234..7e00470adc30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
 unsigned qib_cc_table_size;
 module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-/*
- * qib_wc_pat parameter:
- *      0 is WC via MTRR
- *      1 is WC via PAT
- *      If PAT initialization fails, code reverts back to MTRR
- */
-unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
-module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
-MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
 static void verify_interrupt(unsigned long);
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
         spin_unlock(&dd->pport[pidx].cc_shadow_lock);
     }
 
-    if (!qib_wc_pat)
-        qib_disable_wc(dd);
+    qib_disable_wc(dd);
 
     if (dd->pioavailregs_dma) {
         dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         goto bail;
     }
 
-    if (!qib_wc_pat) {
-        ret = qib_enable_wc(dd);
-        if (ret) {
-            qib_dev_err(dd,
-                "Write combining not enabled (err %d): performance may be poor\n",
-                -ret);
-            ret = 0;
-        }
+    ret = qib_enable_wc(dd);
+    if (ret) {
+        qib_dev_err(dd,
+                    "Write combining not enabled (err %d): performance may be poor\n",
+                    -ret);
+        ret = 0;
     }
 
     qib_verify_pioperf(dd);
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 81b225f2300a..6d61ef98721c 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -116,21 +116,9 @@ int qib_enable_wc(struct qib_devdata *dd)
     }
 
     if (!ret) {
-        int cookie;
-
-        cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
-        if (cookie < 0) {
-            {
-                qib_devinfo(dd->pcidev,
-                    "mtrr_add()  WC for PIO bufs failed (%d)\n",
-                    cookie);
-                ret = -EINVAL;
-            }
-        } else {
-            dd->wc_cookie = cookie;
-            dd->wc_base = (unsigned long) pioaddr;
-            dd->wc_len = (unsigned long) piolen;
-        }
+        dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
+        if (dd->wc_cookie < 0)
+            ret = -EINVAL;
     }
 
     return ret;
@@ -142,18 +130,7 @@ int qib_enable_wc(struct qib_devdata *dd)
 */
void qib_disable_wc(struct qib_devdata *dd)
{
-    if (dd->wc_cookie) {
-        int r;
-
-        r = mtrr_del(dd->wc_cookie, dd->wc_base,
-                     dd->wc_len);
-        if (r < 0)
-            qib_devinfo(dd->pcidev,
-                "mtrr_del(%lx, %lx, %lx) failed: %d\n",
-                dd->wc_cookie, dd->wc_base,
-                dd->wc_len, r);
-        dd->wc_cookie = 0; /* even on failure */
-    }
+    arch_phys_wc_del(dd->wc_cookie);
}

/**
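arch_phys_wc_add() subsumes the old mtrr_add() dance: it returns 0 when PAT already provides write-combining (which is why qib_file_ops.c treats wc_cookie == 0 as "PAT in use"), a positive handle when an MTRR had to be installed, and a negative value on failure; arch_phys_wc_del() accepts any of those, so no conditional teardown is needed. A sketch of a driver using the pair, with a hypothetical struct my_dev:

    /* enable WC on a PIO aperture; remember the handle for teardown */
    static int my_enable_wc(struct my_dev *d, unsigned long base,
                            unsigned long len)
    {
        d->wc_cookie = arch_phys_wc_add(base, len);
        /* 0 also means "PAT active, no MTRR needed" */
        return d->wc_cookie < 0 ? -EINVAL : 0;
    }

    static void my_disable_wc(struct my_dev *d)
    {
        arch_phys_wc_del(d->wc_cookie);  /* safe for 0 and negative handles */
    }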