Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c        | 176
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c        |   8
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c       |  34
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c  |   8
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c        |  26
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c         |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c       |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c         |  13
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c         |   8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c         |   7
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c         |  24
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c        |   6
12 files changed, 172 insertions(+), 148 deletions(-)
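
A note on the cxgb4 churn below: nearly all of it is a mechanical rename from the old FOO(x)/FOO_MASK/FOO_GET(x) macro spellings to the _S/_M/_V/_G (shift/mask/value/get) convention used by the newer firmware headers, with a _F shorthand for setting single-bit fields. A minimal sketch of the convention, using a hypothetical 8-bit EXAMPLE field at a made-up offset (not a real t4fw_api.h definition):

/* Sketch only: EXAMPLE and its bit position are hypothetical. */
#define EXAMPLE_S	24					/* _S: start bit of the field */
#define EXAMPLE_M	0xffU					/* _M: mask of the field width */
#define EXAMPLE_V(x)	((x) << EXAMPLE_S)			/* _V: place a value in the field */
#define EXAMPLE_G(x)	(((x) >> EXAMPLE_S) & EXAMPLE_M)	/* _G: extract the field */
#define EXAMPLE_F	EXAMPLE_V(1U)				/* _F: flag form for 1-bit fields */

Hence FW_WR_COMPL(1) becomes FW_WR_COMPL_F, RCV_BUFSIZ_MASK becomes RCV_BUFSIZ_M, and FW_HDR_FW_VER_MAJOR_GET() becomes FW_HDR_FW_VER_MAJOR_G() throughout.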
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index fb61f6685809..4b8c6116c058 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -472,13 +472,13 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
skb = get_skb(skb, flowclen, GFP_KERNEL);
flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
- flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
- FW_FLOWC_WR_NPARAMS(8));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
- 16)) | FW_WR_FLOWID(ep->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(8));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+ 16)) | FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
- flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
(ep->com.dev->rdev.lldi.pf));
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
@@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep)
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win);
- opt2 = RX_CHANNEL(0) |
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win);
+ opt2 = RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
if (enable_tcp_sack)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- opt2 |= WND_SCALE_EN(1);
+ opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
@@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->local_ip = la->sin_addr.s_addr;
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
- t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
@@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep)
t5_req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
- t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
+ t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t)));
@@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
@@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
@@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
@@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
* due to the limit in the number of bits in the RCV_BUFSIZ field,
* then add the overage in to the credits returned.
*/
- if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
- credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+ if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
+ credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
- req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+ req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
F_RX_DACK_CHANGE |
V_RX_DACK_MODE(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
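
The comment in the hunk above pins down the arithmetic: opt0's RCV_BUFSIZ field carries the receive window in 1KB units and is only RCV_BUFSIZ_M wide, so whatever part of the software window exceeds RCV_BUFSIZ_M * 1024 bytes must be handed back as RX credits instead. A sketch of that split (helper names are illustrative, not functions in cm.c):

/* Illustrative helpers; RCV_BUFSIZ_M comes from the hardware headers. */
static u32 opt0_window_1kb(u32 rcv_win_bytes)
{
	u32 win = rcv_win_bytes >> 10;		/* bytes -> 1KB units for opt0 */

	return win > RCV_BUFSIZ_M ? RCV_BUFSIZ_M : win;
}

static u32 window_overage_bytes(u32 rcv_win_bytes)
{
	/* the amount update_rx_credits() folds back into the returned credits */
	return rcv_win_bytes > RCV_BUFSIZ_M * 1024 ?
	       rcv_win_bytes - RCV_BUFSIZ_M * 1024 : 0;
}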
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
- req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
@@ -1762,10 +1762,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->le.pport = sin->sin_port;
req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
- htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
- V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+ htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
+ FW_OFLD_CONNECTION_WR_ASTID_V(atid));
req->tcb.cplrxdataack_cplpassacceptrpl =
- htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+ htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
@@ -1778,34 +1778,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win));
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
- RX_CHANNEL(0) |
+ RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
- req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
+ req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
if (enable_tcp_sack)
- req->tcb.opt2 |= (__force __be32) SACK_EN(1);
+ req->tcb.opt2 |= (__force __be32)SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
- req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
- req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
+ req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
+ req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
+ req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2178,28 +2178,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
* remainder will be specified in the rx_data_ack.
*/
win = ep->rcv_win >> 10;
- if (win > RCV_BUFSIZ_MASK)
- win = RCV_BUFSIZ_MASK;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
- KEEP_ALIVE(1) |
+ KEEP_ALIVE_F |
DELACK(1) |
- WND_SCALE(wscale) |
- MSS_IDX(mtu_idx) |
- L2T_IDX(ep->l2t->idx) |
- TX_CHAN(ep->tx_chan) |
- SMAC_SEL(ep->smac_idx) |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(ep->l2t->idx) |
+ TX_CHAN_V(ep->tx_chan) |
+ SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos >> 2) |
- ULP_MODE(ULP_MODE_TCPDDP) |
- RCV_BUFSIZ(win);
- opt2 = RX_CHANNEL(0) |
- RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+ ULP_MODE_V(ULP_MODE_TCPDDP) |
+ RCV_BUFSIZ_V(win);
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
opt2 |= TSTAMPS_EN(1);
if (enable_tcp_sack && req->tcpopt.sack)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
- opt2 |= WND_SCALE_EN(1);
+ opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
@@ -2211,7 +2211,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
- opt2 |= T5_OPT_2_VALID;
+ opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
@@ -3537,9 +3537,9 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
- req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
- req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+ req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req->le.filter = (__force __be32) filter;
req->le.lport = lport;
req->le.pport = rport;
@@ -3548,16 +3548,16 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req->tcb.rcv_nxt = htonl(rcv_isn + 1);
req->tcb.rcv_adv = htons(window);
req->tcb.t_state_to_astid =
- htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
- V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
- V_FW_OFLD_CONNECTION_WR_ASTID(
+ htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
+ FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
+ FW_OFLD_CONNECTION_WR_ASTID_V(
GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
/*
* We store the qid in opt2 which will be used by the firmware
* to send us the wr response.
*/
- req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+ req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
/*
* We initialize the MSS index in TCB to 0xF.
@@ -3565,7 +3565,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TCB picks up the correct value. If this was 0
* TP will ignore any value > 0 for MSS index.
*/
- req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+ req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
req->cookie = (unsigned long)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 0f773e78e080..e9fd3a029296 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -51,9 +51,9 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
- FW_WR_OP(FW_RI_RES_WR) |
+ FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(1) |
- FW_WR_COMPL(1));
+ FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
@@ -121,9 +121,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
- FW_WR_OP(FW_RI_RES_WR) |
+ FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(1) |
- FW_WR_COMPL(1));
+ FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index ec7a2988a703..0744455cd88b 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -74,18 +74,18 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
memset(req, 0, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
- req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
- (wait ? FW_WR_COMPL(1) : 0));
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+ (wait ? FW_WR_COMPL_F : 0));
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
- req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
+ req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
- req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
- req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
- sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
@@ -107,12 +107,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
- __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
- cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+ cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
else
- cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
+ cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -135,23 +135,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
INIT_ULPTX_WR(req, wr_len, 0, 0);
if (i == (num_wqe-1)) {
- req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
- FW_WR_COMPL(1));
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+ FW_WR_COMPL_F);
req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
} else
- req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
- FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cmd;
- req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
16));
- req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));
sc = (struct ulptx_idata *)(req + 1);
- sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
to_dp = (u8 *)(sc + 1);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 72e3b69d1b76..66bd6a2ad83b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -408,10 +408,10 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
PDBG("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%u.%u.%u.%u\n",
- FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
+ FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 41cd6882b648..2ed3ece2b2ee 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -271,9 +271,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
- FW_WR_OP(FW_RI_RES_WR) |
+ FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(2) |
- FW_WR_COMPL(1));
+ FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
res = res_wr->res;
@@ -1082,10 +1082,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
- wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+ wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(qhp->ep->hwtid) |
- FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ FW_WR_FLOWID_V(qhp->ep->hwtid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
@@ -1204,11 +1204,11 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(
- FW_WR_OP(FW_RI_INIT_WR) |
- FW_WR_COMPL(1));
+ FW_WR_OP_V(FW_RI_INIT_WR) |
+ FW_WR_COMPL_F);
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->cookie = (unsigned long) &ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
@@ -1273,11 +1273,11 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof *wqe);
wqe->op_compl = cpu_to_be32(
- FW_WR_OP(FW_RI_INIT_WR) |
- FW_WR_COMPL(1));
+ FW_WR_OP_V(FW_RI_INIT_WR) |
+ FW_WR_COMPL_F);
wqe->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(qhp->ep->hwtid) |
- FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+ FW_WR_FLOWID_V(qhp->ep->hwtid) |
+ FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1066eec854a9..a3b70f6c4035 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -233,7 +233,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
if (err)
goto err_dbmap;
- cq->mcq.comp = mlx4_ib_cq_comp;
+ if (context)
+ cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
+ else
+ cq->mcq.comp = mlx4_ib_cq_comp;
cq->mcq.event = mlx4_ib_cq_event;
if (context)
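
The mlx4/cq.c hunk above routes completion events for CQs that have a user context through a per-EQ tasklet (mcq->tasklet_ctx.comp) instead of invoking the callback straight from the EQ interrupt handler; kernel-owned CQs keep the direct mcq->comp path. A rough sketch of that dispatch shape (types and names here are illustrative, not the mlx4_core API):

/* Illustrative deferral of a completion callback out of hard-irq context. */
#include <linux/interrupt.h>

struct demo_cq {
	void (*comp)(struct demo_cq *cq);
	struct tasklet_struct tasklet;
};

static void demo_cq_tasklet(unsigned long data)
{
	struct demo_cq *cq = (struct demo_cq *)data;

	cq->comp(cq);				/* now runs in softirq context */
}

static void demo_eq_event(struct demo_cq *cq, bool user_cq)
{
	if (user_cq)
		tasklet_schedule(&cq->tasklet);	/* defer user-facing work */
	else
		cq->comp(cq);			/* kernel consumers stay inline */
}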
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8b72cf392b34..57ecc5b204f3 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1975,8 +1975,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
dev->caps.num_ports > dev->caps.comp_pool)
return;
- eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
- dev->caps.num_ports);
+ eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
/* Init eq table */
added_eqs = 0;
@@ -2228,7 +2227,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
- &ibdev->steer_qpn_base);
+ &ibdev->steer_qpn_base, 0);
if (err)
goto err_counter;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9c5150c3cb31..cf000b7ad64f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -802,16 +802,21 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
}
} else {
- /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
- * BlueFlame setup flow wrongly causes VLAN insertion. */
+ /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
+ * otherwise, the WQE BlueFlame setup flow wrongly causes
+ * VLAN insertion. */
if (init_attr->qp_type == IB_QPT_RAW_PACKET)
- err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
+ (init_attr->cap.max_send_wr ?
+ MLX4_RESERVE_ETH_BF_QP : 0) |
+ (init_attr->cap.max_recv_wr ?
+ MLX4_RESERVE_A0_QP : 0));
else
if (qp->flags & MLX4_IB_QP_NETIF)
err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
else
err = mlx4_qp_reserve_range(dev->dev, 1, 1,
- &qpn);
+ &qpn, 0);
if (err)
goto err_proxy;
}
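
mlx4_qp_reserve_range() grows a flags argument in this series. Rather than over-aligning the reservation to 1 << 8 so bits 6 and 7 of the QPN come out clear, the raw-packet path now asks the allocator for a QPN from the dedicated Ethernet-BlueFlame or A0 ranges. The flag values, as I recall them from include/linux/mlx4/device.h (verify against your tree), line up with the very QPN bits the old alignment trick protected:

/* From include/linux/mlx4/device.h, to the best of my recollection. */
enum {
	MLX4_RESERVE_A0_QP	= 1 << 6,	/* QPN from the low-latency A0 range */
	MLX4_RESERVE_ETH_BF_QP	= 1 << 7,	/* QPN usable with Ethernet BlueFlame */
};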
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 10cfce5119a9..c463e7bba5f4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -805,14 +805,14 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
}
- mlx5_vfree(cqb);
+ kvfree(cqb);
return &cq->ibcq;
err_cmd:
mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
err_cqb:
- mlx5_vfree(cqb);
+ kvfree(cqb);
if (context)
destroy_cq_user(cq, context);
else
@@ -1159,11 +1159,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
}
mutex_unlock(&cq->resize_mutex);
- mlx5_vfree(in);
+ kvfree(in);
return 0;
ex_alloc:
- mlx5_vfree(in);
+ kvfree(in);
ex_resize:
if (udata)
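
The kvfree() conversions here (and in mr.c, qp.c and srq.c below) drop the driver-private mlx5_vfree() in favor of the generic helper, which correctly frees memory obtained from either kmalloc() or vmalloc(). That matters because these command buffers are allocated with a kmalloc-then-vmalloc fallback, roughly like this sketch (names are illustrative; the real allocator was mlx5_vzalloc()):

/* Illustrative alloc pairing that kvfree() is designed to handle. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_cmd_buf(size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!buf)
		buf = vmalloc(size);	/* fall back for large allocations */
	return buf;			/* either way, kvfree(buf) releases it */
}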
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8ee7cb46e059..5a80dd993761 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -159,6 +159,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
sizeof(*in), reg_mr_callback,
mr, &mr->out);
if (err) {
+ spin_lock_irq(&ent->lock);
+ ent->pending--;
+ spin_unlock_irq(&ent->lock);
mlx5_ib_warn(dev, "create mkey failed %d\n", err);
kfree(mr);
break;
@@ -853,14 +856,14 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
goto err_2;
}
mr->umem = umem;
- mlx5_vfree(in);
+ kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
return mr;
err_2:
- mlx5_vfree(in);
+ kvfree(in);
err_1:
kfree(mr);
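
The first mr.c hunk above plugs a counter leak in add_keys(): ent->pending is bumped (earlier in the function, presumably under ent->lock) before the asynchronous mkey create is issued, so when the submission itself fails the completion callback will never run and the error path has to drop the count again. The general shape, as a hedged sketch with stand-in names:

/* Illustrative in-flight counter around an async submission. */
#include <linux/spinlock.h>

struct demo_cache_ent {
	spinlock_t lock;
	int pending;			/* async creates still in flight */
};

static int demo_submit(struct demo_cache_ent *ent, int (*submit_async)(void))
{
	int err;

	spin_lock_irq(&ent->lock);
	ent->pending++;			/* completion callback will drop this */
	spin_unlock_irq(&ent->lock);

	err = submit_async();		/* hypothetical stand-in for the mkey cmd */
	if (err) {
		spin_lock_irq(&ent->lock);
		ent->pending--;		/* callback will never fire; undo */
		spin_unlock_irq(&ent->lock);
	}
	return err;
}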
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e261a53f9a02..1cae1c7132b4 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -647,7 +647,7 @@ err_unmap:
mlx5_ib_db_unmap_user(context, &qp->db);
err_free:
- mlx5_vfree(*in);
+ kvfree(*in);
err_umem:
if (qp->umem)
@@ -761,7 +761,7 @@ err_wrid:
kfree(qp->rq.wrid);
err_free:
- mlx5_vfree(*in);
+ kvfree(*in);
err_buf:
mlx5_buf_free(dev->mdev, &qp->buf);
@@ -971,7 +971,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_create;
}
- mlx5_vfree(in);
+ kvfree(in);
/* Hardware wants QPN written in big-endian order (after
* shifting) for send doorbell. Precompute this value to save
* a little bit when posting sends.
@@ -988,7 +988,7 @@ err_create:
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
- mlx5_vfree(in);
+ kvfree(in);
return err;
}
@@ -1011,9 +1011,14 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
}
} else {
spin_lock_irq(&send_cq->lock);
+ __acquire(&recv_cq->lock);
}
} else if (recv_cq) {
spin_lock_irq(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else {
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
}
}
@@ -1033,10 +1038,15 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
spin_unlock_irq(&recv_cq->lock);
}
} else {
+ __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
}
} else if (recv_cq) {
+ __release(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock);
+ } else {
+ __release(&recv_cq->lock);
+ __release(&send_cq->lock);
}
}
@@ -2411,7 +2421,7 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl,
- struct ib_send_wr *wr, int *idx,
+ struct ib_send_wr *wr, unsigned *idx,
int *size, int nreq)
{
int err = 0;
@@ -2737,6 +2747,8 @@ out:
if (bf->need_lock)
spin_lock(&bf->lock);
+ else
+ __acquire(&bf->lock);
/* TBD enable WC */
if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
@@ -2753,6 +2765,8 @@ out:
bf->offset ^= bf->buf_size;
if (bf->need_lock)
spin_unlock(&bf->lock);
+ else
+ __release(&bf->lock);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
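
The __acquire()/__release() calls added to mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() and around bf->lock compile to nothing at runtime; they are sparse context annotations (make C=1) that keep the checker's lock-held count balanced on branches that legitimately skip the real spin_lock()/spin_unlock(). A minimal sketch of the shape being annotated (function names are illustrative):

/* Runtime no-ops; under sparse they adjust the checker's lock context. */
#include <linux/spinlock.h>

static void maybe_lock(spinlock_t *lock, bool need_lock)
{
	if (need_lock)
		spin_lock(lock);
	else
		__acquire(lock);	/* tell sparse this branch "holds" it too */
}

static void maybe_unlock(spinlock_t *lock, bool need_lock)
{
	if (need_lock)
		spin_unlock(lock);
	else
		__release(lock);	/* keep the context balanced */
}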
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 97cc1baaa8e3..41fec66217dd 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -141,7 +141,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return 0;
err_in:
- mlx5_vfree(*in);
+ kvfree(*in);
err_umem:
ib_umem_release(srq->umem);
@@ -209,7 +209,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return 0;
err_in:
- mlx5_vfree(*in);
+ kvfree(*in);
err_buf:
mlx5_buf_free(dev->mdev, &srq->buf);
@@ -306,7 +306,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
- mlx5_vfree(in);
+ kvfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;