author     Hariprasad Shenai <hariprasad@chelsio.com>   2014-11-07 09:35:25 +0530
committer  David S. Miller <davem@davemloft.net>        2014-11-10 12:57:10 -0500
commit     e2ac9628959cc152a811931a6422757b137ac4a4
tree       b73982788602de801c3e835d63247d7af174f740   /drivers/infiniband/hw/cxgb4/cm.c
parent     6559a7e8296002b4379e5f2c26a2a3a339d5e60a
cxgb4: Cleanup macros so they follow the same style and look consistent, part 2
Various patches have ended up changing the style of the symbolic macros/register defines to different styles. As a result, the current kernel.org files are a mix of different macro styles. Since these macro/register defines are shared by different drivers, a few patch series have ended up adding duplicate macro/register define entries with different styles. This makes the register define/macro files a complete mess, and we want to make them clean and consistent. This patch cleans up a part of it.

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
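For readers unfamiliar with the target style: the conversion replaces the bare field macros (FW_WR_OP(x), FW_WR_COMPL(1)) with explicitly suffixed variants, where _V(x) packs a value into its field and _F is shorthand for a single-bit flag set to 1; such families are conventionally built on top of _S (shift) and _M (mask) defines. Below is a minimal sketch of how a macro family of this shape is typically laid out; the field names and bit positions are illustrative only, not the actual values from the cxgb4 firmware headers.

/* Illustrative sketch of the suffixed macro style this series converges on.
 * EXAMPLE_FIELD and its shift/mask values are made up for this example;
 * the real definitions live in the cxgb4/t4 firmware API headers.
 */
#define EXAMPLE_FIELD_S    24                        /* _S: bit offset of the field      */
#define EXAMPLE_FIELD_M    0xffU                     /* _M: field mask, unshifted        */
#define EXAMPLE_FIELD_V(x) ((x) << EXAMPLE_FIELD_S)  /* _V: place a value into the field */
#define EXAMPLE_FIELD_G(x) \
	(((x) >> EXAMPLE_FIELD_S) & EXAMPLE_FIELD_M) /* _G: extract the field value      */

#define EXAMPLE_FLAG_S     21
#define EXAMPLE_FLAG_V(x)  ((x) << EXAMPLE_FLAG_S)
#define EXAMPLE_FLAG_F     EXAMPLE_FLAG_V(1U)        /* _F: the one-bit flag set to 1    */

With this shape, call sites such as FW_WR_COMPL(1) become FW_WR_COMPL_F and FW_WR_OP(FW_FLOWC_WR) becomes FW_WR_OP_V(FW_FLOWC_WR), which is exactly the substitution the hunks below perform.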
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index fb61f6685809..a07d8e124a80 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -472,10 +472,10 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
skb = get_skb(skb, flowclen, GFP_KERNEL);
flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
- flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
- FW_FLOWC_WR_NPARAMS(8));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
- 16)) | FW_WR_FLOWID(ep->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(8));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+ 16)) | FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
@@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
@@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
@@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
req->op_to_immdlen = cpu_to_be32(
- FW_WR_OP(FW_OFLD_TX_DATA_WR) |
- FW_WR_COMPL(1) |
- FW_WR_IMMDLEN(mpalen));
+ FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+ FW_WR_COMPL_F |
+ FW_WR_IMMDLEN_V(mpalen));
req->flowid_len16 = cpu_to_be32(
- FW_WR_FLOWID(ep->hwtid) |
- FW_WR_LEN16(wrlen >> 4));
+ FW_WR_FLOWID_V(ep->hwtid) |
+ FW_WR_LEN16_V(wrlen >> 4));
req->plen = cpu_to_be32(mpalen);
req->tunnel_to_proxy = cpu_to_be32(
- FW_OFLD_TX_DATA_WR_FLUSH(1) |
- FW_OFLD_TX_DATA_WR_SHOVE(1));
+ FW_OFLD_TX_DATA_WR_FLUSH_F |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
- req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
@@ -3537,8 +3537,8 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
- req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
- req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+ req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+ req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
req->le.filter = (__force __be32) filter;
req->le.lport = lport;