-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h          |  99
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig               |  11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile        |   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h         |  27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c |  34
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c    |  97
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c     | 464
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h     | 310
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h     |  11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c           |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h           |   2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c           |  13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h        | 217
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h      |   8
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.h                 |  17
15 files changed, 1174 insertions(+), 139 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 343e8daf2270..1e26669793c3 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -753,103 +753,4 @@ struct fw_ri_wr {
 #define FW_RI_WR_P2PTYPE_G(x)	\
 	(((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
 
-struct tcp_options {
-	__be16 mss;
-	__u8 wsf;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	__u8:4;
-	__u8 unknown:1;
-	__u8:1;
-	__u8 sack:1;
-	__u8 tstamp:1;
-#else
-	__u8 tstamp:1;
-	__u8 sack:1;
-	__u8:1;
-	__u8 unknown:1;
-	__u8:4;
-#endif
-};
-
-struct cpl_pass_accept_req {
-	union opcode_tid ot;
-	__be16 rsvd;
-	__be16 len;
-	__be32 hdr_len;
-	__be16 vlan;
-	__be16 l2info;
-	__be32 tos_stid;
-	struct tcp_options tcpopt;
-};
-
-/* cpl_pass_accept_req.hdr_len fields */
-#define SYN_RX_CHAN_S    0
-#define SYN_RX_CHAN_M    0xF
-#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
-#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
-
-#define TCP_HDR_LEN_S    10
-#define TCP_HDR_LEN_M    0x3F
-#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
-#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
-
-#define IP_HDR_LEN_S    16
-#define IP_HDR_LEN_M    0x3FF
-#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
-#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
-
-#define ETH_HDR_LEN_S    26
-#define ETH_HDR_LEN_M    0x1F
-#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
-#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
-
-/* cpl_pass_accept_req.l2info fields */
-#define SYN_MAC_IDX_S    0
-#define SYN_MAC_IDX_M    0x1FF
-#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
-#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-
-#define SYN_XACT_MATCH_S    9
-#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
-#define SYN_XACT_MATCH_F    SYN_XACT_MATCH_V(1U)
-
-#define SYN_INTF_S    12
-#define SYN_INTF_M    0xF
-#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
-#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
-
-struct ulptx_idata {
-	__be32 cmd_more;
-	__be32 len;
-};
-
-#define ULPTX_NSGE_S    0
-#define ULPTX_NSGE_M    0xFFFF
-#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-
-#define RX_DACK_MODE_S    29
-#define RX_DACK_MODE_M    0x3
-#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
-#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-
-#define RX_DACK_CHANGE_S    31
-#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
-#define RX_DACK_CHANGE_F    RX_DACK_CHANGE_V(1U)
-
-enum {                     /* TCP congestion control algorithms */
-	CONG_ALG_RENO,
-	CONG_ALG_TAHOE,
-	CONG_ALG_NEWRENO,
-	CONG_ALG_HIGHSPEED
-};
-
-#define CONG_CNTRL_S    14
-#define CONG_CNTRL_M    0x3
-#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
-#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-
-#define T5_ISS_S    18
-#define T5_ISS_V(x) ((x) << T5_ISS_S)
-#define T5_ISS_F    T5_ISS_V(1U)
-
 #endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4d187f22c48b..4686a85a8a22 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,6 +96,17 @@ config CHELSIO_T4_DCB
 
 	  If unsure, say N.
 
+config CHELSIO_T4_UWIRE
+	bool "Unified Wire Support for Chelsio T5 cards"
+	default n
+	depends on CHELSIO_T4
+	---help---
+	  Enable unified-wire offload features.
+	  Say Y here if you want to enable unified-wire over Ethernet
+	  in the driver.
+
+	  If unsure, say N.
+
 config CHELSIO_T4_FCOE
 	bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
 	default n
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..85c92821b239 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
+cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) +=  cxgb4_ppm.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1dac6c6111bf..984a3cc26f86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,9 @@ enum {
 	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
 	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
 	MAX_RDMA_CIQS = 32,           /* # of RDMA concentrator IQs */
+
+	/* # of streaming iSCSIT Rx queues */
+	MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
 };
 
 enum {
@@ -420,8 +423,8 @@ enum {
 enum {
 	INGQ_EXTRAS = 2,        /* firmware event queue and */
 				/*   forwarded interrupts */
-	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
-		   + MAX_RDMA_CIQS + INGQ_EXTRAS,
+	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
+		   MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
 };
 
 struct adapter;
@@ -508,6 +511,15 @@ struct pkt_gl {
 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
 			      const struct pkt_gl *gl);
+typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
+/* LRO related declarations for ULD */
+struct t4_lro_mgr {
+#define MAX_LRO_SESSIONS 64
+	u8 lro_session_cnt;         /* # of sessions to aggregate */
+	unsigned long lro_pkts;     /* # of LRO super packets */
+	unsigned long lro_merged;   /* # of wire packets merged by LRO */
+	struct sk_buff_head lroq;   /* list of aggregated sessions */
+};
 
 struct sge_rspq {                   /* state for an SGE response queue */
 	struct napi_struct napi;
@@ -532,6 +544,8 @@ struct sge_rspq {                   /* state for an SGE response queue */
 	struct adapter *adap;
 	struct net_device *netdev;  /* associated net device */
 	rspq_handler_t handler;
+	rspq_flush_handler_t flush_handler;
+	struct t4_lro_mgr lro_mgr;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #define CXGB_POLL_STATE_IDLE		0
 #define CXGB_POLL_STATE_NAPI		BIT(0) /* NAPI owns this poll */
@@ -641,6 +655,7 @@ struct sge {
 
 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
 	struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
+	struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
 	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
 	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
 	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -652,9 +667,11 @@ struct sge {
 	u16 ethqsets;               /* # of active Ethernet queue sets */
 	u16 ethtxq_rover;           /* Tx queue to clean up next */
 	u16 iscsiqsets;             /* # of active iSCSI queue sets */
+	u16 niscsitq;               /* # of available iSCSIT Rx queues */
 	u16 rdmaqs;                 /* # of available RDMA Rx queues */
 	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
 	u16 iscsi_rxq[MAX_OFLD_QSETS];
+	u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
 	u16 rdma_rxq[MAX_RDMA_QUEUES];
 	u16 rdma_ciq[MAX_RDMA_CIQS];
 	u16 timer_val[SGE_NTIMERS];
@@ -681,6 +698,7 @@ struct sge {
 
 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
 #define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
+#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
 #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
 #define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
@@ -747,6 +765,8 @@ struct adapter {
 	struct list_head rcu_node;
 	struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
 
+	void *iscsi_ppm;
+
 	struct tid_info tids;
 	void **tid_release_head;
 	spinlock_t tid_release_lock;
@@ -1113,7 +1133,8 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		     struct net_device *dev, int intr_idx,
-		     struct sge_fl *fl, rspq_handler_t hnd, int cong);
+		     struct sge_fl *fl, rspq_handler_t hnd,
+		     rspq_flush_handler_t flush_handler, int cong);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 			 struct net_device *dev, struct netdev_queue *netdevq,
 			 unsigned int iqid);
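The new rspq_flush_handler_t hook gives a ULD a well-defined point to drain whatever it has aggregated in the per-queue t4_lro_mgr once the response queue runs dry. For illustration only, a minimal ULD-side flush handler might look like the sketch below; the "example_" name is hypothetical and not part of this patch.

/* Minimal sketch of a ULD-side flush handler.  Each skb queued on
 * lro_mgr->lroq is one aggregated LRO session that must be handed to
 * the stack when the queue goes idle.
 */
static void example_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&lro_mgr->lroq)) != NULL) {
		lro_mgr->lro_session_cnt--;	/* one session completed */
		netif_receive_skb(skb);		/* push the super-packet up */
	}
}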
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index e6a4072b494b..0bb41e9b9b1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2334,12 +2334,14 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
 	struct adapter *adap = seq->private;
 	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
 	int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
+	int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
 	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
 	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
 	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
 	int i, r = (uintptr_t)v - 1;
 	int iscsi_idx = r - eth_entries;
-	int rdma_idx = iscsi_idx - iscsi_entries;
+	int iscsit_idx = iscsi_idx - iscsi_entries;
+	int rdma_idx = iscsit_idx - iscsit_entries;
 	int ciq_idx = rdma_idx - rdma_entries;
 	int ctrl_idx = ciq_idx - ciq_entries;
 	int fq_idx = ctrl_idx - ctrl_entries;
@@ -2453,6 +2455,35 @@ do { \
 		RL("FLLow:", fl.low);
 		RL("FLStarving:", fl.starving);
 
+	} else if (iscsit_idx < iscsit_entries) {
+		const struct sge_ofld_rxq *rx =
+			&adap->sge.iscsitrxq[iscsit_idx * 4];
+		int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
+
+		S("QType:", "iSCSIT");
+		R("RspQ ID:", rspq.abs_id);
+		R("RspQ size:", rspq.size);
+		R("RspQE size:", rspq.iqe_len);
+		R("RspQ CIDX:", rspq.cidx);
+		R("RspQ Gen:", rspq.gen);
+		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+		S3("u", "Intr pktcnt:",
+		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		R("FL ID:", fl.cntxt_id);
+		R("FL size:", fl.size - 8);
+		R("FL pend:", fl.pend_cred);
+		R("FL avail:", fl.avail);
+		R("FL PIDX:", fl.pidx);
+		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLMapErr:", fl.mapping_err);
+		RL("FLLow:", fl.low);
+		RL("FLStarving:", fl.starving);
+
 	} else if (rdma_idx < rdma_entries) {
 		const struct sge_ofld_rxq *rx =
 				&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2543,6 +2574,7 @@ static int sge_queue_entries(const struct adapter *adap)
 {
 	return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
 	       DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
+	       DIV_ROUND_UP(adap->sge.niscsitq, 4) +
 	       DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
 	       DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
 	       DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index adad73f7c8cd..d1e3f0997d6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -227,7 +227,7 @@ static DEFINE_MUTEX(uld_mutex);
 static LIST_HEAD(adap_rcu_list);
 static DEFINE_SPINLOCK(adap_rcu_lock);
 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *uld_str[] = { "RDMA", "iSCSI" };
+static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
 
 static void link_report(struct net_device *dev)
 {
@@ -664,6 +664,13 @@ out:
 	return 0;
 }
 
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+	if (ulds[q->uld].lro_flush)
+		ulds[q->uld].lro_flush(&q->lro_mgr);
+}
+
 /**
  *	uldrx_handler - response queue handler for ULD queues
  *	@q: the response queue that received the packet
@@ -677,6 +684,7 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 			 const struct pkt_gl *gl)
 {
 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+	int ret;
 
 	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
 	 */
@@ -684,10 +692,19 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
 		rsp += 2;
 
-	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+	if (q->flush_handler)
+		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
+						  rsp, gl, &q->lro_mgr,
+						  &q->napi);
+	else
+		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
+					      rsp, gl);
+
+	if (ret) {
 		rxq->stats.nomem++;
 		return -1;
 	}
+
 	if (gl == NULL)
 		rxq->stats.imm++;
 	else if (gl == CXGB4_MSG_AN)
@@ -754,6 +771,10 @@ static void name_msix_vecs(struct adapter *adap)
 			snprintf(adap->msix_info[msi_idx++].desc, n,
 				 "%s-iscsi%d", adap->port[0]->name, i);
 
+	for_each_iscsitrxq(&adap->sge, i)
+		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
+			 adap->port[0]->name, i);
+
 	for_each_rdmarxq(&adap->sge, i)
 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 			 adap->port[0]->name, i);
@@ -767,6 +788,7 @@ static int request_msix_queue_irqs(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+	int iscsitqidx = 0;
 	int msi_index = 2;
 
 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -792,6 +814,15 @@ static int request_msix_queue_irqs(struct adapter *adap)
 			goto unwind;
 		msi_index++;
 	}
+	for_each_iscsitrxq(s, iscsitqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->iscsitrxq[iscsitqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
 	for_each_rdmarxq(s, rdmaqidx) {
 		err = request_irq(adap->msix_info[msi_index].vec,
 				  t4_sge_intr_msix, 0,
@@ -819,6 +850,9 @@ unwind:
 	while (--rdmaqidx >= 0)
 		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->rdmarxq[rdmaqidx].rspq);
+	while (--iscsitqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->iscsitrxq[iscsitqidx].rspq);
 	while (--iscsiqidx >= 0)
 		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->iscsirxq[iscsiqidx].rspq);
@@ -840,6 +874,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
 	for_each_iscsirxq(s, i)
 		free_irq(adap->msix_info[msi_index++].vec,
 			 &s->iscsirxq[i].rspq);
+	for_each_iscsitrxq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec,
+			 &s->iscsitrxq[i].rspq);
 	for_each_rdmarxq(s, i)
 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
 	for_each_rdmaciq(s, i)
@@ -984,7 +1021,7 @@ static void enable_rx(struct adapter *adap)
 
 static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
 			   unsigned int nq, unsigned int per_chan, int msi_idx,
-			   u16 *ids)
+			   u16 *ids, bool lro)
 {
 	int i, err;
 
@@ -994,7 +1031,9 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
 				       adap->port[i / per_chan],
 				       msi_idx, q->fl.size ? &q->fl : NULL,
-				       uldrx_handler, 0);
+				       uldrx_handler,
+				       lro ? uldrx_flush_handler : NULL,
+				       0);
 		if (err)
 			return err;
 		memset(&q->stats, 0, sizeof(q->stats));
@@ -1024,7 +1063,7 @@ static int setup_sge_queues(struct adapter *adap)
 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
 	else {
 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
-				       NULL, NULL, -1);
+				       NULL, NULL, NULL, -1);
 		if (err)
 			return err;
 		msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1044,7 +1083,7 @@ static int setup_sge_queues(struct adapter *adap)
 	 * new/deleted queues.
 	 */
 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
-			       msi_idx, NULL, fwevtq_handler, -1);
+			       msi_idx, NULL, fwevtq_handler, NULL, -1);
 	if (err) {
 freeout:	t4_free_sge_resources(adap);
 		return err;
@@ -1062,6 +1101,7 @@ freeout:	t4_free_sge_resources(adap);
 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
 					       msi_idx, &q->fl,
 					       t4_ethrx_handler,
+					       NULL,
 					       t4_get_mps_bg_map(adap,
 								 pi->tx_chan));
 			if (err)
@@ -1087,18 +1127,19 @@ freeout:	t4_free_sge_resources(adap);
 			goto freeout;
 	}
 
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
-	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
+	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
 	if (err) \
 		goto freeout; \
 	if (msi_idx > 0) \
 		msi_idx += nq; \
 } while (0)
 
-	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
-	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
+	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
+	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
 	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
-	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
+	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
 
 #undef ALLOC_OFLD_RXQS
@@ -2430,6 +2471,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	} else if (uld == CXGB4_ULD_ISCSI) {
 		lli.rxq_ids = adap->sge.iscsi_rxq;
 		lli.nrxq = adap->sge.iscsiqsets;
+	} else if (uld == CXGB4_ULD_ISCSIT) {
+		lli.rxq_ids = adap->sge.iscsit_rxq;
+		lli.nrxq = adap->sge.niscsitq;
 	}
 	lli.ntxq = adap->sge.iscsiqsets;
 	lli.nchan = adap->params.nports;
@@ -2437,6 +2481,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.wr_cred = adap->params.ofldq_wr_cred;
 	lli.adapter_type = adap->params.chip;
 	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+	lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+	lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+	lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+	lli.iscsi_ppm = &adap->iscsi_ppm;
 	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lli.udb_density = 1 << adap->params.sge.eq_qpp;
 	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -4336,6 +4384,9 @@ static void cfg_queues(struct adapter *adap)
 		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
 				adap->params.nports;
 		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
+
+		if (!is_t4(adap->params.chip))
+			s->niscsitq = s->iscsiqsets;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4362,6 +4413,16 @@ static void cfg_queues(struct adapter *adap)
 		r->fl.size = 72;
 	}
 
+	if (!is_t4(adap->params.chip)) {
+		for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
+			struct sge_ofld_rxq *r = &s->iscsitrxq[i];
+
+			init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
+			r->rspq.uld = CXGB4_ULD_ISCSIT;
+			r->fl.size = 72;
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
 
@@ -4436,9 +4497,13 @@ static int enable_msix(struct adapter *adap)
 
 	want = s->max_ethqsets + EXTRA_VECS;
 	if (is_offload(adap)) {
-		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
+		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
+			s->niscsitq;
 		/* need nchan for each possible ULD */
-		ofld_need = 3 * nchan;
+		if (is_t4(adap->params.chip))
+			ofld_need = 3 * nchan;
+		else
+			ofld_need = 4 * nchan;
 	}
 #ifdef CONFIG_CHELSIO_T4_DCB
 	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4470,12 +4535,16 @@ static int enable_msix(struct adapter *adap)
 		if (allocated < want) {
 			s->rdmaqs = nchan;
 			s->rdmaciqs = nchan;
+
+			if (!is_t4(adap->params.chip))
+				s->niscsitq = nchan;
 		}
 
 		/* leftovers go to OFLD */
 		i = allocated - EXTRA_VECS - s->max_ethqsets -
-		    s->rdmaqs - s->rdmaciqs;
+		    s->rdmaqs - s->rdmaciqs - s->niscsitq;
 		s->iscsiqsets = (i / nchan) * nchan;  /* round down */
+
 	}
 
 	for (i = 0; i < allocated; ++i)
 		adap->msix_info[i].vec = entries[i].vector;
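With the dispatch above, uldrx_handler steers every queue that was allocated with a flush handler to lro_rx_handler, so a ULD that wants the LRO path registers both callbacks. The sketch below shows what such a registration might look like; the handler bodies are hypothetical, and the .name/.add members are assumed to follow the existing cxgb4_uld_info layout rather than anything introduced by this patch.

/* Hypothetical ULD opting in to the LRO path.  Queues allocated with
 * lro = true are steered to lro_rx_handler; rx_handler remains the
 * non-LRO fallback for everything else.
 */
static struct cxgb4_uld_info example_iscsit_uld_info = {
	.name = "example-iscsit",		/* assumed existing member */
	.add = example_uld_add,			/* assumed existing member */
	.rx_handler = example_uld_rx,
	.state_change = example_uld_state_change,
	.lro_rx_handler = example_uld_lro_rx,
	.lro_flush = example_lro_flush,
};

static int __init example_module_init(void)
{
	return cxgb4_register_uld(CXGB4_ULD_ISCSIT, &example_iscsit_uld_info);
}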
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
new file mode 100644
index 000000000000..d88a7a7b2400
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
@@ -0,0 +1,464 @@
+/*
+ * cxgb4_ppm.c: Chelsio common library for T4/T5 iSCSI PagePod Manager
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include "cxgb4_ppm.h"
+
+/* Direct Data Placement -
+ * Directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the
+ * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
+ * in Data-Out PDUs. The host memory address is programmed into
+ * h/w in the format of pagepod entries. The location of the
+ * pagepod entry is encoded into ddp tag which is used as the base
+ * for ITT/TTT.
+ */
+
+/* Direct-Data Placement page size adjustment
+ */
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+	int i;
+
+	for (i = 0; i < DDP_PGIDX_MAX; i++) {
+		if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
+					 tformat->pgsz_order[i])) {
+			pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
+				 __func__, ppm->ndev->name, pgsz, i);
+			return i;
+		}
+	}
+	pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
+	return DDP_PGIDX_MAX;
+}
+
+/* DDP setup & teardown
+ */
+static int ppm_find_unused_entries(unsigned long *bmap,
+				   unsigned int max_ppods,
+				   unsigned int start,
+				   unsigned int nr,
+				   unsigned int align_mask)
+{
+	unsigned long i;
+
+	i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);
+
+	if (unlikely(i >= max_ppods) && (start > nr))
+		i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
+					       align_mask);
+	if (unlikely(i >= max_ppods))
+		return -ENOSPC;
+
+	bitmap_set(bmap, i, nr);
+	return (int)i;
+}
+
+static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
+			     unsigned long caller_data)
+{
+	struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;
+
+	pdata->caller_data = caller_data;
+	pdata->npods = count;
+
+	if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
+		pdata->color = 0;
+	else
+		pdata->color++;
+}
+
+static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
+			       unsigned long caller_data)
+{
+	struct cxgbi_ppm_pool *pool;
+	unsigned int cpu;
+	int i;
+
+	cpu = get_cpu();
+	pool = per_cpu_ptr(ppm->pool, cpu);
+	spin_lock_bh(&pool->lock);
+	put_cpu();
+
+	i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
+				    pool->next, count, 0);
+	if (i < 0) {
+		pool->next = 0;
+		spin_unlock_bh(&pool->lock);
+		return -ENOSPC;
+	}
+
+	pool->next = i + count;
+	if (pool->next >= ppm->pool_index_max)
+		pool->next = 0;
+
+	spin_unlock_bh(&pool->lock);
+
+	pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
+		 __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
+		 pool->next);
+
+	i += cpu * ppm->pool_index_max;
+	ppm_mark_entries(ppm, i, count, caller_data);
+
+	return i;
+}
+
+static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
+			   unsigned long caller_data)
+{
+	int i;
+
+	spin_lock_bh(&ppm->map_lock);
+	i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
+				    ppm->next, count, 0);
+	if (i < 0) {
+		ppm->next = 0;
+		spin_unlock_bh(&ppm->map_lock);
+		pr_debug("ippm: NO suitable entries %u available.\n",
+			 count);
+		return -ENOSPC;
+	}
+
+	ppm->next = i + count;
+	if (ppm->next >= ppm->bmap_index_max)
+		ppm->next = 0;
+
+	spin_unlock_bh(&ppm->map_lock);
+
+	pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
+		 __func__, i, count, i + ppm->pool_rsvd, ppm->next,
+		 caller_data);
+
+	i += ppm->pool_rsvd;
+	ppm_mark_entries(ppm, i, count, caller_data);
+
+	return i;
+}
+
+static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
+{
+	pr_debug("%s: idx %d + %d.\n", __func__, i, count);
+
+	if (i < ppm->pool_rsvd) {
+		unsigned int cpu;
+		struct cxgbi_ppm_pool *pool;
+
+		cpu = i / ppm->pool_index_max;
+		i %= ppm->pool_index_max;
+
+		pool = per_cpu_ptr(ppm->pool, cpu);
+		spin_lock_bh(&pool->lock);
+		bitmap_clear(pool->bmap, i, count);
+
+		if (i < pool->next)
+			pool->next = i;
+		spin_unlock_bh(&pool->lock);
+
+		pr_debug("%s: cpu %u, idx %d, next %u.\n",
+			 __func__, cpu, i, pool->next);
+	} else {
+		spin_lock_bh(&ppm->map_lock);
+
+		i -= ppm->pool_rsvd;
+		bitmap_clear(ppm->ppod_bmap, i, count);
+
+		if (i < ppm->next)
+			ppm->next = i;
+		spin_unlock_bh(&ppm->map_lock);
+
+		pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
+	}
+}
+
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
+{
+	struct cxgbi_ppod_data *pdata;
+
+	if (idx >= ppm->ppmax) {
+		pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax);
+		return;
+	}
+
+	pdata = ppm->ppod_data + idx;
+	if (!pdata->npods) {
+		pr_warn("ippm: idx %u, npods 0.\n", idx);
+		return;
+	}
+
+	pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
+	ppm_unmark_entries(ppm, idx, pdata->npods);
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppod_release);
+
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
+			    u32 per_tag_pg_idx, u32 *ppod_idx,
+			    u32 *ddp_tag, unsigned long caller_data)
+{
+	struct cxgbi_ppod_data *pdata;
+	unsigned int npods;
+	int idx = -1;
+	unsigned int hwidx;
+	u32 tag;
+
+	npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+	if (!npods) {
+		pr_warn("%s: pages %u -> npods %u, full.\n",
+			__func__, nr_pages, npods);
+		return -EINVAL;
+	}
+
+	/* grab from cpu pool first */
+	idx = ppm_get_cpu_entries(ppm, npods, caller_data);
+	/* try the general pool */
+	if (idx < 0)
+		idx = ppm_get_entries(ppm, npods, caller_data);
+	if (idx < 0) {
+		pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
+			 nr_pages, npods, ppm->next, caller_data);
+		return idx;
+	}
+
+	pdata = ppm->ppod_data + idx;
+	hwidx = ppm->base_idx + idx;
+
+	tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);
+
+	if (per_tag_pg_idx)
+		tag |= (per_tag_pg_idx << 30) & 0xC0000000;
+
+	*ppod_idx = idx;
+	*ddp_tag = tag;
+
+	pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
+		 nr_pages, tag, idx, npods, caller_data);
+
+	return npods;
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);
+
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+			     unsigned int tid, unsigned int offset,
+			     unsigned int length,
+			     struct cxgbi_pagepod_hdr *hdr)
+{
+	/* The ddp tag in pagepod should be with bit 31:30 set to 0.
+	 * The ddp Tag on the wire should be with non-zero 31:30 to the peer
+	 */
+	tag &= 0x3FFFFFFF;
+
+	hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+
+	hdr->rsvd = 0;
+	hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
+	hdr->max_offset = htonl(length);
+	hdr->page_offset = htonl(offset);
+
+	pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
+		 tag, tid, length, offset);
+}
+EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);
+
+static void ppm_free(struct cxgbi_ppm *ppm)
+{
+	vfree(ppm);
+}
+
+static void ppm_destroy(struct kref *kref)
+{
+	struct cxgbi_ppm *ppm = container_of(kref,
+					     struct cxgbi_ppm,
+					     refcnt);
+	pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
+		ppm->ndev->name, ppm);
+
+	*ppm->ppm_pp = NULL;
+
+	free_percpu(ppm->pool);
+	ppm_free(ppm);
+}
+
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
+{
+	if (ppm) {
+		int rv;
+
+		rv = kref_put(&ppm->refcnt, ppm_destroy);
+		return rv;
+	}
+	return 1;
+}
+
+static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
+						 unsigned int *pcpu_ppmax)
+{
+	struct cxgbi_ppm_pool *pools;
+	unsigned int ppmax = (*total) / num_possible_cpus();
+	unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
+	unsigned int bmap;
+	unsigned int alloc_sz;
+	unsigned int count = 0;
+	unsigned int cpu;
+
+	/* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
+	if (ppmax > max)
+		ppmax = max;
+
+	/* pool size must be multiple of unsigned long */
+	bmap = BITS_TO_LONGS(ppmax);
+	ppmax = (bmap * sizeof(unsigned long)) << 3;
+
+	alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
+	pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
+
+	if (!pools)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
+
+		memset(ppool, 0, alloc_sz);
+		spin_lock_init(&ppool->lock);
+		count += ppmax;
+	}
+
+	*total = count;
+	*pcpu_ppmax = ppmax;
+
+	return pools;
+}
+
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
+		   struct pci_dev *pdev, void *lldev,
+		   struct cxgbi_tag_format *tformat,
+		   unsigned int ppmax,
+		   unsigned int llimit,
+		   unsigned int start,
+		   unsigned int reserve_factor)
+{
+	struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
+	struct cxgbi_ppm_pool *pool = NULL;
+	unsigned int ppmax_pool = 0;
+	unsigned int pool_index_max = 0;
+	unsigned int alloc_sz;
+	unsigned int ppod_bmap_size;
+
+	if (ppm) {
+		pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+			ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
+		kref_get(&ppm->refcnt);
+		return 1;
+	}
+
+	if (reserve_factor) {
+		ppmax_pool = ppmax / reserve_factor;
+		pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+
+		pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
+			 ndev->name, ppmax, ppmax_pool, pool_index_max);
+	}
+
+	ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
+	alloc_sz = sizeof(struct cxgbi_ppm) +
+		   ppmax * (sizeof(struct cxgbi_ppod_data)) +
+		   ppod_bmap_size * sizeof(unsigned long);
+
+	ppm = vmalloc(alloc_sz);
+	if (!ppm)
+		goto release_ppm_pool;
+
+	memset(ppm, 0, alloc_sz);
+
+	ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
+
+	if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
+		unsigned int start = ppmax - ppmax_pool;
+		unsigned int end = ppod_bmap_size >> 3;
+
+		bitmap_set(ppm->ppod_bmap, ppmax, end - start);
+		pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
+			__func__, ppmax, ppmax_pool, ppod_bmap_size, start,
+			end);
+	}
+
+	spin_lock_init(&ppm->map_lock);
+	kref_init(&ppm->refcnt);
+
+	memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));
+
+	ppm->ppm_pp = ppm_pp;
+	ppm->ndev = ndev;
+	ppm->pdev = pdev;
+	ppm->lldev = lldev;
+	ppm->ppmax = ppmax;
+	ppm->next = 0;
+	ppm->llimit = llimit;
+	ppm->base_idx = start > llimit ?
+			(start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
+	ppm->bmap_index_max = ppmax - ppmax_pool;
+
+	ppm->pool = pool;
+	ppm->pool_rsvd = ppmax_pool;
+	ppm->pool_index_max = pool_index_max;
+
+	/* check one more time */
+	if (*ppm_pp) {
+		ppm_free(ppm);
+		ppm = (struct cxgbi_ppm *)(*ppm_pp);
+
+		pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+			ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);
+
+		kref_get(&ppm->refcnt);
+		return 1;
+	}
+	*ppm_pp = ppm;
+
+	ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
+
+	pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
+		ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
+		ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
+		ppm->pool_index_max);
+
+	return 0;
+
+release_ppm_pool:
+	free_percpu(pool);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(cxgbi_ppm_init);
+
+unsigned int cxgbi_tagmask_set(unsigned int ppmax)
+{
+	unsigned int bits = fls(ppmax);
+
+	if (bits > PPOD_IDX_MAX_SIZE)
+		bits = PPOD_IDX_MAX_SIZE;
+
+	pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
+		ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));
+
+	return 1 << (bits + PPOD_IDX_SHIFT);
+}
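A typical lifecycle for the page pod manager, using only the functions exported by cxgb4_ppm.c above, is: initialize one PPM per adapter, reserve pods (and get a DDP tag) per DDP-capable task, and release the pods when the task completes. The sketch below uses those exported calls directly; the calling context and the actual programming of pod entries into adapter memory are assumed, not shown.

/* Usage sketch: reserve_factor = 4 would put 1/4 of the pods into the
 * per-cpu pools; the rest stay in the global bitmap.
 */
static int example_ddp_setup_tag(struct cxgbi_ppm *ppm,
				 unsigned short nr_pages,
				 unsigned long task_handle,
				 u32 *ppod_idx, u32 *ddp_tag)
{
	int npods = cxgbi_ppm_ppods_reserve(ppm, nr_pages, 0, ppod_idx,
					    ddp_tag, task_handle);

	if (npods < 0)		/* typically -ENOSPC: fall back to non-DDP */
		return npods;

	/* ... write npods pagepod entries into adapter memory here ... */
	return 0;
}

static void example_ddp_task_done(struct cxgbi_ppm *ppm, u32 ppod_idx)
{
	/* return the pods to the pool once the task completes */
	cxgbi_ppm_ppod_release(ppm, ppod_idx);
}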
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
new file mode 100644
index 000000000000..d48732673b75
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
@@ -0,0 +1,310 @@
+/*
+ * cxgb4_ppm.h: Chelsio common library for T4/T5 iSCSI ddp operation
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB4PPM_H__
+#define __CXGB4PPM_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+
+struct cxgbi_pagepod_hdr {
+	u32 vld_tid;
+	u32 pgsz_tag_clr;
+	u32 max_offset;
+	u32 page_offset;
+	u64 rsvd;
+};
+
+#define PPOD_PAGES_MAX			4
+struct cxgbi_pagepod {
+	struct cxgbi_pagepod_hdr hdr;
+	u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+/* ddp tag format
+ * for a 32-bit tag:
+ * bit #
+ * 31 .....   .....  0
+ *     X   Y...Y Z...Z, where
+ *     ^   ^^^^^ ^^^^
+ *     |   |      |____ when ddp bit = 0: color bits
+ *     |   |
+ *     |   |____ when ddp bit = 0: idx into the ddp memory region
+ *     |
+ *     |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag
+ *
+ *  [page selector:2] [sw/free bits] [0] [idx] [color:6]
+ */
+
+#define DDP_PGIDX_MAX		4
+#define DDP_PGSZ_BASE_SHIFT	12	/* base page 4K */
+
+struct cxgbi_task_tag_info {
+	unsigned char flags;
+#define CXGBI_PPOD_INFO_FLAG_VALID	0x1
+#define CXGBI_PPOD_INFO_FLAG_MAPPED	0x2
+	unsigned char cid;
+	unsigned short pg_shift;
+	unsigned int npods;
+	unsigned int idx;
+	unsigned int tag;
+	struct cxgbi_pagepod_hdr hdr;
+	int nents;
+	int nr_pages;
+	struct scatterlist *sgl;
+};
+
+struct cxgbi_tag_format {
+	unsigned char pgsz_order[DDP_PGIDX_MAX];
+	unsigned char pgsz_idx_dflt;
+	unsigned char free_bits:4;
+	unsigned char color_bits:4;
+	unsigned char idx_bits;
+	unsigned char rsvd_bits;
+	unsigned int  no_ddp_mask;
+	unsigned int  idx_mask;
+	unsigned int  color_mask;
+	unsigned int  idx_clr_mask;
+	unsigned int  rsvd_mask;
+};
+
+struct cxgbi_ppod_data {
+	unsigned char pg_idx:2;
+	unsigned char color:6;
+	unsigned char chan_id;
+	unsigned short npods;
+	unsigned long caller_data;
+};
+
+/* per cpu ppm pool */
+struct cxgbi_ppm_pool {
+	unsigned int base;		/* base index */
+	unsigned int next;		/* next possible free index */
+	spinlock_t lock;		/* ppm pool lock */
+	unsigned long bmap[0];
+} ____cacheline_aligned_in_smp;
+
+struct cxgbi_ppm {
+	struct kref refcnt;
+	struct net_device *ndev;	/* net_device, 1st port */
+	struct pci_dev *pdev;
+	void *lldev;
+	void **ppm_pp;
+	struct cxgbi_tag_format tformat;
+	unsigned int ppmax;
+	unsigned int llimit;
+	unsigned int base_idx;
+
+	unsigned int pool_rsvd;
+	unsigned int pool_index_max;
+	struct cxgbi_ppm_pool __percpu *pool;
+	/* map lock */
+	spinlock_t map_lock;		/* ppm map lock */
+	unsigned int bmap_index_max;
+	unsigned int next;
+	unsigned long *ppod_bmap;
+	struct cxgbi_ppod_data ppod_data[0];
+};
+
+#define DDP_THRESHOLD		512
+
+#define PPOD_PAGES_SHIFT	2	/*  4 pages per pod */
+
+#define IPPOD_SIZE		sizeof(struct cxgbi_pagepod)	/*  64 */
+#define PPOD_SIZE_SHIFT		6
+
+/* page pods are allocated in groups of this size (must be power of 2) */
+#define PPOD_CLUSTER_SIZE	16U
+
+#define ULPMEM_DSGL_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS	3	/* (PPOD_SIZE * 3 + ulptx hdr) < 256B */
+#define PCIE_MEMWIN_MAX_NPPODS	16	/*  1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT	0
+#define PPOD_COLOR(x)		((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_IDX_SHIFT		6
+#define PPOD_IDX_MAX_SIZE	24
+
+#define PPOD_TID_SHIFT		0
+#define PPOD_TID(x)		((x) << PPOD_TID_SHIFT)
+
+#define PPOD_TAG_SHIFT		6
+#define PPOD_TAG(x)		((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_VALID_SHIFT	24
+#define PPOD_VALID(x)		((x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG		PPOD_VALID(1U)
+
+#define PPOD_PI_EXTRACT_CTL_SHIFT	31
+#define PPOD_PI_EXTRACT_CTL(x)		((x) << PPOD_PI_EXTRACT_CTL_SHIFT)
+#define PPOD_PI_EXTRACT_CTL_FLAG	PPOD_PI_EXTRACT_CTL(1U)
+
+#define PPOD_PI_TYPE_SHIFT		29
+#define PPOD_PI_TYPE_MASK		0x3
+#define PPOD_PI_TYPE(x)			((x) << PPOD_PI_TYPE_SHIFT)
+
+#define PPOD_PI_CHECK_CTL_SHIFT		27
+#define PPOD_PI_CHECK_CTL_MASK		0x3
+#define PPOD_PI_CHECK_CTL(x)		((x) << PPOD_PI_CHECK_CTL_SHIFT)
+
+#define PPOD_PI_REPORT_CTL_SHIFT	25
+#define PPOD_PI_REPORT_CTL_MASK		0x3
+#define PPOD_PI_REPORT_CTL(x)		((x) << PPOD_PI_REPORT_CTL_SHIFT)
+
+static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag)
+{
+	return !(tag & ppm->tformat.no_ddp_mask);
+}
+
+static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm,
+					     u32 tag)
+{
+	/* the sw tag must be using <= 31 bits */
+	return !(tag & 0x80000000U);
+}
+
+static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm,
+					     u32 sw_tag,
+					     u32 *final_tag)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+
+	if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) {
+		pr_info("sw_tag 0x%x NOT usable.\n", sw_tag);
+		return -EINVAL;
+	}
+
+	if (!sw_tag) {
+		*final_tag = tformat->no_ddp_mask;
+	} else {
+		unsigned int shift = tformat->idx_bits + tformat->color_bits;
+		u32 lower = sw_tag & tformat->idx_clr_mask;
+		u32 upper = (sw_tag >> shift) << (shift + 1);
+
+		*final_tag = upper | tformat->no_ddp_mask | lower;
+	}
+	return 0;
+}
+
+static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm,
+					       u32 tag)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+	unsigned int shift = tformat->idx_bits + tformat->color_bits;
+	u32 lower = tag & tformat->idx_clr_mask;
+	u32 upper = (tag >> tformat->rsvd_bits) << shift;
+
+	return upper | lower;
+}
+
+static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm,
+					    u32 ddp_tag)
+{
+	u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) &
+		     ppm->tformat.idx_mask;
+
+	return hw_idx - ppm->base_idx;
+}
+
+static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx,
+					 unsigned char color)
+{
+	return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color);
+}
+
+static inline unsigned long
+cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm,
+			      u32 ddp_tag)
+{
+	u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag);
+
+	return ppm->ppod_data[idx].caller_data;
+}
+
+/* sw bits are the free bits */
+static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm,
+						   u32 val, u32 orig_tag,
+						   u32 *final_tag)
+{
+	struct cxgbi_tag_format *tformat = &ppm->tformat;
+	u32 v = val >> tformat->free_bits;
+
+	if (v) {
+		pr_info("sw_bits 0x%x too large, avail bits %u.\n",
+			val, tformat->free_bits);
+		return -EINVAL;
+	}
+	if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag))
+		return -EINVAL;
+
+	*final_tag = (val << tformat->rsvd_bits) |
+		     (orig_tag & ppm->tformat.rsvd_mask);
+	return 0;
+}
+
+static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod)
+{
+	ppod->hdr.vld_tid = 0U;
+}
+
+static inline void cxgbi_tagmask_check(unsigned int tagmask,
+				       struct cxgbi_tag_format *tformat)
+{
+	unsigned int bits = fls(tagmask);
+
+	/* reserve top most 2 bits for page selector */
+	tformat->free_bits = 32 - 2 - bits;
+	tformat->rsvd_bits = bits;
+	tformat->color_bits = PPOD_IDX_SHIFT;
+	tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT;
+	tformat->no_ddp_mask = 1 << (bits - 1);
+	tformat->idx_mask = (1 << tformat->idx_bits) - 1;
+	tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1;
+	tformat->idx_clr_mask = (1 << (bits - 1)) - 1;
+	tformat->rsvd_mask = (1 << bits) - 1;
+
+	pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, "
+		"pg %u,%u,%u,%u.\n",
+		tagmask, tformat->rsvd_bits, tformat->idx_bits,
+		tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask,
+		tformat->pgsz_order[0], tformat->pgsz_order[1],
+		tformat->pgsz_order[2], tformat->pgsz_order[3]);
+}
+
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz);
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+			     unsigned int tid, unsigned int offset,
+			     unsigned int length,
+			     struct cxgbi_pagepod_hdr *hdr);
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx);
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
+			    u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag,
+			    unsigned long caller_data);
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
+		   void *lldev, struct cxgbi_tag_format *,
+		   unsigned int ppmax, unsigned int llimit,
+		   unsigned int start,
+		   unsigned int reserve_factor);
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
+void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
+unsigned int cxgbi_tagmask_set(unsigned int ppmax);
+
+#endif	/*__CXGB4PPM_H__*/
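The tag geometry above follows mechanically from cxgbi_tagmask_set() and cxgbi_tagmask_check(). The snippet below re-derives the field widths for one concrete pod count as a standalone illustration; it compiles as plain userspace C (the kernel's fls() is replaced by a local helper), and the example value of 16384 pods is purely an assumption.

#include <stdio.h>

#define PPOD_IDX_SHIFT 6

static int fls32(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	unsigned int ppmax = 16384;	/* assumed pod count */
	unsigned int bits = fls32(ppmax);			/* 15 */
	unsigned int tagmask = 1u << (bits + PPOD_IDX_SHIFT);	/* 0x200000 */

	bits = fls32(tagmask);					/* 22 */
	printf("free_bits=%u idx_bits=%u no_ddp_mask=0x%x idx_clr_mask=0x%x\n",
	       32 - 2 - bits, bits - 1 - PPOD_IDX_SHIFT,
	       1u << (bits - 1), (1u << (bits - 1)) - 1);
	/* -> free_bits=8 idx_bits=15 no_ddp_mask=0x200000 idx_clr_mask=0x1fffff */
	return 0;
}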
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cf711d5f15be..f3c58aaa932d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -191,6 +191,7 @@ static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 enum cxgb4_uld {
 	CXGB4_ULD_RDMA,
 	CXGB4_ULD_ISCSI,
+	CXGB4_ULD_ISCSIT,
 	CXGB4_ULD_MAX
 };
 
@@ -212,6 +213,7 @@ struct l2t_data;
 struct net_device;
 struct pkt_gl;
 struct tp_tcp_stats;
+struct t4_lro_mgr;
 
 struct cxgb4_range {
 	unsigned int start;
@@ -273,6 +275,10 @@ struct cxgb4_lld_info {
 	unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
 	unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
 	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
+	unsigned int iscsi_tagmask;          /* iscsi ddp tag mask */
+	unsigned int iscsi_pgsz_order;       /* iscsi ddp page size orders */
+	unsigned int iscsi_llimit;           /* chip's iscsi region llimit */
+	void **iscsi_ppm;                    /* iscsi page pod manager */
 	int nodeid;                          /* device numa node id */
 };
 
@@ -283,6 +289,11 @@ struct cxgb4_uld_info {
 			  const struct pkt_gl *gl);
 	int (*state_change)(void *handle, enum cxgb4_state new_state);
 	int (*control)(void *handle, enum cxgb4_control control, ...);
+	int (*lro_rx_handler)(void *handle, const __be64 *rsp,
+			      const struct pkt_gl *gl,
+			      struct t4_lro_mgr *lro_mgr,
+			      struct napi_struct *napi);
+	void (*lro_flush)(struct t4_lro_mgr *);
 };
 
 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 5b0f3ef348e9..60a26037a1c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -48,8 +48,6 @@
 #include "t4_regs.h"
 #include "t4_values.h"
 
-#define VLAN_NONE 0xfff
-
 /* identifies sync vs async L2T_WRITE_REQs */
 #define SYNC_WR_S    12
 #define SYNC_WR_V(x) ((x) << SYNC_WR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 4e2d47ac102b..79665bd8f881 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,8 @@
 #include <linux/if_ether.h>
 #include <linux/atomic.h>
 
+#define VLAN_NONE 0xfff
+
 enum { L2T_SIZE = 4096 };     /* # of L2T entries */
 
 enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index deca4a2956cc..13b144bcf725 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2157,8 +2157,11 @@ static int process_responses(struct sge_rspq *q, int budget)
 
 	while (likely(budget_left)) {
 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
-		if (!is_new_response(rc, q))
+		if (!is_new_response(rc, q)) {
+			if (q->flush_handler)
+				q->flush_handler(q);
 			break;
+		}
 
 		dma_rmb();
 		rsp_type = RSPD_TYPE_G(rc->type_gen);
@@ -2544,7 +2547,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
  */
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		     struct net_device *dev, int intr_idx,
-		     struct sge_fl *fl, rspq_handler_t hnd, int cong)
+		     struct sge_fl *fl, rspq_handler_t hnd,
+		     rspq_flush_handler_t flush_hnd, int cong)
 {
 	int ret, flsz = 0;
 	struct fw_iq_cmd c;
@@ -2648,6 +2652,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	iq->size--;                           /* subtract status entry */
 	iq->netdev = dev;
 	iq->handler = hnd;
+	iq->flush_handler = flush_hnd;
+
+	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
+	skb_queue_head_init(&iq->lro_mgr.lroq);
 
 	/* set offset to -1 to distinguish ingress queues without FL */
 	iq->offset = fl ? 0 : -1;
@@ -2992,6 +3000,7 @@ void t4_free_sge_resources(struct adapter *adap)
 
 	/* clean up RDMA and iSCSI Rx queues */
 	t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
+	t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
 	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
 	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 1d2d1da40c80..80417fc564d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -51,6 +51,7 @@ enum {
 	CPL_TX_PKT            = 0xE,
 	CPL_L2T_WRITE_REQ     = 0x12,
 	CPL_TID_RELEASE       = 0x1A,
+	CPL_TX_DATA_ISO	      = 0x1F,
 
 	CPL_CLOSE_LISTSRV_RPL = 0x20,
 	CPL_L2T_WRITE_RPL     = 0x23,
@@ -344,6 +345,87 @@ struct cpl_pass_open_rpl {
 	u8 status;
 };
 
+struct tcp_options {
+	__be16 mss;
+	__u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	__u8:4;
+	__u8 unknown:1;
+	__u8:1;
+	__u8 sack:1;
+	__u8 tstamp:1;
+#else
+	__u8 tstamp:1;
+	__u8 sack:1;
+	__u8:1;
+	__u8 unknown:1;
+	__u8:4;
+#endif
+};
+
+struct cpl_pass_accept_req {
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 hdr_len;
+	__be16 vlan;
+	__be16 l2info;
+	__be32 tos_stid;
+	struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define SYN_RX_CHAN_S    0
+#define SYN_RX_CHAN_M    0xF
+#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
+#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
+
+#define TCP_HDR_LEN_S    10
+#define TCP_HDR_LEN_M    0x3F
+#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
+#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define IP_HDR_LEN_S    16
+#define IP_HDR_LEN_M    0x3FF
+#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
+#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define ETH_HDR_LEN_S    26
+#define ETH_HDR_LEN_M    0x1F
+#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
+#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
+
+/* cpl_pass_accept_req.l2info fields */
+#define SYN_MAC_IDX_S    0
+#define SYN_MAC_IDX_M    0x1FF
+#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
+#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
+
+#define SYN_XACT_MATCH_S    9
+#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
+#define SYN_XACT_MATCH_F    SYN_XACT_MATCH_V(1U)
+
+#define SYN_INTF_S    12
+#define SYN_INTF_M    0xF
+#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
+#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
+
+enum {                     /* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+#define CONG_CNTRL_S    14
+#define CONG_CNTRL_M    0x3
+#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
+#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
+
+#define T5_ISS_S    18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F    T5_ISS_V(1U)
+
 struct cpl_pass_accept_rpl {
 	WR_HDR;
 	union opcode_tid ot;
@@ -818,6 +900,110 @@ struct cpl_iscsi_hdr {
 #define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
 #define ISCSI_DDP_F ISCSI_DDP_V(1U)
 
+struct cpl_rx_data_ddp {
+	union opcode_tid ot;
+	__be16 urg;
+	__be16 len;
+	__be32 seq;
+	union {
+		__be32 nxt_seq;
+		__be32 ddp_report;
+	};
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
+#define cpl_rx_iscsi_ddp cpl_rx_data_ddp
+
+struct cpl_iscsi_data {
+	union opcode_tid ot;
+	__u8 rsvd0[2];
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd1;
+	__u8 status;
+};
+
+struct cpl_tx_data_iso {
+	__be32 op_to_scsi;
+	__u8 reserved1;
+	__u8 ahs_len;
+	__be16 mpdu;
+	__be32 burst_size;
+	__be32 len;
+	__be32 reserved2_seglen_offset;
+	__be32 datasn_offset;
+	__be32 buffer_offset;
+	__be32 reserved3;
+
+	/* encapsulated CPL_TX_DATA follows here */
+};
+
+/* cpl_tx_data_iso.op_to_scsi fields */
+#define CPL_TX_DATA_ISO_OP_S	24
+#define CPL_TX_DATA_ISO_OP_M	0xff
+#define CPL_TX_DATA_ISO_OP_V(x)	((x) << CPL_TX_DATA_ISO_OP_S)
+#define CPL_TX_DATA_ISO_OP_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_OP_S) & CPL_TX_DATA_ISO_OP_M)
+
+#define CPL_TX_DATA_ISO_FIRST_S		23
+#define CPL_TX_DATA_ISO_FIRST_M		0x1
+#define CPL_TX_DATA_ISO_FIRST_V(x)	((x) << CPL_TX_DATA_ISO_FIRST_S)
+#define CPL_TX_DATA_ISO_FIRST_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_FIRST_S) & CPL_TX_DATA_ISO_FIRST_M)
+#define CPL_TX_DATA_ISO_FIRST_F	CPL_TX_DATA_ISO_FIRST_V(1U)
+
+#define CPL_TX_DATA_ISO_LAST_S		22
+#define CPL_TX_DATA_ISO_LAST_M		0x1
+#define CPL_TX_DATA_ISO_LAST_V(x)	((x) << CPL_TX_DATA_ISO_LAST_S)
+#define CPL_TX_DATA_ISO_LAST_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_LAST_S) & CPL_TX_DATA_ISO_LAST_M)
+#define CPL_TX_DATA_ISO_LAST_F	CPL_TX_DATA_ISO_LAST_V(1U)
+
+#define CPL_TX_DATA_ISO_CPLHDRLEN_S	21
+#define CPL_TX_DATA_ISO_CPLHDRLEN_M	0x1
+#define CPL_TX_DATA_ISO_CPLHDRLEN_V(x)	((x) << CPL_TX_DATA_ISO_CPLHDRLEN_S)
+#define CPL_TX_DATA_ISO_CPLHDRLEN_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_CPLHDRLEN_S) & CPL_TX_DATA_ISO_CPLHDRLEN_M)
+#define CPL_TX_DATA_ISO_CPLHDRLEN_F	CPL_TX_DATA_ISO_CPLHDRLEN_V(1U)
+
+#define CPL_TX_DATA_ISO_HDRCRC_S	20
+#define CPL_TX_DATA_ISO_HDRCRC_M	0x1
+#define CPL_TX_DATA_ISO_HDRCRC_V(x)	((x) << CPL_TX_DATA_ISO_HDRCRC_S)
+#define CPL_TX_DATA_ISO_HDRCRC_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_HDRCRC_S) & CPL_TX_DATA_ISO_HDRCRC_M)
+#define CPL_TX_DATA_ISO_HDRCRC_F	CPL_TX_DATA_ISO_HDRCRC_V(1U)
+
+#define CPL_TX_DATA_ISO_PLDCRC_S	19
+#define CPL_TX_DATA_ISO_PLDCRC_M	0x1
+#define CPL_TX_DATA_ISO_PLDCRC_V(x)	((x) << CPL_TX_DATA_ISO_PLDCRC_S)
+#define CPL_TX_DATA_ISO_PLDCRC_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_PLDCRC_S) & CPL_TX_DATA_ISO_PLDCRC_M)
+#define CPL_TX_DATA_ISO_PLDCRC_F	CPL_TX_DATA_ISO_PLDCRC_V(1U)
+
+#define CPL_TX_DATA_ISO_IMMEDIATE_S	18
+#define CPL_TX_DATA_ISO_IMMEDIATE_M	0x1
+#define CPL_TX_DATA_ISO_IMMEDIATE_V(x)	((x) << CPL_TX_DATA_ISO_IMMEDIATE_S)
+#define CPL_TX_DATA_ISO_IMMEDIATE_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_IMMEDIATE_S) & CPL_TX_DATA_ISO_IMMEDIATE_M)
+#define CPL_TX_DATA_ISO_IMMEDIATE_F	CPL_TX_DATA_ISO_IMMEDIATE_V(1U)
+
+#define CPL_TX_DATA_ISO_SCSI_S		16
+#define CPL_TX_DATA_ISO_SCSI_M		0x3
+#define CPL_TX_DATA_ISO_SCSI_V(x)	((x) << CPL_TX_DATA_ISO_SCSI_S)
+#define CPL_TX_DATA_ISO_SCSI_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_SCSI_S) & CPL_TX_DATA_ISO_SCSI_M)
+
+/* cpl_tx_data_iso.reserved2_seglen_offset fields */
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_S		0
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_M		0xffffff
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(x)	\
+	((x) << CPL_TX_DATA_ISO_SEGLEN_OFFSET_S)
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_G(x)	\
+	(((x) >> CPL_TX_DATA_ISO_SEGLEN_OFFSET_S) & \
+	 CPL_TX_DATA_ISO_SEGLEN_OFFSET_M)
+
 struct cpl_rx_data {
 	union opcode_tid ot;
 	__be16 rsvd;
@@ -854,6 +1040,15 @@ struct cpl_rx_data_ack {
 #define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
 #define RX_FORCE_ACK_F    RX_FORCE_ACK_V(1U)
 
+#define RX_DACK_MODE_S    29
+#define RX_DACK_MODE_M    0x3
+#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
+#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
+
+#define RX_DACK_CHANGE_S    31
+#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
+#define RX_DACK_CHANGE_F    RX_DACK_CHANGE_V(1U)
+
 struct cpl_rx_pkt {
 	struct rss_header rsshdr;
 	u8 opcode;
@@ -1090,6 +1285,12 @@ struct cpl_fw4_ack {
 	__be64 rsvd1;
 };
 
+enum {
+	CPL_FW4_ACK_FLAGS_SEQVAL	= 0x1,	/* seqn valid */
+	CPL_FW4_ACK_FLAGS_CH		= 0x2,	/* channel change complete */
+	CPL_FW4_ACK_FLAGS_FLOWC		= 0x4,	/* fw_flowc_wr complete */
+};
+
 struct cpl_fw6_msg {
 	u8 opcode;
 	u8 type;
@@ -1115,6 +1316,17 @@ struct cpl_fw6_msg_ofld_connection_wr_rpl {
 	__u8   rsvd[2];
 };
 
+struct cpl_tx_data {
+	union opcode_tid ot;
+	__be32 len;
+	__be32 rsvd;
+	__be32 flags;
+};
+
+/* cpl_tx_data.flags field */
+#define TX_FORCE_S	13
+#define TX_FORCE_V(x)	((x) << TX_FORCE_S)
+
 enum {
 	ULP_TX_MEM_READ = 2,
 	ULP_TX_MEM_WRITE = 3,
@@ -1143,6 +1355,11 @@ struct ulptx_sgl {
 	struct ulptx_sge_pair sge[0];
 };
 
+struct ulptx_idata {
+	__be32 cmd_more;
+	__be32 len;
+};
+
 #define ULPTX_NSGE_S    0
 #define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index c8661c77b4e3..7ad6d4e75b2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,6 +101,7 @@ enum fw_wr_opcodes {
 	FW_RI_BIND_MW_WR               = 0x18,
 	FW_RI_FR_NSMR_WR               = 0x19,
 	FW_RI_INV_LSTAG_WR             = 0x1a,
+	FW_ISCSI_TX_DATA_WR	       = 0x45,
 	FW_LASTC2E_WR                  = 0x70
 };
 
@@ -561,7 +562,12 @@ enum fw_flowc_mnem {
 	FW_FLOWC_MNEM_SNDBUF,
 	FW_FLOWC_MNEM_MSS,
 	FW_FLOWC_MNEM_TXDATAPLEN_MAX,
-	FW_FLOWC_MNEM_SCHEDCLASS = 11,
+	FW_FLOWC_MNEM_TCPSTATE,
+	FW_FLOWC_MNEM_EOSTATE,
+	FW_FLOWC_MNEM_SCHEDCLASS,
+	FW_FLOWC_MNEM_DCBPRIO,
+	FW_FLOWC_MNEM_SND_SCALE,
+	FW_FLOWC_MNEM_RCV_SCALE,
 };
 
 struct fw_flowc_mnemval {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
index 22dd8d670e4a..2fd9c76fc21c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -25,21 +25,4 @@
 
 #define T5_ISS_VALID (1 << 18)
 
-struct ulptx_idata {
-	__be32 cmd_more;
-	__be32 len;
-};
-
-struct cpl_rx_data_ddp {
-	union opcode_tid ot;
-	__be16 urg;
-	__be16 len;
-	__be32 seq;
-	union {
-		__be32 nxt_seq;
-		__be32 ddp_report;
-	};
-	__be32 ulp_crc;
-	__be32 ddpvld;
-};
 #endif	/* __CXGB4I_H__ */
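The CPL_TX_DATA_ISO field macros added to t4_msg.h follow the driver's usual _S/_M/_V/_G convention: _S is the shift, _M the field mask, _V(x) inserts a value, and _G(x) extracts one. The sketch below composes cpl_tx_data_iso.op_to_scsi from those macros; the opcode and flag values a real caller would pass are not spelled out by this diff, so treat the arguments as inputs.

/* Compose one op_to_scsi control word: shift each field into place
 * with the _V macros and OR the pieces together, big-endian on the wire.
 */
static inline __be32 example_tx_data_iso_ctrl(u8 op, bool first, bool last,
					      bool hcrc, bool dcrc, u8 scsi)
{
	return cpu_to_be32(CPL_TX_DATA_ISO_OP_V(op) |
			   CPL_TX_DATA_ISO_FIRST_V(first) |
			   CPL_TX_DATA_ISO_LAST_V(last) |
			   CPL_TX_DATA_ISO_HDRCRC_V(hcrc) |
			   CPL_TX_DATA_ISO_PLDCRC_V(dcrc) |
			   CPL_TX_DATA_ISO_SCSI_V(scsi));
}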