author | Pablo Neira Ayuso <pablo@netfilter.org> | 2015-12-14 20:29:00 +0100
committer | Pablo Neira Ayuso <pablo@netfilter.org> | 2015-12-14 20:31:16 +0100
commit | a4ec80082c82f3fda775d13b2a72aac38248ced6 (patch)
tree | 23affbe49c629cbbdb5553ca78aa444a7ccf2ead /drivers/net/ethernet/chelsio
parent | 19576c9478682a398276c994ea0d2696474df32b (diff)
parent | cb4396edd84ed73081635fb933d19c1410fafaf4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Resolve conflict between commit 264640fc2c5f4f ("ipv6: distinguish frag
queues by device for multicast and link-local packets") from the net
tree and commit 029f7f3b8701c ("netfilter: ipv6: nf_defrag: avoid/free
clone operations") from the nf-next tree.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Conflicts:
net/ipv6/netfilter/nf_conntrack_reasm.c
Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r-- | drivers/net/ethernet/chelsio/Kconfig | 17
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb/pm3393.c | 2
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb/vsc7326.c | 2
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 14
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 28
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 11
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 6
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 146
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 22
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/sge.c | 120
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 3
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 4
13 files changed, 264 insertions, 114 deletions
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index a79813a17b6e..4d187f22c48b 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -65,13 +65,14 @@ config CHELSIO_T3 will be called cxgb3. config CHELSIO_T4 - tristate "Chelsio Communications T4/T5 Ethernet support" + tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) select FW_LOADER select MDIO ---help--- - This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet - adapter and T5 based 40Gb Ethernet adapter. + This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet + adapter and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb + Ethernet adapters. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. @@ -85,7 +86,7 @@ config CHELSIO_T4 will be called cxgb4. config CHELSIO_T4_DCB - bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards" + bool "Data Center Bridging (DCB) Support for Chelsio T4/T5/T6 cards" default n depends on CHELSIO_T4 && DCB ---help--- @@ -107,12 +108,12 @@ config CHELSIO_T4_FCOE If unsure, say N. config CHELSIO_T4VF - tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" + tristate "Chelsio Communications T4/T5/T6 Virtual Function Ethernet support" depends on PCI ---help--- - This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet - adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual - Functions. + This driver supports Chelsio T4, T5 & T6 based gigabit, 10Gb Ethernet + adapters and T5/T6 based 40Gb and T6 based 25Gb, 50Gb and 100Gb + Ethernet adapters with PCI-E SR-IOV Virtual Functions. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. 
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c index ec5e05052d99..eb462d7db427 100644 --- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -570,7 +570,7 @@ static void pm3393_destroy(struct cmac *cmac) kfree(cmac); } -static struct cmac_ops pm3393_ops = { +static const struct cmac_ops pm3393_ops = { .destroy = pm3393_destroy, .reset = pm3393_reset, .interrupt_enable = pm3393_interrupt_enable, diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c index b0cb388f5e12..6f30b6f78553 100644 --- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -666,7 +666,7 @@ static void mac_destroy(struct cmac *mac) kfree(mac); } -static struct cmac_ops vsc7326_ops = { +static const struct cmac_ops vsc7326_ops = { .destroy = mac_destroy, .reset = mac_reset, .interrupt_handler = mac_intr_handler, diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 8f7aa53a4c4b..60908eab3b3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -701,15 +701,16 @@ static ssize_t attr_store(struct device *d, ssize_t(*set) (struct net_device *, unsigned int), unsigned int min_val, unsigned int max_val) { - char *endp; ssize_t ret; unsigned int val; if (!capable(CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val < min_val || val > max_val) + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val < min_val || val > max_val) return -EINVAL; rtnl_lock(); @@ -829,14 +830,15 @@ static ssize_t tm_attr_store(struct device *d, struct port_info *pi = netdev_priv(to_net_dev(d)); struct adapter *adap = pi->adapter; unsigned int val; - char *endp; ssize_t ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; - val = simple_strtoul(buf, &endp, 0); - if (endp == buf || val > 10000000) + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val > 10000000) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index a22768c94200..ee04caa6c4d8 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -709,11 +709,21 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) return ret; } - p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); - p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10); - p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); - p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); - p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); + ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); + if (ret) + return ret; + ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); + if (ret) + return ret; + ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); + if (ret) + return ret; + ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); + if (ret) + return ret; + ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); + if (ret) + return ret; memcpy(p->sn, vpd.sn_data, SERNUM_LEN); /* Old eeproms didn't have port information */ @@ -723,8 +733,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) } else { p->port_type[0] = hex_to_bin(vpd.port0_data[0]); p->port_type[1] = hex_to_bin(vpd.port1_data[0]); - p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); - p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); + ret 
= kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); + if (ret) + return ret; + ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); + if (ret) + return ret; } ret = hex2bin(p->eth_base, vpd.na_data, 6); diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index c308429dd9c7..d288dcf6062f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -118,6 +118,11 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); if (ret) { write_unlock_bh(&ctbl->lock); + dev_err(adap->pdev_dev, + "CLIP FW cmd failed with error %d, " + "Connections using %pI6c wont be " + "offloaded", + ret, ce->addr6.sin6_addr.s6_addr); return ret; } } else { @@ -127,6 +132,9 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) } } else { write_unlock_bh(&ctbl->lock); + dev_info(adap->pdev_dev, "CLIP table overflow, " + "Connections using %pI6c wont be offloaded", + (void *)lip); return -ENOMEM; } write_unlock_bh(&ctbl->lock); @@ -146,6 +154,9 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) int hash; int ret = -1; + if (!ctbl) + return; + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 55a47de544ea..e01e7228f607 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -483,6 +483,8 @@ struct sge_fl { /* SGE free-buffer queue state */ unsigned int pidx; /* producer index */ unsigned long alloc_failed; /* # of times buffer allocation failed */ unsigned long large_alloc_failed; + unsigned long mapping_err; /* # of RX Buffer DMA Mapping failures */ + unsigned long low; /* # of times momentarily starving */ unsigned long starving; /* RO fields */ unsigned int cntxt_id; /* SGE context id for the free list */ @@ -618,6 +620,7 @@ struct sge_ofld_txq { /* state for an SGE offload Tx queue */ struct adapter *adap; struct sk_buff_head sendq; /* list of backpressured packets */ struct tasklet_struct qresume_tsk; /* restarts the queue */ + bool service_ofldq_running; /* service_ofldq() is processing sendq */ u8 full; /* the Tx ring is full */ unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ } ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 4269944c5db5..0d579b192350 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2325,6 +2325,8 @@ do { \ TL("TxMapErr:", mapping_err); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (iscsi_idx < iscsi_entries) { @@ -2359,6 +2361,8 @@ do { \ RL("RxNoMem:", stats.nomem); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (rdma_idx < rdma_entries) { @@ -2388,6 +2392,8 @@ do { \ RL("RxNoMem:", stats.nomem); RL("FLAllocErr:", fl.alloc_failed); RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); RL("FLStarving:", fl.starving); } else if (ciq_idx < ciq_entries) { diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index a077f9476daf..2a61a42ab033 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -35,79 +35,79 @@ static void set_msglevel(struct net_device *dev, u32 val) } static const char stats_strings[][ETH_GSTRING_LEN] = { - "tx_octets_ok ", - "tx_frames_ok ", - "tx_broadcast_frames ", - "tx_multicast_frames ", - "tx_unicast_frames ", - "tx_error_frames ", - - "tx_frames_64 ", - "tx_frames_65_to_127 ", - "tx_frames_128_to_255 ", - "tx_frames_256_to_511 ", - "tx_frames_512_to_1023 ", - "tx_frames_1024_to_1518 ", - "tx_frames_1519_to_max ", - - "tx_frames_dropped ", - "tx_pause_frames ", - "tx_ppp0_frames ", - "tx_ppp1_frames ", - "tx_ppp2_frames ", - "tx_ppp3_frames ", - "tx_ppp4_frames ", - "tx_ppp5_frames ", - "tx_ppp6_frames ", - "tx_ppp7_frames ", - - "rx_octets_ok ", - "rx_frames_ok ", - "rx_broadcast_frames ", - "rx_multicast_frames ", - "rx_unicast_frames ", - - "rx_frames_too_long ", - "rx_jabber_errors ", - "rx_fcs_errors ", - "rx_length_errors ", - "rx_symbol_errors ", - "rx_runt_frames ", - - "rx_frames_64 ", - "rx_frames_65_to_127 ", - "rx_frames_128_to_255 ", - "rx_frames_256_to_511 ", - "rx_frames_512_to_1023 ", - "rx_frames_1024_to_1518 ", - "rx_frames_1519_to_max ", - - "rx_pause_frames ", - "rx_ppp0_frames ", - "rx_ppp1_frames ", - "rx_ppp2_frames ", - "rx_ppp3_frames ", - "rx_ppp4_frames ", - "rx_ppp5_frames ", - "rx_ppp6_frames ", - "rx_ppp7_frames ", - - "rx_bg0_frames_dropped ", - "rx_bg1_frames_dropped ", - "rx_bg2_frames_dropped ", - "rx_bg3_frames_dropped ", - "rx_bg0_frames_trunc ", - "rx_bg1_frames_trunc ", - "rx_bg2_frames_trunc ", - "rx_bg3_frames_trunc ", - - "tso ", - "tx_csum_offload ", - "rx_csum_good ", - "vlan_extractions ", - "vlan_insertions ", - "gro_packets ", - "gro_merged ", + "tx_octets_ok ", + "tx_frames_ok ", + "tx_broadcast_frames ", + "tx_multicast_frames ", + "tx_unicast_frames ", + "tx_error_frames ", + + "tx_frames_64 ", + "tx_frames_65_to_127 ", + "tx_frames_128_to_255 ", + "tx_frames_256_to_511 ", + "tx_frames_512_to_1023 ", + "tx_frames_1024_to_1518 ", + "tx_frames_1519_to_max ", + + "tx_frames_dropped ", + "tx_pause_frames ", + "tx_ppp0_frames ", + "tx_ppp1_frames ", + "tx_ppp2_frames ", + "tx_ppp3_frames ", + "tx_ppp4_frames ", + "tx_ppp5_frames ", + "tx_ppp6_frames ", + "tx_ppp7_frames ", + + "rx_octets_ok ", + "rx_frames_ok ", + "rx_broadcast_frames ", + "rx_multicast_frames ", + "rx_unicast_frames ", + + "rx_frames_too_long ", + "rx_jabber_errors ", + "rx_fcs_errors ", + "rx_length_errors ", + "rx_symbol_errors ", + "rx_runt_frames ", + + "rx_frames_64 ", + "rx_frames_65_to_127 ", + "rx_frames_128_to_255 ", + "rx_frames_256_to_511 ", + "rx_frames_512_to_1023 ", + "rx_frames_1024_to_1518 ", + "rx_frames_1519_to_max ", + + "rx_pause_frames ", + "rx_ppp0_frames ", + "rx_ppp1_frames ", + "rx_ppp2_frames ", + "rx_ppp3_frames ", + "rx_ppp4_frames ", + "rx_ppp5_frames ", + "rx_ppp6_frames ", + "rx_ppp7_frames ", + + "rx_bg0_frames_dropped ", + "rx_bg1_frames_dropped ", + "rx_bg2_frames_dropped ", + "rx_bg3_frames_dropped ", + "rx_bg0_frames_trunc ", + "rx_bg1_frames_trunc ", + "rx_bg2_frames_trunc ", + "rx_bg3_frames_trunc ", + + "tso ", + "tx_csum_offload ", + "rx_csum_good ", + "vlan_extractions ", + "vlan_insertions ", + "gro_packets ", + "gro_merged ", }; static char adapter_stats_strings[][ETH_GSTRING_LEN] = { diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0d147610a06f..edd706e739fb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4865,15 +4865,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } #if IS_ENABLED(CONFIG_IPV6) - adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, - adapter->clipt_end); - if (!adapter->clipt) { - /* We tolerate a lack of clip_table, giving up - * some functionality + if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && + (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { + /* CLIP functionality is not present in hardware, + * hence disable all offload features */ dev_warn(&pdev->dev, - "could not allocate Clip table, continuing\n"); + "CLIP not enabled in hardware, continuing\n"); adapter->params.offload = 0; + } else { + adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, + adapter->clipt_end); + if (!adapter->clipt) { + /* We tolerate a lack of clip_table, giving up + * some functionality + */ + dev_warn(&pdev->dev, + "could not allocate Clip table, continuing\n"); + adapter->params.offload = 0; + } } #endif if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 48d8fbb1c220..8d35ce317f67 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -406,7 +406,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q, */ static inline int reclaimable(const struct sge_txq *q) { - int hw_cidx = ntohs(q->stat->cidx); + int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); hw_cidx -= q->cidx; return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; } @@ -613,6 +613,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { __free_pages(pg, s->fl_pg_order); + q->mapping_err++; goto out; /* do not try small pages for this error */ } mapping |= RX_LARGE_PG_BUF; @@ -642,6 +643,7 @@ alloc_small_pages: PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { put_page(pg); + q->mapping_err++; goto out; } *d++ = cpu_to_be64(mapping); @@ -663,6 +665,7 @@ out: cred = q->avail - cred; if (unlikely(fl_starving(adap, q))) { smp_wmb(); + q->low++; set_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.starving_fl); } @@ -1029,6 +1032,30 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, *p = 0; } +static void *inline_tx_skb_header(const struct sk_buff *skb, + const struct sge_txq *q, void *pos, + int length) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(length <= left)) { + memcpy(pos, skb->data, length); + pos += length; + } else { + memcpy(pos, skb->data, left); + memcpy(q->desc, skb->data + left, length - left); + pos = (void *)q->desc + (length - left); + } + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) { + *p = 0; + return p + 1; + } + return p; +} + /* * Figure out what HW csum a packet wants and return the appropriate control * bits. 
@@ -1320,7 +1347,7 @@ out_free: dev_kfree_skb_any(skb); */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - int hw_cidx = ntohs(q->stat->cidx); + int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); int reclaim = hw_cidx - q->cidx; if (reclaim < 0) @@ -1542,24 +1569,50 @@ static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) } /** - * service_ofldq - restart a suspended offload queue + * service_ofldq - service/restart a suspended offload queue * @q: the offload queue * - * Services an offload Tx queue by moving packets from its packet queue - * to the HW Tx ring. The function starts and ends with the queue locked. + * Services an offload Tx queue by moving packets from its Pending Send + * Queue to the Hardware TX ring. The function starts and ends with the + * Send Queue locked, but drops the lock while putting the skb at the + * head of the Send Queue onto the Hardware TX Ring. Dropping the lock + * allows more skbs to be added to the Send Queue by other threads. + * The packet being processed at the head of the Pending Send Queue is + * left on the queue in case we experience DMA Mapping errors, etc. + * and need to give up and restart later. + * + * service_ofldq() can be thought of as a task which opportunistically + * uses other threads execution contexts. We use the Offload Queue + * boolean "service_ofldq_running" to make sure that only one instance + * is ever running at a time ... */ static void service_ofldq(struct sge_ofld_txq *q) { - u64 *pos; + u64 *pos, *before, *end; int credits; struct sk_buff *skb; + struct sge_txq *txq; + unsigned int left; unsigned int written = 0; unsigned int flits, ndesc; + /* If another thread is currently in service_ofldq() processing the + * Pending Send Queue then there's nothing to do. Otherwise, flag + * that we're doing the work and continue. Examining/modifying + * the Offload Queue boolean "service_ofldq_running" must be done + * while holding the Pending Send Queue Lock. + */ + if (q->service_ofldq_running) + return; + q->service_ofldq_running = true; + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { - /* - * We drop the lock but leave skb on sendq, thus retaining - * exclusive access to the state of the queue. + /* We drop the lock while we're working with the skb at the + * head of the Pending Send Queue. This allows more skbs to + * be added to the Pending Send Queue while we're working on + * this one. We don't need to lock to guard the TX Ring + * updates because only one thread of execution is ever + * allowed into service_ofldq() at a time. */ spin_unlock(&q->sendq.lock); @@ -1583,9 +1636,32 @@ static void service_ofldq(struct sge_ofld_txq *q) } else { int last_desc, hdr_len = skb_transport_offset(skb); - memcpy(pos, skb->data, hdr_len); - write_sgl(skb, &q->q, (void *)pos + hdr_len, - pos + flits, hdr_len, + /* The WR headers may not fit within one descriptor. + * So we need to deal with wrap-around here. + */ + before = (u64 *)pos; + end = (u64 *)pos + flits; + txq = &q->q; + pos = (void *)inline_tx_skb_header(skb, &q->q, + (void *)pos, + hdr_len); + if (before > (u64 *)pos) { + left = (u8 *)end - (u8 *)txq->stat; + end = (void *)txq->desc + left; + } + + /* If current position is already at the end of the + * ofld queue, reset the current to point to + * start of the queue and update the end ptr as well. 
+ */ + if (pos == (u64 *)txq->stat) { + left = (u8 *)end - (u8 *)txq->stat; + end = (void *)txq->desc + left; + pos = (void *)txq->desc; + } + + write_sgl(skb, &q->q, (void *)pos, + end, hdr_len, (dma_addr_t *)skb->head); #ifdef CONFIG_NEED_DMA_MAP_STATE skb->dev = q->adap->port[0]; @@ -1604,6 +1680,11 @@ static void service_ofldq(struct sge_ofld_txq *q) written = 0; } + /* Reacquire the Pending Send Queue Lock so we can unlink the + * skb we've just successfully transferred to the TX Ring and + * loop for the next skb which may be at the head of the + * Pending Send Queue. + */ spin_lock(&q->sendq.lock); __skb_unlink(skb, &q->sendq); if (is_ofld_imm(skb)) @@ -1611,6 +1692,11 @@ static void service_ofldq(struct sge_ofld_txq *q) } if (likely(written)) ring_tx_db(q->adap, &q->q, written); + + /*Indicate that no thread is processing the Pending Send Queue + * currently. + */ + q->service_ofldq_running = false; } /** @@ -1624,9 +1710,19 @@ static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) { skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ spin_lock(&q->sendq.lock); + + /* Queue the new skb onto the Offload Queue's Pending Send Queue. If + * that results in this new skb being the only one on the queue, start + * servicing it. If there are other skbs already on the list, then + * either the queue is currently being processed or it's been stopped + * for some reason and it'll be restarted at a later time. Restart + * paths are triggered by events like experiencing a DMA Mapping Error + * or filling the Hardware TX Ring. + */ __skb_queue_tail(&q->sendq, skb); if (q->sendq.qlen == 1) service_ofldq(q); + spin_unlock(&q->sendq.lock); return NET_XMIT_SUCCESS; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 03ed00c49823..a8dda635456d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -162,6 +162,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */ + CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ + CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index fc3044c8ac1c..91b52a21a2e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2802,6 +2802,10 @@ #define HASHEN_V(x) ((x) << HASHEN_S) #define HASHEN_F HASHEN_V(1U) +#define ASLIPCOMPEN_S 17 +#define ASLIPCOMPEN_V(x) ((x) << ASLIPCOMPEN_S) +#define ASLIPCOMPEN_F ASLIPCOMPEN_V(1U) + #define REQQPARERR_S 16 #define REQQPARERR_V(x) ((x) << REQQPARERR_S) #define REQQPARERR_F REQQPARERR_V(1U) |
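The new service_ofldq() comments in the sge.c hunks above describe a concurrency pattern worth calling out: a "running" boolean, checked and set only while the send-queue lock is held, guarantees that a single thread drains the pending queue at a time, while the lock itself is dropped around the per-packet work so other threads can keep appending. Below is a minimal, hypothetical userspace sketch of that pattern; it uses a pthread mutex in place of the kernel spinlock, and the names (ofld_queue, service_queue, enqueue, pending) are illustrative only, not part of the driver.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the offload queue: a counter plays the
	 * role of the pending send queue, guarded by a mutex instead of the
	 * kernel's spinlock.
	 */
	struct ofld_queue {
		pthread_mutex_t lock;
		int pending;            /* number of queued "packets" */
		bool service_running;   /* true while some thread is draining */
	};

	/* Drain the queue; caller holds q->lock.  Mirrors the shape of
	 * service_ofldq(): bail out if another thread is already servicing,
	 * otherwise mark ourselves as the servicer and drop the lock around
	 * the per-packet work.
	 */
	static void service_queue(struct ofld_queue *q)
	{
		if (q->service_running)
			return;
		q->service_running = true;

		while (q->pending > 0) {
			q->pending--;

			/* Drop the lock while "transferring" the packet so other
			 * threads may enqueue more work; exclusivity comes from
			 * service_running, not from holding the lock.
			 */
			pthread_mutex_unlock(&q->lock);
			printf("servicing one packet\n");
			pthread_mutex_lock(&q->lock);
		}

		q->service_running = false;   /* no thread is draining any more */
	}

	/* Enqueue one packet and, like ofld_xmit(), kick the servicer when
	 * the queue has just become non-empty.
	 */
	static void enqueue(struct ofld_queue *q)
	{
		pthread_mutex_lock(&q->lock);
		q->pending++;
		if (q->pending == 1)
			service_queue(q);
		pthread_mutex_unlock(&q->lock);
	}

	int main(void)
	{
		struct ofld_queue q = { PTHREAD_MUTEX_INITIALIZER, 0, false };

		enqueue(&q);
		enqueue(&q);
		return 0;
	}

If a second thread enqueues while the servicer has the lock dropped, its call into service_queue() returns immediately because the flag is set; the new entry is still picked up because the original servicer re-checks the queue after re-acquiring the lock. That re-check is what makes the flag safe to use in place of holding the lock for the whole drain.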