author     Anjali Singhai <anjali.singhai@intel.com>     2015-02-21 06:42:35 +0000
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2015-02-26 04:53:58 -0800
commit     71da61976ec18fb57b3ba9a1dd846b747cc7c884
tree       9de12ed2f5e134a02805d09f26c11b9295bd4438 /drivers/net/ethernet/intel/i40evf
parent     b67a03357cab0ccb91d641fead6f167c697a24cb
i40e: Fix TSO with more than 8 frags per segment issue
Extended testing uncovered hardware limitations that the driver must
adhere to:

1) no more than 8 descriptors can be used per packet on the wire
2) no header can span more than 3 descriptors

If either limit is exceeded, the hardware generates an internal error
and freezes the Tx queue. This patch linearizes the skb to avoid these
situations.

Change-ID: I37dab7d3966e14895a9663ec4d0aaa8eb0d9e115
Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Tested-by: Jim Young <james.m.young@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
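To make the fast-path arithmetic concrete, here is a minimal user-space sketch of the patch's cheap first test, assuming invented fragment counts; avg_frags_exceed_limit is a hypothetical name, and DIV_ROUND_UP is reproduced from the kernel macro of the same name. It flags a TSO skb when the average number of buffers per wire segment, rounded up, exceeds the 8-descriptor limit.

/* Hypothetical user-space model of the patch's fast-path test, not
 * driver code: flag a TSO skb for linearization when the average
 * number of buffers per wire segment exceeds the hardware limit. */
#include <stdbool.h>
#include <stdio.h>

#define I40E_MAX_BUFFER_TXD 8                       /* value added by this patch */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))  /* as in the kernel macro */

static bool avg_frags_exceed_limit(unsigned int num_frags,
                                   unsigned int gso_segs)
{
        /* num_frags + gso_segs approximates "fragments plus one header
         * buffer per segment"; dividing by gso_segs gives the average
         * number of buffers each segment would consume. */
        return DIV_ROUND_UP(num_frags + gso_segs, gso_segs) >
               I40E_MAX_BUFFER_TXD;
}

int main(void)
{
        /* 16 frags across 4 segments averages 5 buffers/segment: the
         * cheap test passes, so the per-segment walk must still run. */
        printf("%d\n", avg_frags_exceed_limit(16, 4)); /* prints 0 */

        /* 40 frags across 4 segments averages 11 buffers/segment:
         * linearize without walking the fragments at all. */
        printf("%d\n", avg_frags_exceed_limit(40, 4)); /* prints 1 */
        return 0;
}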
Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c  65
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h   1
2 files changed, 66 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..1320a433b8ed 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1380,6 +1380,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                               const u8 hdr_len)
+{
+        struct skb_frag_struct *frag;
+        bool linearize = false;
+        unsigned int size = 0;
+        u16 num_frags;
+        u16 gso_segs;
+
+        num_frags = skb_shinfo(skb)->nr_frags;
+        gso_segs = skb_shinfo(skb)->gso_segs;
+
+        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+                u16 j = 1;
+
+                if (num_frags < (I40E_MAX_BUFFER_TXD))
+                        goto linearize_chk_done;
+                /* try the simple math, if we have too many frags per segment */
+                if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+                    I40E_MAX_BUFFER_TXD) {
+                        linearize = true;
+                        goto linearize_chk_done;
+                }
+                frag = &skb_shinfo(skb)->frags[0];
+                size = hdr_len;
+                /* we might still have more fragments per segment */
+                do {
+                        size += skb_frag_size(frag);
+                        frag++; j++;
+                        if (j == I40E_MAX_BUFFER_TXD) {
+                                if (size < skb_shinfo(skb)->gso_size) {
+                                        linearize = true;
+                                        break;
+                                }
+                                j = 1;
+                                size -= skb_shinfo(skb)->gso_size;
+                                if (size)
+                                        j++;
+                                size += hdr_len;
+                        }
+                        num_frags--;
+                } while (num_frags);
+        } else {
+                if (num_frags >= I40E_MAX_BUFFER_TXD)
+                        linearize = true;
+        }
+
+linearize_chk_done:
+        return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1654,6 +1715,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;

+        if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+                if (skb_linearize(skb))
+                        goto out_drop;
+
        skb_tx_timestamp(skb);

        /* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc

+#define I40E_MAX_BUFFER_TXD    8
#define I40E_MIN_TX_LEN        17
#define I40E_MAX_DATA_PER_TXD  8192
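To complement the diff, the following is a minimal stand-alone model of the per-segment walk performed by i40e_chk_linearize, assuming fragment sizes as plain integers rather than skb_frag_t entries; segment_walk_needs_linearize and the example sizes are invented for illustration.

/* Hypothetical stand-alone model of the do/while walk in
 * i40e_chk_linearize(): replay fragment sizes against gso_size and
 * check whether any wire segment would need more than 8 buffers. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define I40E_MAX_BUFFER_TXD 8

static bool segment_walk_needs_linearize(const unsigned int *frag_size,
                                         size_t num_frags,
                                         unsigned int hdr_len,
                                         unsigned int gso_size)
{
        unsigned int size = hdr_len; /* the header starts each segment */
        unsigned int j = 1;          /* buffers consumed so far */

        for (size_t i = 0; i < num_frags; i++) {
                size += frag_size[i];
                j++;
                if (j == I40E_MAX_BUFFER_TXD) {
                        /* 8 buffers used before one full segment of
                         * payload was gathered: must linearize */
                        if (size < gso_size)
                                return true;
                        /* otherwise carry the leftover bytes into the
                         * next segment and restart the buffer count */
                        j = 1;
                        size -= gso_size;
                        if (size)
                                j++;
                        size += hdr_len;
                }
        }
        return false;
}

int main(void)
{
        /* 20 fragments of 64 bytes against a 1448-byte gso_size: eight
         * buffers gather only 66 + 7 * 64 = 514 bytes, less than one
         * full segment, so the model reports linearization. */
        unsigned int frag_size[20];
        for (size_t i = 0; i < 20; i++)
                frag_size[i] = 64;
        printf("%d\n", segment_walk_needs_linearize(frag_size, 20, 66, 1448)); /* prints 1 */
        return 0;
}

In the patch, this walk runs only for TSO/FSO skbs with at least I40E_MAX_BUFFER_TXD fragments that the cheap average test did not already force to linearize, keeping the hot path inexpensive for typical skbs.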