Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/dev.c                              |  10
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c               |  23
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c       |   2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c         |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c         |  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h       |   4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c           |  22
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c   |   5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c              |   4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c                    |  87
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c                    |  18
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h               |  56
-rw-r--r--  drivers/net/ethernet/sfc/nic.c                     |   9
-rw-r--r--  drivers/net/ethernet/sfc/nic.h                     |  12
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                     |  10
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c                  |  31
-rw-r--r--  drivers/net/tun.c                                  |   8
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c              |   9
-rw-r--r--  drivers/net/wireless/mwifiex/main.c                |   6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c            |   9
-rw-r--r--  drivers/net/xen-netback/xenbus.c                   |   4
21 files changed, 261 insertions(+), 116 deletions(-)
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
size_t size;
size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
- size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
- size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
- size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
+ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
- size += sizeof(struct can_berr_counter);
+ size += nla_total_size(sizeof(struct can_berr_counter));
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
- size += sizeof(struct can_bittiming_const);
+ size += nla_total_size(sizeof(struct can_bittiming_const));
return size;
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 78d6d6b970e1..48f52882a22b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -106,7 +106,6 @@
#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
#define XGMAC_ADDR_AE 0x80000000
-#define XGMAC_MAX_FILTER_ADDR 31
/* PMT Control and Status */
#define XGMAC_PMT_POINTER_RESET 0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
struct device *device;
struct napi_struct napi;
+ int max_macs;
struct xgmac_extra_stats xstats;
spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
netdev_mc_count(dev), netdev_uc_count(dev));
- if (dev->flags & IFF_PROMISC) {
- writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
- return;
- }
+ if (dev->flags & IFF_PROMISC)
+ value |= XGMAC_FRAME_FILTER_PR;
memset(hash_filter, 0, sizeof(hash_filter));
- if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ if (netdev_uc_count(dev) > priv->max_macs) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
}
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
goto out;
}
- if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
} else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
}
out:
- for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
- xgmac_set_mac_addr(ioaddr, NULL, reg);
+ for (i = reg; i <= priv->max_macs; i++)
+ xgmac_set_mac_addr(ioaddr, NULL, i);
for (i = 0; i < XGMAC_NUM_HASH; i++)
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
uid = readl(priv->base + XGMAC_VERSION);
netdev_info(ndev, "h/w version is 0x%x\n", uid);
+ /* Figure out how many valid mac address filter registers we have */
+ writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+ if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+ priv->max_macs = 31;
+ else
+ priv->max_macs = 7;
+
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq == -ENXIO) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index ebdac0273501..0ae3177416c7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2657,6 +2657,8 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
+ memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 99f16cbf2fd8..4cfae6c9a63f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
-
- mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
static void mib_counters_timer_wrapper(unsigned long _mp)
{
struct mv643xx_eth_private *mp = (void *)_mp;
-
mib_counters_update(mp);
+ mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
mp->int_mask |= INT_TX_END_0 << i;
}
+ add_timer(&mp->mib_counters_timer);
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
if (!ppdev)
return -ENOMEM;
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ppdev->dev.of_node = pnp;
ret = platform_device_add_resources(ppdev, &res, 1);
if (ret)
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->mib_counters_timer.data = (unsigned long)mp;
mp->mib_counters_timer.function = mib_counters_timer_wrapper;
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
- add_timer(&mp->mib_counters_timer);
spin_lock_init(&mp->mib_counters_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
put_page(page);
return -ENOMEM;
}
- page_alloc->size = PAGE_SIZE << order;
+ page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->offset = frag_info->frag_align;
+ page_alloc->page_offset = frag_info->frag_align;
/* Not doing get_page() for each frag is a big win
* on asymetric workloads.
*/
- atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+ atomic_set(&page->_count,
+ page_alloc->page_size / frag_info->frag_stride);
return 0;
}
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++) {
frag_info = &priv->frag_info[i];
page_alloc[i] = ring_alloc[i];
- page_alloc[i].offset += frag_info->frag_stride;
- if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+ page_alloc[i].page_offset += frag_info->frag_stride;
+
+ if (page_alloc[i].page_offset + frag_info->frag_stride <=
+ ring_alloc[i].page_size)
continue;
+
if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
goto out;
}
for (i = 0; i < priv->num_frags; i++) {
frags[i] = ring_alloc[i];
- dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
ring_alloc[i] = page_alloc[i];
rx_desc->data[i].addr = cpu_to_be64(dma);
}
@@ -117,7 +121,7 @@ out:
frag_info = &priv->frag_info[i];
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].size, PCI_DMA_FROMDEVICE);
+ page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
int i)
{
const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
- if (frags[i].offset + frag_info->frag_stride > frags[i].size)
- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
- PCI_DMA_FROMDEVICE);
+ if (next_frag_end > frags[i].page_size)
+ dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+ PCI_DMA_FROMDEVICE);
if (frags[i].page)
put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
page_alloc = &ring->page_alloc[i];
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
atomic_set(&page->_count, 1);
put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->size, PCI_DMA_FROMDEVICE);
- while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+ page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ while (page_alloc->page_offset + frag_info->frag_stride <
+ page_alloc->page_size) {
put_page(page_alloc->page);
- page_alloc->offset += frag_info->frag_stride;
+ page_alloc->page_offset += frag_info->frag_stride;
}
page_alloc->page = NULL;
}
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
/* Save page reference in skb */
__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
- skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb_frags_rx[nr].page_offset = frags[nr].page_offset;
skb->truesize += frag_info->frag_stride;
frags[nr].page = NULL;
}
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(frags[0].page) + frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].page_offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
+ frags[0].page_offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
dma_addr_t dma;
- u32 offset;
- u32 size;
+ u32 page_offset;
+ u32 page_size;
};
struct mlx4_en_tx_ring {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index bd1a2d2bc2ae..ea54d95e5b9f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -448,7 +448,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
netdev_err(ndev, "irq_of_parse_and_map failed\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto irq_map_fail;
}
priv = netdev_priv(ndev);
@@ -472,24 +473,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->tx_desc_base == NULL)
+ if (priv->tx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
GFP_DMA | GFP_KERNEL);
- if (priv->rx_desc_base == NULL)
+ if (priv->rx_desc_base == NULL) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->tx_buf_base)
+ if (!priv->tx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
GFP_ATOMIC);
- if (!priv->rx_buf_base)
+ if (!priv->rx_buf_base) {
+ ret = -ENOMEM;
goto init_fail;
+ }
platform_set_drvdata(pdev, ndev);
@@ -522,7 +531,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
init_fail:
netdev_err(ndev, "init failed\n");
moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+ free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 21d00a0449a1..f07f2b0fefa0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2257,7 +2257,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_wq;
adapter->dev_rst_time = jiffies;
adapter->ahw->revision_id = pdev->revision;
@@ -2396,6 +2396,9 @@ err_out_disable_msi:
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
err_out_free_netdev:
free_netdev(netdev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8df52bac162..ee8df999494c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -620,12 +620,16 @@ static struct sh_eth_cpu_data sh7734_data = {
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
+ .fdr_value = 0x0000070f,
+ .rmcr_value = 0x00000001,
.apr = 1,
.mpr = 1,
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 844ee7539d33..676c3c057bfb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -754,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
};
#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
@@ -808,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
(1ULL << EF10_STAT_rx_length_error))
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
- static const unsigned long hunt_40g_stat_mask[] = {
- STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
- HUNT_40G_EXTRA_STAT_MASK)
- };
- static const unsigned long hunt_10g_only_stat_mask[] = {
- STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
- HUNT_10G_ONLY_STAT_MASK)
- };
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK ( \
+ (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+ u64 raw_mask = HUNT_COMMON_STAT_MASK;
u32 port_caps = efx_mcdi_phy_get_caps(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- return hunt_40g_stat_mask;
+ raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
- return hunt_10g_only_stat_mask;
+ raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+ raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+ return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+ u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+ mask[0] = raw_mask;
+#else
+ mask[0] = raw_mask & 0xffffffff;
+ mask[1] = raw_mask >> 32;
+#endif
}
static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+ efx_ef10_get_stat_mask(efx, mask);
return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
- efx_ef10_stat_mask(efx), names);
+ mask, names);
}
static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
__le64 generation_start, generation_end;
u64 *stats = nic_data->stats;
__le64 *dma_stats;
+ efx_ef10_get_stat_mask(efx, mask);
+
dma_stats = efx->stats_buffer.addr;
nic_data = efx->nic_data;
@@ -853,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
- efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
stats, efx->stats_buffer.addr, false);
+ rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
@@ -873,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats)
{
- const unsigned long *mask = efx_ef10_stat_mask(efx);
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
u64 *stats = nic_data->stats;
size_t stats_count = 0, index;
int retry;
+ efx_ef10_get_stat_mask(efx, mask);
+
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index c082562dbf4e..366c8e3e3784 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
size_t outlen;
int rc;
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
goto fail;
}
+ /* We currently assume we have control of the external link
+ * and are completely trusted by firmware. Abort probing
+ * if that's not true for this function.
+ */
+ if (driver_operating &&
+ outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+ (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+ netif_err(efx, probe, efx->net_dev,
+ "This driver version only supports one function per port\n");
+ return -ENODEV;
+ }
+
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
return 0;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index b5cf62492f8e..e0a63ddb7a6c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2574,8 +2574,58 @@
#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
-#define MC_CMD_GMAC_DMABUF_START 0x40 /* enum */
-#define MC_CMD_GMAC_DMABUF_END 0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
#define MC_CMD_MAC_NSTATS 0x61 /* enum */
@@ -5065,6 +5115,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index c75009b8c0d9..9c90bf56090f 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -480,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
- * of this array must be at least the number of set bits in the
- * first @count bits of @mask.
+ * of this array must be at least @count.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
@@ -514,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
}
if (accumulate)
- *stats += val;
+ stats[index] += val;
else
- *stats = val;
+ stats[index] = val;
}
-
- ++stats;
}
}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 08883c8edf0e..11b6112d9249 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -402,6 +402,18 @@ enum {
EF10_STAT_rx_align_error,
EF10_STAT_rx_length_error,
EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_rx_pm_trunc_bb_overflow,
+ EF10_STAT_rx_pm_discard_bb_overflow,
+ EF10_STAT_rx_pm_trunc_vfifo_full,
+ EF10_STAT_rx_pm_discard_vfifo_full,
+ EF10_STAT_rx_pm_trunc_qbb,
+ EF10_STAT_rx_pm_discard_qbb,
+ EF10_STAT_rx_pm_discard_mapping,
+ EF10_STAT_rx_dp_q_disabled_packets,
+ EF10_STAT_rx_dp_di_dropped_packets,
+ EF10_STAT_rx_dp_streaming_packets,
+ EF10_STAT_rx_dp_emerg_fetch,
+ EF10_STAT_rx_dp_emerg_wait,
EF10_STAT_COUNT
};
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7290f11a937d..1fd8125e58a8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1766,8 +1766,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
- if (!of_property_read_u32(node, "dual_emac", &prop))
- data->dual_emac = prop;
+ if (of_property_read_bool(node, "dual_emac"))
+ data->dual_emac = 1;
/*
* Populate all the child nodes here...
@@ -1777,7 +1777,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
pr_warn("Doesn't have any child node\n");
- for_each_node_by_name(slave_node, "slave") {
+ for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
u32 phyid;
@@ -1786,6 +1786,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct device_node *mdio_node;
struct platform_device *mdio;
+ /* This is no slave child node, continue */
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
pr_err("Missing slave[%d] phy_id property\n", i);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 42e6deee6db5..0632d34905c7 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -82,7 +82,6 @@ struct mrf24j40 {
struct mutex buffer_mutex; /* only used to protect buf */
struct completion tx_complete;
- struct work_struct irqwork;
u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
};
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
if (ret)
goto err;
+ INIT_COMPLETION(devrec->tx_complete);
+
/* Set TXNTRIG bit of TXNCON to send packet */
ret = read_short_reg(devrec, REG_TXNCON, &val);
if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
val |= 0x4;
write_short_reg(devrec, REG_TXNCON, val);
- INIT_COMPLETION(devrec->tx_complete);
-
/* Wait for the device to send the TX complete interrupt. */
ret = wait_for_completion_interruptible_timeout(
&devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
static irqreturn_t mrf24j40_isr(int irq, void *data)
{
struct mrf24j40 *devrec = data;
-
- disable_irq_nosync(irq);
-
- schedule_work(&devrec->irqwork);
-
- return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
- struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
u8 intstat;
int ret;
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
mrf24j40_handle_rx(devrec);
out:
- enable_irq(devrec->spi->irq);
+ return IRQ_HANDLED;
}
static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
mutex_init(&devrec->buffer_mutex);
init_completion(&devrec->tx_complete);
- INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
devrec->spi = spi;
spi_set_drvdata(spi, devrec);
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
val &= ~0x3; /* Clear RX mode (normal) */
write_short_reg(devrec, REG_RXMCR, val);
- ret = request_irq(spi->irq,
- mrf24j40_isr,
- IRQF_TRIGGER_FALLING,
- dev_name(&spi->dev),
- devrec);
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ mrf24j40_isr,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ dev_name(&spi->dev),
+ devrec);
if (ret) {
dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
dev_dbg(printdev(devrec), "remove\n");
free_irq(spi->irq, devrec);
- flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
ieee802154_unregister_device(devrec->dev);
ieee802154_free_device(devrec->dev);
/* TODO: Will ieee802154_free_device() wait until ->xmit() is
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 807815fc9968..7cb105c103fe 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
if (unlikely(!noblock))
add_wait_queue(&tfile->wq.wait, &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ if (unlikely(!noblock))
+ current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- current->state = TASK_RUNNING;
- if (unlikely(!noblock))
+ if (unlikely(!noblock)) {
+ current->state = TASK_RUNNING;
remove_wait_queue(&tfile->wq.wait, &wait);
+ }
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 13f5434e7f6c..1ce8af276878 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1969,15 +1969,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb)
{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_frame_info *fi = get_frame_info(skb);
struct list_head bf_head;
- struct ath_buf *bf;
-
- bf = fi->bf;
+ struct ath_buf *bf = fi->bf;
INIT_LIST_HEAD(&bf_head);
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
+ if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ bf->bf_state.bf_type = BUF_AMPDU;
+ ath_tx_addto_baw(sc, tid, bf);
+ }
bf->bf_next = NULL;
bf->bf_lastbf = bf;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index fd778337deee..c2b91f566e05 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -358,10 +358,12 @@ process_start:
}
} while (true);
- if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
+ spin_lock_irqsave(&adapter->main_proc_lock, flags);
+ if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto process_start;
+ }
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 76d95deb274b..dc49e525ae5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
goto exit_release_regions;
}
- pci_enable_msi(pci_dev);
-
hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
if (!hw) {
rt2x00_probe_err("Failed to allocate hardware\n");
retval = -ENOMEM;
- goto exit_disable_msi;
+ goto exit_release_regions;
}
pci_set_drvdata(pci_dev, hw);
@@ -152,9 +150,6 @@ exit_free_reg:
exit_free_device:
ieee80211_free_hw(hw);
-exit_disable_msi:
- pci_disable_msi(pci_dev);
-
exit_release_regions:
pci_release_regions(pci_dev);
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
rt2x00pci_free_reg(rt2x00dev);
ieee80211_free_hw(hw);
- pci_disable_msi(pci_dev);
-
/*
* Free the PCI device data.
*/
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index b45bce20ad76..1b08d8798372 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
static void connect(struct backend_info *);
static void backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+ enum xenbus_state state);
static int netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
+ set_backend_state(be, XenbusStateClosed);
+
unregister_hotplug_status_watch(be);
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);