author     Paolo Abeni <pabeni@redhat.com>  2025-10-01 10:10:50 +0200
committer  Paolo Abeni <pabeni@redhat.com>  2025-10-01 10:14:49 +0200
commit     f1455695d2d99894b65db233877acac9a0e120b9 (patch)
tree       d9c2853acb232bc6187073381ec745c36f961b46 /drivers/net
parent     936f160a95cddd361d9c70798464e87a9cc57a37 (diff)
parent     daa26ea63c6f848159821cd9b3cbe47cddbb0a1c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.17-rc8).

Conflicts:

tools/testing/selftests/drivers/net/bonding/Makefile
  87951b566446 selftests: bonding: add test for passive LACP mode
  c2377f1763e9 selftests: bonding: add test for LACP actor port priority

Adjacent changes:

drivers/net/ethernet/cadence/macb.h
  fca3dc859b20 net: macb: remove illusion about TBQPH/RBQPH being per-queue
  89934dbf169e net: macb: Add TAPRIO traffic scheduling support

drivers/net/ethernet/cadence/macb_main.c
  fca3dc859b20 net: macb: remove illusion about TBQPH/RBQPH being per-queue
  89934dbf169e net: macb: Add TAPRIO traffic scheduling support

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c                          2
-rw-r--r--  drivers/net/bonding/bond_netlink.c                      16
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c            5
-rw-r--r--  drivers/net/ethernet/cadence/macb.h                      4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c               138
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c                        7
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc4_pf.c         2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/ntmp.c             15
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c          6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c     1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c     1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c            6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c      24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c      7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c     2
-rw-r--r--  drivers/net/usb/rtl8150.c                                2
16 files changed, 133 insertions(+), 105 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1ea41f1a9190..4da619210c1f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4313,7 +4313,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
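bond_work_cancel_all() loses its static qualifier because bond_netlink.c starts calling it (see the newlink rework below); the matching declaration must live in the bonding header, outside this drivers/net diffstat. Presumably it is just:

        /* assumed companion declaration in include/net/bonding.h,
         * not visible in a diff limited to drivers/net */
        void bond_work_cancel_all(struct bonding *bond);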
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index ba71d95a82d2..286f11c517f7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -593,20 +593,22 @@ static int bond_newlink(struct net_device *bond_dev,
struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct nlattr **data = params->data;
struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
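Read as a whole, the reworked bond_newlink() now registers the device first and applies the netlink options afterwards, unwinding both steps if bond_changelink() fails. A sketch of the resulting function, reassembled from the hunks above (not verbatim kernel source):

        static int bond_newlink(struct net_device *bond_dev,
                                struct rtnl_newlink_params *params,
                                struct netlink_ext_ack *extack)
        {
                struct bonding *bond = netdev_priv(bond_dev);
                struct nlattr **data = params->data;
                struct nlattr **tb = params->tb;
                int err;

                /* Register first ... */
                err = register_netdevice(bond_dev);
                if (err)
                        return err;

                netif_carrier_off(bond_dev);
                bond_work_init_all(bond);

                /* ... then configure, unwinding on failure. */
                err = bond_changelink(bond_dev, tb, data, extack);
                if (err) {
                        bond_work_cancel_all(bond);
                        unregister_netdevice(bond_dev);
                }

                return err;
        }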
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a81d3a7a3bb9..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -865,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 904954610611..0830c48973aa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -220,10 +220,8 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
@@ -1246,10 +1244,8 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
/* ENST register offsets for this queue */
unsigned int ENST_START_TIME;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e7ee8ade7aeb..ca2386b83473 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -52,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -279,9 +275,9 @@ static void macb_set_hwaddr(struct macb *bp)
u32 bottom;
u16 top;
- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ bottom = get_unaligned_le32(bp->dev->dev_addr);
macb_or_gem_writel(bp, SA1B, bottom);
- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ top = get_unaligned_le16(bp->dev->dev_addr + 4);
macb_or_gem_writel(bp, SA1T, top);
if (gem_has_ptp(bp)) {
@@ -496,19 +492,19 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
+ /* Single register for all queues' high 32 bits. */
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
#endif
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -1167,10 +1163,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -2475,35 +2467,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
if (bp->rx_ring_tieoff) {
- dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
bp->rx_ring_tieoff = NULL;
}
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2545,35 +2544,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * Upper 32 bits of the Tx/Rx DMA descriptors for each queue must match!
+ * We cannot enforce this guarantee; the best we can do is a single
+ * allocation and hope it lands in alloc_pages(), which guarantees
+ * natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
@@ -4528,12 +4537,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -4542,12 +4545,6 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
@@ -5701,6 +5698,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
*/
tmp = macb_readl(bp, NCR);
macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
/* Disable RX queues */
@@ -5710,10 +5712,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
/* Tie off RX queues */
queue_writel(queue, RBQP,
lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, RBQPH,
- upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
}
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
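What macb_alloc_consistent() is defending against with the upper_32_bits() comparison: all queues now share a single TBQPH/RBQPH register, so one contiguous ring block whose physical span crosses a 4 GiB boundary would need two different upper-half values. A standalone illustration of the rejected case (hypothetical addresses, not driver code):

        #include <stdint.h>
        #include <stdio.h>

        static inline uint32_t upper_32_bits(uint64_t v) { return v >> 32; }

        int main(void)
        {
                /* Hypothetical ring block: 3 queues * 8 KiB, starting 16 KiB
                 * below the 4 GiB line. */
                uint64_t dma  = 0xFFFFC000ULL;
                uint64_t size = 3 * 8192;

                if (upper_32_bits(dma) != upper_32_bits(dma + size - 1))
                        puts("crosses 4 GiB: one TBQPH/RBQPH cannot cover all queues");
                return 0;
        }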
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 6bbf6e5584e5..1996d2e4e3e2 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -964,15 +964,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
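The restructured receive path in dl2k closes an allocation-failure hole: skb used to be assigned only inside the two branches, so a failed netdev_alloc_skb_ip_align() on the short-packet path apparently left it unset for the code that follows. Reassembled from the hunk above, the new logic reads (sketch; elided context summarized in comments):

        skb = NULL;
        /* Small skbuffs for short packets */
        if (pkt_len <= copy_thresh)
                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
        if (!skb) {
                /* Large packet, or the small-copy allocation failed:
                 * hand the original DMA buffer up the stack instead. */
                dma_unmap_single(&np->pdev->dev, desc_to_dma(desc),
                                 np->rx_buf_sz, DMA_FROM_DEVICE);
                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                np->rx_skbuff[entry] = NULL;
        } else {
                /* Short frame: sync the buffer and copy into the fresh skb
                 * (copy itself elided here, as in the hunk above). */
                dma_sync_single_for_cpu(&np->pdev->dev, desc_to_dma(desc),
                                        np->rx_buf_sz, DMA_FROM_DEVICE);
        }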
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index 2e07b9b746e1..82c443b28b15 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -1032,7 +1032,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
- "Could not get VF driver data\n");
+ "Could not get PF driver data\n");
err = enetc4_pf_struct_init(si);
if (err)
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
index ba32c1bbd9e1..0c1d343253bf 100644
--- a/drivers/net/ethernet/freescale/enetc/ntmp.c
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -52,24 +52,19 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
NTMP_BASE_ADDR_ALIGN);
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
spin_lock_init(&cbdr->ring_lock);
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
/* Step 1: Configure the base address of the Control BD Ring */
netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
- /* Step 2: Configure the producer index register */
- netc_write(cbdr->regs.pir, cbdr->next_to_clean);
-
- /* Step 3: Configure the consumer index register */
- netc_write(cbdr->regs.cir, cbdr->next_to_use);
-
- /* Step4: Configure the number of BDs of the Control BD Ring */
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
netc_write(cbdr->regs.lenr, cbdr->bd_num);
- /* Step 5: Enable the Control BD Ring */
+ /* Step 3: Enable the Control BD Ring */
netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 4093d0e31df7..cbb5fa30f5a0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -702,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
/* If post failed clear the only buffer we supplied */
if (post_err) {
if (dma_mem)
- dmam_free_coherent(&adapter->pdev->dev,
- dma_mem->size, dma_mem->va,
- dma_mem->pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
break;
}
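The idpf fix matches the free to the original allocation: dmam_free_coherent() looks up a devres entry that only dmam_alloc_coherent() registers, so calling it on memory that (presumably, given this fix) came from plain dma_alloc_coherent() is a managed/unmanaged mismatch. The pairing rule in miniature (sketch; dev, size, va, pa are placeholders):

        /* unmanaged: the caller owns the lifetime */
        va = dma_alloc_coherent(dev, size, &pa, GFP_KERNEL);
        dma_free_coherent(dev, size, va, pa);

        /* managed: devres frees at driver detach, or explicitly via */
        va = dmam_alloc_coherent(dev, size, &pa, GFP_KERNEL);
        dmam_free_coherent(dev, size, va, pa);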
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 5027fae0aa77..e808995703cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -3542,6 +3542,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
+ bitmap_free(pf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 7ebb6e656884..25381f079b97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -854,6 +854,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
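Both otx2 remove paths gain the same one-liner: af_xdp_zc_qidx is presumably a queue-index bitmap allocated at probe time and leaked on teardown until now. The assumed pairing (the probe-side line is hypothetical, not part of this diff):

        /* at probe (assumed): */
        pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
        /* at remove (this fix): */
        bitmap_free(pf->af_xdp_zc_qidx);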
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e395ef5f356e..722282cebce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
return;
}
cond_resched();
+ if (mlx5_cmd_is_down(dev)) {
+ ent->ret = -ENXIO;
+ return;
+ }
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -1070,7 +1074,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
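Two related robustness tweaks in mlx5: the poll loop now notices a dead device between iterations and fails fast with -ENXIO instead of spinning until poll_end, and the completion handler treats any nonzero ent->ret as a forced completion rather than special-casing -ETIMEDOUT. The loop's shape after the change (condensed sketch; own_ready() stands in for the ownership test in the elided context):

        do {
                if (own_ready(ent))             /* HW handed the descriptor back */
                        return;
                cond_resched();
                if (mlx5_cmd_is_down(dev)) {    /* fail fast on a dead device */
                        ent->ret = -ENXIO;
                        return;
                }
        } while (time_before(jiffies, poll_end));
        ent->ret = -ETIMEDOUT;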
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 22995131824a..89e399606877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -27,6 +27,7 @@ struct mlx5_fw_reset {
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
+ struct delayed_work reset_timeout_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
@@ -259,6 +260,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
return -EALREADY;
}
+ if (current_work() != &fw_reset->reset_timeout_work.work)
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
mlx5_stop_sync_reset_poll(dev);
if (poll_health)
mlx5_start_health_poll(dev);
@@ -330,6 +333,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
}
mlx5_stop_health_poll(dev, true);
mlx5_start_sync_reset_poll(dev);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ &fw_reset->reset_flags))
+ schedule_delayed_work(&fw_reset->reset_timeout_work,
+ msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
return 0;
}
@@ -739,6 +747,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
}
}
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_fw_reset *fw_reset =
+ container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
+ return;
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -822,6 +843,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -865,6 +887,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+ INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+ mlx5_sync_reset_timeout_work);
init_completion(&fw_reset->done);
return 0;
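The new timeout handler digs its mlx5_fw_reset out of the work pointer with two container_of() steps; the first of those is exactly what the kernel's to_delayed_work() helper wraps, so an equivalent spelling of the same handler (sketch, behavior unchanged) is:

        static void mlx5_sync_reset_timeout_work(struct work_struct *work)
        {
                struct mlx5_fw_reset *fw_reset =
                        container_of(to_delayed_work(work),
                                     struct mlx5_fw_reset, reset_timeout_work);
                struct mlx5_core_dev *dev = fw_reset->dev;

                if (mlx5_sync_reset_clear_reset_requested(dev, true))
                        return;
                mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
        }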
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9bc9bd83c232..cd68c4b2c0bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 func_id;
u32 npages;
u32 i = 0;
+ int err;
- if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_do(dev, in, in_size, out, out_size);
+ err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+ /* If FW is gone (-ENXIO), proceed to forceful reclaim */
+ if (err != -ENXIO)
+ return err;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a36215195923..16c828dd5c1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1788,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
- return -EOPNOTSUPP;
+ return 0;
return nfp_net_rss_key_sz(nn);
}
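Like the ena change above, this makes .get_rxfh_key_size() report 0 when no RSS key is available; for nfp the old code was doubly wrong because the hook returns u32, so -EOPNOTSUPP never reached the caller as an errno at all. A standalone demonstration of the wraparound (hypothetical values):

        #include <stdio.h>

        #define EOPNOTSUPP 95   /* Linux errno value */

        int main(void)
        {
                unsigned int key_size = -EOPNOTSUPP; /* what the old hook returned */
                printf("%u\n", key_size);            /* 4294967201, a bogus size */
                return 0;
        }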
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index ddff6f19ff98..92add3daadbb 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rtl8150_t *dev = netdev_priv(netdev);
u16 rx_creg = 0x9e;
- netif_stop_queue(netdev);
if (netdev->flags & IFF_PROMISC) {
rx_creg |= 0x0001;
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rx_creg &= 0x00fc;
}
async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
- netif_wake_queue(netdev);
}
static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,