summaryrefslogtreecommitdiff
path: root/drivers/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-23 16:50:42 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-23 16:50:42 -0700
commite728258debd553c95d2e70f9cd97c9fde27c7130 (patch)
tree18ef97c80f9923717f5cf6bdab44d77607ca0f4b /drivers/net
parente8df5a0c0d041588e7f02781822d637d226cdbe8 (diff)
parent5e6391da4539c35422c0df1d1d2d9a9bb97cd736 (diff)
Merge tag 'net-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski: "Including fixes from Netfilter. Steady stream of fixes. Last two weeks feel comparable to the two weeks before the merge window. Lots of AI-aided bug discovery. A newer big source is Sashiko/Gemini (Roman Gushchin's system), which points out issues in existing code during patch review (maybe 25% of fixes here likely originating from Sashiko). Nice thing is these are often fixed by the respective maintainers, not drive-bys. Current release - new code bugs: - kconfig: MDIO_PIC64HPSC should depend on ARCH_MICROCHIP Previous releases - regressions: - add async ndo_set_rx_mode and switch drivers which we promised to be called under the per-netdev mutex to it - dsa: remove duplicate netdev_lock_ops() for conduit ethtool ops - hv_sock: report EOF instead of -EIO for FIN - vsock/virtio: fix MSG_PEEK calculation on bytes to copy Previous releases - always broken: - ipv6: fix possible UAF in icmpv6_rcv() - icmp: validate reply type before using icmp_pointers - af_unix: drop all SCM attributes for SOCKMAP - netfilter: fix a number of bugs in the osf (OS fingerprinting) - eth: intel: fix timestamp interrupt configuration for E825C Misc: - bunch of data-race annotations" * tag 'net-7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (148 commits) rxrpc: Fix error handling in rxgk_extract_token() rxrpc: Fix re-decryption of RESPONSE packets rxrpc: Fix rxrpc_input_call_event() to only unshare DATA packets rxrpc: Fix missing validation of ticket length in non-XDR key preparsing rxgk: Fix potential integer overflow in length check rxrpc: Fix conn-level packet handling to unshare RESPONSE packets rxrpc: Fix potential UAF after skb_unshare() failure rxrpc: Fix rxkad crypto unalignment handling rxrpc: Fix memory leaks in rxkad_verify_response() net: rds: fix MR cleanup on copy error m68k: mvme147: Make me the maintainer net: txgbe: fix firmware version check selftests/bpf: check epoll readiness during reuseport migration 
tcp: call sk_data_ready() after listener migration vhost_net: fix sleeping with preempt-disabled in vhost_net_busy_poll() ipv6: Cap TLV scan in ip6_tnl_parse_tlv_enc_lim tipc: fix double-free in tipc_buf_append() llc: Return -EINPROGRESS from llc_ui_connect() ipv4: icmp: validate reply type before using icmp_pointers selftests/net: packetdrill: cover RFC 5961 5.2 challenge ACK on both edges ...
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c2
-rw-r--r--drivers/net/dummy.c6
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c110
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h4
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c58
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c217
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp_private.h10
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c16
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c121
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c259
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c20
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c69
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c15
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c11
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c3
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/macvlan.c1
-rw-r--r--drivers/net/mdio/Kconfig1
-rw-r--r--drivers/net/netconsole.c2
-rw-r--r--drivers/net/netdevsim/netdev.c8
-rw-r--r--drivers/net/netkit.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/pppoe.c8
-rw-r--r--drivers/net/pse-pd/pse_core.c13
-rw-r--r--drivers/net/slip/slhc.c49
-rw-r--r--drivers/net/virtio_net.c6
54 files changed, 871 insertions, 479 deletions
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 31fa94dac627..c35cef01ec26 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -216,7 +216,7 @@
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
- (0xF << (((_extint) % 2)))
+ (0xF << (((_extint) % 2) * 4))
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
(((_extint) % 2) * 4)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d6bdad4baadd..f8a4eb365c3d 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -47,7 +47,9 @@
static int numdummies = 1;
/* fake multicast ability */
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *dev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
}
@@ -87,7 +89,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_rx_mode_async = set_multicast_list,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
.ndo_change_carrier = dummy_change_carrier,
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index e1ab15f1ee7d..2bb0a3ff9810 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -745,14 +745,18 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
dma_addr_t dma_addr;
q->buf_size = PAGE_SIZE / 2;
- q->ndesc = ndesc;
q->qdma = qdma;
- q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ q->entry = devm_kzalloc(eth->dev, ndesc * sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
+ q->desc = dmam_alloc_coherent(eth->dev, ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(q->page_pool)) {
int err = PTR_ERR(q->page_pool);
@@ -761,11 +765,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
return err;
}
- q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
- &dma_addr, GFP_KERNEL);
- if (!q->desc)
- return -ENOMEM;
-
+ q->ndesc = ndesc;
netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
@@ -843,6 +843,21 @@ static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
return 0;
}
+static void airoha_qdma_wake_netdev_txqs(struct airoha_queue *q)
+{
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (port && port->qdma == qdma)
+ netif_tx_wake_all_queues(port->dev);
+ }
+ q->txq_stopped = false;
+}
+
static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
{
struct airoha_tx_irq_queue *irq_q;
@@ -919,12 +934,21 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(skb->dev, queue);
netdev_tx_completed_queue(txq, 1, skb->len);
- if (netif_tx_queue_stopped(txq) &&
- q->ndesc - q->queued >= q->free_thr)
- netif_tx_wake_queue(txq);
-
dev_kfree_skb_any(skb);
}
+
+ if (q->txq_stopped && q->ndesc - q->queued >= q->free_thr) {
+ /* Since multiple net_device TX queues can share the
+ * same hw QDMA TX queue, there is no guarantee we have
+ * inflight packets queued in hw belonging to a
+ * net_device TX queue stopped in the xmit path.
+ * In order to avoid any potential net_device TX queue
+ * stall, we need to wake all the net_device TX queues
+ * feeding the same hw QDMA TX queue.
+ */
+ airoha_qdma_wake_netdev_txqs(q);
+ }
+
unlock:
spin_unlock_bh(&q->lock);
}
@@ -954,27 +978,27 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
dma_addr_t dma_addr;
spin_lock_init(&q->lock);
- q->ndesc = size;
q->qdma = qdma;
q->free_thr = 1 + MAX_SKB_FRAGS;
INIT_LIST_HEAD(&q->tx_list);
- q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ q->entry = devm_kzalloc(eth->dev, size * sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
- q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ q->desc = dmam_alloc_coherent(eth->dev, size * sizeof(*q->desc),
&dma_addr, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
- for (i = 0; i < q->ndesc; i++) {
+ for (i = 0; i < size; i++) {
u32 val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
list_add_tail(&q->entry[i].list, &q->tx_list);
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
}
+ q->ndesc = size;
/* xmit ring drop default setting */
airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
@@ -996,8 +1020,6 @@ static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
struct airoha_eth *eth = qdma->eth;
dma_addr_t dma_addr;
- netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
- airoha_qdma_tx_napi_poll);
irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
&dma_addr, GFP_KERNEL);
if (!irq_q->q)
@@ -1007,6 +1029,9 @@ static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
irq_q->size = size;
irq_q->qdma = qdma;
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+ airoha_qdma_tx_napi_poll);
+
airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
@@ -1039,12 +1064,15 @@ static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
{
- struct airoha_eth *eth = q->qdma->eth;
- int i;
+ struct airoha_qdma *qdma = q->qdma;
+ struct airoha_eth *eth = qdma->eth;
+ int i, qid = q - &qdma->q_tx[0];
+ u16 index = 0;
spin_lock_bh(&q->lock);
for (i = 0; i < q->ndesc; i++) {
struct airoha_queue_entry *e = &q->entry[i];
+ struct airoha_qdma_desc *desc = &q->desc[i];
if (!e->dma_addr)
continue;
@@ -1055,8 +1083,33 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
e->dma_addr = 0;
e->skb = NULL;
list_add_tail(&e->list, &q->tx_list);
+
+ /* Reset DMA descriptor */
+ WRITE_ONCE(desc->ctrl, 0);
+ WRITE_ONCE(desc->addr, 0);
+ WRITE_ONCE(desc->data, 0);
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ WRITE_ONCE(desc->msg2, 0);
+
q->queued--;
}
+
+ if (!list_empty(&q->tx_list)) {
+ struct airoha_queue_entry *e;
+
+ e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
+ list);
+ index = e - q->entry;
+ }
+ /* Set TX_DMA_IDX to TX_CPU_IDX to notify the hw the QDMA TX ring is
+ * empty.
+ */
+ airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+ airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, index));
+
spin_unlock_bh(&q->lock);
}
@@ -1398,8 +1451,12 @@ static void airoha_qdma_cleanup(struct airoha_qdma *qdma)
}
}
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+ if (!qdma->q_tx_irq[i].size)
+ continue;
+
netif_napi_del(&qdma->q_tx_irq[i].napi);
+ }
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -1727,7 +1784,7 @@ static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
{
struct airoha_eth *eth = port->qdma->eth;
u32 val, pse_port, chan;
- int src_port;
+ int i, src_port;
/* Forward the traffic to the proper GDM port */
pse_port = port->id == AIROHA_GDM3_IDX ? FE_PSE_PORT_GDM3
@@ -1769,6 +1826,9 @@ static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
SP_CPORT_MASK(val),
__field_prep(SP_CPORT_MASK(val), FE_PSE_PORT_CDM2));
+ for (i = 0; i < eth->soc->num_ppe; i++)
+ airoha_ppe_set_cpu_port(port, i, AIROHA_GDM2_IDX);
+
if (port->id == AIROHA_GDM4_IDX && airoha_is_7581(eth)) {
u32 mask = FC_ID_OF_SRC_PORT_MASK(port->nbq);
@@ -1807,7 +1867,8 @@ static int airoha_dev_init(struct net_device *dev)
}
for (i = 0; i < eth->soc->num_ppe; i++)
- airoha_ppe_set_cpu_port(port, i);
+ airoha_ppe_set_cpu_port(port, i,
+ airoha_get_fe_port(port));
return 0;
}
@@ -1984,6 +2045,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (q->queued + nr_frags >= q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
+ q->txq_stopped = true;
spin_unlock_bh(&q->lock);
return NETDEV_TX_BUSY;
}
@@ -2039,8 +2101,10 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
TX_RING_CPU_IDX_MASK,
FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
- if (q->ndesc - q->queued < q->free_thr)
+ if (q->ndesc - q->queued < q->free_thr) {
netif_tx_stop_queue(txq);
+ q->txq_stopped = true;
+ }
spin_unlock_bh(&q->lock);
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index 95e557638617..e389d2fe3b86 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -193,6 +193,7 @@ struct airoha_queue {
int ndesc;
int free_thr;
int buf_size;
+ bool txq_stopped;
struct napi_struct napi;
struct page_pool *page_pool;
@@ -653,7 +654,8 @@ int airoha_get_fe_port(struct airoha_gdm_port *port);
bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
struct airoha_gdm_port *port);
-void airoha_ppe_set_cpu_port(struct airoha_gdm_port *port, u8 ppe_id);
+void airoha_ppe_set_cpu_port(struct airoha_gdm_port *port, u8 ppe_id,
+ u8 fport);
bool airoha_ppe_is_enabled(struct airoha_eth *eth, int index);
void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
u16 hash, bool rx_wlan);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 03115c1c1063..5c9dff6bccd1 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -85,10 +85,9 @@ static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
}
-void airoha_ppe_set_cpu_port(struct airoha_gdm_port *port, u8 ppe_id)
+void airoha_ppe_set_cpu_port(struct airoha_gdm_port *port, u8 ppe_id, u8 fport)
{
struct airoha_qdma *qdma = port->qdma;
- u8 fport = airoha_get_fe_port(port);
struct airoha_eth *eth = qdma->eth;
u8 qdma_id = qdma - &eth->qdma[0];
u32 fe_cpu_port;
@@ -182,7 +181,8 @@ static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
if (!port)
continue;
- airoha_ppe_set_cpu_port(port, i);
+ airoha_ppe_set_cpu_port(port, i,
+ airoha_get_fe_port(port));
}
}
}
@@ -1356,6 +1356,29 @@ static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
return npu;
}
+static int airoha_ppe_wait_for_npu_init(struct airoha_eth *eth)
+{
+ int err;
+ u32 val;
+
+ /* PPE_FLOW_CFG default register value is 0. Since we reset FE
+ * during the device probe we can just check the configured value
+ * is not 0 here.
+ */
+ err = read_poll_timeout(airoha_fe_rr, val, val, USEC_PER_MSEC,
+ 100 * USEC_PER_MSEC, false, eth,
+ REG_PPE_PPE_FLOW_CFG(0));
+ if (err)
+ return err;
+
+ if (airoha_ppe_is_enabled(eth, 1))
+ err = read_poll_timeout(airoha_fe_rr, val, val, USEC_PER_MSEC,
+ 100 * USEC_PER_MSEC, false, eth,
+ REG_PPE_PPE_FLOW_CFG(1));
+
+ return err;
+}
+
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
struct airoha_npu *npu = airoha_ppe_npu_get(eth);
@@ -1369,6 +1392,11 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
if (err)
goto error_npu_put;
+ /* Wait for NPU PPE configuration to complete */
+ err = airoha_ppe_wait_for_npu_init(eth);
+ if (err)
+ goto error_npu_put;
+
ppe_num_stats_entries = airoha_ppe_get_total_num_stats_entries(ppe);
if (ppe_num_stats_entries > 0) {
err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
index 1c14c5fe8d61..68b74eb2c3a2 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_core.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -74,6 +74,13 @@ static int bnge_func_qcaps(struct bnge_dev *bd)
return rc;
}
+ return 0;
+}
+
+static int bnge_func_qrcaps_qcfg(struct bnge_dev *bd)
+{
+ int rc;
+
rc = bnge_hwrm_func_resc_qcaps(bd);
if (rc) {
dev_err(bd->dev, "query resc caps failure rc: %d\n", rc);
@@ -133,23 +140,28 @@ static int bnge_fw_register_dev(struct bnge_dev *bd)
bnge_hwrm_fw_set_time(bd);
- rc = bnge_hwrm_func_drv_rgtr(bd);
+ /* Get the resources and configuration from firmware */
+ rc = bnge_func_qcaps(bd);
if (rc) {
- dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ dev_err(bd->dev, "Failed querying caps rc: %d\n", rc);
return rc;
}
rc = bnge_alloc_ctx_mem(bd);
if (rc) {
dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc);
- goto err_func_unrgtr;
+ goto err_free_ctx_mem;
}
- /* Get the resources and configuration from firmware */
- rc = bnge_func_qcaps(bd);
+ rc = bnge_hwrm_func_drv_rgtr(bd);
if (rc) {
- dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc);
- rc = -ENODEV;
+ dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc);
+ goto err_free_ctx_mem;
+ }
+
+ rc = bnge_func_qrcaps_qcfg(bd);
+ if (rc) {
+ dev_err(bd->dev, "Failed querying resources rc: %d\n", rc);
goto err_func_unrgtr;
}
@@ -158,7 +170,9 @@ static int bnge_fw_register_dev(struct bnge_dev *bd)
return 0;
err_func_unrgtr:
- bnge_fw_unregister_dev(bd);
+ bnge_hwrm_func_drv_unrgtr(bd);
+err_free_ctx_mem:
+ bnge_free_ctx_mem(bd);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
index 94f15e08a88c..b066ee887a09 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -324,7 +324,6 @@ int bnge_alloc_ctx_mem(struct bnge_dev *bd)
u32 l2_qps, qp1_qps, max_qps;
u32 ena, entries_sp, entries;
u32 srqs, max_srqs, min;
- u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
u32 fast_qpmd_qps;
@@ -390,21 +389,6 @@ int bnge_alloc_ctx_mem(struct bnge_dev *bd)
if (!bnge_is_roce_en(bd))
goto skip_rdma;
- ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
-
- rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2);
- if (rc)
- return rc;
- ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
-
ctxm = &ctx->ctx_arr[BNGE_CTX_TIM];
rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 58cf02bcb98a..8c55874f44ca 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11132,8 +11132,9 @@ static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
return rc;
}
-static int bnxt_cfg_rx_mode(struct bnxt *);
-static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
+static int bnxt_cfg_rx_mode(struct bnxt *, struct netdev_hw_addr_list *, bool);
+static bool bnxt_mc_list_updated(struct bnxt *, u32 *,
+ const struct netdev_hw_addr_list *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
@@ -11223,11 +11224,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
} else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
- bnxt_mc_list_updated(bp, &mask);
+ bnxt_mc_list_updated(bp, &mask, &bp->dev->mc);
vnic->rx_mask |= mask;
}
- rc = bnxt_cfg_rx_mode(bp);
+ rc = bnxt_cfg_rx_mode(bp, &bp->dev->uc, true);
if (rc)
goto err_out;
@@ -13622,17 +13623,17 @@ void bnxt_get_ring_drv_stats(struct bnxt *bp,
bnxt_get_one_ring_drv_stats(bp, stats, &bp->bnapi[i]->cp_ring);
}
-static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask,
+ const struct netdev_hw_addr_list *mc)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
bool update = false;
int off = 0;
- netdev_for_each_mc_addr(ha, dev) {
+ netdev_hw_addr_list_for_each(ha, mc) {
if (mc_count >= BNXT_MAX_MC_ADDRS) {
*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
@@ -13656,17 +13657,17 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
return update;
}
-static bool bnxt_uc_list_updated(struct bnxt *bp)
+static bool bnxt_uc_list_updated(struct bnxt *bp,
+ const struct netdev_hw_addr_list *uc)
{
- struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
- if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ if (netdev_hw_addr_list_count(uc) != (vnic->uc_filter_count - 1))
return true;
- netdev_for_each_uc_addr(ha, dev) {
+ netdev_hw_addr_list_for_each(ha, uc) {
if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
return true;
@@ -13675,7 +13676,9 @@ static bool bnxt_uc_list_updated(struct bnxt *bp)
return false;
}
-static void bnxt_set_rx_mode(struct net_device *dev)
+static void bnxt_set_rx_mode(struct net_device *dev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vnic_info *vnic;
@@ -13696,7 +13699,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
- uc_update = bnxt_uc_list_updated(bp);
+ uc_update = bnxt_uc_list_updated(bp, uc);
if (dev->flags & IFF_BROADCAST)
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -13704,27 +13707,23 @@ static void bnxt_set_rx_mode(struct net_device *dev)
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
} else if (dev->flags & IFF_MULTICAST) {
- mc_update = bnxt_mc_list_updated(bp, &mask);
+ mc_update = bnxt_mc_list_updated(bp, &mask, mc);
}
if (mask != vnic->rx_mask || uc_update || mc_update) {
vnic->rx_mask = mask;
- bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
+ bnxt_cfg_rx_mode(bp, uc, uc_update);
}
}
-static int bnxt_cfg_rx_mode(struct bnxt *bp)
+static int bnxt_cfg_rx_mode(struct bnxt *bp, struct netdev_hw_addr_list *uc,
+ bool uc_update)
{
struct net_device *dev = bp->dev;
struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
- bool uc_update;
-
- netif_addr_lock_bh(dev);
- uc_update = bnxt_uc_list_updated(bp);
- netif_addr_unlock_bh(dev);
if (!uc_update)
goto skip_uc;
@@ -13739,10 +13738,10 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
vnic->uc_filter_count = 1;
netif_addr_lock_bh(dev);
- if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+ if (netdev_hw_addr_list_count(uc) > (BNXT_MAX_UC_ADDRS - 1)) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
} else {
- netdev_for_each_uc_addr(ha, dev) {
+ netdev_hw_addr_list_for_each(ha, uc) {
memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
off += ETH_ALEN;
vnic->uc_filter_count++;
@@ -14708,6 +14707,7 @@ static void bnxt_ulp_restart(struct bnxt *bp)
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+ struct net_device *dev = bp->dev;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -14721,9 +14721,6 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_reenable_sriov(bp);
}
- if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
- bnxt_cfg_rx_mode(bp);
-
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
@@ -14788,6 +14785,13 @@ static void bnxt_sp_task(struct work_struct *work)
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
+ if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) {
+ bnxt_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_cfg_rx_mode(bp, &dev->uc, true);
+ bnxt_unlock_sp(bp);
+ }
+
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
@@ -15989,7 +15993,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_start_xmit = bnxt_start_xmit,
.ndo_stop = bnxt_close,
.ndo_get_stats64 = bnxt_get_stats64,
- .ndo_set_rx_mode = bnxt_set_rx_mode,
+ .ndo_set_rx_mode_async = bnxt_set_rx_mode,
.ndo_eth_ioctl = bnxt_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnxt_change_mac_addr,
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
index 703752995e93..c94a928622fd 100644
--- a/drivers/net/ethernet/freescale/enetc/ntmp.c
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/netc_global.h>
#include <linux/iopoll.h>
+#include <linux/vmalloc.h>
#include "ntmp_private.h"
@@ -42,6 +43,12 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
if (!cbdr->addr_base)
return -ENOMEM;
+ cbdr->swcbd = vcalloc(cbd_num, sizeof(struct netc_swcbd));
+ if (!cbdr->swcbd) {
+ dma_free_coherent(dev, size, cbdr->addr_base, cbdr->dma_base);
+ return -ENOMEM;
+ }
+
cbdr->dma_size = size;
cbdr->bd_num = cbd_num;
cbdr->regs = *regs;
@@ -52,10 +59,10 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
NTMP_BASE_ADDR_ALIGN);
- spin_lock_init(&cbdr->ring_lock);
+ mutex_init(&cbdr->ring_lock);
cbdr->next_to_use = netc_read(cbdr->regs.pir);
- cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir) & NETC_CBDRCIR_INDEX;
/* Step 1: Configure the base address of the Control BD Ring */
netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
@@ -71,10 +78,24 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
}
EXPORT_SYMBOL_GPL(ntmp_init_cbdr);
+static void ntmp_free_data_mem(struct device *dev, struct netc_swcbd *swcbd)
+{
+ if (unlikely(!swcbd->buf))
+ return;
+
+ dma_free_coherent(dev, swcbd->size + NTMP_DATA_ADDR_ALIGN,
+ swcbd->buf, swcbd->dma);
+}
+
void ntmp_free_cbdr(struct netc_cbdr *cbdr)
{
/* Disable the Control BD Ring */
netc_write(cbdr->regs.mr, 0);
+
+ for (int i = 0; i < cbdr->bd_num; i++)
+ ntmp_free_data_mem(cbdr->dev, &cbdr->swcbd[i]);
+
+ vfree(cbdr->swcbd);
dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
cbdr->dma_base);
memset(cbdr, 0, sizeof(*cbdr));
@@ -94,40 +115,59 @@ static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
{
- union netc_cbd *cbd;
- int i;
+ int i = cbdr->next_to_clean;
+
+ while ((netc_read(cbdr->regs.cir) & NETC_CBDRCIR_INDEX) != i) {
+ union netc_cbd *cbd = ntmp_get_cbd(cbdr, i);
+ struct netc_swcbd *swcbd = &cbdr->swcbd[i];
- i = cbdr->next_to_clean;
- while (netc_read(cbdr->regs.cir) != i) {
- cbd = ntmp_get_cbd(cbdr, i);
+ ntmp_free_data_mem(cbdr->dev, swcbd);
+ memset(swcbd, 0, sizeof(*swcbd));
memset(cbd, 0, sizeof(*cbd));
i = (i + 1) % cbdr->bd_num;
}
+ dma_wmb();
cbdr->next_to_clean = i;
}
-static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
+static void ntmp_select_and_lock_cbdr(struct ntmp_user *user,
+ struct netc_cbdr **cbdr)
+{
+ /* Currently only ENETC is supported, and it has only one command
+ * BD ring.
+ */
+ *cbdr = &user->ring[0];
+
+ mutex_lock(&(*cbdr)->ring_lock);
+}
+
+static void ntmp_unlock_cbdr(struct netc_cbdr *cbdr)
+{
+ mutex_unlock(&cbdr->ring_lock);
+}
+
+static int netc_xmit_ntmp_cmd(struct netc_cbdr *cbdr, union netc_cbd *cbd,
+ struct netc_swcbd *swcbd)
{
union netc_cbd *cur_cbd;
- struct netc_cbdr *cbdr;
- int i, err;
+ int i, err, used_bds;
u16 status;
u32 val;
- /* Currently only i.MX95 ENETC is supported, and it only has one
- * command BD ring
- */
- cbdr = &user->ring[0];
-
- spin_lock_bh(&cbdr->ring_lock);
-
- if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
+ used_bds = cbdr->bd_num - ntmp_get_free_cbd_num(cbdr);
+ if (unlikely(used_bds >= NETC_CBDR_CLEAN_WORK)) {
ntmp_clean_cbdr(cbdr);
+ if (unlikely(!ntmp_get_free_cbd_num(cbdr))) {
+ ntmp_free_data_mem(cbdr->dev, swcbd);
+ return -EBUSY;
+ }
+ }
i = cbdr->next_to_use;
cur_cbd = ntmp_get_cbd(cbdr, i);
*cur_cbd = *cbd;
+ cbdr->swcbd[i] = *swcbd;
dma_wmb();
/* Update producer index of both software and hardware */
@@ -135,11 +175,17 @@ static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
cbdr->next_to_use = i;
netc_write(cbdr->regs.pir, i);
- err = read_poll_timeout_atomic(netc_read, val, val == i,
- NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
- true, cbdr->regs.cir);
+ err = read_poll_timeout(netc_read, val,
+ (val & NETC_CBDRCIR_INDEX) == i,
+ NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
+ true, cbdr->regs.cir);
if (unlikely(err))
- goto cbdr_unlock;
+ return err;
+
+ if (unlikely(val & NETC_CBDRCIR_SBE)) {
+ dev_err(cbdr->dev, "Command BD system bus error\n");
+ return -EIO;
+ }
dma_rmb();
/* Get the writeback command BD, because the caller may need
@@ -150,40 +196,29 @@ static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
/* Check the writeback error status */
status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
if (unlikely(status)) {
- err = -EIO;
- dev_err(user->dev, "Command BD error: 0x%04x\n", status);
+ dev_err(cbdr->dev, "Command BD error: 0x%04x\n", status);
+ return -EIO;
}
- ntmp_clean_cbdr(cbdr);
- dma_wmb();
-
-cbdr_unlock:
- spin_unlock_bh(&cbdr->ring_lock);
-
- return err;
+ return 0;
}
-static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
+static int ntmp_alloc_data_mem(struct device *dev, struct netc_swcbd *swcbd,
+ void **buf_align)
{
void *buf;
- buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
- &data->dma, GFP_KERNEL);
+ buf = dma_alloc_coherent(dev, swcbd->size + NTMP_DATA_ADDR_ALIGN,
+ &swcbd->dma, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- data->buf = buf;
+ swcbd->buf = buf;
*buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
return 0;
}
-static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
-{
- dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
- data->buf, data->dma);
-}
-
static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
int len, int table_id, int cmd,
int access_method)
@@ -234,37 +269,39 @@ static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
u8 tbl_ver, u32 entry_id, u32 req_len,
u32 resp_len)
{
- struct ntmp_dma_buf data = {
- .dev = user->dev,
+ struct netc_swcbd swcbd = {
.size = max(req_len, resp_len),
};
struct ntmp_req_by_eid *req;
+ struct netc_cbdr *cbdr;
union netc_cbd cbd;
int err;
- err = ntmp_alloc_data_mem(&data, (void **)&req);
+ err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
if (err)
return err;
ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
- ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
+ ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(req_len, resp_len),
tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
- err = netc_xmit_ntmp_cmd(user, &cbd);
+ ntmp_select_and_lock_cbdr(user, &cbdr);
+ err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
if (err)
dev_err(user->dev,
"Failed to delete entry 0x%x of %s, err: %pe",
entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
-
- ntmp_free_data_mem(&data);
+ ntmp_unlock_cbdr(cbdr);
return err;
}
-static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
- u32 len, struct ntmp_req_by_eid *req,
- dma_addr_t dma, bool compare_eid)
+static int ntmp_query_entry_by_id(struct netc_cbdr *cbdr, int tbl_id,
+ struct ntmp_req_by_eid *req,
+ struct netc_swcbd *swcbd,
+ bool compare_eid)
{
+ u32 len = NTMP_LEN(sizeof(*req), swcbd->size);
struct ntmp_cmn_resp_query *resp;
int cmd = NTMP_CMD_QUERY;
union netc_cbd cbd;
@@ -276,10 +313,11 @@ static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
cmd = NTMP_CMD_QU;
/* Request header */
- ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
- err = netc_xmit_ntmp_cmd(user, &cbd);
+ ntmp_fill_request_hdr(&cbd, swcbd->dma, len, tbl_id, cmd,
+ NTMP_AM_ENTRY_ID);
+ err = netc_xmit_ntmp_cmd(cbdr, &cbd, swcbd);
if (err) {
- dev_err(user->dev,
+ dev_err(cbdr->dev,
"Failed to query entry 0x%x of %s, err: %pe\n",
entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
return err;
@@ -293,7 +331,7 @@ static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
resp = (struct ntmp_cmn_resp_query *)req;
if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
- dev_err(user->dev,
+ dev_err(cbdr->dev,
"%s: query EID 0x%x doesn't match response EID 0x%x\n",
ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
return -EIO;
@@ -305,15 +343,15 @@ static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
struct maft_entry_data *maft)
{
- struct ntmp_dma_buf data = {
- .dev = user->dev,
+ struct netc_swcbd swcbd = {
.size = sizeof(struct maft_req_add),
};
struct maft_req_add *req;
+ struct netc_cbdr *cbdr;
union netc_cbd cbd;
int err;
- err = ntmp_alloc_data_mem(&data, (void **)&req);
+ err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
if (err)
return err;
@@ -322,14 +360,15 @@ int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
req->keye = maft->keye;
req->cfge = maft->cfge;
- ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(swcbd.size, 0),
NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
- err = netc_xmit_ntmp_cmd(user, &cbd);
+
+ ntmp_select_and_lock_cbdr(user, &cbdr);
+ err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
if (err)
dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
entry_id, ERR_PTR(err));
-
- ntmp_free_data_mem(&data);
+ ntmp_unlock_cbdr(cbdr);
return err;
}
@@ -338,31 +377,31 @@ EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);
int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
struct maft_entry_data *maft)
{
- struct ntmp_dma_buf data = {
- .dev = user->dev,
+ struct netc_swcbd swcbd = {
.size = sizeof(struct maft_resp_query),
};
struct maft_resp_query *resp;
struct ntmp_req_by_eid *req;
+ struct netc_cbdr *cbdr;
int err;
- err = ntmp_alloc_data_mem(&data, (void **)&req);
+ err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
if (err)
return err;
ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
- err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
- NTMP_LEN(sizeof(*req), data.size),
- req, data.dma, true);
+
+ ntmp_select_and_lock_cbdr(user, &cbdr);
+ err = ntmp_query_entry_by_id(cbdr, NTMP_MAFT_ID, req, &swcbd, true);
if (err)
- goto end;
+ goto unlock_cbdr;
resp = (struct maft_resp_query *)req;
maft->keye = resp->keye;
maft->cfge = resp->cfge;
-end:
- ntmp_free_data_mem(&data);
+unlock_cbdr:
+ ntmp_unlock_cbdr(cbdr);
return err;
}
@@ -378,8 +417,9 @@ EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
int count)
{
- struct ntmp_dma_buf data = {.dev = user->dev};
struct rsst_req_update *req;
+ struct netc_swcbd swcbd;
+ struct netc_cbdr *cbdr;
union netc_cbd cbd;
int err, i;
@@ -387,8 +427,8 @@ int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
/* HW only takes in a full 64 entry table */
return -EINVAL;
- data.size = struct_size(req, groups, count);
- err = ntmp_alloc_data_mem(&data, (void **)&req);
+ swcbd.size = struct_size(req, groups, count);
+ err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
if (err)
return err;
@@ -398,15 +438,15 @@ int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
for (i = 0; i < count; i++)
req->groups[i] = (u8)(table[i]);
- ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
+ ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(swcbd.size, 0),
NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
- err = netc_xmit_ntmp_cmd(user, &cbd);
+ ntmp_select_and_lock_cbdr(user, &cbdr);
+ err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
if (err)
dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
ERR_PTR(err));
-
- ntmp_free_data_mem(&data);
+ ntmp_unlock_cbdr(cbdr);
return err;
}
@@ -414,8 +454,9 @@ EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);
int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
{
- struct ntmp_dma_buf data = {.dev = user->dev};
struct ntmp_req_by_eid *req;
+ struct netc_swcbd swcbd;
+ struct netc_cbdr *cbdr;
union netc_cbd cbd;
int err, i;
u8 *group;
@@ -424,21 +465,23 @@ int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
/* HW only takes in a full 64 entry table */
return -EINVAL;
- data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
- RSST_CFGE_DATA_SIZE(count);
- err = ntmp_alloc_data_mem(&data, (void **)&req);
+ swcbd.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
+ RSST_CFGE_DATA_SIZE(count);
+ err = ntmp_alloc_data_mem(user->dev, &swcbd, (void **)&req);
if (err)
return err;
/* Set the request data buffer */
ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
- ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
+ ntmp_fill_request_hdr(&cbd, swcbd.dma, NTMP_LEN(sizeof(*req), swcbd.size),
NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
- err = netc_xmit_ntmp_cmd(user, &cbd);
+
+ ntmp_select_and_lock_cbdr(user, &cbdr);
+ err = netc_xmit_ntmp_cmd(cbdr, &cbd, &swcbd);
if (err) {
dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
ERR_PTR(err));
- goto end;
+ goto unlock_cbdr;
}
group = (u8 *)req;
@@ -446,8 +489,8 @@ int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
for (i = 0; i < count; i++)
table[i] = group[i];
-end:
- ntmp_free_data_mem(&data);
+unlock_cbdr:
+ ntmp_unlock_cbdr(cbdr);
return err;
}
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp_private.h b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
index 34394e40fddd..f8dff3ba2c28 100644
--- a/drivers/net/ethernet/freescale/enetc/ntmp_private.h
+++ b/drivers/net/ethernet/freescale/enetc/ntmp_private.h
@@ -12,6 +12,9 @@
#define NTMP_EID_REQ_LEN 8
#define NETC_CBDR_BD_NUM 256
+#define NETC_CBDRCIR_INDEX GENMASK(9, 0)
+#define NETC_CBDRCIR_SBE BIT(31)
+#define NETC_CBDR_CLEAN_WORK 16
union netc_cbd {
struct {
@@ -54,13 +57,6 @@ union netc_cbd {
} resp_hdr; /* NTMP Response Message Header Format */
};
-struct ntmp_dma_buf {
- struct device *dev;
- size_t size;
- void *buf;
- dma_addr_t dma;
-};
-
struct ntmp_cmn_req_data {
__le16 update_act;
u8 dbg_opt;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9befdacd6730..7ce0cc8ab8f4 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7706,6 +7706,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
+ e1000e_ptp_remove(adapter);
err_eeprom:
if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
e1000_phy_hw_reset(&adapter->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 926d001b2150..028bd500603a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -13783,7 +13783,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->neigh_priv_len = sizeof(u32) * 4;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index dad001abc908..3c1465cf0515 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1150,14 +1150,18 @@ bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
/**
* iavf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
+ * @uc: snapshot of uc address list
+ * @mc: snapshot of mc address list
**/
-static void iavf_set_rx_mode(struct net_device *netdev)
+static void iavf_set_rx_mode(struct net_device *netdev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
spin_lock_bh(&adapter->mac_vlan_list_lock);
- __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
- __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
+ __hw_addr_sync_dev(uc, netdev, iavf_addr_sync, iavf_addr_unsync);
+ __hw_addr_sync_dev(mc, netdev, iavf_addr_sync, iavf_addr_unsync);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
@@ -1210,7 +1214,9 @@ static void iavf_configure(struct iavf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int i;
- iavf_set_rx_mode(netdev);
+ netif_addr_lock_bh(netdev);
+ iavf_set_rx_mode(netdev, &netdev->uc, &netdev->mc);
+ netif_addr_unlock_bh(netdev);
iavf_configure_tx(adapter);
iavf_configure_rx(adapter);
@@ -5153,7 +5159,7 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_open = iavf_open,
.ndo_stop = iavf_close,
.ndo_start_xmit = iavf_xmit_frame,
- .ndo_set_rx_mode = iavf_set_rx_mode,
+ .ndo_set_rx_mode_async = iavf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = iavf_set_mac,
.ndo_change_mtu = iavf_change_mtu,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 1d8cf29cb65a..5bb1de1cfd33 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -277,7 +277,7 @@ struct iavf_rx_desc {
/* L2 Tag 2 Presence */
#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
/* Stripped S-TAG VLAN from the receive packet */
-#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 48)
/* Stripped S-TAG VLAN from the receive packet */
#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
/* The packet is a UDP tunneled packet */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index eb3a48330cc1..725b130dd3a2 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -753,7 +753,7 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
- ring->flags |= ICE_TX_FLAGS_RING_XDP;
+ set_bit(ICE_TX_RING_FLAGS_XDP, ring->flags);
}
/**
@@ -778,7 +778,7 @@ static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
*/
static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
{
- return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+ return test_bit(ICE_TX_RING_FLAGS_TXTIME, ring->flags);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 859e9c66f3e7..3cbb1b0582e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1252,7 +1252,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
__le16 link_speed;
-#define ICE_AQ_LINK_SPEED_M 0x7FF
+#define ICE_AQ_LINK_SPEED_M GENMASK(11, 0)
#define ICE_AQ_LINK_SPEED_10MB BIT(0)
#define ICE_AQ_LINK_SPEED_100MB BIT(1)
#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index bd77f1c001ee..16aa25535152 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -943,7 +943,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
- if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ if (test_bit(ICE_TX_RING_FLAGS_VLAN_L2TAG2, tx_ring->flags))
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index e6a20af6f63d..f28416a707d7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3290,6 +3290,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
tx_rings[i].tstamp_ring = NULL;
+ clear_bit(ICE_TX_RING_FLAGS_TXTIME, tx_rings[i].flags);
tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 689c6025ea82..837b71b7b2b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1412,9 +1412,9 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->count = vsi->num_tx_desc;
ring->txq_teid = ICE_INVAL_TEID;
if (dvm_ena)
- ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
+ set_bit(ICE_TX_RING_FLAGS_VLAN_L2TAG2, ring->flags);
else
- ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
+ set_bit(ICE_TX_RING_FLAGS_VLAN_L2TAG1, ring->flags);
WRITE_ONCE(vsi->tx_rings[i], ring);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f93ee3b62391..5f92377d4dfc 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1923,82 +1923,6 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
/**
- * ice_force_phys_link_state - Force the physical link state
- * @vsi: VSI to force the physical link state to up/down
- * @link_up: true/false indicates to set the physical link to up/down
- *
- * Force the physical link state by getting the current PHY capabilities from
- * hardware and setting the PHY config based on the determined capabilities. If
- * link changes a link event will be triggered because both the Enable Automatic
- * Link Update and LESM Enable bits are set when setting the PHY capabilities.
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
-{
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_aqc_set_phy_cfg_data *cfg;
- struct ice_port_info *pi;
- struct device *dev;
- int retcode;
-
- if (!vsi || !vsi->port_info || !vsi->back)
- return -EINVAL;
- if (vsi->type != ICE_VSI_PF)
- return 0;
-
- dev = ice_pf_to_dev(vsi->back);
-
- pi = vsi->port_info;
-
- pcaps = kzalloc_obj(*pcaps);
- if (!pcaps)
- return -ENOMEM;
-
- retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (retcode) {
- dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- goto out;
- }
-
- /* No change in link */
- if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
- link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
- goto out;
-
- /* Use the current user PHY configuration. The current user PHY
- * configuration is initialized during probe from PHY capabilities
- * software mode, and updated on set PHY configuration.
- */
- cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- retcode = -ENOMEM;
- goto out;
- }
-
- cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- if (link_up)
- cfg->caps |= ICE_AQ_PHY_ENA_LINK;
- else
- cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
-
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
- if (retcode) {
- dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- }
-
- kfree(cfg);
-out:
- kfree(pcaps);
- return retcode;
-}
-
-/**
* ice_init_nvm_phy_type - Initialize the NVM PHY type
* @pi: port info structure
*
@@ -2066,7 +1990,7 @@ static void ice_init_link_dflt_override(struct ice_port_info *pi)
* first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
* is used to indicate that the user PHY cfg default override is initialized
* and the PHY has not been configured with the default override settings. The
- * state is set here, and cleared in ice_configure_phy the first time the PHY is
+ * state is set here, and cleared in ice_phy_cfg the first time the PHY is
* configured.
*
* This function should be called only if the FW doesn't support default
@@ -2172,14 +2096,18 @@ err_out:
}
/**
- * ice_configure_phy - configure PHY
+ * ice_phy_cfg - configure PHY
* @vsi: VSI of PHY
+ * @link_en: true/false indicates to set link to enable/disable
*
* Set the PHY configuration. If the current PHY configuration is the same as
- * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
- * configure the based get PHY capabilities for topology with media.
+ * the curr_user_phy_cfg and link_en hasn't changed, then do nothing to avoid
+ * link flap. Otherwise configure the PHY based on the get-PHY-capabilities
+ * data for the topology with media, and on link_en.
+ *
+ * Return: 0 on success, negative on failure
*/
-static int ice_configure_phy(struct ice_vsi *vsi)
+static int ice_phy_cfg(struct ice_vsi *vsi, bool link_en)
{
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_port_info *pi = vsi->port_info;
@@ -2199,9 +2127,6 @@ static int ice_configure_phy(struct ice_vsi *vsi)
phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
return -EPERM;
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
- return ice_force_phys_link_state(vsi, true);
-
pcaps = kzalloc_obj(*pcaps);
if (!pcaps)
return -ENOMEM;
@@ -2215,10 +2140,8 @@ static int ice_configure_phy(struct ice_vsi *vsi)
goto done;
}
- /* If PHY enable link is configured and configuration has not changed,
- * there's nothing to do
- */
- if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
+ /* Configuration has not changed. There's nothing to do. */
+ if (link_en == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
goto done;
@@ -2282,8 +2205,12 @@ static int ice_configure_phy(struct ice_vsi *vsi)
*/
ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
- /* Enable link and link update */
- cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
+ /* Enable/Disable link and link update */
+ cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ if (link_en)
+ cfg->caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
if (err)
@@ -2336,7 +2263,7 @@ static void ice_check_media_subtask(struct ice_pf *pf)
test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
return;
- err = ice_configure_phy(vsi);
+ err = ice_phy_cfg(vsi, true);
if (!err)
clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
@@ -4892,9 +4819,15 @@ static int ice_init_link(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_link_default_override_tlv *ldo;
+ bool link_en;
+
+ ldo = &pf->link_dflt_override;
+ link_en = !(ldo->options &
+ ICE_LINK_OVERRIDE_AUTO_LINK_DIS);
if (vsi)
- ice_configure_phy(vsi);
+ ice_phy_cfg(vsi, link_en);
}
} else {
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
@@ -9707,7 +9640,7 @@ int ice_open_internal(struct net_device *netdev)
}
}
- err = ice_configure_phy(vsi);
+ err = ice_phy_cfg(vsi, true);
if (err) {
netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
@@ -9748,7 +9681,7 @@ int ice_stop(struct net_device *netdev)
}
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- int link_err = ice_force_phys_link_state(vsi, false);
+ int link_err = ice_phy_cfg(vsi, false);
if (link_err) {
if (link_err == -ENOMEDIUM)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 6cb0cf7a9891..36df742c326c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2710,7 +2710,7 @@ static bool ice_any_port_has_timestamps(struct ice_pf *pf)
bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- unsigned int i;
+ int ret;
/* Check software indicator */
switch (pf->ptp.tx_interrupt_mode) {
@@ -2731,16 +2731,19 @@ bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
}
/* Check hardware indicator */
- for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
- u64 tstamp_ready = 0;
- int err;
-
- err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (err || tstamp_ready)
- return true;
+ ret = ice_check_phy_tx_tstamp_ready(hw);
+ if (ret < 0) {
+ dev_dbg(ice_pf_to_dev(pf), "Unable to read PHY Tx timestamp ready bitmap, err %d\n",
+ ret);
+ /* Stop triggering IRQs if we're unable to read PHY */
+ return false;
}
- return false;
+ /* ice_check_phy_tx_tstamp_ready() returns 1 if there are timestamps
+ * available, 0 if there are no waiting timestamps, and a negative
+ * value if there was an error (which we checked for above).
+ */
+ return ret > 0;
}
/**
@@ -2824,8 +2827,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- bool trigger_oicr = false;
- unsigned int i;
+ int ret;
if (!pf->ptp.port.tx.has_ready_bitmap)
return;
@@ -2833,21 +2835,11 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
if (!ice_pf_src_tmr_owned(pf))
return;
- for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
- u64 tstamp_ready;
- int err;
-
- err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
- if (!err && tstamp_ready) {
- trigger_oicr = true;
- break;
- }
- }
-
- if (trigger_oicr) {
- /* Trigger a software interrupt, to ensure this data
- * gets processed.
- */
+ ret = ice_check_phy_tx_tstamp_ready(hw);
+ if (ret < 0) {
+ dev_dbg(dev, "PTP periodic task unable to read PHY timestamp ready bitmap, err %d\n",
+ ret);
+ } else if (ret) {
dev_dbg(dev, "PTP periodic task detected waiting timestamps. Triggering Tx timestamp interrupt now.\n");
wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 19dddd9b53dd..4d298c27bfb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -78,14 +78,14 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.blktime = 0x666, /* 3.2 */
.tx_offset = {
.serdes = 0x234c, /* 17.6484848 */
- .no_fec = 0x8e80, /* 71.25 */
+ .no_fec = 0x93d9, /* 73 */
.fc = 0xb4a4, /* 90.32 */
.sfd = 0x4a4, /* 2.32 */
.onestep = 0x4ccd /* 38.4 */
},
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
- .no_fec = 0xffffcccd, /* -25.6 */
+ .no_fec = 0xffffc7b6, /* -28 */
.fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
@@ -118,17 +118,17 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.mktime = 0x147b, /* 10.24, only if RS-FEC enabled */
.tx_offset = {
.serdes = 0xe1e, /* 7.0593939 */
- .no_fec = 0x3857, /* 28.17 */
+ .no_fec = 0x4266, /* 33 */
.fc = 0x48c3, /* 36.38 */
- .rs = 0x8100, /* 64.5 */
+ .rs = 0x8a00, /* 69 */
.sfd = 0x1dc, /* 0.93 */
.onestep = 0x1eb8 /* 15.36 */
},
.rx_offset = {
.serdes = 0xfffff7a9, /* -4.1697 */
- .no_fec = 0xffffe71a, /* -12.45 */
+ .no_fec = 0xffffe700, /* -12 */
.fc = 0xfffe894d, /* -187.35 */
- .rs = 0xfffff8cd, /* -3.6 */
+ .rs = 0xfffff8cc, /* -3 */
.sfd = 0x1dc, /* 0.93 */
.bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 61c0a0d93ea8..24fb7a3e14d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -378,6 +378,31 @@ static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
*/
/**
+ * ice_ptp_init_phc_e825c - Perform E825C specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E825C-specific PTP hardware clock initialization steps.
+ *
+ * Return: 0 on success, or a negative error value on failure.
+ */
+static int ice_ptp_init_phc_e825c(struct ice_hw *hw)
+{
+ int err;
+
+ /* Soft reset all ports, to ensure everything is at a clean state */
+ for (int port = 0; port < hw->ptp.num_lports; port++) {
+ err = ice_ptp_phy_soft_reset_eth56g(hw, port);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to soft reset port %d, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
* @hw: pointer to the HW struct
* @port: destination port
@@ -1847,6 +1872,8 @@ static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port)
* @ena: enable or disable interrupt
* @threshold: interrupt threshold
*
+ * The threshold cannot be 0 while the interrupt is enabled.
+ *
* Configure TX timestamp interrupt for the specified port
*
* Return:
@@ -1858,19 +1885,45 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
int err;
u32 val;
+ if (ena && !threshold)
+ return -EINVAL;
+
err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
if (err)
return err;
+ val &= ~PHY_TS_INT_CONFIG_ENA_M;
if (ena) {
- val |= PHY_TS_INT_CONFIG_ENA_M;
val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M;
val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold);
- } else {
- val &= ~PHY_TS_INT_CONFIG_ENA_M;
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG,
+ val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP,
+ "Failed to update 'threshold' PHY_REG_TS_INT_CONFIG port=%u ena=%u threshold=%u\n",
+ port, !!ena, threshold);
+ return err;
+ }
+ val |= PHY_TS_INT_CONFIG_ENA_M;
+ }
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP,
+ "Failed to update 'ena' PHY_REG_TS_INT_CONFIG port=%u ena=%u threshold=%u\n",
+ port, !!ena, threshold);
+ return err;
}
- return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
+ err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP,
+ "Failed to read PHY_REG_TS_INT_CONFIG port=%u ena=%u threshold=%u\n",
+ port, !!ena, threshold);
+ return err;
+ }
+
+ return 0;
}
/**
@@ -2116,6 +2169,35 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
+ * ice_check_phy_tx_tstamp_ready_eth56g - Check Tx memory status for all ports
+ * @hw: pointer to the HW struct
+ *
+ * Check the PHY_REG_TX_MEMORY_STATUS for all ports. A set bit indicates
+ * a waiting timestamp.
+ *
+ * Return: 1 if any port has at least one timestamp ready bit set,
+ * 0 otherwise, and a negative error code if unable to read the bitmap.
+ */
+static int ice_check_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw)
+{
+ int port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ u64 tstamp_ready;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(hw, port, &tstamp_ready);
+ if (err)
+ return err;
+
+ if (tstamp_ready)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
* ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
* @hw: pointer to the HW struct
* @ts_status: the timestamp mask pointer
@@ -2137,13 +2219,19 @@ int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
*ts_status = 0;
for (phy = 0; phy < params->num_phys; phy++) {
+ u8 port;
int err;
- err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status);
+ /* ice_read_phy_eth56g expects a port index, so use the first
+ * port of the PHY
+ */
+ port = phy * hw->ptp.ports_per_phy;
+
+ err = ice_read_phy_eth56g(hw, port, PHY_PTP_INT_STATUS, &status);
if (err)
return err;
- *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy);
+ *ts_status |= (status & mask) << port;
}
ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status);
@@ -2152,6 +2240,69 @@ int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
}
/**
+ * ice_ptp_phy_soft_reset_eth56g - Perform a PHY soft reset on ETH56G
+ * @hw: pointer to the HW structure
+ * @port: PHY port number
+ *
+ * Trigger a soft reset of the ETH56G PHY by toggling the soft reset
+ * bit in the PHY global register. The reset sequence consists of:
+ * 1. Clearing the soft reset bit
+ * 2. Asserting the soft reset bit
+ * 3. Clearing the soft reset bit again
+ *
+ * Short delays are inserted between each step to allow the hardware
+ * to settle. This provides a controlled way to reinitialize the PHY
+ * without requiring a full device reset.
+ *
+ * Return: 0 on success, or a negative error code on failure when
+ * reading or writing the PHY register.
+ */
+int ice_ptp_phy_soft_reset_eth56g(struct ice_hw *hw, u8 port)
+{
+ u32 global_val;
+ int err;
+
+ err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_GLOBAL, &global_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_REG_GLOBAL for port %d, err %d\n",
+ port, err);
+ return err;
+ }
+
+ global_val &= ~PHY_REG_GLOBAL_SOFT_RESET_M;
+ ice_debug(hw, ICE_DBG_PTP, "Clearing soft reset bit for port %d, val: 0x%x\n",
+ port, global_val);
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_GLOBAL, global_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_REG_GLOBAL for port %d, err %d\n",
+ port, err);
+ return err;
+ }
+
+ usleep_range(5000, 6000);
+
+ global_val |= PHY_REG_GLOBAL_SOFT_RESET_M;
+ ice_debug(hw, ICE_DBG_PTP, "Set soft reset bit for port %d, val: 0x%x\n",
+ port, global_val);
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_GLOBAL, global_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_REG_GLOBAL for port %d, err %d\n",
+ port, err);
+ return err;
+ }
+ usleep_range(5000, 6000);
+
+ global_val &= ~PHY_REG_GLOBAL_SOFT_RESET_M;
+ ice_debug(hw, ICE_DBG_PTP, "Clear soft reset bit for port %d, val: 0x%x\n",
+ port, global_val);
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_GLOBAL, global_val);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_REG_GLOBAL for port %d, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
* ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register
* @hw: pointer to the HW struct
* @port: the PHY port to read from
@@ -4203,6 +4354,35 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
}
/**
+ * ice_check_phy_tx_tstamp_ready_e82x - Check Tx memory status for all quads
+ * @hw: pointer to the HW struct
+ *
+ * Check the Q_REG_TX_MEMORY_STATUS for all quads. A set bit indicates
+ * a waiting timestamp.
+ *
+ * Return: 1 if any quad has at least one timestamp ready bit set,
+ * 0 otherwise, and a negative error value if unable to read the bitmap.
+ */
+static int ice_check_phy_tx_tstamp_ready_e82x(struct ice_hw *hw)
+{
+ int quad;
+
+ /* Scan every quad's ready bitmap; stop at the first hit */
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) {
+ u64 tstamp_ready;
+ int err;
+
+ err = ice_get_phy_tx_tstamp_ready(hw, quad, &tstamp_ready);
+ if (err)
+ return err;
+
+ /* Any non-zero bitmap means at least one timestamp is waiting */
+ if (tstamp_ready)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
* ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt
* @hw: pointer to the HW struct
* @quad: the timestamp quad
@@ -4755,6 +4935,23 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
return 0;
}
+/**
+ * ice_check_phy_tx_tstamp_ready_e810 - Check Tx memory status register
+ * @hw: pointer to the HW struct
+ *
+ * The E810 devices do not have a Tx memory status register. Note this is
+ * intentionally different behavior from ice_get_phy_tx_tstamp_ready_e810
+ * which always says that all bits are ready. This function is called in cases
+ * where code will trigger interrupts if timestamps are waiting, and should
+ * not be called for E810 hardware.
+ *
+ * Return: 0.
+ */
+static int ice_check_phy_tx_tstamp_ready_e810(struct ice_hw *hw)
+{
+ /* E810 has no Tx memory status register; report no waiting timestamps */
+ return 0;
+}
+
/* E810 SMA functions
*
* The following functions operate specifically on E810 hardware and are used
@@ -5010,6 +5207,21 @@ static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
}
/**
+ * ice_check_phy_tx_tstamp_ready_e830 - Check Tx memory status register
+ * @hw: pointer to the HW struct
+ *
+ * Return: 1 if the device has waiting timestamps, 0 otherwise.
+ */
+static int ice_check_phy_tx_tstamp_ready_e830(struct ice_hw *hw)
+{
+ u64 tstamp_ready;
+
+ /* NOTE(review): reads port 0 only; presumably the E830 bitmap covers
+  * the whole device — confirm against ice_get_phy_tx_tstamp_ready_e830.
+  */
+ ice_get_phy_tx_tstamp_ready_e830(hw, 0, &tstamp_ready);
+
+ return !!tstamp_ready;
+}
+
+/**
* ice_ptp_init_phy_e830 - initialize PHY parameters
* @ptp: pointer to the PTP HW struct
*/
@@ -5381,8 +5593,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
+ int err = 0;
u8 tmr_idx;
- int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -5399,8 +5611,8 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
case ICE_MAC_E830:
- /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */
- return 0;
+ /* E830 sync PHYs automatically after setting cmd register */
+ break;
case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
@@ -5564,7 +5776,7 @@ int ice_ptp_init_phc(struct ice_hw *hw)
case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
case ICE_MAC_GENERIC_3K_E825:
- return 0;
+ return ice_ptp_init_phc_e825c(hw);
default:
return -EOPNOTSUPP;
}
@@ -5602,6 +5814,33 @@ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
}
/**
+ * ice_check_phy_tx_tstamp_ready - Check PHY Tx timestamp memory status
+ * @hw: pointer to the HW struct
+ *
+ * Check the PHY for Tx timestamp memory status on all ports. If you need to
+ * see individual timestamp status for each index, use
+ * ice_get_phy_tx_tstamp_ready() instead.
+ *
+ * Return: 1 if any port has timestamps available, 0 if there are no timestamps
+ * available, and a negative error code on failure.
+ */
+int ice_check_phy_tx_tstamp_ready(struct ice_hw *hw)
+{
+ /* Dispatch to the MAC-family-specific implementation */
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ return ice_check_phy_tx_tstamp_ready_e810(hw);
+ case ICE_MAC_E830:
+ return ice_check_phy_tx_tstamp_ready_e830(hw);
+ case ICE_MAC_GENERIC:
+ return ice_check_phy_tx_tstamp_ready_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_check_phy_tx_tstamp_ready_eth56g(hw);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* ice_cgu_get_pin_desc_e823 - get pin description array
* @hw: pointer to the hw struct
* @input: if request is done against input or output pin
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 9bfd3e79c580..1c9e77dbc770 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -300,6 +300,7 @@ void ice_ptp_reset_ts_memory(struct ice_hw *hw);
int ice_ptp_init_phc(struct ice_hw *hw);
void ice_ptp_init_hw(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
+int ice_check_phy_tx_tstamp_ready(struct ice_hw *hw);
int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
enum ice_ptp_tmr_cmd configured_cmd);
@@ -374,6 +375,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
+int ice_ptp_phy_soft_reset_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL
#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL
@@ -676,6 +678,9 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
#define ICE_P0_GNSS_PRSNT_N BIT(4)
/* ETH56G PHY register addresses */
+#define PHY_REG_GLOBAL 0x0
+#define PHY_REG_GLOBAL_SOFT_RESET_M BIT(11)
+
/* Timestamp PHY incval registers */
#define PHY_REG_TIMETUS_L 0x8
#define PHY_REG_TIMETUS_U 0xC
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 2cf04bc6edce..a730aa368c92 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -305,6 +305,8 @@ ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
aux_dev_uninit:
auxiliary_device_uninit(&sf_dev->adev);
+ return err;
+
sf_dev_free:
kfree(sf_dev);
xa_erase:
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a2cd4cf37734..4ca1a0602307 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -190,9 +190,10 @@ void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
{
ice_free_tstamp_ring(tx_ring);
+ clear_bit(ICE_TX_RING_FLAGS_TXTIME, tx_ring->flags);
+ smp_wmb(); /* order flag clear before pointer NULL */
kfree_rcu(tx_ring->tstamp_ring, rcu);
- tx_ring->tstamp_ring = NULL;
- tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+ WRITE_ONCE(tx_ring->tstamp_ring, NULL);
}
/**
@@ -405,7 +406,7 @@ static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
tx_ring->tstamp_ring = tstamp_ring;
tstamp_ring->desc = NULL;
tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
- tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ set_bit(ICE_TX_RING_FLAGS_TXTIME, tx_ring->flags);
return 0;
}
@@ -1521,13 +1522,20 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
return;
if (ice_is_txtime_cfg(tx_ring)) {
- struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
- u32 tstamp_count = tstamp_ring->count;
- u32 j = tstamp_ring->next_to_use;
+ struct ice_tstamp_ring *tstamp_ring;
+ u32 tstamp_count, j;
struct ice_ts_desc *ts_desc;
struct timespec64 ts;
u32 tstamp;
+ smp_rmb(); /* order flag read before pointer read */
+ tstamp_ring = READ_ONCE(tx_ring->tstamp_ring);
+ if (unlikely(!tstamp_ring))
+ goto ring_kick;
+
+ tstamp_count = tstamp_ring->count;
+ j = tstamp_ring->next_to_use;
+
ts = ktime_to_timespec64(first->skb->tstamp);
tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
@@ -1555,6 +1563,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
tstamp_ring->next_to_use = j;
writel_relaxed(j, tstamp_ring->tail);
} else {
+ring_kick:
writel_relaxed(i, tx_ring->tail);
}
return;
@@ -1814,7 +1823,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
*/
if (skb_vlan_tag_present(skb)) {
first->vid = skb_vlan_tag_get(skb);
- if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ if (test_bit(ICE_TX_RING_FLAGS_VLAN_L2TAG2, tx_ring->flags))
first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
else
first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
@@ -2158,6 +2167,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buf[tx_ring->next_to_use];
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
@@ -2183,8 +2195,6 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
offload.tx_ring = tx_ring;
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_buf[tx_ring->next_to_use];
first->skb = skb;
first->type = ICE_TX_BUF_SKB;
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
@@ -2249,6 +2259,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
out_drop:
ice_trace(xmit_frame_ring_drop, tx_ring, skb);
dev_kfree_skb_any(skb);
+ first->type = ICE_TX_BUF_EMPTY;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b6547e1b7c42..5e517f219379 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -212,6 +212,14 @@ enum ice_rx_dtype {
ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};
+enum ice_tx_ring_flags {
+ ICE_TX_RING_FLAGS_XDP,
+ ICE_TX_RING_FLAGS_VLAN_L2TAG1,
+ ICE_TX_RING_FLAGS_VLAN_L2TAG2,
+ ICE_TX_RING_FLAGS_TXTIME,
+ ICE_TX_RING_FLAGS_NBITS,
+};
+
struct ice_pkt_ctx {
u64 cached_phctime;
__be16 vlan_proto;
@@ -352,11 +360,7 @@ struct ice_tx_ring {
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
- u8 flags;
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
-#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
-#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
-#define ICE_TX_FLAGS_TXTIME BIT(3)
+ DECLARE_BITMAP(flags, ICE_TX_RING_FLAGS_NBITS);
struct xsk_buff_pool *xsk_pool;
@@ -398,7 +402,7 @@ static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
- return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
+ return test_bit(ICE_TX_RING_FLAGS_XDP, ring->flags);
}
enum ice_container_type {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index c3408b3f7010..091b80a67189 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -201,7 +201,10 @@ int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_nu
void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs);
int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs);
-void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc);
int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
__be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fdfe9d1cfe21..12492c4a5d41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -609,20 +609,26 @@ static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
}
static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct netdev_hw_addr *ha;
- netif_addr_lock_bh(netdev);
+ if (!uc || !mc) {
+ netif_addr_lock_bh(netdev);
+ mlx5e_sync_netdev_addr(fs, netdev, &netdev->uc, &netdev->mc);
+ netif_addr_unlock_bh(netdev);
+ return;
+ }
mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
- netdev_for_each_uc_addr(ha, netdev)
+
+ netdev_hw_addr_list_for_each(ha, uc)
mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);
- netdev_for_each_mc_addr(ha, netdev)
+ netdev_hw_addr_list_for_each(ha, mc)
mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);
-
- netif_addr_unlock_bh(netdev);
}
static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
@@ -724,7 +730,9 @@ static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
}
static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
@@ -736,7 +744,7 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
hn->action = MLX5E_ACTION_DEL;
if (fs->state_destroy)
- mlx5e_sync_netdev_addr(fs, netdev);
+ mlx5e_sync_netdev_addr(fs, netdev, uc, mc);
mlx5e_apply_netdev_addr(fs);
}
@@ -820,13 +828,15 @@ static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
}
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
- struct net_device *netdev)
+ struct net_device *netdev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_l2_table *ea = &fs->l2;
if (mlx5e_is_uplink_rep(priv)) {
- mlx5e_handle_netdev_addr(fs, netdev);
+ mlx5e_handle_netdev_addr(fs, netdev, uc, mc);
goto update_vport_context;
}
@@ -856,7 +866,7 @@ void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
if (enable_broadcast)
mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);
- mlx5e_handle_netdev_addr(fs, netdev);
+ mlx5e_handle_netdev_addr(fs, netdev, uc, mc);
if (disable_broadcast)
mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6c4eeb88588c..5a46870c4b74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4145,11 +4145,13 @@ static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
}
-static void mlx5e_set_rx_mode(struct net_device *dev)
+static void mlx5e_set_rx_mode(struct net_device *dev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_nic_set_rx_mode(priv);
+ mlx5e_fs_set_rx_mode_work(priv->fs, dev, uc, mc);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -5324,7 +5326,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_setup_tc = mlx5e_setup_tc,
.ndo_select_queue = mlx5e_select_queue,
.ndo_get_stats64 = mlx5e_get_stats,
- .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_rx_mode_async = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
@@ -6309,8 +6311,11 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work);
+ struct net_device *dev = priv->netdev;
- return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+ netdev_lock_ops(dev);
+ mlx5e_fs_set_rx_mode_work(priv->fs, dev, NULL, NULL);
+ netdev_unlock_ops(dev);
}
/* mlx5e generic netdev management API (move to en_common.c) */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 72d8ae2df33f..74827e8ca125 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1849,7 +1849,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
err = mlx5_notifiers_init(dev);
if (err)
- goto err_hca_caps;
+ goto err_notifiers_init;
/* The conjunction of sw_vhca_id with sw_owner_id will be a global
* unique id per function which uses mlx5_core.
@@ -1865,6 +1865,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
return 0;
+err_notifiers_init:
+ mlx5_hca_caps_free(dev);
err_hca_caps:
mlx5_adev_cleanup(dev);
err_adev_init:
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index b4b396ca9bce..c406a3b56b37 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -183,7 +183,9 @@ static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
return ret;
}
-void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
bool uc_promisc = false, mc_promisc = false;
struct net_device *netdev = fbd->netdev;
@@ -213,10 +215,10 @@ void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
}
/* Synchronize unicast and multicast address lists */
- err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
+ err = __hw_addr_sync_dev(uc, netdev, fbnic_uc_sync, fbnic_uc_unsync);
if (err == -ENOSPC)
uc_promisc = true;
- err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
+ err = __hw_addr_sync_dev(mc, netdev, fbnic_mc_sync, fbnic_mc_unsync);
if (err == -ENOSPC)
mc_promisc = true;
@@ -238,18 +240,21 @@ void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
fbnic_write_tce_tcam(fbd);
}
-static void fbnic_set_rx_mode(struct net_device *netdev)
+static void fbnic_set_rx_mode(struct net_device *netdev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
struct fbnic_net *fbn = netdev_priv(netdev);
struct fbnic_dev *fbd = fbn->fbd;
/* No need to update the hardware if we are not running */
if (netif_running(netdev))
- __fbnic_set_rx_mode(fbd);
+ __fbnic_set_rx_mode(fbd, uc, mc);
}
static int fbnic_set_mac(struct net_device *netdev, void *p)
{
+ struct fbnic_net *fbn = netdev_priv(netdev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -257,7 +262,8 @@ static int fbnic_set_mac(struct net_device *netdev, void *p)
eth_hw_addr_set(netdev, addr->sa_data);
- fbnic_set_rx_mode(netdev);
+ if (netif_running(netdev))
+ __fbnic_set_rx_mode(fbn->fbd, &netdev->uc, &netdev->mc);
return 0;
}
@@ -551,7 +557,7 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_features_check = fbnic_features_check,
.ndo_set_mac_address = fbnic_set_mac,
.ndo_change_mtu = fbnic_change_mtu,
- .ndo_set_rx_mode = fbnic_set_rx_mode,
+ .ndo_set_rx_mode_async = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
.ndo_bpf = fbnic_bpf,
.ndo_hwtstamp_get = fbnic_hwtstamp_get,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 9129a658f8fa..eded20b0e9e4 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -97,7 +97,9 @@ void fbnic_time_init(struct fbnic_net *fbn);
int fbnic_time_start(struct fbnic_net *fbn);
void fbnic_time_stop(struct fbnic_net *fbn);
-void __fbnic_set_rx_mode(struct fbnic_dev *fbd);
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc);
void fbnic_clear_rx_mode(struct fbnic_dev *fbd);
void fbnic_phylink_get_pauseparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index b7c0b7349d00..7e85b480203c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -135,7 +135,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_rss_reinit_hw(fbn->fbd, fbn);
- __fbnic_set_rx_mode(fbn->fbd);
+ __fbnic_set_rx_mode(fbn->fbd, &fbn->netdev->uc, &fbn->netdev->mc);
/* Enable Tx/Rx processing */
fbnic_napi_enable(fbn);
@@ -180,7 +180,7 @@ static int fbnic_fw_config_after_crash(struct fbnic_dev *fbd)
}
fbnic_rpc_reset_valid_entries(fbd);
- __fbnic_set_rx_mode(fbd);
+ __fbnic_set_rx_mode(fbd, &fbd->netdev->uc, &fbd->netdev->mc);
return 0;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 42a186db43ea..fe95b6f69646 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -244,7 +244,7 @@ void fbnic_bmc_rpc_check(struct fbnic_dev *fbd)
if (fbd->fw_cap.need_bmc_tcam_reinit) {
fbnic_bmc_rpc_init(fbd);
- __fbnic_set_rx_mode(fbd);
+ __fbnic_set_rx_mode(fbd, &fbd->netdev->uc, &fbd->netdev->mc);
fbd->fw_cap.need_bmc_tcam_reinit = false;
}
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 31f75b4a67fd..b795a3a60571 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -408,10 +408,8 @@ struct ks8851_net {
struct gpio_desc *gpio;
struct mii_bus *mii_bus;
- void (*lock)(struct ks8851_net *ks,
- unsigned long *flags);
- void (*unlock)(struct ks8851_net *ks,
- unsigned long *flags);
+ void (*lock)(struct ks8851_net *ks);
+ void (*unlock)(struct ks8851_net *ks);
unsigned int (*rdreg16)(struct ks8851_net *ks,
unsigned int reg);
void (*wrreg16)(struct ks8851_net *ks,
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 8048770958d6..4afbb40bc0e4 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -28,25 +28,23 @@
/**
* ks8851_lock - register access lock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Claim chip register access lock
*/
-static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_lock(struct ks8851_net *ks)
{
- ks->lock(ks, flags);
+ ks->lock(ks);
}
/**
* ks8851_unlock - register access unlock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Release chip register access lock
*/
-static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_unlock(struct ks8851_net *ks)
{
- ks->unlock(ks, flags);
+ ks->unlock(ks);
}
/**
@@ -129,11 +127,10 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
static int ks8851_write_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
u16 val;
int i;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/*
* Wake up chip in case it was powered off when stopped; otherwise,
@@ -149,7 +146,7 @@ static int ks8851_write_mac_addr(struct net_device *dev)
if (!netif_running(dev))
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return 0;
}
@@ -163,12 +160,11 @@ static int ks8851_write_mac_addr(struct net_device *dev)
static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
u8 addr[ETH_ALEN];
u16 reg;
int i;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
for (i = 0; i < ETH_ALEN; i += 2) {
reg = ks8851_rdreg16(ks, KS_MAR(i));
@@ -177,7 +173,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
}
eth_hw_addr_set(dev, addr);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
}
/**
@@ -312,11 +308,10 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
struct sk_buff_head rxq;
- unsigned long flags;
unsigned int status;
struct sk_buff *skb;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
status = ks8851_rdreg16(ks, KS_ISR);
ks8851_wrreg16(ks, KS_ISR, status);
@@ -373,14 +368,17 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
}
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
- if (status & IRQ_RXI)
+ if (status & IRQ_RXI) {
+ local_bh_disable();
while ((skb = __skb_dequeue(&rxq)))
netif_rx(skb);
+ local_bh_enable();
+ }
return IRQ_HANDLED;
}
@@ -405,7 +403,6 @@ static void ks8851_flush_tx_work(struct ks8851_net *ks)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
int ret;
ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
@@ -418,7 +415,7 @@ static int ks8851_net_open(struct net_device *dev)
/* lock the card, even if we may not actually be doing anything
* else at the moment */
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
netif_dbg(ks, ifup, ks->netdev, "opening\n");
@@ -471,7 +468,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
mii_check_link(&ks->mii);
return 0;
}
@@ -487,23 +484,22 @@ static int ks8851_net_open(struct net_device *dev)
static int ks8851_net_stop(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
netif_info(ks, ifdown, dev, "shutting down\n");
netif_stop_queue(dev);
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/* turn off the IRQs and ack any outstanding */
ks8851_wrreg16(ks, KS_IER, 0x0000);
ks8851_wrreg16(ks, KS_ISR, 0xffff);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
/* stop any outstanding work */
ks8851_flush_tx_work(ks);
flush_work(&ks->rxctrl_work);
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/* shutdown RX process */
ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
@@ -512,7 +508,7 @@ static int ks8851_net_stop(struct net_device *dev)
/* set powermode to soft power down to save power */
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
/* ensure any queued tx buffers are dumped */
while (!skb_queue_empty(&ks->txq)) {
@@ -566,14 +562,13 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
static void ks8851_rxctrl_work(struct work_struct *work)
{
struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
- unsigned long flags;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
/* need to shutdown RXQ before modifying filter parameters */
ks8851_wrreg16(ks, KS_RXCR1, 0x00);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
}
static void ks8851_set_rx_mode(struct net_device *dev)
@@ -780,7 +775,6 @@ static int ks8851_set_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
- unsigned long flags;
int len = ee->len;
u16 tmp;
@@ -794,7 +788,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
ks8851_eeprom_claim(ks);
@@ -817,7 +811,7 @@ static int ks8851_set_eeprom(struct net_device *dev,
eeprom_93cx6_wren(&ks->eeprom, false);
ks8851_eeprom_release(ks);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return 0;
}
@@ -827,7 +821,6 @@ static int ks8851_get_eeprom(struct net_device *dev,
{
struct ks8851_net *ks = netdev_priv(dev);
int offset = ee->offset;
- unsigned long flags;
int len = ee->len;
/* must be 2 byte aligned */
@@ -837,7 +830,7 @@ static int ks8851_get_eeprom(struct net_device *dev,
if (!(ks->rc_ccr & CCR_EEPROM))
return -ENOENT;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
ks8851_eeprom_claim(ks);
@@ -845,7 +838,7 @@ static int ks8851_get_eeprom(struct net_device *dev,
eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
ks8851_eeprom_release(ks);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return 0;
}
@@ -904,7 +897,6 @@ static int ks8851_phy_reg(int reg)
static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
int result;
int ksreg;
@@ -912,9 +904,9 @@ static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
if (ksreg < 0)
return ksreg;
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
result = ks8851_rdreg16(ks, ksreg);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
return result;
}
@@ -949,14 +941,13 @@ static void ks8851_phy_write(struct net_device *dev,
int phy, int reg, int value)
{
struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
int ksreg;
ksreg = ks8851_phy_reg(reg);
if (ksreg >= 0) {
- ks8851_lock(ks, &flags);
+ ks8851_lock(ks);
ks8851_wrreg16(ks, ksreg, value);
- ks8851_unlock(ks, &flags);
+ ks8851_unlock(ks);
}
}
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 78695be2570b..9f1c33f6ddec 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -55,29 +55,27 @@ struct ks8851_net_par {
/**
* ks8851_lock_par - register access lock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Claim chip register access lock
*/
-static void ks8851_lock_par(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_lock_par(struct ks8851_net *ks)
{
struct ks8851_net_par *ksp = to_ks8851_par(ks);
- spin_lock_irqsave(&ksp->lock, *flags);
+ spin_lock_bh(&ksp->lock);
}
/**
* ks8851_unlock_par - register access unlock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Release chip register access lock
*/
-static void ks8851_unlock_par(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_unlock_par(struct ks8851_net *ks)
{
struct ks8851_net_par *ksp = to_ks8851_par(ks);
- spin_unlock_irqrestore(&ksp->lock, *flags);
+ spin_unlock_bh(&ksp->lock);
}
/**
@@ -233,7 +231,6 @@ static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
{
struct ks8851_net *ks = netdev_priv(dev);
netdev_tx_t ret = NETDEV_TX_OK;
- unsigned long flags;
unsigned int txqcr;
u16 txmir;
int err;
@@ -241,7 +238,7 @@ static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
netif_dbg(ks, tx_queued, ks->netdev,
"%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
- ks8851_lock_par(ks, &flags);
+ ks8851_lock_par(ks);
txmir = ks8851_rdreg16_par(ks, KS_TXMIR) & 0x1fff;
@@ -262,7 +259,7 @@ static netdev_tx_t ks8851_start_xmit_par(struct sk_buff *skb,
ret = NETDEV_TX_BUSY;
}
- ks8851_unlock_par(ks, &flags);
+ ks8851_unlock_par(ks);
return ret;
}
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index a161ae45743a..b9e68520278d 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -71,11 +71,10 @@ struct ks8851_net_spi {
/**
* ks8851_lock_spi - register access lock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Claim chip register access lock
*/
-static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_lock_spi(struct ks8851_net *ks)
{
struct ks8851_net_spi *kss = to_ks8851_spi(ks);
@@ -85,11 +84,10 @@ static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
/**
* ks8851_unlock_spi - register access unlock
* @ks: The chip state
- * @flags: Spinlock flags
*
* Release chip register access lock
*/
-static void ks8851_unlock_spi(struct ks8851_net *ks, unsigned long *flags)
+static void ks8851_unlock_spi(struct ks8851_net *ks)
{
struct ks8851_net_spi *kss = to_ks8851_spi(ks);
@@ -309,7 +307,6 @@ static void ks8851_tx_work(struct work_struct *work)
struct ks8851_net_spi *kss;
unsigned short tx_space;
struct ks8851_net *ks;
- unsigned long flags;
struct sk_buff *txb;
bool last;
@@ -317,7 +314,7 @@ static void ks8851_tx_work(struct work_struct *work)
ks = &kss->ks8851;
last = skb_queue_empty(&ks->txq);
- ks8851_lock_spi(ks, &flags);
+ ks8851_lock_spi(ks);
while (!last) {
txb = skb_dequeue(&ks->txq);
@@ -343,7 +340,7 @@ static void ks8851_tx_work(struct work_struct *work)
ks->tx_space = tx_space;
spin_unlock_bh(&ks->statelock);
- ks8851_unlock_spi(ks, &flags);
+ ks8851_unlock_spi(ks);
}
/**
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index fbec952aa436..a654b3699c4c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -3640,8 +3640,12 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
ac->gdma_dev = gd;
gd->driver_data = ac;
+
+ INIT_WORK(&ac->link_change_work, mana_link_state_handle);
}
+ INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+
err = mana_create_eq(ac);
if (err) {
dev_err(dev, "Failed to create EQs: %d\n", err);
@@ -3657,8 +3661,6 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (!resuming) {
ac->num_ports = num_ports;
-
- INIT_WORK(&ac->link_change_work, mana_link_state_handle);
} else {
if (ac->num_ports != num_ports) {
dev_err(dev, "The number of vPorts changed: %d->%d\n",
@@ -3687,10 +3689,9 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
- /* we log the port for which the probe failed and stop
- * probes for subsequent ports.
- * Note that we keep running ports, for which the probes
- * were successful, unless add_adev fails too
+ /* Log the port for which the probe failed, stop probing
+ * subsequent ports, and skip add_adev.
+ * mana_remove() will clean up already-probed ports.
*/
if (err) {
dev_err(dev, "Probe Failed for port %d\n", i);
@@ -3704,10 +3705,9 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
enable_work(&apc->queue_reset_work);
err = mana_attach(ac->ports[i]);
rtnl_unlock();
- /* we log the port for which the attach failed and stop
- * attach for subsequent ports
- * Note that we keep running ports, for which the attach
- * were successful, unless add_adev fails too
+ /* Log the port for which the attach failed, stop
+ * attaching subsequent ports, and skip add_adev.
+ * mana_remove() will clean up already-attached ports.
*/
if (err) {
dev_err(dev, "Attach Failed for port %d\n", i);
@@ -3716,9 +3716,9 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd, "eth");
+ if (!err)
+ err = add_adev(gd, "eth");
- INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
out:
@@ -3739,11 +3739,16 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct mana_port_context *apc;
- struct device *dev = gc->dev;
+ struct device *dev;
struct net_device *ndev;
int err;
int i;
+ if (!gc || !ac)
+ return;
+
+ dev = gc->dev;
+
disable_work_sync(&ac->link_change_work);
cancel_delayed_work_sync(&ac->gf_stats_work);
@@ -3756,7 +3761,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
if (!ndev) {
if (i == 0)
dev_err(dev, "No net device to remove\n");
- goto out;
+ break;
}
apc = netdev_priv(ndev);
@@ -3787,7 +3792,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
}
mana_destroy_eq(ac);
-out:
+
if (ac->per_port_queue_reset_wq) {
destroy_workqueue(ac->per_port_queue_reset_wq);
ac->per_port_queue_reset_wq = NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
index 79470f198a62..9cf19446657c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
@@ -435,12 +435,17 @@ static int nfp_encode_basic_qdr(u64 addr, int dest_island, int cpp_tgt,
/* Full Island ID and channel bits overlap? */
ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0);
- if (ret)
+ if (ret) {
+ pr_warn("%s: decode dest_island failed: %d\n", __func__, ret);
return ret;
+ }
/* The current address won't go where expected? */
- if (dest_island != -1 && dest_island != v)
+ if (dest_island != -1 && dest_island != v) {
+ pr_warn("%s: dest_island mismatch: current (%d) != decoded (%d)\n",
+ __func__, dest_island, v);
return -EINVAL;
+ }
/* If dest_island was -1, we don't care where it goes. */
return 0;
@@ -493,7 +498,7 @@ static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
* the address but we can verify if the existing
* contents will point to a valid island.
*/
- return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ return nfp_encode_basic_qdr(*addr, dest_island, cpp_tgt,
mode, addr40, isld1, isld0);
iid_lsb = addr40 ? 34 : 26;
@@ -504,7 +509,7 @@ static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
return 0;
case 1:
if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
- return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ return nfp_encode_basic_qdr(*addr, dest_island, cpp_tgt,
mode, addr40, isld1, isld0);
idx_lsb = addr40 ? 39 : 31;
@@ -530,7 +535,7 @@ static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
* be set before hand and with them select an island.
* So we need to confirm that it's at least plausible.
*/
- return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ return nfp_encode_basic_qdr(*addr, dest_island, cpp_tgt,
mode, addr40, isld1, isld0);
/* Make sure we compare against isldN values
@@ -551,7 +556,7 @@ static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
* iid<1> = addr<30> = channel<0>
* channel<1> = addr<31> = Index
*/
- return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+ return nfp_encode_basic_qdr(*addr, dest_island, cpp_tgt,
mode, addr40, isld1, isld0);
isld[0] &= ~3;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 01a983001ab4..ca68248dbc78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1410,8 +1410,6 @@ static int stmmac_phylink_setup(struct stmmac_priv *priv)
priv->tx_lpi_clk_stop = priv->plat->flags &
STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
- config->default_an_inband = priv->plat->default_an_inband;
-
/* Get the PHY interface modes (at the PHY end of the link) that
* are supported by the platform.
*/
@@ -1419,6 +1417,8 @@ static int stmmac_phylink_setup(struct stmmac_priv *priv)
priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
config->supported_interfaces);
+ config->default_an_inband = priv->plat->default_an_inband;
+
/* Set the platform/firmware specified interface mode if the
* supported interfaces have not already been provided using
* phy_interface as a last resort.
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index ec32a5f422f2..8b7c3753bb6a 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -864,7 +864,8 @@ static int txgbe_probe(struct pci_dev *pdev,
"0x%08x", etrack_id);
}
- if (etrack_id < 0x20010)
+ if (wx->mac.type == wx_mac_sp &&
+ ((etrack_id & 0xfffff) < 0x20010))
dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
err = txgbe_test_hostif(wx);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 70b9e58b9b78..5150f2e4f66b 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -2400,6 +2400,7 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
return -ENODEV;
}
+ local_bh_disable();
udp_tunnel_xmit_skb(rt, sk, skb_to_send,
fl4.saddr, fl4.daddr,
inet_dscp_to_dsfield(fl4.flowi4_dscp),
@@ -2409,6 +2410,7 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
!net_eq(sock_net(sk),
dev_net(gtp->dev)),
false, 0);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9f90c598649d..c40fa331836b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1689,6 +1689,7 @@ static size_t macvlan_get_size(const struct net_device *dev)
+ macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */
+ nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */
+ nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */
+ + nla_total_size(4) /* IFLA_MACVLAN_BC_CUTOFF */
);
}
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 516b0d05e16e..c71132f33f84 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -147,6 +147,7 @@ config MDIO_OCTEON
config MDIO_PIC64HPSC
tristate "PIC64-HPSC/HX MDIO interface support"
+ depends on ARCH_MICROCHIP || COMPILE_TEST
depends on HAS_IOMEM && OF_MDIO
help
This driver supports the MDIO interface found on the PIC64-HPSC/HX
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 3c9acd6e49e8..205384dab89a 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -497,6 +497,8 @@ static void trim_newline(char *s, size_t maxlen)
size_t len;
len = strnlen(s, maxlen);
+ if (!len)
+ return;
if (s[len - 1] == '\n')
s[len - 1] = '\0';
}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e1541ca76715..a05af192caf3 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -185,7 +185,9 @@ out_drop_cnt:
return NETDEV_TX_OK;
}
-static void nsim_set_rx_mode(struct net_device *dev)
+static void nsim_set_rx_mode(struct net_device *dev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
}
@@ -623,7 +625,7 @@ static const struct net_shaper_ops nsim_shaper_ops = {
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
- .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_rx_mode_async = nsim_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
@@ -648,7 +650,7 @@ static const struct net_device_ops nsim_netdev_ops = {
static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
- .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_rx_mode_async = nsim_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 7b56a7ad7a49..5e2eecc3165d 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -186,7 +186,9 @@ static int netkit_get_iflink(const struct net_device *dev)
return iflink;
}
-static void netkit_set_multicast(struct net_device *dev)
+static void netkit_set_multicast(struct net_device *dev,
+ struct netdev_hw_addr_list *uc,
+ struct netdev_hw_addr_list *mc)
{
/* Nothing to do, we receive whatever gets pushed to us! */
}
@@ -330,7 +332,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_open = netkit_open,
.ndo_stop = netkit_close,
.ndo_start_xmit = netkit_xmit,
- .ndo_set_rx_mode = netkit_set_multicast,
+ .ndo_set_rx_mode_async = netkit_set_multicast,
.ndo_set_rx_headroom = netkit_set_headroom,
.ndo_set_mac_address = netkit_set_macaddr,
.ndo_get_iflink = netkit_get_iflink,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index b0d3bc49c685..57c68efa5ff8 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2245,7 +2245,7 @@ ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
*/
static void __ppp_decompress_proto(struct sk_buff *skb)
{
- if (skb->data[0] & 0x01)
+ if (ppp_skb_is_compressed_proto(skb))
*(u8 *)skb_push(skb, 1) = 0x00;
}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index d546a7af0d54..bdd61c504a1c 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -393,7 +393,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb_mac_header_len(skb) < ETH_HLEN)
goto drop;
- if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
goto drop;
ph = pppoe_hdr(skb);
@@ -403,6 +403,12 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->len < len)
goto drop;
+ /* skb->data points to the PPP protocol header after skb_pull_rcsum.
+ * Drop PFC frames.
+ */
+ if (ppp_skb_is_compressed_proto(skb))
+ goto drop;
+
if (pskb_trim_rcsum(skb, len))
goto drop;
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index f6b94ac7a68a..87aa4f4e9724 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -1170,6 +1170,7 @@ struct pse_irq {
struct pse_controller_dev *pcdev;
struct pse_irq_desc desc;
unsigned long *notifs;
+ unsigned long *notifs_mask;
};
/**
@@ -1247,7 +1248,6 @@ static int pse_set_config_isr(struct pse_controller_dev *pcdev, int id,
static irqreturn_t pse_isr(int irq, void *data)
{
struct pse_controller_dev *pcdev;
- unsigned long notifs_mask = 0;
struct pse_irq_desc *desc;
struct pse_irq *h = data;
int ret, i;
@@ -1257,14 +1257,15 @@ static irqreturn_t pse_isr(int irq, void *data)
/* Clear notifs mask */
memset(h->notifs, 0, pcdev->nr_lines * sizeof(*h->notifs));
+ bitmap_zero(h->notifs_mask, pcdev->nr_lines);
mutex_lock(&pcdev->lock);
- ret = desc->map_event(irq, pcdev, h->notifs, &notifs_mask);
- if (ret || !notifs_mask) {
+ ret = desc->map_event(irq, pcdev, h->notifs, h->notifs_mask);
+ if (ret || bitmap_empty(h->notifs_mask, pcdev->nr_lines)) {
mutex_unlock(&pcdev->lock);
return IRQ_NONE;
}
- for_each_set_bit(i, &notifs_mask, pcdev->nr_lines) {
+ for_each_set_bit(i, h->notifs_mask, pcdev->nr_lines) {
unsigned long notifs, rnotifs;
struct pse_ntf ntf = {};
@@ -1340,6 +1341,10 @@ int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
if (!h->notifs)
return -ENOMEM;
+ h->notifs_mask = devm_bitmap_zalloc(dev, pcdev->nr_lines, GFP_KERNEL);
+ if (!h->notifs_mask)
+ return -ENOMEM;
+
ret = devm_request_threaded_irq(dev, irq, NULL, pse_isr,
IRQF_ONESHOT | irq_flags,
irq_name, h);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index e3c785da3eef..1a9b27d5e256 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -80,9 +80,9 @@
#include <linux/unaligned.h>
static unsigned char *encode(unsigned char *cp, unsigned short n);
-static long decode(unsigned char **cpp);
+static long decode(unsigned char **cpp, const unsigned char *end);
static unsigned char * put16(unsigned char *cp, unsigned short x);
-static unsigned short pull16(unsigned char **cpp);
+static long pull16(unsigned char **cpp, const unsigned char *end);
/* Allocate compression data structure
* slots must be in range 0 to 255 (zero meaning no compression)
@@ -190,30 +190,34 @@ encode(unsigned char *cp, unsigned short n)
return cp;
}
-/* Pull a 16-bit integer in host order from buffer in network byte order */
-static unsigned short
-pull16(unsigned char **cpp)
+/* Pull a 16-bit integer in host order from buffer in network byte order.
+ * Returns -1 if the buffer is exhausted, otherwise the 16-bit value.
+ */
+static long
+pull16(unsigned char **cpp, const unsigned char *end)
{
- short rval;
+ long rval;
+ if (*cpp + 2 > end)
+ return -1;
rval = *(*cpp)++;
rval <<= 8;
rval |= *(*cpp)++;
return rval;
}
-/* Decode a number */
+/* Decode a number. Returns -1 if the buffer is exhausted. */
static long
-decode(unsigned char **cpp)
+decode(unsigned char **cpp, const unsigned char *end)
{
int x;
+ if (*cpp >= end)
+ return -1;
x = *(*cpp)++;
- if(x == 0){
- return pull16(cpp) & 0xffff; /* pull16 returns -1 on error */
- } else {
- return x & 0xff; /* -1 if PULLCHAR returned error */
- }
+ if (x == 0)
+ return pull16(cpp, end);
+ return x & 0xff;
}
/*
@@ -499,6 +503,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
struct cstate *cs;
int len, hdrlen;
unsigned char *cp = icp;
+ const unsigned char *end = icp + isize;
/* We've got a compressed packet; read the change byte */
comp->sls_i_compressed++;
@@ -506,6 +511,8 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
comp->sls_i_error++;
return 0;
}
+ if (!comp->rstate)
+ goto bad;
changes = *cp++;
if(changes & NEW_C){
/* Make sure the state index is in range, then grab the state.
@@ -534,6 +541,8 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
thp = &cs->cs_tcp;
ip = &cs->cs_ip;
+ if (cp + 2 > end)
+ goto bad;
thp->check = *(__sum16 *)cp;
cp += 2;
@@ -564,26 +573,26 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
default:
if(changes & NEW_U){
thp->urg = 1;
- if((x = decode(&cp)) == -1) {
+ if((x = decode(&cp, end)) == -1) {
goto bad;
}
thp->urg_ptr = htons(x);
} else
thp->urg = 0;
if(changes & NEW_W){
- if((x = decode(&cp)) == -1) {
+ if((x = decode(&cp, end)) == -1) {
goto bad;
}
thp->window = htons( ntohs(thp->window) + x);
}
if(changes & NEW_A){
- if((x = decode(&cp)) == -1) {
+ if((x = decode(&cp, end)) == -1) {
goto bad;
}
thp->ack_seq = htonl( ntohl(thp->ack_seq) + x);
}
if(changes & NEW_S){
- if((x = decode(&cp)) == -1) {
+ if((x = decode(&cp, end)) == -1) {
goto bad;
}
thp->seq = htonl( ntohl(thp->seq) + x);
@@ -591,7 +600,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
break;
}
if(changes & NEW_I){
- if((x = decode(&cp)) == -1) {
+ if((x = decode(&cp, end)) == -1) {
goto bad;
}
ip->id = htons (ntohs (ip->id) + x);
@@ -649,6 +658,10 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
struct cstate *cs;
unsigned int ihl;
+ if (!comp->rstate) {
+ comp->sls_i_error++;
+ return slhc_toss(comp);
+ }
/* The packet is shorter than a legal IP header.
* Also make sure isize is positive.
*/
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bfb566fecb92..f4adcfee7a80 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3759,6 +3759,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
queue_pairs);
return -EINVAL;
}
+
+ /* Keep max_tx_vq in sync so that a later RSS command does not
+ * revert queue_pairs to a stale value.
+ */
+ if (vi->has_rss)
+ vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
succ:
vi->curr_queue_pairs = queue_pairs;
if (dev->flags & IFF_UP) {