From d261f5b09c28850dc63ca1d3018596f829f402d5 Mon Sep 17 00:00:00 2001
From: Mohammad Heib
Date: Fri, 31 Oct 2025 17:52:02 +0200
Subject: net: ionic: add dma_wmb() before ringing TX doorbell

The TX path currently writes descriptors and then immediately writes to
the MMIO doorbell register to notify the NIC. On weakly ordered
architectures, descriptor writes may still be pending in CPU or DMA
write buffers when the doorbell is issued, leading to the device
fetching stale or incomplete descriptors.

Add a dma_wmb() in ionic_txq_post() to ensure all descriptor writes are
visible to the device before the doorbell MMIO write.

Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling")
Signed-off-by: Mohammad Heib
Link: https://patch.msgid.link/20251031155203.203031-1-mheib@redhat.com
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/net/ethernet/pensando/ionic/ionic_txrx.c')

diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index d10b58ebf603..2e571d0a0d8a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -29,6 +29,10 @@ static void ionic_tx_clean(struct ionic_queue *q,
 
 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
 {
+	/* Ensure TX descriptor writes reach memory before NIC reads them.
+	 * Prevents device from fetching stale descriptors.
+	 */
+	dma_wmb();
 	ionic_q_post(q, ring_dbell);
 }
--
cgit v1.2.3


From de0337d641bfa5b6d6b489e479792f1039274e84 Mon Sep 17 00:00:00 2001
From: Mohammad Heib
Date: Fri, 31 Oct 2025 17:52:03 +0200
Subject: net: ionic: map SKB after pseudo-header checksum prep

The TSO path called ionic_tx_map_skb() before preparing the TCP pseudo
checksum (ionic_tx_tcp_[inner_]pseudo_csum()), which may perform
skb_cow_head() and modify bytes in the linear header area.

Mapping first and then mutating the header risks:

  - using a stale DMA address if skb_cow_head() relocates the head,
    and/or
  - the device reading stale header bytes on weakly-ordered systems
    (CPU writes after mapping are not guaranteed visible without an
    explicit dma_sync_single_for_device()).

Reorder the TX path to perform all header mutations (including
skb_cow_head()) *before* DMA mapping. Mapping is now done only after
the skb layout and header contents are final. This removes the need
for any post-mapping dma_sync and prevents the on-wire corruption
observed under VLAN+TSO load after repeated runs.

This change is purely an ordering fix; there is no functional behavior
change otherwise.
Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling")
Signed-off-by: Mohammad Heib
Reviewed-by: Brett Creeley
Link: https://patch.msgid.link/20251031155203.203031-2-mheib@redhat.com
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 30 ++++++++++--------------
 1 file changed, 13 insertions(+), 17 deletions(-)

(limited to 'drivers/net/ethernet/pensando/ionic/ionic_txrx.c')

diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2e571d0a0d8a..301ebee2fdc5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -1448,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 	bool encap;
 	int err;
 
-	desc_info = &q->tx_info[q->head_idx];
-
-	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
-		return -EIO;
-
-	len = skb->len;
-	mss = skb_shinfo(skb)->gso_size;
-	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
-						   SKB_GSO_GRE_CSUM |
-						   SKB_GSO_IPXIP4 |
-						   SKB_GSO_IPXIP6 |
-						   SKB_GSO_UDP_TUNNEL |
-						   SKB_GSO_UDP_TUNNEL_CSUM));
 	has_vlan = !!skb_vlan_tag_present(skb);
 	vlan_tci = skb_vlan_tag_get(skb);
 	encap = skb->encapsulation;
@@ -1474,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 		err = ionic_tx_tcp_inner_pseudo_csum(skb);
 	else
 		err = ionic_tx_tcp_pseudo_csum(skb);
-	if (unlikely(err)) {
-		/* clean up mapping from ionic_tx_map_skb */
-		ionic_tx_desc_unmap_bufs(q, desc_info);
+	if (unlikely(err))
 		return err;
-	}
 
+	desc_info = &q->tx_info[q->head_idx];
+	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+		return -EIO;
+
+	len = skb->len;
+	mss = skb_shinfo(skb)->gso_size;
+	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+						   SKB_GSO_GRE_CSUM |
+						   SKB_GSO_IPXIP4 |
+						   SKB_GSO_IPXIP6 |
+						   SKB_GSO_UDP_TUNNEL |
+						   SKB_GSO_UDP_TUNNEL_CSUM));
 	if (encap)
 		hdrlen = skb_inner_tcp_all_headers(skb);
 	else
--
cgit v1.2.3
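
As an illustrative aside, not part of either patch above: a minimal sketch of the
"write descriptors, then dma_wmb(), then ring the doorbell" ordering that the
first patch applies inside ionic_txq_post(). The struct fake_desc, FAKE_DESC_VALID,
and fake_ring_post() names are invented for illustration; only dma_wmb(), writel(),
and the cpu_to_le*() helpers are real kernel APIs.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define FAKE_DESC_VALID		BIT(0)	/* hypothetical "descriptor ready" flag */

struct fake_desc {			/* hypothetical TX descriptor layout */
	__le64 addr;
	__le16 len;
	__le16 flags;
};

static void fake_ring_post(struct fake_desc *desc, dma_addr_t buf, u16 len,
			   void __iomem *db)
{
	/* 1) Fill the descriptor, which lives in coherent DMA memory. */
	desc->addr  = cpu_to_le64(buf);
	desc->len   = cpu_to_le16(len);
	desc->flags = cpu_to_le16(FAKE_DESC_VALID);

	/*
	 * 2) dma_wmb() orders the descriptor stores above against the
	 *    doorbell write below, so the device can never observe the
	 *    doorbell before the descriptor contents are visible.
	 */
	dma_wmb();

	/* 3) MMIO doorbell: tell the device new work is queued. */
	writel(1, db);
}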
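
A second, equally hypothetical sketch shows the ordering rule the second patch
enforces: finish every header mutation (anything that may call skb_cow_head() and
so reallocate skb->head) before mapping the skb for DMA. Only struct sk_buff and
skb_cow_head() are real kernel interfaces here; struct fake_queue and the fake_*
helpers stand in for driver-specific code such as ionic's.

#include <linux/skbuff.h>
#include <linux/errno.h>

struct fake_queue;					/* hypothetical TX queue */

int fake_prepare_tso_headers(struct sk_buff *skb);	/* may call skb_cow_head() */
int fake_map_skb(struct fake_queue *q, struct sk_buff *skb);
void fake_post_descriptors(struct fake_queue *q);

static int fake_xmit_tso(struct fake_queue *q, struct sk_buff *skb)
{
	int err;

	/*
	 * 1) Mutate headers first: skb_cow_head() inside the prep helper
	 *    may reallocate skb->head, which would invalidate any DMA
	 *    address obtained before this point.
	 */
	err = fake_prepare_tso_headers(skb);
	if (err)
		return err;

	/* 2) Map only after the skb layout and header bytes are final. */
	err = fake_map_skb(q, skb);
	if (err)
		return -EIO;

	/* 3) Build descriptors and notify the device. */
	fake_post_descriptors(q);
	return 0;
}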