author	Sean Anderson <sean.anderson@linux.dev>	2025-02-20 11:42:57 -0500
committer	Jakub Kicinski <kuba@kernel.org>	2025-02-21 16:39:08 -0800
commit	e6a532185daae9302bf2f358c8675733093b906e (patch)
tree	1012fbc785e188d8cfdd0074ca98302198f82a0f /drivers/net/ethernet/cadence/macb_main.c
parent	3e401818c81bb9f8438d1c2a803471f441981329 (diff)
net: cadence: macb: Implement BQL
Implement byte queue limits to allow queuing disciplines to account for packets enqueued in the ring buffer but not yet transmitted. There is a separate set of transmit functions for AT91 that I haven't touched, since I don't have hardware to test on.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Link: https://patch.msgid.link/20250220164257.96859-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
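For context, byte queue limits (BQL) hook into a driver at three points: netdev_tx_sent_queue() records bytes posted to the hardware ring, netdev_tx_completed_queue() records bytes reclaimed at completion time (and can re-wake a BQL-throttled queue), and netdev_tx_reset_queue() clears the accounting when a ring is torn down. The sketch below shows the general pattern only; it is not the macb code, and the example_* helpers and the elided ring handling are hypothetical.

#include <linux/netdevice.h>

/* Transmit path: after the descriptor is handed to hardware, record the
 * bytes now in flight on this queue so BQL can throttle the qdisc.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 q = skb_get_queue_mapping(skb);

	/* ... map the skb and write the TX descriptor (hypothetical) ... */
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, q), skb->len);
	/* ... ring the doorbell ... */
	return NETDEV_TX_OK;
}

/* Completion path (NAPI poll or error recovery): accumulate packets and
 * bytes while walking the ring, then report them in a single call.
 */
static void example_tx_complete(struct net_device *dev, u16 q,
				unsigned int packets, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, q), packets, bytes);
}

/* Teardown: descriptors still in the ring will never be completed, so
 * reset BQL's sent/completed counters before the queue is reused.
 */
static void example_queue_down(struct net_device *dev, u16 q)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}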
Diffstat (limited to 'drivers/net/ethernet/cadence/macb_main.c')
-rw-r--r--	drivers/net/ethernet/cadence/macb_main.c	20
1 file changed, 18 insertions, 2 deletions
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2112a9701e05..5345f3e1a795 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1079,15 +1079,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1140,8 +1143,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1158,6 +1163,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1228,6 +1236,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
@@ -1269,6 +1278,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1283,6 +1293,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
@@ -2384,6 +2397,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -3019,6 +3034,7 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
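A note on the macb_close() hunk above: BQL tracks bytes posted against bytes completed per TX queue and only re-wakes a throttled queue once enough completions arrive. Frames still sitting in the rings when the interface goes down are never reported via netdev_tx_completed_queue(), so without the netdev_tx_reset_queue() call the stale in-flight count could keep the queue throttled after the device is reopened.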