author		Eric Dumazet <edumazet@google.com>	2026-01-18 12:32:04 +0000
committer	Jakub Kicinski <kuba@kernel.org>	2026-01-20 19:03:09 -0800
commit		670ade3bfae6f8a06213b65be3435b445c11c36d (patch)
tree		0e432025306c0c9df693fcc1d27194039022fd39
parent		2d265e2fe6f2d6370003db99c96eaff2dd1b7e43 (diff)
tcp: move tcp_rate_skb_delivered() to tcp_input.c
tcp_rate_skb_delivered() is only called from tcp_input.c.

Move it there and make it static.

Both gcc and clang are (auto)inlining it, TCP performance is
increased at a small space cost.

$ scripts/bloat-o-meter -t vmlinux.old vmlinux.new
add/remove: 0/2 grow/shrink: 3/0 up/down: 509/-187 (322)
Function                                     old     new   delta
tcp_sacktag_walk                            1682    1867    +185
tcp_ack                                     5230    5405    +175
tcp_shifted_skb                              437     586    +149
__pfx_tcp_rate_skb_delivered                  16       -     -16
tcp_rate_skb_delivered                       171       -    -171
Total: Before=22566192, After=22566514, chg +0.00%

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Link: https://patch.msgid.link/20260118123204.2315993-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--	include/net/tcp.h	 2
-rw-r--r--	net/ipv4/tcp_input.c	44
-rw-r--r--	net/ipv4/tcp_rate.c	44
3 files changed, 44 insertions, 46 deletions
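
Why the move pays off: once tcp_rate_skb_delivered() is static with every caller in the same translation unit, gcc and clang are free to inline it and drop the out-of-line copy, which is what the bloat-o-meter table above shows: tcp_rate_skb_delivered and its __pfx_ prefix symbol vanish while the three call sites grow. A minimal userspace sketch of the same effect, not taken from this patch:

/* Minimal sketch (not kernel code): with a single in-file caller, a
 * static function can be folded into that caller at -O2, while an
 * extern function must also keep an out-of-line copy for other files.
 */
static int twice_plus_one(int x)
{
        return 2 * x + 1;               /* typically inlined into caller() */
}

int caller(int x)
{
        return twice_plus_one(x);       /* usually no call instruction remains */
}

Compiling this with gcc -O2 -S typically shows caller() computing the result directly, with no call to twice_plus_one() and no standalone copy of it.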
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 15f9b20f851f..25143f156957 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1356,8 +1356,6 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
 
 /* From tcp_rate.c */
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-                            struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                   bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 198f8a0d37be..dc8e256321b0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1637,6 +1637,50 @@ static u8 tcp_sacktag_one(struct sock *sk,
 	return sacked;
 }
 
+/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
+ * delivery information when the skb was last transmitted.
+ *
+ * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
+ * called multiple times. We favor the information from the most recently
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
+ */
+static void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+                                   struct rate_sample *rs)
+{
+        struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+        struct tcp_sock *tp = tcp_sk(sk);
+        u64 tx_tstamp;
+
+        if (!scb->tx.delivered_mstamp)
+                return;
+
+        tx_tstamp = tcp_skb_timestamp_us(skb);
+        if (!rs->prior_delivered ||
+            tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+                               scb->end_seq, rs->last_end_seq)) {
+                rs->prior_delivered_ce = scb->tx.delivered_ce;
+                rs->prior_delivered = scb->tx.delivered;
+                rs->prior_mstamp = scb->tx.delivered_mstamp;
+                rs->is_app_limited = scb->tx.is_app_limited;
+                rs->is_retrans = scb->sacked & TCPCB_RETRANS;
+                rs->last_end_seq = scb->end_seq;
+
+                /* Record send time of most recently ACKed packet: */
+                tp->first_tx_mstamp = tx_tstamp;
+                /* Find the duration of the "send phase" of this window: */
+                rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+                                                     scb->tx.first_tx_mstamp);
+
+        }
+        /* Mark off the skb delivered once it's sacked to avoid being
+         * used again when it's cumulatively acked. For acked packets
+         * we don't need to reset since it'll be freed soon.
+         */
+        if (scb->sacked & TCPCB_SACKED_ACKED)
+                scb->tx.delivered_mstamp = 0;
+}
+
 /* Shift newly-SACKed bytes from this skb to the immediately previous
  * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
  */
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 98eb346f986e..f0f2ef377043 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -34,50 +34,6 @@
  * ready to send in the write queue.
  */
 
-/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
- * delivery information when the skb was last transmitted.
- *
- * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
- * called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the most recently sent time and the highest
- * sequence.
- */
-void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
-                            struct rate_sample *rs)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-        u64 tx_tstamp;
-
-        if (!scb->tx.delivered_mstamp)
-                return;
-
-        tx_tstamp = tcp_skb_timestamp_us(skb);
-        if (!rs->prior_delivered ||
-            tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
-                               scb->end_seq, rs->last_end_seq)) {
-                rs->prior_delivered_ce = scb->tx.delivered_ce;
-                rs->prior_delivered = scb->tx.delivered;
-                rs->prior_mstamp = scb->tx.delivered_mstamp;
-                rs->is_app_limited = scb->tx.is_app_limited;
-                rs->is_retrans = scb->sacked & TCPCB_RETRANS;
-                rs->last_end_seq = scb->end_seq;
-
-                /* Record send time of most recently ACKed packet: */
-                tp->first_tx_mstamp = tx_tstamp;
-                /* Find the duration of the "send phase" of this window: */
-                rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
-                                                     scb->tx.first_tx_mstamp);
-
-        }
-        /* Mark off the skb delivered once it's sacked to avoid being
-         * used again when it's cumulatively acked. For acked packets
-         * we don't need to reset since it'll be freed soon.
-         */
-        if (scb->sacked & TCPCB_SACKED_ACKED)
-                scb->tx.delivered_mstamp = 0;
-}
-
 /* Update the connection delivery information and generate a rate sample. */
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                   bool is_sack_reneg, struct rate_sample *rs)
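
For readers tracing the moved function's comment about stretched ACKs: when a single ACK (s)acks several skbs, tcp_rate_skb_delivered() runs once per skb, and only a more recently sent skb may overwrite the sample taken so far. Below is a toy userspace model of that selection rule, with hypothetical names and simplified types; the real code compares against tp->first_tx_mstamp rather than a field stored in the sample:

/* Toy model (not kernel code) of the "favor the most recently sent
 * skb" rule in tcp_rate_skb_delivered().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_skb {
        uint64_t tx_tstamp;     /* send time of the skb, in usec */
        uint32_t end_seq;       /* sequence number after the last byte */
        uint32_t delivered;     /* delivered count recorded at send time */
};

struct toy_sample {
        uint32_t prior_delivered;       /* 0 means "not filled in yet" */
        uint32_t last_end_seq;
        uint64_t last_tx_tstamp;        /* stand-in for tp->first_tx_mstamp */
};

/* Same shape as the kernel's tcp_skb_sent_after(): the later send time
 * wins, ties broken by the higher (wrap-safe) sequence number.
 */
static bool sent_after(uint64_t t1, uint64_t t2, uint32_t seq1, uint32_t seq2)
{
        return t1 > t2 || (t1 == t2 && (int32_t)(seq1 - seq2) > 0);
}

/* Called once per (s)acked skb of one ACK; only a newer skb overwrites
 * the delivery snapshot already taken.
 */
static void toy_skb_delivered(struct toy_sample *rs, const struct toy_skb *skb)
{
        if (!rs->prior_delivered ||
            sent_after(skb->tx_tstamp, rs->last_tx_tstamp,
                       skb->end_seq, rs->last_end_seq)) {
                rs->prior_delivered = skb->delivered;
                rs->last_end_seq = skb->end_seq;
                rs->last_tx_tstamp = skb->tx_tstamp;
        }
}

int main(void)
{
        /* One stretched ACK covering two skbs, processed oldest first. */
        struct toy_skb a = { .tx_tstamp = 100, .end_seq = 1000, .delivered = 5 };
        struct toy_skb b = { .tx_tstamp = 200, .end_seq = 2000, .delivered = 7 };
        struct toy_sample rs = { 0 };

        toy_skb_delivered(&rs, &a);
        toy_skb_delivered(&rs, &b);
        /* Prints 7: the sample comes from the most recently sent skb. */
        printf("prior_delivered=%u\n", (unsigned)rs.prior_delivered);
        return 0;
}

Feeding the two skbs in either order yields the same winner, since each call only overwrites the sample when the new skb was sent later.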