| author | Eric Dumazet <edumazet@google.com> | 2026-01-14 16:51:09 +0000 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2026-01-17 15:43:16 -0800 |
| commit | f10ab9d3a7ea06ef7d6aa2d3c527f8cf728bbdee | |
| tree | 5f33df23e5bcd4269c76bac0d17aff0a22df0992 /net | |
| parent | 799759e610d32de37f65cb109f2801b5f099b13d | |
tcp: move tcp_rate_skb_sent() to tcp_output.c
It is only called from __tcp_transmit_skb() and __tcp_retransmit_skb().
Move it to tcp_output.c and make it static.
The clang compiler is now able to inline it into __tcp_transmit_skb().
The gcc compiler inlines it in both callers, which is also fine.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Link: https://patch.msgid.link/20260114165109.1747722-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
| -rw-r--r-- | net/ipv4/tcp_output.c | 35 |
|---|---|---|
| -rw-r--r-- | net/ipv4/tcp_rate.c | 35 |

2 files changed, 35 insertions, 35 deletions
```diff
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 479afb714bdf..256b669e8d3b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1432,6 +1432,41 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 }
 
+/* Snapshot the current delivery information in the skb, to generate
+ * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
+ */
+static void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* In general we need to start delivery rate samples from the
+	 * time we received the most recent ACK, to ensure we include
+	 * the full time the network needs to deliver all in-flight
+	 * packets. If there are no packets in flight yet, then we
+	 * know that any ACKs after now indicate that the network was
+	 * able to deliver those packets completely in the sampling
+	 * interval between now and the next ACK.
+	 *
+	 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
+	 * because the latter is a guess based on RTO and loss-marking
+	 * heuristics. We don't want spurious RTOs or loss markings to cause
+	 * a spuriously small time interval, causing a spuriously high
+	 * bandwidth estimate.
+	 */
+	if (!tp->packets_out) {
+		u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+		tp->first_tx_mstamp = tstamp_us;
+		tp->delivered_mstamp = tstamp_us;
+	}
+
+	TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
+	TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
+	TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
+	TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
+	TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
+}
+
 INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
 INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
 INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index a8f6d9d06f2e..98eb346f986e 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -34,41 +34,6 @@
  * ready to send in the write queue.
  */
 
-/* Snapshot the current delivery information in the skb, to generate
- * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
- */
-void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	/* In general we need to start delivery rate samples from the
-	 * time we received the most recent ACK, to ensure we include
-	 * the full time the network needs to deliver all in-flight
-	 * packets. If there are no packets in flight yet, then we
-	 * know that any ACKs after now indicate that the network was
-	 * able to deliver those packets completely in the sampling
-	 * interval between now and the next ACK.
-	 *
-	 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
-	 * because the latter is a guess based on RTO and loss-marking
-	 * heuristics. We don't want spurious RTOs or loss markings to cause
-	 * a spuriously small time interval, causing a spuriously high
-	 * bandwidth estimate.
-	 */
-	if (!tp->packets_out) {
-		u64 tstamp_us = tcp_skb_timestamp_us(skb);
-
-		tp->first_tx_mstamp = tstamp_us;
-		tp->delivered_mstamp = tstamp_us;
-	}
-
-	TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
-	TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
-	TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
-	TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
-	TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
-}
-
 /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
  * delivery information when the skb was last transmitted.
  *
```
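For readers less familiar with why the move enables inlining: without link-time optimization, gcc and clang can generally only inline a function whose definition is visible in the same translation unit, and a `static` definition with one or two local callers is a prime candidate. The sketch below is a standalone illustration of that pattern, not kernel code; `square_accumulate()` and `caller()` are hypothetical names invented for this example.

```c
/* Minimal sketch of the "static + local caller => inlineable" pattern.
 * Names are illustrative only; they do not appear in the patch above.
 */
#include <stdio.h>

/* Internal linkage: only this file can call it, so the compiler is free
 * to inline the body into caller() and drop the out-of-line copy entirely
 * (typically at -O2 with gcc or clang).
 */
static long square_accumulate(long acc, long v)
{
	return acc + v * v;
}

long caller(const long *vals, int n)
{
	long acc = 0;

	for (int i = 0; i < n; i++)
		acc = square_accumulate(acc, vals[i]);
	return acc;
}

int main(void)
{
	long vals[] = { 1, 2, 3, 4 };

	printf("%ld\n", caller(vals, 4)); /* prints 30 */
	return 0;
}
```

Inspecting the generated assembly (e.g. `gcc -O2 -S`) typically shows no standalone `square_accumulate` symbol because its body has been folded into `caller()`, which is the effect the patch obtains for tcp_rate_skb_sent() once it lives in the same translation unit as __tcp_transmit_skb() and __tcp_retransmit_skb().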
