author     Eric Dumazet <edumazet@google.com>   2026-04-16 20:03:08 +0000
committer  Jakub Kicinski <kuba@kernel.org>     2026-04-18 11:10:12 -0700
commit     829ba1f329cb7cbd56d599a6d225997fba66dc32 (patch)
tree       3e202c664895cc35b2df8853795d9ac67dd18239 /net
parent     21e92a38cfd891538598ba8f805e0165a820d532 (diff)
tcp: add data-race annotations around tp->reordering, tp->snd_cwnd
tcp_get_timestamping_opt_stats() intentionally runs lockless, so we must add READ_ONCE(), WRITE_ONCE() and data_race() annotations to keep KCSAN happy.

Fixes: bb7c19f96012 ("tcp: add related fields into SCM_TIMESTAMPING_OPT_STATS")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260416200319.3608680-4-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
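For context, here is a minimal userspace sketch of the pairing this patch establishes (not kernel code: struct tp_like, set_reordering() and get_reordering() are hypothetical stand-ins, and the once-access macros are approximated with volatile casts). The locked writer publishes with WRITE_ONCE() and the lockless stats reader observes with READ_ONCE(), so KCSAN can see the race is handled:

#include <stdint.h>

/* Userspace approximations of the kernel's once-access macros. */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

struct tp_like {
	uint32_t reordering;
	uint32_t snd_cwnd;
};

/* Writer side: in the kernel this path runs with the socket lock held. */
static void set_reordering(struct tp_like *tp, uint32_t val, uint32_t max)
{
	WRITE_ONCE(tp->reordering, val < max ? val : max);
}

/* Reader side: lockless, as in tcp_get_timestamping_opt_stats(). */
static uint32_t get_reordering(const struct tp_like *tp)
{
	return READ_ONCE(tp->reordering);
}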
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp.c          8
-rw-r--r--  net/ipv4/tcp_input.c   14
-rw-r--r--  net/ipv4/tcp_metrics.c  2
3 files changed, 13 insertions, 11 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e39e0734d958..24ba80d244b1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4445,13 +4445,13 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
rate64 = tcp_compute_delivery_rate(tp);
nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
- nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
- nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
- nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
+ nla_put_u32(stats, TCP_NLA_SND_CWND, READ_ONCE(tp->snd_cwnd));
+ nla_put_u32(stats, TCP_NLA_REORDERING, READ_ONCE(tp->reordering));
+ nla_put_u32(stats, TCP_NLA_MIN_RTT, data_race(tcp_min_rtt(tp)));
nla_put_u8(stats, TCP_NLA_RECUR_RETRANS,
READ_ONCE(inet_csk(sk)->icsk_retransmits));
- nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
+ nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited));
nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
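Why data_race() for TCP_NLA_MIN_RTT and the app-limited flag instead of READ_ONCE()? My reading, not spelled out in the changelog: tcp_min_rtt() performs its load inside a helper, so annotating every access path would be invasive, and both values feed a one-off diagnostic snapshot where a stale or torn value is harmless. data_race() simply tells KCSAN the race is intentional. A stand-in sketch, under the same userspace assumptions as above (minmax_like and both helpers are hypothetical):

#include <stdint.h>

/* Userspace stand-in: the kernel's data_race() evaluates the expression
 * while informing KCSAN that any race on this access is intentional and
 * benign; here it is a plain pass-through.
 */
#define data_race(expr) (expr)

struct minmax_like {
	uint32_t v;
};

/* Plain load inside a helper, analogous to tcp_min_rtt(). */
static uint32_t min_rtt_like(const struct minmax_like *m)
{
	return m->v;
}

/* Annotate at the call site rather than inside the helper. */
static uint32_t snapshot_min_rtt(const struct minmax_like *m)
{
	return data_race(min_rtt_like(m));
}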
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 021f745747c5..6bb6bf049a35 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1293,8 +1293,9 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
tp->sacked_out,
tp->undo_marker ? tp->undo_retrans : 0);
#endif
- tp->reordering = min_t(u32, (metric + mss - 1) / mss,
- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
+ WRITE_ONCE(tp->reordering,
+ min_t(u32, (metric + mss - 1) / mss,
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)));
}
/* This exciting event is worth to be remembered. 8) */
@@ -2439,8 +2440,9 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
if (!tcp_limit_reno_sacked(tp))
return;
- tp->reordering = min_t(u32, tp->packets_out + addend,
- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
+ WRITE_ONCE(tp->reordering,
+ min_t(u32, tp->packets_out + addend,
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)));
tp->reord_seen++;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
}
@@ -2579,8 +2581,8 @@ void tcp_enter_loss(struct sock *sk)
reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
tp->sacked_out >= reordering)
- tp->reordering = min_t(unsigned int, tp->reordering,
- reordering);
+ WRITE_ONCE(tp->reordering,
+ min_t(unsigned int, tp->reordering, reordering));
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
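The tcp_input.c hunks above share one writer-side shape: read the concurrently-updatable sysctl limit once with READ_ONCE(), compute, then publish the result with a single WRITE_ONCE() store. A hedged sketch reusing the stand-ins from the first example (clamp_reordering() and the sysctl_max parameter are hypothetical):

/* Round the reordering metric up to whole packets, clamp it to the
 * sysctl limit (which another CPU may update via /proc), and publish
 * the result with one store.
 */
static void clamp_reordering(struct tp_like *tp, uint32_t metric,
			     uint32_t mss, const uint32_t *sysctl_max)
{
	uint32_t limit = READ_ONCE(*sysctl_max);
	uint32_t val = (metric + mss - 1) / mss;

	WRITE_ONCE(tp->reordering, val < limit ? val : limit);
}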
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 06b1d5d3b6df..7a9d6d9006f6 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -496,7 +496,7 @@ void tcp_init_metrics(struct sock *sk)
}
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val && tp->reordering != val)
- tp->reordering = val;
+ WRITE_ONCE(tp->reordering, val);
crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
rcu_read_unlock();