mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 03:11:11 -04:00
tcp: annotate data-races around tp->snd_ssthresh
tcp_get_timestamping_opt_stats() intentionally runs lockless, we must
add READ_ONCE() and WRITE_ONCE() annotations to keep KCSAN happy.
Fixes: 7156d194a0 ("tcp: add snd_ssthresh stat in SCM_TIMESTAMPING_OPT_STATS")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260416200319.3608680-5-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in the tree listed above.
Committed by: Jakub Kicinski
Parent commit: 829ba1f329
This commit:  fd571afb05
@@ -5396,7 +5396,7 @@ static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
 		if (val <= 0)
 			return -EINVAL;
 		tp->snd_cwnd_clamp = val;
-		tp->snd_ssthresh = val;
+		WRITE_ONCE(tp->snd_ssthresh, val);
 		break;
 	case TCP_BPF_DELACK_MAX:
 		timeout = usecs_to_jiffies(val);

@@ -3425,7 +3425,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN);
 	WRITE_ONCE(icsk->icsk_delack_max, TCP_DELACK_MAX);
-	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+	WRITE_ONCE(tp->snd_ssthresh, TCP_INFINITE_SSTHRESH);
 	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
 	tp->snd_cwnd_cnt = 0;
 	tp->is_cwnd_limited = 0;

@@ -4452,7 +4452,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
 	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS,
 		   READ_ONCE(inet_csk(sk)->icsk_retransmits));
 	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, data_race(!!tp->rate_app_limited));
-	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
+	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, READ_ONCE(tp->snd_ssthresh));
 	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
 	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);

@@ -897,8 +897,8 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
 	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
 		bbr->mode = BBR_DRAIN;	/* drain queue we created */
-		tcp_sk(sk)->snd_ssthresh =
-				bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
+		WRITE_ONCE(tcp_sk(sk)->snd_ssthresh,
+			   bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT));
 	}	/* fall through to check if in-flight is already small: */
 	if (bbr->mode == BBR_DRAIN &&
 	    bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=

@@ -1043,7 +1043,7 @@ __bpf_kfunc static void bbr_init(struct sock *sk)
 	struct bbr *bbr = inet_csk_ca(sk);

 	bbr->prior_cwnd = 0;
-	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+	WRITE_ONCE(tp->snd_ssthresh, TCP_INFINITE_SSTHRESH);
 	bbr->rtt_cnt = 0;
 	bbr->next_rtt_delivered = tp->delivered;
 	bbr->prev_ca_state = TCP_CA_Open;

@@ -74,7 +74,7 @@ static void bictcp_init(struct sock *sk)
 	bictcp_reset(ca);

 	if (initial_ssthresh)
-		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
+		WRITE_ONCE(tcp_sk(sk)->snd_ssthresh, initial_ssthresh);
 }

 /*

@@ -162,7 +162,7 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 			NET_ADD_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTTRAINCWND,
 				      tcp_snd_cwnd(tp));
-			tp->snd_ssthresh = tcp_snd_cwnd(tp);
+			WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp));
 			return;
 		}
 	}

@@ -181,7 +181,7 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 			NET_ADD_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTDELAYCWND,
 				      tcp_snd_cwnd(tp));
-			tp->snd_ssthresh = tcp_snd_cwnd(tp);
+			WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp));
 		}
 	}
 }

@@ -136,7 +136,7 @@ __bpf_kfunc static void cubictcp_init(struct sock *sk)
 	bictcp_hystart_reset(sk);

 	if (!hystart && initial_ssthresh)
-		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
+		WRITE_ONCE(tcp_sk(sk)->snd_ssthresh, initial_ssthresh);
 }

 __bpf_kfunc static void cubictcp_cwnd_event_tx_start(struct sock *sk)

@@ -420,7 +420,7 @@ static void hystart_update(struct sock *sk, u32 delay)
 			NET_ADD_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTTRAINCWND,
 				      tcp_snd_cwnd(tp));
-			tp->snd_ssthresh = tcp_snd_cwnd(tp);
+			WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp));
 		}
 	}
 }

@@ -440,7 +440,7 @@ static void hystart_update(struct sock *sk, u32 delay)
 			NET_ADD_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTDELAYCWND,
 				      tcp_snd_cwnd(tp));
-			tp->snd_ssthresh = tcp_snd_cwnd(tp);
+			WRITE_ONCE(tp->snd_ssthresh, tcp_snd_cwnd(tp));
 		}
 	}
 }

@@ -177,7 +177,7 @@ static void dctcp_react_to_loss(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);

 	ca->loss_cwnd = tcp_snd_cwnd(tp);
-	tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
+	WRITE_ONCE(tp->snd_ssthresh, max(tcp_snd_cwnd(tp) >> 1U, 2U));
 }

 __bpf_kfunc static void dctcp_state(struct sock *sk, u8 new_state)

@@ -2567,7 +2567,7 @@ void tcp_enter_loss(struct sock *sk)
 	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tp->prior_cwnd = tcp_snd_cwnd(tp);
-		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+		WRITE_ONCE(tp->snd_ssthresh, icsk->icsk_ca_ops->ssthresh(sk));
 		tcp_ca_event(sk, CA_EVENT_LOSS);
 		tcp_init_undo(tp);
 	}

@@ -2860,7 +2860,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 	tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));

 	if (tp->prior_ssthresh > tp->snd_ssthresh) {
-		tp->snd_ssthresh = tp->prior_ssthresh;
+		WRITE_ONCE(tp->snd_ssthresh, tp->prior_ssthresh);
 		tcp_ecn_withdraw_cwr(tp);
 	}
 }

@@ -2978,7 +2978,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 	tp->prior_cwnd = tcp_snd_cwnd(tp);
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
-	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+	WRITE_ONCE(tp->snd_ssthresh, inet_csk(sk)->icsk_ca_ops->ssthresh(sk));
 	tcp_ecn_queue_cwr(tp);
 }

@@ -3120,7 +3120,7 @@ static void tcp_non_congestion_loss_retransmit(struct sock *sk)

 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
 		tp->high_seq = tp->snd_nxt;
-		tp->snd_ssthresh = tcp_current_ssthresh(sk);
+		WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk));
 		tp->prior_ssthresh = 0;
 		tp->undo_marker = 0;
 		tcp_set_ca_state(sk, TCP_CA_Loss);

@@ -490,9 +490,9 @@ void tcp_init_metrics(struct sock *sk)
 	val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
 	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 	if (val) {
-		tp->snd_ssthresh = val;
+		WRITE_ONCE(tp->snd_ssthresh, val);
 		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-			tp->snd_ssthresh = tp->snd_cwnd_clamp;
+			WRITE_ONCE(tp->snd_ssthresh, tp->snd_cwnd_clamp);
 	}
 	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 	if (val && tp->reordering != val)

@@ -396,8 +396,8 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)

 			/* We have enough data to determine we are congested */
 			ca->nv_allow_cwnd_growth = 0;
-			tp->snd_ssthresh =
-				(nv_ssthresh_factor * max_win) >> 3;
+			WRITE_ONCE(tp->snd_ssthresh,
+				   (nv_ssthresh_factor * max_win) >> 3);
 			if (tcp_snd_cwnd(tp) - max_win > 2) {
 				/* gap > 2, we do exponential cwnd decrease */
 				int dec;

@@ -171,7 +171,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)

 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

-	tp->snd_ssthresh = tcp_current_ssthresh(sk);
+	WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk));
 	restart_cwnd = min(restart_cwnd, cwnd);

 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)

@@ -2143,7 +2143,7 @@ static void tcp_cwnd_application_limited(struct sock *sk)
 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
 		u32 win_used = max(tp->snd_cwnd_used, init_win);
 		if (win_used < tcp_snd_cwnd(tp)) {
-			tp->snd_ssthresh = tcp_current_ssthresh(sk);
+			WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk));
 			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
 		}
 		tp->snd_cwnd_used = 0;

@@ -245,7 +245,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			 */
 			tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),
 						 (u32)target_cwnd + 1));
-			tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
+			WRITE_ONCE(tp->snd_ssthresh,
+				   tcp_vegas_ssthresh(tp));

 		} else if (tcp_in_slow_start(tp)) {
 			/* Slow start. */

@@ -261,8 +262,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
				 * we slow down.
				 */
				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
-				tp->snd_ssthresh
-					= tcp_vegas_ssthresh(tp);
+				WRITE_ONCE(tp->snd_ssthresh,
+					   tcp_vegas_ssthresh(tp));
			} else if (diff < alpha) {
				/* We don't have enough extra packets
				 * in the network, so speed up.

@@ -280,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
 			tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);

-		tp->snd_ssthresh = tcp_current_ssthresh(sk);
+		WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk));
 	}

 	/* Wipe the slate clean for the next RTT. */

@@ -244,11 +244,11 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)

 	switch (event) {
 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		WRITE_ONCE(tp->snd_ssthresh, tcp_westwood_bw_rttmin(sk));
 		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
 		break;
 	case CA_EVENT_LOSS:
-		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		WRITE_ONCE(tp->snd_ssthresh, tcp_westwood_bw_rttmin(sk));
 		/* Update RTT_min when next ack arrives */
 		w->reset_rtt_min = 1;
 		break;

@@ -147,7 +147,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),
 						 yeah->reno_count));

-			tp->snd_ssthresh = tcp_snd_cwnd(tp);
+			WRITE_ONCE(tp->snd_ssthresh,
+				   tcp_snd_cwnd(tp));
 		}

 		if (yeah->reno_count <= 2)
||||
Reference in New Issue
Block a user