Merge branch 'tcp-destroy-tcp-ao-tcp-md5-keys-in-sk_destruct'

Dmitry Safonov says:

====================
tcp: Destroy TCP-AO, TCP-MD5 keys in .sk_destruct()

On one side, this is a minor/cosmetic issue, especially nowadays when
TCP-AO/TCP-MD5 signature verification failures aren't logged to dmesg.

Yet, I think it's worth addressing for two reasons:
- an unsigned RST gets ignored by the peer and the connection stays alive
  for longer (the keep-alive interval)
- netstat counters increase and trace events report that a trusted BGP peer
  is sending unsigned/incorrectly signed segments, which can raise alarms
  in monitoring.
====================

Link: https://patch.msgid.link/20250909-b4-tcp-ao-md5-rst-finwait2-v5-0-9ffaaaf8b236@arista.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski, 2025-09-11 19:05:59 -07:00
7 changed files with 45 additions and 45 deletions

include/net/tcp.h

@@ -1941,6 +1941,7 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
 }
 
 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
+void tcp_md5_destruct_sock(struct sock *sk);
 #else
 static inline struct tcp_md5sig_key *
 tcp_md5_do_lookup(const struct sock *sk, int l3index,
@@ -1957,6 +1958,9 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
 }
 
 #define tcp_twsk_md5_key(twsk) NULL
+static inline void tcp_md5_destruct_sock(struct sock *sk)
+{
+}
 #endif
 
 int tcp_md5_alloc_sigpool(void);

include/net/tcp_ao.h

@@ -130,7 +130,6 @@ struct tcp_ao_info {
         u32 snd_sne;
         u32 rcv_sne;
         refcount_t refcnt; /* Protects twsk destruction */
-        struct rcu_head rcu;
 };
 
 #ifdef CONFIG_TCP_MD5SIG

net/ipv4/tcp.c

@@ -412,6 +412,22 @@ static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
         return rate64;
 }
 
+#ifdef CONFIG_TCP_MD5SIG
+void tcp_md5_destruct_sock(struct sock *sk)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+
+        if (tp->md5sig_info) {
+                tcp_clear_md5_list(sk);
+                kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 1));
+                static_branch_slow_dec_deferred(&tcp_md5_needed);
+                tcp_md5_release_sigpool();
+        }
+}
+EXPORT_IPV6_MOD_GPL(tcp_md5_destruct_sock);
+#endif
+
 /* Address-family independent initialization for a tcp_sock.
  *
  * NOTE: A lot of things set to zero explicitly by call to

net/ipv4/tcp_ao.c

@@ -268,9 +268,8 @@ static void tcp_ao_key_free_rcu(struct rcu_head *head)
         kfree_sensitive(key);
 }
 
-static void tcp_ao_info_free_rcu(struct rcu_head *head)
+static void tcp_ao_info_free(struct tcp_ao_info *ao)
 {
-        struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
         struct tcp_ao_key *key;
         struct hlist_node *n;
@@ -310,7 +309,7 @@ void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
         if (!twsk)
                 tcp_ao_sk_omem_free(sk, ao);
-        call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
+        tcp_ao_info_free(ao);
 }
 
 void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)

net/ipv4/tcp_ipv4.c

@@ -1503,9 +1503,9 @@ void tcp_clear_md5_list(struct sock *sk)
         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
 
         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
-                hlist_del_rcu(&key->node);
+                hlist_del(&key->node);
                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
-                kfree_rcu(key, rcu);
+                kfree(key);
         }
 }
@@ -2494,6 +2494,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
         .ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
 #endif
 };
+
+static void tcp4_destruct_sock(struct sock *sk)
+{
+        tcp_md5_destruct_sock(sk);
+        tcp_ao_destroy_sock(sk, false);
+        inet_sock_destruct(sk);
+}
 #endif
 
 /* NOTE: A lot of things set to zero explicitly by call to
@@ -2509,23 +2516,12 @@ static int tcp_v4_init_sock(struct sock *sk)
 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
+        sk->sk_destruct = tcp4_destruct_sock;
 #endif
 
         return 0;
 }
 
-#ifdef CONFIG_TCP_MD5SIG
-static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
-{
-        struct tcp_md5sig_info *md5sig;
-
-        md5sig = container_of(head, struct tcp_md5sig_info, rcu);
-        kfree(md5sig);
-        static_branch_slow_dec_deferred(&tcp_md5_needed);
-        tcp_md5_release_sigpool();
-}
-#endif
-
 static void tcp_release_user_frags(struct sock *sk)
 {
 #ifdef CONFIG_PAGE_POOL
@@ -2562,19 +2558,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
         /* Cleans up our, hopefully empty, out_of_order_queue. */
         skb_rbtree_purge(&tp->out_of_order_queue);
 
-#ifdef CONFIG_TCP_MD5SIG
-        /* Clean up the MD5 key list, if any */
-        if (tp->md5sig_info) {
-                struct tcp_md5sig_info *md5sig;
-
-                md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
-                tcp_clear_md5_list(sk);
-                call_rcu(&md5sig->rcu, tcp_md5sig_info_free_rcu);
-                rcu_assign_pointer(tp->md5sig_info, NULL);
-        }
-#endif
-        tcp_ao_destroy_sock(sk, false);
-
         /* Clean up a referenced TCP bind bucket. */
         if (inet_csk(sk)->icsk_bind_hash)
                 inet_put_port(sk);

net/ipv4/tcp_minisocks.c

@@ -377,26 +377,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 }
 EXPORT_SYMBOL(tcp_time_wait);
 
-#ifdef CONFIG_TCP_MD5SIG
-static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
-{
-        struct tcp_md5sig_key *key;
-
-        key = container_of(head, struct tcp_md5sig_key, rcu);
-        kfree(key);
-        static_branch_slow_dec_deferred(&tcp_md5_needed);
-        tcp_md5_release_sigpool();
-}
-#endif
-
 void tcp_twsk_destructor(struct sock *sk)
 {
 #ifdef CONFIG_TCP_MD5SIG
         if (static_branch_unlikely(&tcp_md5_needed.key)) {
                 struct tcp_timewait_sock *twsk = tcp_twsk(sk);
 
-                if (twsk->tw_md5_key)
-                        call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
+                if (twsk->tw_md5_key) {
+                        kfree(twsk->tw_md5_key);
+                        static_branch_slow_dec_deferred(&tcp_md5_needed);
+                        tcp_md5_release_sigpool();
+                }
         }
 #endif
         tcp_ao_destroy_sock(sk, true);

net/ipv6/tcp_ipv6.c

@@ -2110,6 +2110,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
         .ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
 #endif
 };
+
+static void tcp6_destruct_sock(struct sock *sk)
+{
+        tcp_md5_destruct_sock(sk);
+        tcp_ao_destroy_sock(sk, false);
+        inet6_sock_destruct(sk);
+}
 #endif
 
 /* NOTE: A lot of things set to zero explicitly by call to
@@ -2125,6 +2132,7 @@ static int tcp_v6_init_sock(struct sock *sk)
 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
+        sk->sk_destruct = tcp6_destruct_sock;
 #endif
 
         return 0;