ipv6: change inet6_sk_rebuild_header() to use inet->cork.fl.u.ip6

TCP v6 spends a good amount of time rebuilding a fresh fl6 at each
transmit in inet6_csk_xmit()/inet6_csk_route_socket().

TCP v4 caches the information in inet->cork.fl.u.ip4 instead.

This patch is a first step toward converting IPv6 to the same strategy:

Before this patch, inet6_sk_rebuild_header() only validated/rebuilt
a dst. The content of the automatic @fl6 variable was lost.

After this patch, inet6_sk_rebuild_header() also initializes
inet->cork.fl.u.ip6, which can be reused in the future.

This makes inet6_sk_rebuild_header() very similar to
inet_sk_rebuild_header().
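
For illustration only, a minimal sketch of the reuse this enables once
inet->cork.fl.u.ip6 is kept in sync by inet6_sk_rebuild_header(). The
helpers inet6_sk_cached_flow()/inet6_sk_route_cached() are hypothetical
names, not part of this patch or of the kernel tree; they only show how a
transmit path could read the cached flow instead of rebuilding a flowi6
from the socket for every packet:

#include <linux/ipv6.h>
#include <net/inet_sock.h>
#include <net/ipv6.h>
#include <net/sock.h>

/* Hypothetical sketch (not kernel code): reuse the flowi6 cached by
 * inet6_sk_rebuild_header() instead of rebuilding one per transmit.
 * Assumes the socket lock is held.
 */
static struct flowi6 *inet6_sk_cached_flow(struct sock *sk)
{
	/* Filled by inet6_sk_rebuild_header() when the route is rebuilt. */
	return &inet_sk(sk)->cork.fl.u.ip6;
}

static struct dst_entry *inet6_sk_route_cached(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dst_entry *dst;

	/* Fast path: cached route still valid, no flowi6 rebuild needed. */
	dst = __sk_dst_check(sk, np->dst_cookie);
	if (dst)
		return dst;

	/* Slow path: refresh the dst and the cached flowi6 together. */
	if (inet6_sk_rebuild_header(sk))
		return NULL;

	return __sk_dst_get(sk);
}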

Also remove the EXPORT_SYMBOL_GPL(); inet6_sk_rebuild_header()
is not called from any module.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20260204163035.4123817-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 85d05e2817 (parent 047c5265ec)
Author:    Eric Dumazet
Date:      2026-02-04 16:30:34 +00:00
Committer: Jakub Kicinski

net/ipv6/af_inet6.c

@@ -824,45 +824,42 @@ EXPORT_SYMBOL(inet6_unregister_protosw);
 int inet6_sk_rebuild_header(struct sock *sk)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct inet_sock *inet = inet_sk(sk);
+	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
+	struct flowi6 *fl6;
 
 	dst = __sk_dst_check(sk, np->dst_cookie);
+	if (dst)
+		return 0;
 
-	if (!dst) {
-		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *final_p, final;
-		struct flowi6 fl6;
-
-		memset(&fl6, 0, sizeof(fl6));
-		fl6.flowi6_proto = sk->sk_protocol;
-		fl6.daddr = sk->sk_v6_daddr;
-		fl6.saddr = np->saddr;
-		fl6.flowlabel = np->flow_label;
-		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.flowi6_mark = sk->sk_mark;
-		fl6.fl6_dport = inet->inet_dport;
-		fl6.fl6_sport = inet->inet_sport;
-		fl6.flowi6_uid = sk_uid(sk);
-		security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
+	fl6 = &inet->cork.fl.u.ip6;
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_proto = sk->sk_protocol;
+	fl6->daddr = sk->sk_v6_daddr;
+	fl6->saddr = np->saddr;
+	fl6->flowlabel = np->flow_label;
+	fl6->flowi6_oif = sk->sk_bound_dev_if;
+	fl6->flowi6_mark = sk->sk_mark;
+	fl6->fl6_dport = inet->inet_dport;
+	fl6->fl6_sport = inet->inet_sport;
+	fl6->flowi6_uid = sk_uid(sk);
+	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
 
-		rcu_read_lock();
-		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
-					 &final);
-		rcu_read_unlock();
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
 
-		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
-		if (IS_ERR(dst)) {
-			sk->sk_route_caps = 0;
-			WRITE_ONCE(sk->sk_err_soft, -PTR_ERR(dst));
-			return PTR_ERR(dst);
-		}
-
-		ip6_dst_store(sk, dst, false, false);
-	}
+	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+	if (IS_ERR(dst)) {
+		sk->sk_route_caps = 0;
+		WRITE_ONCE(sk->sk_err_soft, -PTR_ERR(dst));
+		return PTR_ERR(dst);
+	}
+	ip6_dst_store(sk, dst, false, false);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
 
 bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
 		       const struct inet6_skb_parm *opt)