Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (last synced 2026-05-16 14:51:51 -04:00).
tcp: inline tcp_chrono_start()
tcp_chrono_start() is small enough, and used in TCP sendmsg() fast path (from tcp_skb_entail()). Note clang is already inlining it from functions in tcp_output.c. Inlining it improves performance and reduces bloat : $ scripts/bloat-o-meter -t vmlinux.old vmlinux.new add/remove: 0/2 grow/shrink: 1/0 up/down: 1/-84 (-83) Function old new delta tcp_skb_entail 280 281 +1 __pfx_tcp_chrono_start 16 - -16 tcp_chrono_start 68 - -68 Total: Before=25192434, After=25192351, chg -0.00% Note that tcp_chrono_stop() is too big. Signed-off-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Neal Cardwell <ncardwell@google.com> Link: https://patch.msgid.link/20260308123549.2924460-1-edumazet@google.com Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
committed by
Jakub Kicinski
parent
4b78c9cbd8
commit
d6d4ff335d
@@ -2159,7 +2159,30 @@ enum tcp_chrono {
|
||||
__TCP_CHRONO_MAX,
|
||||
};
|
||||
|
||||
void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
|
||||
static inline void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
|
||||
{
|
||||
const u32 now = tcp_jiffies32;
|
||||
enum tcp_chrono old = tp->chrono_type;
|
||||
|
||||
if (old > TCP_CHRONO_UNSPEC)
|
||||
tp->chrono_stat[old - 1] += now - tp->chrono_start;
|
||||
tp->chrono_start = now;
|
||||
tp->chrono_type = new;
|
||||
}
|
||||
|
||||
static inline void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
/* If there are multiple conditions worthy of tracking in a
|
||||
* chronograph then the highest priority enum takes precedence
|
||||
* over the other conditions. So that if something "more interesting"
|
||||
* starts happening, stop the previous chrono and start a new one.
|
||||
*/
|
||||
if (type > tp->chrono_type)
|
||||
tcp_chrono_set(tp, type);
|
||||
}
|
||||
|
||||
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
|
||||
|
||||
/* This helper is needed, because skb->tcp_tsorted_anchor uses
|
||||
|
||||
@@ -2903,30 +2903,6 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
|
||||
return false;
|
||||
}
|
||||
|
||||
static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
|
||||
{
|
||||
const u32 now = tcp_jiffies32;
|
||||
enum tcp_chrono old = tp->chrono_type;
|
||||
|
||||
if (old > TCP_CHRONO_UNSPEC)
|
||||
tp->chrono_stat[old - 1] += now - tp->chrono_start;
|
||||
tp->chrono_start = now;
|
||||
tp->chrono_type = new;
|
||||
}
|
||||
|
||||
void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
/* If there are multiple conditions worthy of tracking in a
|
||||
* chronograph then the highest priority enum takes precedence
|
||||
* over the other conditions. So that if something "more interesting"
|
||||
* starts happening, stop the previous chrono and start a new one.
|
||||
*/
|
||||
if (type > tp->chrono_type)
|
||||
tcp_chrono_set(tp, type);
|
||||
}
|
||||
|
||||
void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
Reference in New Issue
Block a user