mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 13:30:45 -05:00
In the MPTCP receive path, we release the subflow allocated fwd memory just to allocate it again shortly after for the msk. That could increase the chance of failure, especially when we add backlog processing, as other actions could consume the just-released memory before the msk socket has a chance to do the rcv allocation. Replace the skb_orphan() call with an open-coded variant that explicitly borrows the fwd memory from the subflow socket instead of releasing it. The borrowed memory does not have PAGE_SIZE granularity; rounding to the page size would make the fwd allocated memory higher than what is strictly required and could make the incoming subflow fwd mem consistently negative. Instead, keep track of the accumulated frag and borrow the full page at subflow close time. This allows removing the last drop in the TCP to MPTCP transition and the associated, now unused, MIB. Signed-off-by: Paolo Abeni <pabeni@redhat.com> Reviewed-by: Mat Martineau <martineau@kernel.org> Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org> Link: https://patch.msgid.link/20251121-net-next-mptcp-memcg-backlog-imp-v1-12-1f34b6c1e0b1@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
63 lines
1.5 KiB
C
63 lines
1.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* MPTCP Fast Open Mechanism
|
|
*
|
|
* Copyright (c) 2021-2022, Dmytro SHYTYI
|
|
*/
|
|
|
|
#include "protocol.h"
|
|
|
|
/* Move the TFO (TCP Fast Open) payload received on the subflow's SYN into
 * the MPTCP (msk) receive queue, adjusting sequence bookkeeping so that the
 * data sits before the start of the MPTCP sequence space.
 *
 * @subflow: subflow context of the just-established TFO subflow; may be
 *           NULL on early fallback (see below)
 * @req:     the request sock for the incoming connection (currently unused
 *           here beyond the call signature)
 */
void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
					      struct request_sock *req)
{
	struct sock *sk, *ssk;
	struct sk_buff *skb;
	struct tcp_sock *tp;

	/* on early fallback the subflow context is deleted by
	 * subflow_syn_recv_sock()
	 */
	if (!subflow)
		return;

	ssk = subflow->tcp_sock;
	sk = subflow->conn;
	tp = tcp_sk(ssk);

	/* Mark this subflow as created via MPTCP fast open */
	subflow->is_mptfo = 1;

	/* The TFO payload is expected to be the first (and only) skb queued
	 * on the subflow at this point; bail out loudly if it is missing.
	 */
	skb = skb_peek(&ssk->sk_receive_queue);
	if (WARN_ON_ONCE(!skb))
		return;

	/* dequeue the skb from sk receive queue */
	__skb_unlink(skb, &ssk->sk_receive_queue);
	skb_ext_reset(skb);

	/* Borrow the subflow's fwd-allocated memory for this skb instead of
	 * releasing it (as a plain skb_orphan() would), so the msk does not
	 * have to re-allocate it below; see mptcp_borrow_fwdmem().
	 */
	mptcp_subflow_lend_fwdmem(subflow, skb);

	/* We copy the fastopen data, but that doesn't belong to the mptcp
	 * sequence space; need to offset it in the subflow sequence, see
	 * mptcp_subflow_get_map_offset()
	 */
	tp->copied_seq += skb->len;
	subflow->ssn_offset += skb->len;

	/* Only the sequence delta is relevant: place the data just before
	 * MPTCP sequence 0 (map_seq = -len, end_seq = 0).
	 */
	MPTCP_SKB_CB(skb)->map_seq = -skb->len;
	MPTCP_SKB_CB(skb)->end_seq = 0;
	MPTCP_SKB_CB(skb)->offset = 0;
	MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
	/* Prevent later queued data from being merged into this skb, since
	 * its sequence numbers sit outside the normal MPTCP space.
	 */
	MPTCP_SKB_CB(skb)->cant_coalesce = 1;

	mptcp_data_lock(sk);
	/* NOTE(review): presumably the msk cannot be owned by user context
	 * at SYN-ACK processing time — the warning documents that invariant.
	 */
	DEBUG_NET_WARN_ON_ONCE(sock_owned_by_user_nocheck(sk));

	/* Charge the lent fwd memory to the msk and hand it ownership of
	 * the skb before queueing.
	 */
	mptcp_borrow_fwdmem(sk, skb);
	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	mptcp_sk(sk)->bytes_received += skb->len;

	/* Wake any reader waiting on the msk */
	sk->sk_data_ready(sk);

	mptcp_data_unlock(sk);
}
|