udp: make busylock per socket
While having all spinlocks packed into an array was a space saver, this
also caused NUMA imbalance and hash collisions.

UDPv6 socket size becomes 1600 after this patch.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250916160951.541279-10-edumazet@google.com
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 3cd04c8f4a
parent 9db27c8062
committed by: Paolo Abeni
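To illustrate the trade-off the changelog describes: before this patch, busylock_acquire() hashed the socket pointer into one boot-time array of spinlocks shared by all UDP sockets, so unrelated sockets could collide on the same slot and the whole array lived on a single NUMA node; after it, each socket embeds its own cacheline-aligned lock. Below is a minimal userspace sketch of the two lookups, my approximation rather than kernel code: pthread spinlocks stand in for spinlock_t, and hash_ptr() is mimicked with a simple multiplicative hash.

/* Sketch: shared hashed lock array (old) vs per-object lock (new).
 * Userspace approximation; not the kernel implementation.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BUSYLOCKS_LOG 4 /* 16 slots, i.e. one CPU's worth in the old scheme */
static pthread_spinlock_t busylocks[1 << BUSYLOCKS_LOG];

struct fake_sock {
	pthread_spinlock_t busylock; /* new scheme: lock embedded in the object */
	char payload[64];
};

/* Rough stand-in for the kernel's hash_ptr(): multiplicative hash,
 * keeping the top BUSYLOCKS_LOG bits of the product. */
static unsigned int hash_ptr_approx(void *ptr)
{
	uint64_t h = (uint64_t)(uintptr_t)ptr * 0x61C8864680B583EBull;

	return (unsigned int)(h >> (64 - BUSYLOCKS_LOG));
}

/* Old scheme: any two sockets whose pointers hash alike share a lock. */
static pthread_spinlock_t *busylock_acquire_old(struct fake_sock *sk)
{
	pthread_spinlock_t *busy = &busylocks[hash_ptr_approx(sk)];

	pthread_spin_lock(busy);
	return busy;
}

/* New scheme: the lock lives in the socket itself, so unrelated sockets
 * never collide and the lock is allocated on the socket's own NUMA node. */
static pthread_spinlock_t *busylock_acquire_new(struct fake_sock *sk)
{
	pthread_spin_lock(&sk->busylock);
	return &sk->busylock;
}

int main(void)
{
	struct fake_sock a, b;
	pthread_spinlock_t *la, *lb;

	for (int i = 0; i < 1 << BUSYLOCKS_LOG; i++)
		pthread_spin_init(&busylocks[i], PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&a.busylock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b.busylock, PTHREAD_PROCESS_PRIVATE);

	la = busylock_acquire_old(&a);
	/* With only 16 slots, unrelated sockets frequently share one. */
	printf("old: a -> slot %u, b -> slot %u%s\n",
	       hash_ptr_approx(&a), hash_ptr_approx(&b),
	       hash_ptr_approx(&a) == hash_ptr_approx(&b) ?
	       " (collision: b would spin behind a)" : "");
	pthread_spin_unlock(la);

	lb = busylock_acquire_new(&b); /* never contends with a's lock */
	pthread_spin_unlock(lb);
	return 0;
}

The ____cacheline_aligned_in_smp annotation in the patch is what turns the per-socket lock into roughly one extra cache line per socket, which is where the 1600-byte UDPv6 socket size noted above comes from.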
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -109,6 +109,7 @@ struct udp_sock {
 	 */
 	struct hlist_node	tunnel_list;
 	struct numa_drop_counters drop_counters;
+	spinlock_t		busylock ____cacheline_aligned_in_smp;
 };
 
 #define udp_test_bit(nr, sk)			\
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -289,6 +289,7 @@ static inline void udp_lib_init_sock(struct sock *sk)
 	struct udp_sock *up = udp_sk(sk);
 
 	sk->sk_drop_counters = &up->drop_counters;
+	spin_lock_init(&up->busylock);
 	skb_queue_head_init(&up->reader_queue);
 	INIT_HLIST_NODE(&up->tunnel_list);
 	up->forward_threshold = sk->sk_rcvbuf >> 2;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1689,17 +1689,11 @@ static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
  * to relieve pressure on the receive_queue spinlock shared by consumer.
  * Under flood, this means that only one producer can be in line
  * trying to acquire the receive_queue spinlock.
- * These busylock can be allocated on a per cpu manner, instead of a
- * per socket one (that would consume a cache line per socket)
  */
-static int udp_busylocks_log __read_mostly;
-static spinlock_t *udp_busylocks __read_mostly;
-
-static spinlock_t *busylock_acquire(void *ptr)
+static spinlock_t *busylock_acquire(struct sock *sk)
{
-	spinlock_t *busy;
+	spinlock_t *busy = &udp_sk(sk)->busylock;
 
-	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
 	spin_lock(busy);
 	return busy;
}
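The comment retained in the hunk above describes the mechanism this patch leaves intact: a producer takes the busylock first and only then contends for the receive-queue spinlock, so at most one producer per socket ever queues up against the consumer. Here is a small userspace model of that two-level pattern; it is my sketch, not the kernel's enqueue path (which, as I read it, pairs busylock_acquire() with a busylock_release() helper and takes the busylock only once the queue is already under pressure), and pthread mutexes stand in for spinlocks.

/* Two-level locking model: producers serialize on a per-socket
 * "busylock" before touching the queue lock shared with the consumer. */
#include <pthread.h>
#include <stdio.h>

struct model_sock {
	pthread_mutex_t busylock;   /* taken by producers only */
	pthread_mutex_t queue_lock; /* shared by producers and consumer */
	int queue_len;
};

/* Producer path: many producers may block here on busylock, but only
 * the busylock holder ever contends with the consumer on queue_lock. */
static void produce(struct model_sock *sk)
{
	pthread_mutex_lock(&sk->busylock);
	pthread_mutex_lock(&sk->queue_lock);
	sk->queue_len++;
	pthread_mutex_unlock(&sk->queue_lock);
	pthread_mutex_unlock(&sk->busylock);
}

/* Consumer path: never touches the busylock, so it competes with at
 * most one producer for queue_lock. */
static int consume(struct model_sock *sk)
{
	int len;

	pthread_mutex_lock(&sk->queue_lock);
	len = sk->queue_len > 0 ? sk->queue_len-- : 0;
	pthread_mutex_unlock(&sk->queue_lock);
	return len;
}

int main(void)
{
	struct model_sock sk = {
		.busylock = PTHREAD_MUTEX_INITIALIZER,
		.queue_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	produce(&sk);
	printf("dequeued from depth %d\n", consume(&sk));
	return 0;
}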
@@ -3997,7 +3991,6 @@ static void __init bpf_iter_register(void)
 void __init udp_init(void)
{
 	unsigned long limit;
-	unsigned int i;
 
 	udp_table_init(&udp_table, "UDP");
 	limit = nr_free_buffer_pages() / 8;
@@ -4006,15 +3999,6 @@ void __init udp_init(void)
 	sysctl_udp_mem[1] = limit;
 	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
 
-	/* 16 spinlocks per cpu */
-	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
-	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
-				GFP_KERNEL);
-	if (!udp_busylocks)
-		panic("UDP: failed to alloc udp_busylocks\n");
-	for (i = 0; i < (1U << udp_busylocks_log); i++)
-		spin_lock_init(udp_busylocks + i);
-
 	if (register_pernet_subsys(&udp_sysctl_ops))
 		panic("UDP: failed to init sysctl parameters.\n");