Merge branch 'ipv6-flowlabel-per-netns-budget-for-unprivileged-callers'

Maoyi Xie says:

====================
ipv6: flowlabel: per-netns budget for unprivileged callers

From: Maoyi Xie <maoyi.xie@ntu.edu.sg>

This series fixes a cross-tenant DoS in net/ipv6/ip6_flowlabel.c,
where one unprivileged netns could exhaust the global flowlabel
budget and starve allocations in every other netns.
v1 through v6 were single-patch postings, each in its own thread.
v6 review pointed out that the existing fl_size read in
mem_check() and the corresponding write in fl_intern() are not in
the same critical section. v7 split the work into 2 patches.

Patch 1/2 is a prerequisite. It moves spin_lock_bh(&ip6_fl_lock)
and the matching unlock from fl_intern() into its only caller
ipv6_flowlabel_get(), so the mem_check() call runs under the same
critical section as the fl_intern() insert. With all writers and
the read of fl_size under the lock, fl_size is converted from
atomic_t to plain int. This is independent of the per-netns
budget. It also makes 2/2 backportable without conflicts.

Patch 2/2 is the v6 patch, rebased on 1/2.

  - flowlabel_count is plain int rather than atomic_t, since the
    previous patch put all writers and readers under ip6_fl_lock.
  - In ip6_fl_gc(), fl_free() is now placed below the fl_size
    and flowlabel_count decrements, removing the v6 cache of
    fl->fl_net.
  - In ip6_fl_purge(), fl_free() stays in its original position.
    The function argument net is used for flowlabel_count.
  - mem_check() uses spaces around the / operator on all four
    expressions, addressing the checkpatch note in v6 review.

Numeric budget (preserved from v6):

  pre-patch:
    global non-CAP_NET_ADMIN budget = FL_MAX_SIZE - FL_MAX_SIZE/4
                                    = 4096 - 1024 = 3072
    per-actor reach                 = 3072

  post-patch:
    FL_MAX_SIZE doubled to 8192
    global non-CAP_NET_ADMIN budget = 8192 - 2048 = 6144
    per-netns ceiling               = 6144 / 2 = 3072
    per-actor reach                 = 3072 (preserved)

CAP_NET_ADMIN in the init user namespace still bypasses both limits.

Reproducer (KASAN VM, 4 cores, qemu): unprivileged netns A holds
3072 flowlabels via 100 procs. Fresh unprivileged netns B then
allocates 32 flowlabels (the FL_MAX_PER_SOCK ceiling for one
socket), the same as a clean baseline. Without the per-netns
ceiling, netns A could push fl_size past
FL_MAX_SIZE - FL_MAX_SIZE / 4 and netns B would see allocations
denied.
====================

Link: https://patch.msgid.link/20260506082416.2259567-1-maoyixie.tju@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2026-05-08 14:59:17 -07:00
2 changed files with 32 additions and 15 deletions

View File

@@ -119,6 +119,7 @@ struct netns_ipv6 {
struct fib_notifier_ops *notifier_ops;
struct fib_notifier_ops *ip6mr_notifier_ops;
atomic_t ipmr_seq;
int flowlabel_count;
struct {
struct hlist_head head;
spinlock_t lock;

View File

@@ -36,11 +36,11 @@
/* FL hash table */
#define FL_MAX_PER_SOCK 32
#define FL_MAX_SIZE 4096
#define FL_MAX_SIZE 8192
#define FL_HASH_MASK 255
#define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)
static atomic_t fl_size = ATOMIC_INIT(0);
static int fl_size;
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
static void ip6_fl_gc(struct timer_list *unused);
@@ -162,8 +162,9 @@ static void ip6_fl_gc(struct timer_list *unused)
ttd = fl->expires;
if (time_after_eq(now, ttd)) {
*flp = fl->next;
fl_size--;
fl->fl_net->ipv6.flowlabel_count--;
fl_free(fl);
atomic_dec(&fl_size);
continue;
}
if (!sched || time_before(ttd, sched))
@@ -172,7 +173,7 @@ static void ip6_fl_gc(struct timer_list *unused)
flp = &fl->next;
}
}
if (!sched && atomic_read(&fl_size))
if (!sched && fl_size)
sched = now + FL_MAX_LINGER;
if (sched) {
mod_timer(&ip6_fl_gc_timer, sched);
@@ -196,7 +197,8 @@ static void __net_exit ip6_fl_purge(struct net *net)
atomic_read(&fl->users) == 0) {
*flp = fl->next;
fl_free(fl);
atomic_dec(&fl_size);
fl_size--;
net->ipv6.flowlabel_count--;
continue;
}
flp = &fl->next;
@@ -210,10 +212,10 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
{
struct ip6_flowlabel *lfl;
lockdep_assert_held(&ip6_fl_lock);
fl->label = label & IPV6_FLOWLABEL_MASK;
rcu_read_lock();
spin_lock_bh(&ip6_fl_lock);
if (label == 0) {
for (;;) {
fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK;
@@ -235,8 +237,6 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
lfl = __fl_lookup(net, fl->label);
if (lfl) {
atomic_inc(&lfl->users);
spin_unlock_bh(&ip6_fl_lock);
rcu_read_unlock();
return lfl;
}
}
@@ -244,9 +244,8 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
fl->lastuse = jiffies;
fl->next = fl_ht[FL_HASH(fl->label)];
rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
atomic_inc(&fl_size);
spin_unlock_bh(&ip6_fl_lock);
rcu_read_unlock();
fl_size++;
net->ipv6.flowlabel_count++;
return NULL;
}
@@ -464,10 +463,17 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
static int mem_check(struct sock *sk)
{
int room = FL_MAX_SIZE - atomic_read(&fl_size);
const int unpriv_total_limit = FL_MAX_SIZE - (FL_MAX_SIZE / 4);
const int unpriv_user_limit = unpriv_total_limit / 2;
struct net *net = sock_net(sk);
int room;
struct ipv6_fl_socklist *sfl;
int count = 0;
lockdep_assert_held(&ip6_fl_lock);
room = FL_MAX_SIZE - fl_size;
if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
return 0;
@@ -478,7 +484,9 @@ static int mem_check(struct sock *sk)
if (room <= 0 ||
((count >= FL_MAX_PER_SOCK ||
(count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
(count > 0 && room < FL_MAX_SIZE / 2) ||
room < FL_MAX_SIZE / 4 ||
net->ipv6.flowlabel_count >= unpriv_user_limit) &&
!capable(CAP_NET_ADMIN)))
return -ENOBUFS;
@@ -692,11 +700,19 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
if (!sfl1)
goto done;
rcu_read_lock();
spin_lock_bh(&ip6_fl_lock);
err = mem_check(sk);
if (err == 0)
fl1 = fl_intern(net, fl, freq->flr_label);
else
fl1 = NULL;
spin_unlock_bh(&ip6_fl_lock);
rcu_read_unlock();
if (err != 0)
goto done;
fl1 = fl_intern(net, fl, freq->flr_label);
if (fl1)
goto recheck;