mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 04:21:09 -04:00
sched_ext: Fix ops->priv clobber on concurrent attach/detach
Under heavy concurrent attach/detach operations, scx_claim_exit() can
trigger a NULL pointer dereference. This can be reproduced running the
reload_loop kselftests inside a virtme-ng session:
$ vng -v -- ./tools/testing/selftests/sched_ext/runner -t reload_loop
...
BUG: kernel NULL pointer dereference, address: 0000000000000400
RIP: 0010:scx_claim_exit+0x3b/0x120
Call Trace:
<TASK>
bpf_scx_unreg+0x45/0xb0
bpf_struct_ops_map_link_dealloc+0x39/0x50
bpf_link_release+0x18/0x20
__fput+0x10b/0x2e0
__x64_sys_close+0x47/0xa0
The underlying race (diagnosed by Tejun Heo) is a stomp of @ops->priv,
not a missing NULL check:
T2 unreg(K) T1 reg(K)
----------- ---------
sch = ops->priv = sch_b800
scx_disable; flush_disable_work
[scx_root_disable: scx_root=NULL,
mutex_unlock, state=DISABLED]
mutex_lock; state ok
scx_alloc_and_add_sched:
ops->priv = sch_a800
scx_root = sch_a800; init=0
state=ENABLED; mutex_unlock
[flush returns]
RCU_INIT_POINTER(ops->priv, NULL) <-- clobbers sch_a800
kobject_put(sch_b800)
T1 acquires scx_enable_mutex inside scx_root_disable()'s mutex_unlock
window and starts a fresh attach on the same kdata, assigning sch_a800
to @ops->priv. T2 then continues out of scx_disable()/flush_disable_work
and clobbers @ops->priv to NULL, leaking sch_a800; the bpf_link is gone
but state stays SCX_ENABLED, so all future attaches fail with -EBUSY
permanently. The next bpf_scx_unreg() on that kdata then reads NULL
@ops->priv and dereferences it in scx_claim_exit().
Make @ops->priv the lifecycle binding: in scx_root_enable_workfn() and
scx_sub_enable_workfn(), after the existing state check and still under
scx_enable_mutex, refuse with -EBUSY if @ops->priv is non-NULL. This
rejects an attempt to reuse a kdata that is still bound to a previous
scheduler instance, closing the race without changing the unreg side.
Fixes: 105dcd005b ("sched_ext: Introduce scx_prog_sched()")
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
@@ -6803,6 +6803,19 @@ static void scx_root_enable_workfn(struct kthread_work *work)
 		goto err_unlock;
 	}
 
+	/*
+	 * @ops->priv binds @ops to its scx_sched instance. It is set here by
+	 * scx_alloc_and_add_sched() and cleared at the tail of bpf_scx_unreg(),
+	 * which runs after scx_root_disable() has dropped scx_enable_mutex. If
+	 * it's still non-NULL here, a previous attachment on @ops has not
+	 * finished tearing down; proceeding would let the in-flight unreg's
+	 * RCU_INIT_POINTER(NULL) clobber the @ops->priv we are about to assign.
+	 */
+	if (rcu_access_pointer(ops->priv)) {
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+
 	ret = alloc_kick_syncs();
 	if (ret)
 		goto err_unlock;
@@ -7120,6 +7133,12 @@ static void scx_sub_enable_workfn(struct kthread_work *work)
 		goto out_unlock;
 	}
 
+	/* See scx_root_enable_workfn() for the @ops->priv check. */
+	if (rcu_access_pointer(ops->priv)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
 	cgrp = cgroup_get_from_id(ops->sub_cgroup_id);
 	if (IS_ERR(cgrp)) {
 		ret = PTR_ERR(cgrp);
Reference in New Issue
Block a user