mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-13 15:32:15 -04:00
Merge branch 'Two fixes for cpu-map'
Hou Tao says: ==================== This patchset fixes two reported warnings in cpu-map that occur when running xdp_redirect_cpu and some RT threads concurrently. Patch #1 fixes the warning in __cpu_map_ring_cleanup() when the kthread is stopped prematurely. Patch #2 fixes the warning in __xdp_return() when there are pending skbs in the ptr_ring. Please see the individual patches for more details. Comments are always welcome. ==================== Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
This commit is contained in:
@@ -28,6 +28,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/completion.h>
|
||||
#include <trace/events/xdp.h>
|
||||
#include <linux/btf_ids.h>
|
||||
|
||||
@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
|
||||
struct rcu_head rcu;
|
||||
|
||||
struct work_struct kthread_stop_wq;
|
||||
struct completion kthread_running;
|
||||
};
|
||||
|
||||
struct bpf_cpu_map {
|
||||
@@ -129,11 +131,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
|
||||
* invoked cpu_map_kthread_stop(). Catch any broken behaviour
|
||||
* gracefully and warn once.
|
||||
*/
|
||||
struct xdp_frame *xdpf;
|
||||
void *ptr;
|
||||
|
||||
while ((xdpf = ptr_ring_consume(ring)))
|
||||
if (WARN_ON_ONCE(xdpf))
|
||||
xdp_return_frame(xdpf);
|
||||
while ((ptr = ptr_ring_consume(ring))) {
|
||||
WARN_ON_ONCE(1);
|
||||
if (unlikely(__ptr_test_bit(0, &ptr))) {
|
||||
__ptr_clear_bit(0, &ptr);
|
||||
kfree_skb(ptr);
|
||||
continue;
|
||||
}
|
||||
xdp_return_frame(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
|
||||
@@ -153,7 +161,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
|
||||
static void cpu_map_kthread_stop(struct work_struct *work)
|
||||
{
|
||||
struct bpf_cpu_map_entry *rcpu;
|
||||
int err;
|
||||
|
||||
rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
|
||||
|
||||
@@ -163,14 +170,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
|
||||
rcu_barrier();
|
||||
|
||||
/* kthread_stop will wake_up_process and wait for it to complete */
|
||||
err = kthread_stop(rcpu->kthread);
|
||||
if (err) {
|
||||
/* kthread_stop may be called before cpu_map_kthread_run
|
||||
* is executed, so we need to release the memory related
|
||||
* to rcpu.
|
||||
*/
|
||||
put_cpu_map_entry(rcpu);
|
||||
}
|
||||
kthread_stop(rcpu->kthread);
|
||||
}
|
||||
|
||||
static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
|
||||
@@ -298,11 +298,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
|
||||
return nframes;
|
||||
}
|
||||
|
||||
|
||||
static int cpu_map_kthread_run(void *data)
|
||||
{
|
||||
struct bpf_cpu_map_entry *rcpu = data;
|
||||
|
||||
complete(&rcpu->kthread_running);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
/* When kthread gives stop order, then rcpu have been disconnected
|
||||
@@ -467,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
|
||||
goto free_ptr_ring;
|
||||
|
||||
/* Setup kthread */
|
||||
init_completion(&rcpu->kthread_running);
|
||||
rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
|
||||
"cpumap/%d/map:%d", cpu,
|
||||
map->id);
|
||||
@@ -480,6 +481,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
|
||||
kthread_bind(rcpu->kthread, cpu);
|
||||
wake_up_process(rcpu->kthread);
|
||||
|
||||
/* Make sure kthread has been running, so kthread_stop() will not
|
||||
* stop the kthread prematurely and all pending frames or skbs
|
||||
* will be handled by the kthread before kthread_stop() returns.
|
||||
*/
|
||||
wait_for_completion(&rcpu->kthread_running);
|
||||
|
||||
return rcpu;
|
||||
|
||||
free_prog:
|
||||
|
||||
Reference in New Issue
Block a user