mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-30 00:03:27 -04:00
Merge tag 'trace-v7.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt: - Fix potential deadlock in osnoise and hotplug The interface_lock can be taken by an osnoise thread, and the CPU shutdown logic of osnoise can wait for this thread to finish. But cpus_read_lock() can also be taken while holding the interface_lock. This produces a circular lock dependency and can cause a deadlock. Swap the ordering of cpus_read_lock() and the interface_lock so that interface_lock is taken within the cpus_read_lock() context, preventing this circular dependency. - Fix freeing of event triggers in early boot up If the same trigger is added on the kernel command line, the second one will fail to be applied and the trigger created will be freed. This calls into the deferred logic and creates a kernel thread to do the freeing. But the command line logic is called before kernel threads can be created, and this leads to a NULL pointer dereference. Delay freeing event triggers until late init. * tag 'trace-v7.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: tracing: Drain deferred trigger frees if kthread creation fails tracing: Fix potential deadlock in cpu hotplug with osnoise
This commit is contained in:
@@ -22,6 +22,39 @@ static struct task_struct *trigger_kthread;
|
||||
static struct llist_head trigger_data_free_list;
|
||||
static DEFINE_MUTEX(trigger_data_kthread_mutex);
|
||||
|
||||
static int trigger_kthread_fn(void *ignore);
|
||||
|
||||
static void trigger_create_kthread_locked(void)
|
||||
{
|
||||
lockdep_assert_held(&trigger_data_kthread_mutex);
|
||||
|
||||
if (!trigger_kthread) {
|
||||
struct task_struct *kthread;
|
||||
|
||||
kthread = kthread_create(trigger_kthread_fn, NULL,
|
||||
"trigger_data_free");
|
||||
if (!IS_ERR(kthread))
|
||||
WRITE_ONCE(trigger_kthread, kthread);
|
||||
}
|
||||
}
|
||||
|
||||
static void trigger_data_free_queued_locked(void)
|
||||
{
|
||||
struct event_trigger_data *data, *tmp;
|
||||
struct llist_node *llnodes;
|
||||
|
||||
lockdep_assert_held(&trigger_data_kthread_mutex);
|
||||
|
||||
llnodes = llist_del_all(&trigger_data_free_list);
|
||||
if (!llnodes)
|
||||
return;
|
||||
|
||||
tracepoint_synchronize_unregister();
|
||||
|
||||
llist_for_each_entry_safe(data, tmp, llnodes, llist)
|
||||
kfree(data);
|
||||
}
|
||||
|
||||
/* Bulk garbage collection of event_trigger_data elements */
|
||||
static int trigger_kthread_fn(void *ignore)
|
||||
{
|
||||
@@ -56,30 +89,50 @@ void trigger_data_free(struct event_trigger_data *data)
|
||||
if (data->cmd_ops->set_filter)
|
||||
data->cmd_ops->set_filter(NULL, data, NULL);
|
||||
|
||||
if (unlikely(!trigger_kthread)) {
|
||||
guard(mutex)(&trigger_data_kthread_mutex);
|
||||
/* Check again after taking mutex */
|
||||
if (!trigger_kthread) {
|
||||
struct task_struct *kthread;
|
||||
|
||||
kthread = kthread_create(trigger_kthread_fn, NULL,
|
||||
"trigger_data_free");
|
||||
if (!IS_ERR(kthread))
|
||||
WRITE_ONCE(trigger_kthread, kthread);
|
||||
}
|
||||
/*
|
||||
* Boot-time trigger registration can fail before kthread creation
|
||||
* works. Keep the deferred-free semantics during boot and let late
|
||||
* init start the kthread to drain the list.
|
||||
*/
|
||||
if (system_state == SYSTEM_BOOTING && !trigger_kthread) {
|
||||
llist_add(&data->llist, &trigger_data_free_list);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!trigger_kthread) {
|
||||
/* Do it the slow way */
|
||||
tracepoint_synchronize_unregister();
|
||||
kfree(data);
|
||||
return;
|
||||
if (unlikely(!trigger_kthread)) {
|
||||
guard(mutex)(&trigger_data_kthread_mutex);
|
||||
|
||||
trigger_create_kthread_locked();
|
||||
/* Check again after taking mutex */
|
||||
if (!trigger_kthread) {
|
||||
llist_add(&data->llist, &trigger_data_free_list);
|
||||
/* Drain the queued frees synchronously if creation failed. */
|
||||
trigger_data_free_queued_locked();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
llist_add(&data->llist, &trigger_data_free_list);
|
||||
wake_up_process(trigger_kthread);
|
||||
}
|
||||
|
||||
/*
 * Late-init drain of triggers queued before kthreads could be created.
 *
 * Boot-time trigger registration may queue entries on
 * trigger_data_free_list while kthread creation is still impossible.
 * Once kthreads work, start the deferred-free thread and kick it; if
 * creation still fails, drain the queue synchronously.  Always returns
 * 0 (initcall convention).
 */
static int __init trigger_data_free_init(void)
{
	guard(mutex)(&trigger_data_kthread_mutex);

	/* Nothing was queued during boot. */
	if (llist_empty(&trigger_data_free_list))
		return 0;

	trigger_create_kthread_locked();

	if (!trigger_kthread) {
		/* No kthread available: free the backlog right here. */
		trigger_data_free_queued_locked();
		return 0;
	}

	wake_up_process(trigger_kthread);
	return 0;
}
late_initcall(trigger_data_free_init);
|
||||
|
||||
static inline void data_ops_trigger(struct event_trigger_data *data,
|
||||
struct trace_buffer *buffer, void *rec,
|
||||
struct ring_buffer_event *event)
|
||||
|
||||
@@ -2073,8 +2073,8 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
|
||||
if (!osnoise_has_registered_instances())
|
||||
return;
|
||||
|
||||
guard(mutex)(&interface_lock);
|
||||
guard(cpus_read_lock)();
|
||||
guard(mutex)(&interface_lock);
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return;
|
||||
@@ -2237,11 +2237,11 @@ static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
|
||||
if (running)
|
||||
stop_per_cpu_kthreads();
|
||||
|
||||
mutex_lock(&interface_lock);
|
||||
/*
|
||||
* avoid CPU hotplug operations that might read options.
|
||||
*/
|
||||
cpus_read_lock();
|
||||
mutex_lock(&interface_lock);
|
||||
|
||||
retval = cnt;
|
||||
|
||||
@@ -2257,8 +2257,8 @@ static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
|
||||
clear_bit(option, &osnoise_options);
|
||||
}
|
||||
|
||||
cpus_read_unlock();
|
||||
mutex_unlock(&interface_lock);
|
||||
cpus_read_unlock();
|
||||
|
||||
if (running)
|
||||
start_per_cpu_kthreads();
|
||||
@@ -2345,16 +2345,16 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
|
||||
if (running)
|
||||
stop_per_cpu_kthreads();
|
||||
|
||||
mutex_lock(&interface_lock);
|
||||
/*
|
||||
* osnoise_cpumask is read by CPU hotplug operations.
|
||||
*/
|
||||
cpus_read_lock();
|
||||
mutex_lock(&interface_lock);
|
||||
|
||||
cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
|
||||
|
||||
cpus_read_unlock();
|
||||
mutex_unlock(&interface_lock);
|
||||
cpus_read_unlock();
|
||||
|
||||
if (running)
|
||||
start_per_cpu_kthreads();
|
||||
|
||||
Reference in New Issue
Block a user