Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU changes from Ingo Molnar:
0. 'idle RCU':
Adds RCU APIs that allow non-idle tasks to enter RCU idle mode and
provides x86 code to make use of them, allowing RCU to treat
user-mode execution as an extended quiescent state when the new
RCU_USER_QS kernel configuration parameter is specified. (Work is
in progress to port this to a few other architectures, but is not
part of this series.)
1. A fix for a latent bug that has been in RCU ever since the addition
of CPU stall warnings. This bug results in false-positive stall
warnings, but thus far only on embedded systems with severely
cut-down userspace configurations.
2. Further reductions in latency spikes for huge systems, along with
additional boot-time adaptation to the actual hardware.
This is a large change, as it moves RCU grace-period initialization
and cleanup, along with quiescent-state forcing, from softirq to a
kthread. However, it appears to be in quite good shape (famous
last words).
3. Updates to documentation and rcutorture, the latter category
including keeping statistics on CPU-hotplug latencies and fixing
some initialization-time races.
4. CPU-hotplug fixes and improvements.
5. Idle-loop fixes that were omitted on an earlier submission.
6. Miscellaneous fixes and improvements.
In certain RCU configurations, new kernel threads (rcu_bh, rcu_sched) will
show up; their CPU time makes the RCU processing overhead visible.
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (90 commits)
rcu: Apply micro-optimization and int/bool fixes to RCU's idle handling
rcu: Userspace RCU extended QS selftest
x86: Exit RCU extended QS on notify resume
x86: Use the new schedule_user API on userspace preemption
rcu: Exit RCU extended QS on user preemption
rcu: Exit RCU extended QS on kernel preemption after irq/exception
x86: Exception hooks for userspace RCU extended QS
x86: Unspaghettize do_general_protection()
x86: Syscall hooks for userspace RCU extended QS
rcu: Switch task's syscall hooks on context switch
rcu: Ignore userspace extended quiescent state by default
rcu: Allow rcu_user_enter()/exit() to nest
rcu: Settle config for userspace extended quiescent state
rcu: Make RCU_FAST_NO_HZ handle adaptive ticks
rcu: New rcu_user_enter_after_irq() and rcu_user_exit_after_irq() APIs
rcu: New rcu_user_enter() and rcu_user_exit() APIs
ia64: Add missing RCU idle APIs on idle loop
xtensa: Add missing RCU idle APIs on idle loop
score: Add missing RCU idle APIs on idle loop
parisc: Add missing RCU idle APIs on idle loop
...
include/linux/interrupt.h
@@ -430,6 +430,8 @@ enum
 	NR_SOFTIRQS
 };
 
+#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
  */
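The new SOFTIRQ_STOP_IDLE_MASK covers every softirq bit except RCU_SOFTIRQ.
A minimal sketch of the kind of check it is meant for follows; it is
illustrative only, and softirq_blocks_idle() is a made-up helper rather than
the in-tree call site. The idea: once RCU processing runs in per-CPU
kthreads, a pending RCU_SOFTIRQ by itself need not keep a CPU from stopping
its tick and going idle.

#include <linux/interrupt.h>

/*
 * Illustrative helper (not an in-tree function): report whether any
 * softirq other than RCU_SOFTIRQ is pending on this CPU.
 */
static bool softirq_blocks_idle(void)
{
	return local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
}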
include/linux/kthread.h
@@ -14,6 +14,11 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 	kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
 
 
+struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+					  void *data,
+					  unsigned int cpu,
+					  const char *namefmt);
+
 /**
  * kthread_run - create and wake a thread.
  * @threadfn: the function to run until signal_pending(current).
@@ -34,9 +39,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 int kthread_stop(struct task_struct *k);
-int kthread_should_stop(void);
+bool kthread_should_stop(void);
+bool kthread_should_park(void);
 bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
+int kthread_park(struct task_struct *k);
+void kthread_unpark(struct task_struct *k);
+void kthread_parkme(void);
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;
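The kthread.h hunks above add a park/unpark life cycle alongside stop. A
hedged sketch of the usage pattern the new declarations suggest, with
example_percpu_thread() as a hypothetical thread function not taken from the
tree: a parked thread sits out CPU-offline periods instead of being torn
down and recreated.

#include <linux/delay.h>
#include <linux/kthread.h>

/* Hypothetical per-CPU thread function, shown only to illustrate the API. */
static int example_percpu_thread(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleep here while our CPU is offline... */
			kthread_parkme();
			/* ...and resume once kthread_unpark() is called. */
			continue;
		}
		/* ... do the per-CPU work ... */
		msleep(100);
	}
	return 0;
}

The smpboot helpers added later in this series (include/linux/smpboot.h,
below) drive kthread_create_on_cpu(), kthread_park() and kthread_unpark()
from the CPU-hotplug code, so most users only see the loop above.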
include/linux/rcupdate.h
@@ -191,6 +191,21 @@ extern void rcu_idle_enter(void);
 extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
+
+#ifdef CONFIG_RCU_USER_QS
+extern void rcu_user_enter(void);
+extern void rcu_user_exit(void);
+extern void rcu_user_enter_after_irq(void);
+extern void rcu_user_exit_after_irq(void);
+extern void rcu_user_hooks_switch(struct task_struct *prev,
+				  struct task_struct *next);
+#else
+static inline void rcu_user_enter(void) { }
+static inline void rcu_user_exit(void) { }
+static inline void rcu_user_enter_after_irq(void) { }
+static inline void rcu_user_exit_after_irq(void) { }
+#endif /* CONFIG_RCU_USER_QS */
+
 extern void exit_rcu(void);
 
 /**
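These declarations are the core of the RCU_USER_QS feature described in item
0 of the merge message: rcu_user_enter() tells RCU that the CPU is about to
run user code and may be treated as quiescent, and rcu_user_exit() reverses
that on the way back into the kernel. A minimal sketch of the calling
convention, assuming hypothetical hook names (the real wiring in this series
goes through x86's syscall and exception hooks):

#include <linux/rcupdate.h>

/* Hypothetical architecture hooks; the names are illustrative only. */
static void arch_return_to_user(void)
{
	/* User code is about to run: RCU may ignore this CPU entirely. */
	rcu_user_enter();
}

static void arch_enter_from_user(void)
{
	/* Back in the kernel: RCU must track this CPU again. */
	rcu_user_exit();
}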
@@ -210,14 +225,12 @@ extern void exit_rcu(void);
  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
  * quite limited. If deeper nesting is required, it will be necessary
  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
- *
- * This macro may be used from process-level code only.
  */
 #define RCU_NONIDLE(a) \
 	do { \
-		rcu_idle_exit(); \
+		rcu_irq_enter(); \
 		do { a; } while (0); \
-		rcu_idle_enter(); \
+		rcu_irq_exit(); \
 	} while (0)
 
 /*
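This hunk switches RCU_NONIDLE() from rcu_idle_exit()/rcu_idle_enter() to
rcu_irq_enter()/rcu_irq_exit(), which is apparently why the "process-level
code only" sentence is dropped from the comment. Usage stays the same; a
small sketch, with do_idle_event() as a made-up RCU-using callee:

#include <linux/rcupdate.h>

/* Made-up callee that uses RCU readers; defined only for illustration. */
static void do_idle_event(int cpu)
{
	rcu_read_lock();
	/* ... dereference RCU-protected per-CPU state here ... */
	rcu_read_unlock();
}

/* From idle context, briefly pull RCU out of its extended quiescent state. */
static void report_idle_event(int cpu)
{
	RCU_NONIDLE(do_idle_event(cpu));
}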
include/linux/sched.h
@@ -1885,6 +1885,14 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
+static inline void rcu_switch(struct task_struct *prev,
+			      struct task_struct *next)
+{
+#ifdef CONFIG_RCU_USER_QS
+	rcu_user_hooks_switch(prev, next);
+#endif
+}
+
 static inline void tsk_restore_flags(struct task_struct *task,
 				     unsigned long orig_flags, unsigned long flags)
 {
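rcu_switch() exists so the scheduler can hand the per-task RCU user-mode
hooks from the outgoing task to the incoming one ("rcu: Switch task's
syscall hooks on context switch" in the list above). A sketch of the
intended shape of the call site; example_switch_tasks() is an illustrative
stand-in, since the actual scheduler change is not part of this excerpt.

#include <linux/sched.h>

/* Illustrative stand-in for the scheduler's context-switch path. */
static void example_switch_tasks(struct task_struct *prev,
				 struct task_struct *next)
{
	/* Move the RCU user-mode hooks over to the task about to run. */
	rcu_switch(prev, next);

	/* ... the architecture's switch_to() would follow here ... */
}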
include/linux/smpboot.h (new file, 43 lines)
@@ -0,0 +1,43 @@
+#ifndef _LINUX_SMPBOOT_H
+#define _LINUX_SMPBOOT_H
+
+#include <linux/types.h>
+
+struct task_struct;
+/* Cookie handed to the thread_fn*/
+struct smpboot_thread_data;
+
+/**
+ * struct smp_hotplug_thread - CPU hotplug related thread descriptor
+ * @store:		Pointer to per cpu storage for the task pointers
+ * @list:		List head for core management
+ * @thread_should_run:	Check whether the thread should run or not. Called with
+ *			preemption disabled.
+ * @thread_fn:		The associated thread function
+ * @setup:		Optional setup function, called when the thread gets
+ *			operational the first time
+ * @cleanup:		Optional cleanup function, called when the thread
+ *			should stop (module exit)
+ * @park:		Optional park function, called when the thread is
+ *			parked (cpu offline)
+ * @unpark:		Optional unpark function, called when the thread is
+ *			unparked (cpu online)
+ * @thread_comm:	The base name of the thread
+ */
+struct smp_hotplug_thread {
+	struct task_struct __percpu	**store;
+	struct list_head		list;
+	int				(*thread_should_run)(unsigned int cpu);
+	void				(*thread_fn)(unsigned int cpu);
+	void				(*setup)(unsigned int cpu);
+	void				(*cleanup)(unsigned int cpu, bool online);
+	void				(*park)(unsigned int cpu);
+	void				(*unpark)(unsigned int cpu);
+	const char			*thread_comm;
+};
+
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_thread_schedule(void);
+
+#endif
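This new interface is what lets ksoftirqd and the per-CPU RCU kthreads be
managed uniformly across CPU hotplug: smpboot_register_percpu_thread()
spawns one thread per online CPU, and the core parks and unparks them as
CPUs go down and come back up, calling the optional setup/park/unpark/cleanup
hooks along the way. A hedged sketch of a client follows; every example_*
name is invented for illustration, and only the API declared in the header
above is real.

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, example_thread);
static DEFINE_PER_CPU(unsigned int, example_work_pending);

static int example_should_run(unsigned int cpu)
{
	/* Called with preemption disabled, as the kerneldoc above notes. */
	return __this_cpu_read(example_work_pending);
}

static void example_thread_fn(unsigned int cpu)
{
	__this_cpu_write(example_work_pending, 0);
	/* ... process this CPU's pending work ... */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_thread,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "example/%u",
};

static int __init example_threads_init(void)
{
	/* One thread per online CPU; parked/unparked automatically on hotplug. */
	return smpboot_register_percpu_thread(&example_threads);
}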
include/linux/tracepoint.h
@@ -136,6 +136,22 @@ static inline void tracepoint_synchronize_unregister(void)
 		postrcu;						\
 	} while (0)
 
+#ifndef MODULE
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+	static inline void trace_##name##_rcuidle(proto)		\
+	{								\
+		if (static_key_false(&__tracepoint_##name.key))		\
+			__DO_TRACE(&__tracepoint_##name,		\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args),			\
+				TP_CONDITION(cond),			\
+				rcu_idle_exit(),			\
+				rcu_idle_enter());			\
+	}
+#else
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
+#endif
+
 /*
  * Make sure the alignment of the structure in the __tracepoints section will
  * not add unwanted padding between the beginning of the section and the
@@ -151,16 +167,8 @@ static inline void tracepoint_synchronize_unregister(void)
 				TP_ARGS(data_args),			\
 				TP_CONDITION(cond),,);			\
 	} \
-	static inline void trace_##name##_rcuidle(proto)		\
-	{								\
-		if (static_key_false(&__tracepoint_##name.key))		\
-			__DO_TRACE(&__tracepoint_##name,		\
-				TP_PROTO(data_proto),			\
-				TP_ARGS(data_args),			\
-				TP_CONDITION(cond),			\
-				rcu_idle_exit(),			\
-				rcu_idle_enter());			\
-	}								\
+	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
+			    PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
 	static inline int						\
 	register_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
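Both tracepoint.h hunks concern the _rcuidle tracepoint variants:
trace_<name>_rcuidle() wraps __DO_TRACE() in rcu_idle_exit()/rcu_idle_enter()
so a tracepoint can fire while the CPU sits in RCU's idle extended quiescent
state, and the second hunk moves that definition behind __DECLARE_TRACE_RCU()
so it is compiled out for modules. A small usage sketch, assuming the
existing cpu_idle tracepoint from <trace/events/power.h>;
arch_low_power_wait() is a hypothetical stand-in for the architecture's idle
instruction.

#include <trace/events/power.h>

/* Hypothetical low-power wait, standing in for hlt/wfi/mwait. */
static inline void arch_low_power_wait(void)
{
}

static void example_idle(unsigned int cpu)
{
	/* Safe even though RCU considers this CPU idle. */
	trace_cpu_idle_rcuidle(1, cpu);
	arch_low_power_wait();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, cpu);
}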