mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 02:01:18 -04:00
locking/mutex: Add context analysis
Add compiler context analysis annotations.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260121111213.745353747@infradead.org
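For readers unfamiliar with the annotation vocabulary used throughout this patch: __acquires(), __releases(), __cond_acquires(), __must_hold() and __guarded_by() describe the locking contract of a function or field so the compiler can check it. The stand-alone sketch below is illustrative only, not the kernel's macro definitions; it shows the Clang thread-safety attributes such macros are presumed to expand to when the analysis is enabled. Compile with clang -Wthread-safety -c sketch.c to see the checks fire.

/* Illustrative sketch only: Clang thread-safety attributes of the kind
 * the kernel's context-analysis macros build on. */
struct __attribute__((capability("mutex"))) toy_mutex {
	int locked;
};

/* Acquires the lock unconditionally. */
void toy_lock(struct toy_mutex *m) __attribute__((acquire_capability(*m)));
/* Releases the lock; the checker verifies it is held at the call site. */
void toy_unlock(struct toy_mutex *m) __attribute__((release_capability(*m)));
/* First argument is the return value that means "acquired". */
int toy_trylock(struct toy_mutex *m) __attribute__((try_acquire_capability(1, *m)));

struct toy_mutex m;
int counter __attribute__((guarded_by(m)));

void good(void)
{
	toy_lock(&m);
	counter++;		/* OK: 'm' is held */
	toy_unlock(&m);
}

void bad(void)
{
	counter++;		/* flagged: requires holding 'm' */
}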
@@ -183,7 +183,7 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
-extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __acquires(lock);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass) __cond_acquires(0, lock);
 extern int __must_check _mutex_lock_killable(struct mutex *lock,
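__cond_acquires(0, lock) says the lock is held on return only when the function returns 0. A hypothetical caller that the analysis would accept, a sketch rather than code from this patch:

#include <linux/mutex.h>

/* Hypothetical caller: the lock is released exactly on the path where
 * the conditional acquire succeeded (return value 0). */
static int frob_locked(struct mutex *lock, int *val)
{
	int err = mutex_lock_interruptible_nested(lock, 0);

	if (err)
		return err;	/* not held on this path */
	(*val)++;
	mutex_unlock(lock);
	return 0;
}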
@@ -44,7 +44,7 @@ context_lock_struct(mutex) {
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
-	struct mutex_waiter *first_waiter;
+	struct mutex_waiter *first_waiter __guarded_by(&wait_lock);
 #ifdef CONFIG_DEBUG_MUTEXES
 	void *magic;
 #endif
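__guarded_by() ties a field to the lock that protects it; every access is then checked to happen with that lock held. The same pattern on a hypothetical structure (sketch, following the convention this hunk uses for first_waiter):

#include <linux/spinlock.h>

/* Hypothetical structure: 'count' may only be touched under 'lock'. */
struct foo {
	spinlock_t	lock;
	int		count __guarded_by(&lock);
};

static void foo_inc(struct foo *f)
{
	spin_lock(&f->lock);
	f->count++;		/* OK: f->lock is held */
	spin_unlock(&f->lock);
}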
@@ -3,6 +3,8 @@
 # and is generally not a function of system call inputs.
 KCOV_INSTRUMENT := n
 
+CONTEXT_ANALYSIS_mutex.o := y
+
 obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
 
 # Avoid recursion lockdep -> sanitizer -> ... -> lockdep & improve performance.
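The analysis is opt-in per object file: CONTEXT_ANALYSIS_mutex.o := y enables it for mutex.o only. Once enabled, a function whose lock state at return contradicts its annotations is flagged at build time. A hypothetical example of the class of bug this catches:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(big_lock);

/* Hypothetical buggy function: one error path leaks 'big_lock'. With
 * context analysis enabled, the early return is flagged at compile time. */
static int set_mode(int mode)
{
	mutex_lock(&big_lock);
	if (mode < 0)
		return -EINVAL;	/* bug: returns with big_lock held */
	/* ... apply mode ... */
	mutex_unlock(&big_lock);
	return 0;
}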
@@ -46,8 +46,9 @@
 static void __mutex_init_generic(struct mutex *lock)
 {
 	atomic_long_set(&lock->owner, 0);
-	raw_spin_lock_init(&lock->wait_lock);
-	lock->first_waiter = NULL;
+	scoped_guard (raw_spinlock_init, &lock->wait_lock) {
+		lock->first_waiter = NULL;
+	}
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	osq_lock_init(&lock->osq);
 #endif
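scoped_guard() comes from <linux/cleanup.h> and holds the named guard for exactly the braced scope. Using a raw_spinlock_init guard here presumably lets the analysis treat the __guarded_by(&wait_lock) member as written with its lock nominally held during initialization. The general shape of scoped_guard, shown with the ordinary spinlock guard on a hypothetical structure (sketch):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

struct bar {
	spinlock_t	lock;
	int		count;
};

static void bar_reset(struct bar *b)
{
	/* The lock is taken at the '{' and dropped at the '}'. */
	scoped_guard (spinlock, &b->lock) {
		b->count = 0;
	}
}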
@@ -150,6 +151,7 @@ EXPORT_SYMBOL(mutex_init_generic);
  * follow with a __mutex_trylock() before failing.
  */
 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
+	__cond_acquires(true, lock)
 {
 	unsigned long curr = (unsigned long)current;
 	unsigned long zero = 0UL;
@@ -163,6 +165,7 @@ static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 }
 
 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
+	__cond_releases(true, lock)
 {
 	unsigned long curr = (unsigned long)current;
 
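__cond_acquires(true, lock) reads: the lock is held on return iff the function returned true, the usual trylock contract. Callers must therefore unlock only on the success path, e.g. (hypothetical sketch):

#include <linux/mutex.h>

/* Hypothetical trylock user: the unlock happens only where the trylock
 * succeeded, which is the shape the annotation lets the checker verify. */
static bool try_bump(struct mutex *lock, int *val)
{
	if (!mutex_trylock(lock))
		return false;	/* not held here */
	(*val)++;
	mutex_unlock(lock);
	return true;
}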
@@ -201,6 +204,7 @@ static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
 static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct mutex_waiter *first)
+	__must_hold(&lock->wait_lock)
 {
 	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
 	debug_mutex_add_waiter(lock, waiter, current);
@@ -219,6 +223,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 static void
 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+	__must_hold(&lock->wait_lock)
 {
 	if (list_empty(&waiter->list)) {
 		__mutex_clear_flag(lock, MUTEX_FLAGS);
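__must_hold() asserts a precondition: the caller enters and leaves with the lock held, and the function itself neither acquires nor releases it. In miniature, on a hypothetical helper (sketch):

#include <linux/mutex.h>

/* Hypothetical helper: may only be called with 'lock' already held. */
static void bump_locked(struct mutex *lock, int *val)
	__must_hold(lock)
{
	(*val)++;	/* checked: lock is held throughout */
}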
@@ -268,7 +273,8 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void __sched __mutex_lock_slowpath(struct mutex *lock);
+static void __sched __mutex_lock_slowpath(struct mutex *lock)
+	__acquires(lock);
 
 /**
  * mutex_lock - acquire the mutex
@@ -349,7 +355,7 @@ bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 	 * Similarly, stop spinning if we are no longer the
 	 * first waiter.
 	 */
-	if (waiter && lock->first_waiter != waiter)
+	if (waiter && data_race(lock->first_waiter != waiter))
 		return false;
 
 	return true;
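With first_waiter now marked __guarded_by(&wait_lock), this lockless check in the optimistic-spin path has to state that the unlocked read is intentional; wrapping the comparison in data_race() documents it for KCSAN and, presumably, keeps the guarded-member check quiet as well.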
@@ -534,7 +540,8 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 }
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+	__releases(lock);
 
 /**
  * mutex_unlock - release the mutex
@@ -574,6 +581,7 @@ EXPORT_SYMBOL(mutex_unlock);
  * of a unlocked mutex is not allowed.
  */
 void __sched ww_mutex_unlock(struct ww_mutex *lock)
+	__no_context_analysis
 {
 	__ww_mutex_unlock(lock);
 	mutex_unlock(&lock->base);
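__no_context_analysis opts a whole function out of checking, useful where the locking pattern is too irregular to annotate; here ww_mutex_unlock() releases lock->base on behalf of a caller that formally holds the ww_mutex, not the base mutex. The macro presumably wraps Clang's per-function escape hatch, shown here as a raw attribute (sketch):

/* Sketch: Clang's per-function opt-out from thread-safety checking. */
void odd_locking(void) __attribute__((no_thread_safety_analysis));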
@@ -587,6 +595,7 @@ static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
 		    struct lockdep_map *nest_lock, unsigned long ip,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+	__cond_acquires(0, lock)
 {
 	DEFINE_WAKE_Q(wake_q);
 	struct mutex_waiter waiter;
@@ -780,6 +789,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 static int __sched
 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 	     struct lockdep_map *nest_lock, unsigned long ip)
+	__cond_acquires(0, lock)
 {
 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
 }
@@ -787,6 +797,7 @@ __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 static int __sched
 __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
+	__cond_acquires(0, lock)
 {
 	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
 }
@@ -834,6 +845,7 @@ void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
+	__acquire(lock);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
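The bare __acquire(lock) statements added to these wrappers are assertions, not lock operations: __mutex_lock() is annotated __cond_acquires(0, lock), but mutex_lock_nested() and friends discard its return value (a TASK_UNINTERRUPTIBLE acquire cannot fail), so each wrapper asserts unconditionally that the lock is now held, satisfying its own __acquires(lock) contract from the header. __acquire() exists purely for the checker and should emit no code.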
@@ -842,6 +854,7 @@ void __sched
 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+	__acquire(lock);
 }
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
 
@@ -870,12 +883,14 @@ mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
 	token = io_schedule_prepare();
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
 			    subclass, NULL, _RET_IP_, NULL, 0);
+	__acquire(lock);
 	io_schedule_finish(token);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
 
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+	__cond_releases(nonzero, lock)
 {
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 	unsigned tmp;
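ww_mutex_deadlock_injection() gets the mirror-image annotation: __cond_releases(nonzero, lock) says the lock is released exactly when the function returns nonzero, matching the CONFIG_DEBUG_WW_MUTEX_SLOWPATH behavior of unlocking and returning -EDEADLK when a deadlock is injected, and returning 0 with the lock still held otherwise.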
@@ -937,6 +952,7 @@ EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
  * Release the lock, slowpath:
  */
 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
+	__releases(lock)
 {
 	struct task_struct *next = NULL;
 	struct mutex_waiter *waiter;
@@ -945,6 +961,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	unsigned long flags;
 
 	mutex_release(&lock->dep_map, ip);
+	__release(lock);
 
 	/*
 	 * Release the lock before (potentially) taking the spinlock such that
@@ -1066,24 +1083,29 @@ EXPORT_SYMBOL_GPL(mutex_lock_io);
 
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
+	__acquires(lock)
 {
 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
+	__acquire(lock);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
+	__cond_acquires(0, lock)
 {
 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
+	__cond_acquires(0, lock)
 {
 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+	__cond_acquires(0, lock)
 {
 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
 			       _RET_IP_, ctx);
@@ -1092,6 +1114,7 @@ __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
 				       struct ww_acquire_ctx *ctx)
+	__cond_acquires(0, lock)
 {
 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
 			       _RET_IP_, ctx);
@@ -7,6 +7,7 @@
  * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #ifndef CONFIG_PREEMPT_RT
+#include <linux/mutex.h>
 /*
  * This is the control structure for tasks blocked on mutex, which resides
  * on the blocked task's kernel stack:
@@ -7,12 +7,14 @@
 
 static inline struct mutex_waiter *
 __ww_waiter_first(struct mutex *lock)
+	__must_hold(&lock->wait_lock)
 {
 	return lock->first_waiter;
 }
 
 static inline struct mutex_waiter *
 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
+	__must_hold(&lock->wait_lock)
 {
 	w = list_next_entry(w, list);
 	if (lock->first_waiter == w)
@@ -23,6 +25,7 @@ __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
 
 static inline struct mutex_waiter *
 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
+	__must_hold(&lock->wait_lock)
 {
 	w = list_prev_entry(w, list);
 	if (lock->first_waiter == w)
@@ -33,6 +36,7 @@ __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
 
 static inline struct mutex_waiter *
 __ww_waiter_last(struct mutex *lock)
+	__must_hold(&lock->wait_lock)
 {
 	struct mutex_waiter *w = lock->first_waiter;
 
@@ -43,6 +47,7 @@ __ww_waiter_last(struct mutex *lock)
 
 static inline void
 __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
+	__must_hold(&lock->wait_lock)
 {
 	__mutex_add_waiter(lock, waiter, pos);
 }
@@ -60,16 +65,19 @@ __ww_mutex_has_waiters(struct mutex *lock)
 }
 
 static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
+	__acquires(&lock->wait_lock)
 {
 	raw_spin_lock_irqsave(&lock->wait_lock, *flags);
 }
 
 static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
+	__releases(&lock->wait_lock)
 {
 	raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
 }
 
 static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
+	__must_hold(&lock->wait_lock)
 {
 	lockdep_assert_held(&lock->wait_lock);
 }
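Wrappers that take or drop a lock on the caller's behalf carry __acquires()/__releases() so the set of held locks can be tracked across the call boundary. The same idiom on a hypothetical pair (sketch mirroring lock_wait_lock()/unlock_wait_lock()):

#include <linux/spinlock.h>

struct baz {
	spinlock_t	lock;
};

/* Hypothetical pair: the caller is credited with 'b->lock' after
 * baz_lock() and must give it back via baz_unlock(). */
static inline void baz_lock(struct baz *b)
	__acquires(&b->lock)
{
	spin_lock(&b->lock);
}

static inline void baz_unlock(struct baz *b)
	__releases(&b->lock)
{
	spin_unlock(&b->lock);
}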
@@ -296,6 +304,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
 			     struct ww_acquire_ctx *ww_ctx,
 			     struct ww_acquire_ctx *hold_ctx,
 			     struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
 	struct task_struct *owner = __ww_mutex_owner(lock);
 
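A note on the capitalized types in these last hunks: in the ww_mutex implementation, MUTEX and MUTEX_WAITER are preprocessor macros that expand to mutex/mutex_waiter for the regular build and to the rt_mutex equivalents when the file is rebuilt for WW_RT, so a single set of annotations covers both variants.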
@@ -360,6 +369,7 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
 static void
 __ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
 			 struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
 	struct MUTEX_WAITER *cur;
 
@@ -453,6 +463,7 @@ __ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
 static inline int
 __ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
 		      struct ww_acquire_ctx *ctx)
+	__must_hold(&lock->wait_lock)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
@@ -503,6 +514,7 @@ __ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
 			      struct MUTEX *lock,
 			      struct ww_acquire_ctx *ww_ctx,
 			      struct wake_q_head *wake_q)
+	__must_hold(&lock->wait_lock)
 {
 	struct MUTEX_WAITER *cur, *pos = NULL;
 	bool is_wait_die;