Merge tag 'lockdep-for-tip.2025.07.16' of git://git.kernel.org/pub/scm/linux/kernel/git/boqun/linux into locking/core

Locking changes for v6.17:

- General
  - Mark devm_mutex_init() as __must_check
  - Add #[must_use] to Lock::try_lock()
  - Remove OWNER_SPINNABLE in rwsem
  - Remove redundant #ifdefs in mutex
- Lockdep
  - Avoid returning struct in lock_stats()
  - Change `static const` into enum for LOCKF_*_IRQ_*
  - Temporarily use synchronize_rcu_expedited() in
    lockdep_unregister_key() to speed things up.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
10 changed files with 50 additions and 40 deletions

diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c

@@ -307,7 +307,9 @@ static int lp8860_probe(struct i2c_client *client)
 	led->client = client;
 	led->led_dev.brightness_set_blocking = lp8860_brightness_set;
 
-	devm_mutex_init(&client->dev, &led->lock);
+	ret = devm_mutex_init(&client->dev, &led->lock);
+	if (ret)
+		return dev_err_probe(&client->dev, ret, "Failed to initialize lock\n");
 
 	led->regmap = devm_regmap_init_i2c(client, &lp8860_regmap_config);
 	if (IS_ERR(led->regmap)) {

diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c

@@ -1273,7 +1273,9 @@ static int nxp_fspi_probe(struct platform_device *pdev)
 	if (ret)
 		return dev_err_probe(dev, ret, "Failed to request irq\n");
 
-	devm_mutex_init(dev, &f->lock);
+	ret = devm_mutex_init(dev, &f->lock);
+	if (ret)
+		return dev_err_probe(dev, ret, "Failed to initialize lock\n");
 
 	ctlr->bus_num = -1;
 	ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
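Both driver hunks follow the same pattern: now that devm_mutex_init() is __must_check (see the include/linux/mutex.h hunk below), silently discarding its return value would trigger -Wunused-result, so the probe functions check it and fail the probe through dev_err_probe().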

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h

@@ -175,7 +175,7 @@ struct lock_class_stats {
 	unsigned long			bounces[nr_bounce_types];
 };
 
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
 void clear_lock_stats(struct lock_class *class);
 
 #endif

diff --git a/include/linux/mutex.h b/include/linux/mutex.h

@@ -126,11 +126,11 @@ do { \
 
 #ifdef CONFIG_DEBUG_MUTEXES
 
-int __devm_mutex_init(struct device *dev, struct mutex *lock);
+int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);
 
 #else
 
-static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
+static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
 {
 	/*
 	 * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so
@@ -141,14 +141,17 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
 
 #endif
 
-#define devm_mutex_init(dev, mutex)			\
+#define __mutex_init_ret(mutex)				\
 ({							\
 	typeof(mutex) mutex_ = (mutex);			\
 							\
 	mutex_init(mutex_);				\
-	__devm_mutex_init(dev, mutex_);			\
+	mutex_;						\
 })
 
+#define devm_mutex_init(dev, mutex)			\
+	__devm_mutex_init(dev, __mutex_init_ret(mutex))
+
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/locking/mutex-design.rst.
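The point of splitting out __mutex_init_ret() is to let __must_check actually fire: gcc's warn_unused_result attribute is not propagated out of a ({ ... }) statement expression, so in the old macro shape the __devm_mutex_init() return value could be dropped without a peep. After the split, devm_mutex_init() expands to a bare __devm_mutex_init() call whose result is the macro's value, while __mutex_init_ret() only initializes the mutex and yields its pointer as the argument. A standalone sketch of the difference, with hypothetical names (not kernel code); compile with gcc -c -Wunused-result:

#define __must_check __attribute__((warn_unused_result))

static __must_check int setup(int *p)
{
	*p = 42;
	return 0;
}

/* Old shape: the __must_check call is the value of a ({ }) block. */
#define setup_old(p)	({ int *p_ = (p); setup(p_); })

/* New shape: the macro's expansion *is* the __must_check call. */
#define setup_new(p)	setup(p)

void caller(int *p)
{
	setup_old(p);	/* gcc stays silent: the attribute is lost in ({ }) */
	setup_new(p);	/* warning: ignoring return value of 'setup' */
}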

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c

@@ -297,33 +297,30 @@ static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 	dst->nr += src->nr;
 }
 
-struct lock_class_stats lock_stats(struct lock_class *class)
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats)
 {
-	struct lock_class_stats stats;
 	int cpu, i;
 
-	memset(&stats, 0, sizeof(struct lock_class_stats));
+	memset(stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
-			stats.contention_point[i] += pcs->contention_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contention_point); i++)
+			stats->contention_point[i] += pcs->contention_point[i];
 
-		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
-			stats.contending_point[i] += pcs->contending_point[i];
+		for (i = 0; i < ARRAY_SIZE(stats->contending_point); i++)
+			stats->contending_point[i] += pcs->contending_point[i];
 
-		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
-		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
+		lock_time_add(&pcs->read_waittime, &stats->read_waittime);
+		lock_time_add(&pcs->write_waittime, &stats->write_waittime);
 
-		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
-		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
+		lock_time_add(&pcs->read_holdtime, &stats->read_holdtime);
+		lock_time_add(&pcs->write_holdtime, &stats->write_holdtime);
 
-		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
-			stats.bounces[i] += pcs->bounces[i];
+		for (i = 0; i < ARRAY_SIZE(stats->bounces); i++)
+			stats->bounces[i] += pcs->bounces[i];
 	}
-
-	return stats;
 }
 
 void clear_lock_stats(struct lock_class *class)
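The signature change avoids returning struct lock_class_stats by value: the struct carries four struct lock_time members plus the contention_point[], contending_point[] and bounces[] arrays, so the aggregate return meant copying a few hundred bytes through a hidden stack slot on every call. Summing directly into a caller-provided buffer drops the copy; the only caller is adjusted in the kernel/locking/lockdep_proc.c hunk below.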
@@ -6619,8 +6616,16 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (need_callback)
 		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
-	synchronize_rcu();
+	/*
+	 * Wait until is_dynamic_key() has finished accessing k->hash_entry.
+	 *
+	 * Some operations like __qdisc_destroy() will call this in a debug
+	 * kernel, and the network traffic is disabled while waiting, hence
+	 * the delay of the wait matters in debugging cases. Currently use a
+	 * synchronize_rcu_expedited() to speed up the wait at the cost of
+	 * system IPIs. TODO: Replace RCU with hazptr for this.
+	 */
+	synchronize_rcu_expedited();
 }
 EXPORT_SYMBOL_GPL(lockdep_unregister_key);

diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h

@@ -47,29 +47,31 @@ enum {
 	__LOCKF(USED_READ)
 };
 
+enum {
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
-static const unsigned long LOCKF_ENABLED_IRQ =
+	LOCKF_ENABLED_IRQ =
 #include "lockdep_states.h"
-	0;
+		0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
-static const unsigned long LOCKF_USED_IN_IRQ =
+	LOCKF_USED_IN_IRQ =
 #include "lockdep_states.h"
-	0;
+		0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
-static const unsigned long LOCKF_ENABLED_IRQ_READ =
+	LOCKF_ENABLED_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+		0,
 #undef LOCKDEP_STATE
 
 #define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
-static const unsigned long LOCKF_USED_IN_IRQ_READ =
+	LOCKF_USED_IN_IRQ_READ =
 #include "lockdep_states.h"
-	0;
+		0,
 #undef LOCKDEP_STATE
+};
 
 #define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
 #define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)
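Enumerators are a better fit here than `static const` objects: every translation unit that includes this header would otherwise instantiate its own copy of the four variables (and compilers can flag the unused copies with -Wunused-const-variable), whereas enum constants occupy no storage and remain usable in integer constant expressions. A self-contained sketch of the same x-macro trick, with hypothetical MASK_* names standing in for LOCKF_* and the `#include "lockdep_states.h"` expansion written out by hand; compile with gcc -c:

#define BIT(n)	(1UL << (n))

enum {
	MASK_HARDIRQ = BIT(0),
	MASK_SOFTIRQ = BIT(1),
};

/*
 * In the kernel, the LOCKDEP_STATE() invocations below come from
 * #include "lockdep_states.h"; here they are spelled out by hand.
 * Each one expands to "MASK_<state> |", and the trailing 0 closes
 * the OR chain, so the enumerator is the union of all state bits.
 */
enum {
#define LOCKDEP_STATE(s)	MASK_##s |
	MASK_ANY_IRQ =
	LOCKDEP_STATE(HARDIRQ)
	LOCKDEP_STATE(SOFTIRQ)
		0,
#undef LOCKDEP_STATE
};

/* A compile-time constant with no storage behind it. */
_Static_assert(MASK_ANY_IRQ == (MASK_HARDIRQ | MASK_SOFTIRQ), "mask");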

diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c

@@ -657,7 +657,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
 			if (!test_bit(idx, lock_classes_in_use))
 				continue;
 			iter->class = class;
-			iter->stats = lock_stats(class);
+			lock_stats(class, &iter->stats);
 			iter++;
 		}
 

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c

@@ -191,9 +191,7 @@ static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
-#endif
 
 	debug_mutex_add_waiter(lock, waiter, current);
 	list_add_tail(&waiter->list, list);
@@ -209,9 +207,7 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
 		__mutex_clear_flag(lock, MUTEX_FLAGS);
 
 	debug_mutex_remove_waiter(lock, waiter, current);
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
 	hung_task_clear_blocker();
-#endif
 }
 
 /*
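The #ifdef/#endif pairs were redundant because the hung_task header (include/linux/hung_task.h) already provides empty inline stubs for hung_task_set_blocker() and hung_task_clear_blocker() when CONFIG_DETECT_HUNG_TASK_BLOCKER is disabled, so the now-unconditional calls compile away to nothing on such configurations.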

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c

@@ -727,8 +727,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	return ret;
 }
 
-#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)
-
 static inline enum owner_state
 rwsem_owner_state(struct task_struct *owner, unsigned long flags)
 {
@@ -835,7 +833,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		enum owner_state owner_state;
 
 		owner_state = rwsem_spin_on_owner(sem);
-		if (!(owner_state & OWNER_SPINNABLE))
+		if (owner_state == OWNER_NONSPINNABLE)
 			break;
 
 		/*
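Removing OWNER_SPINNABLE is safe because the owner states are disjoint bits and the mask covered every state except OWNER_NONSPINNABLE: for the single state rwsem_spin_on_owner() returns, "not in the spinnable set" and "equals OWNER_NONSPINNABLE" pick out exactly the same case, and the direct comparison states the intent without the helper mask. A small standalone check of that equivalence (enum values mirroring kernel/locking/rwsem.c):

#include <assert.h>

/* Values mirror kernel/locking/rwsem.c's enum owner_state. */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE	(OWNER_NULL | OWNER_WRITER | OWNER_READER)

int main(void)
{
	/* The old and new break conditions agree for every single state. */
	for (int s = OWNER_NULL; s <= OWNER_NONSPINNABLE; s <<= 1)
		assert(!(s & OWNER_SPINNABLE) == (s == OWNER_NONSPINNABLE));
	return 0;
}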

diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs

@@ -175,6 +175,8 @@ pub fn lock(&self) -> Guard<'_, T, B> {
     /// Tries to acquire the lock.
     ///
     /// Returns a guard that can be used to access the data protected by the lock if successful.
+    // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
+    #[must_use = "if unused, the lock will be immediately unlocked"]
     pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
         // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
         // that `init` was called.
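At call sites the attribute turns a silent no-op into a diagnostic: a bare `lock.try_lock();` would drop the guard, and thereby release the lock, on the same line, yet previously compiled without complaint. With `#[must_use]`, rustc flags the unused result with the message above, steering callers toward `if let Some(guard) = lock.try_lock() { ... }` or an explicit `let _ = ...` when discarding is intentional.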