mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 10:01:39 -05:00
Merge tag 'locking-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"Mutexes:
- Redo __mutex_init() to reduce generated code size (Sebastian
Andrzej Siewior)
Seqlocks:
- Introduce scoped_seqlock_read() (Peter Zijlstra)
- Change thread_group_cputime() to use scoped_seqlock_read() (Oleg
Nesterov)
- Change do_task_stat() to use scoped_seqlock_read() (Oleg Nesterov)
- Change do_io_accounting() to use scoped_seqlock_read() (Oleg
Nesterov)
- Fix the incorrect documentation of read_seqbegin_or_lock() /
need_seqretry() (Oleg Nesterov)
- Allow KASAN to fail optimizing (Peter Zijlstra)
Local lock updates:
- Fix all kernel-doc warnings (Randy Dunlap)
- Add the <linux/local_lock*.h> headers to MAINTAINERS (Sebastian
Andrzej Siewior)
- Reduce the risk of shadowing via s/l/__l/ and s/tl/__tl/ (Vincent
Mailhol)
Lock debugging:
- spinlock/debug: Fix data-race in do_raw_write_lock (Alexander
Sverdlin)
Atomic primitives infrastructure:
- atomic: Skip alignment check for try_cmpxchg() old arg (Arnd
Bergmann)
Rust runtime integration:
- sync: atomic: Enable generated Atomic<T> usage (Boqun Feng)
- sync: atomic: Implement Debug for Atomic<Debug> (Boqun Feng)
- debugfs: Remove Rust native atomics and replace them with Linux
versions (Boqun Feng)
- debugfs: Implement Reader for Mutex<T> only when T is Unpin (Boqun
Feng)
- lock: guard: Add T: Unpin bound to DerefMut (Daniel Almeida)
- lock: Pin the inner data (Daniel Almeida)
- lock: Add a Pin<&mut T> accessor (Daniel Almeida)"
* tag 'locking-core-2025-12-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/local_lock: Fix all kernel-doc warnings
locking/local_lock: s/l/__l/ and s/tl/__tl/ to reduce the risk of shadowing
locking/local_lock: Add the <linux/local_lock*.h> headers to MAINTAINERS
locking/mutex: Redo __mutex_init() to reduce generated code size
rust: debugfs: Replace the usage of Rust native atomics
rust: sync: atomic: Implement Debug for Atomic<Debug>
rust: sync: atomic: Make Atomic*Ops pub(crate)
seqlock: Allow KASAN to fail optimizing
rust: debugfs: Implement Reader for Mutex<T> only when T is Unpin
seqlock: Change do_io_accounting() to use scoped_seqlock_read()
seqlock: Change do_task_stat() to use scoped_seqlock_read()
seqlock: Change thread_group_cputime() to use scoped_seqlock_read()
seqlock: Introduce scoped_seqlock_read()
documentation: seqlock: fix the wrong documentation of read_seqbegin_or_lock/need_seqretry
atomic: Skip alignment check for try_cmpxchg() old arg
rust: lock: Add a Pin<&mut T> accessor
rust: lock: Pin the inner data
rust: lock: guard: Add T: Unpin bound to DerefMut
locking/spinlock/debug: Fix data-race in do_raw_write_lock
@@ -4,14 +4,11 @@
 //! Traits for rendering or updating values exported to DebugFS.
 
 use crate::prelude::*;
+use crate::sync::atomic::{Atomic, AtomicBasicOps, AtomicType, Relaxed};
 use crate::sync::Mutex;
 use crate::uaccess::UserSliceReader;
 use core::fmt::{self, Debug, Formatter};
 use core::str::FromStr;
-use core::sync::atomic::{
-    AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64,
-    AtomicU8, AtomicUsize, Ordering,
-};
 
 /// A trait for types that can be written into a string.
 ///
@@ -50,7 +47,7 @@ pub trait Reader {
     fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result;
 }
 
-impl<T: FromStr> Reader for Mutex<T> {
+impl<T: FromStr + Unpin> Reader for Mutex<T> {
     fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
         let mut buf = [0u8; 128];
         if reader.len() > buf.len() {
@@ -66,37 +63,21 @@ fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
     }
 }
 
-macro_rules! impl_reader_for_atomic {
-    ($(($atomic_type:ty, $int_type:ty)),*) => {
-        $(
-            impl Reader for $atomic_type {
-                fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
-                    let mut buf = [0u8; 21]; // Enough for a 64-bit number.
-                    if reader.len() > buf.len() {
-                        return Err(EINVAL);
-                    }
-                    let n = reader.len();
-                    reader.read_slice(&mut buf[..n])?;
-
-                    let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
-                    let val = s.trim().parse::<$int_type>().map_err(|_| EINVAL)?;
-                    self.store(val, Ordering::Relaxed);
-                    Ok(())
-                }
-            }
-        )*
-    };
-}
-
-impl_reader_for_atomic!(
-    (AtomicI16, i16),
-    (AtomicI32, i32),
-    (AtomicI64, i64),
-    (AtomicI8, i8),
-    (AtomicIsize, isize),
-    (AtomicU16, u16),
-    (AtomicU32, u32),
-    (AtomicU64, u64),
-    (AtomicU8, u8),
-    (AtomicUsize, usize)
-);
+impl<T: AtomicType + FromStr> Reader for Atomic<T>
+where
+    T::Repr: AtomicBasicOps,
+{
+    fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
+        let mut buf = [0u8; 21]; // Enough for a 64-bit number.
+        if reader.len() > buf.len() {
+            return Err(EINVAL);
+        }
+        let n = reader.len();
+        reader.read_slice(&mut buf[..n])?;
+
+        let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
+        let val = s.trim().parse::<T>().map_err(|_| EINVAL)?;
+        self.store(val, Relaxed);
+        Ok(())
+    }
+}
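For context: the hunk above replaces ten macro-generated Reader impls over core's atomics with a single generic impl over the kernel's own Atomic<T>. A rough, self-contained userspace sketch of the same parse-and-store flow, with std atomics and a plain error string standing in for the kernel-only UserSliceReader, Atomic<T> and EINVAL:

    use std::sync::atomic::{AtomicI32, Ordering};

    // Sketch of read_from_slice(): bounds-check the incoming bytes,
    // decode as UTF-8, trim, parse, then publish with a relaxed store.
    fn read_from_slice(dst: &AtomicI32, input: &[u8]) -> Result<(), &'static str> {
        let mut buf = [0u8; 21]; // Enough for a 64-bit number.
        if input.len() > buf.len() {
            return Err("EINVAL"); // the kernel code returns Err(EINVAL)
        }
        let n = input.len();
        buf[..n].copy_from_slice(input);

        let s = std::str::from_utf8(&buf[..n]).map_err(|_| "EINVAL")?;
        let val = s.trim().parse::<i32>().map_err(|_| "EINVAL")?;
        dst.store(val, Ordering::Relaxed);
        Ok(())
    }

    fn main() {
        let v = AtomicI32::new(0);
        read_from_slice(&v, b" 42\n").unwrap();
        assert_eq!(v.load(Ordering::Relaxed), 42);
    }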
@@ -22,9 +22,10 @@
 
 pub use internal::AtomicImpl;
 pub use ordering::{Acquire, Full, Relaxed, Release};
+pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps};
 
 use crate::build_error;
-use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps, AtomicRepr};
+use internal::AtomicRepr;
 use ordering::OrderingType;
 
 /// A memory location which can be safely modified from multiple execution contexts.
@@ -306,6 +307,15 @@ pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AtomicType + core::fmt::Debug> core::fmt::Debug for Atomic<T>
|
||||
where
|
||||
T::Repr: AtomicBasicOps,
|
||||
{
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||
core::fmt::Debug::fmt(&self.load(Relaxed), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AtomicType> Atomic<T>
|
||||
where
|
||||
T::Repr: AtomicExchangeOps,
|
||||
|
||||
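The Debug impl added above formats whatever a relaxed load observes at that instant. A minimal userspace analogue (the wrapper type is illustrative; the kernel's Atomic<T> wraps an in-kernel representation, not a std atomic):

    use std::fmt;
    use std::sync::atomic::{AtomicU64, Ordering};

    struct Atomic(AtomicU64); // stand-in for the kernel's Atomic<T>

    // Mirrors the kernel impl: Debug delegates to the Debug of the
    // value returned by a relaxed load.
    impl fmt::Debug for Atomic {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt::Debug::fmt(&self.0.load(Ordering::Relaxed), f)
        }
    }

    fn main() {
        let a = Atomic(AtomicU64::new(7));
        assert_eq!(format!("{a:?}"), "7");
    }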
@@ -11,7 +11,7 @@
     types::{NotThreadSafe, Opaque, ScopeGuard},
 };
 use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
-use pin_init::{pin_data, pin_init, PinInit};
+use pin_init::{pin_data, pin_init, PinInit, Wrapper};
 
 pub mod mutex;
 pub mod spinlock;
@@ -115,6 +115,7 @@ pub struct Lock<T: ?Sized, B: Backend> {
|
||||
_pin: PhantomPinned,
|
||||
|
||||
/// The data protected by the lock.
|
||||
#[pin]
|
||||
pub(crate) data: UnsafeCell<T>,
|
||||
}
|
||||
|
||||
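The new #[pin] marker declares `data` structurally pinned: when the Lock is pinned, the protected value is pinned too, which is what later lets the guard hand out Pin<&mut T>. A minimal plain-Rust sketch of structural pinning (Container and Field are illustrative names, not kernel types):

    use std::marker::PhantomPinned;
    use std::pin::Pin;

    struct Field(PhantomPinned); // !Unpin: must never move once pinned
    struct Container {
        field: Field,
    }

    // Structural pinning: a pinned Container yields a pinned Field.
    fn field_mut(c: Pin<&mut Container>) -> Pin<&mut Field> {
        // SAFETY: `field` is never moved out of the container and is only
        // exposed re-pinned; the guard's as_mut() below relies on the same
        // contract for `self.lock.data`.
        unsafe { c.map_unchecked_mut(|c| &mut c.field) }
    }

    fn main() {
        let mut c = Box::pin(Container { field: Field(PhantomPinned) });
        let _field: Pin<&mut Field> = field_mut(c.as_mut());
    }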
@@ -127,9 +128,13 @@ unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
|
||||
|
||||
impl<T, B: Backend> Lock<T, B> {
|
||||
/// Constructs a new lock initialiser.
|
||||
pub fn new(t: T, name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
|
||||
pub fn new(
|
||||
t: impl PinInit<T>,
|
||||
name: &'static CStr,
|
||||
key: Pin<&'static LockClassKey>,
|
||||
) -> impl PinInit<Self> {
|
||||
pin_init!(Self {
|
||||
data: UnsafeCell::new(t),
|
||||
data <- UnsafeCell::pin_init(t),
|
||||
_pin: PhantomPinned,
|
||||
// SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
|
||||
// static lifetimes so they live indefinitely.
|
||||
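With this change Lock::new() accepts an initializer rather than a finished value, so the protected data can be constructed in place and never moved; plain values appear to keep working because pin-init lets a bare T act as its own initializer. A much-simplified sketch of that constructor shape — `Init` here is a hypothetical stand-in for pin-init's PinInit, which really writes into a caller-supplied slot instead of returning T:

    use std::cell::UnsafeCell;

    // Hypothetical stand-in for pin-init's PinInit<T>.
    trait Init<T> {
        fn init(self) -> T;
    }

    // A plain value is itself a valid initializer, so existing
    // Lock::new(value, ...) callers keep compiling.
    impl<T> Init<T> for T {
        fn init(self) -> T {
            self
        }
    }

    struct Lock<T> {
        data: UnsafeCell<T>,
    }

    impl<T> Lock<T> {
        fn new(t: impl Init<T>) -> Self {
            Lock { data: UnsafeCell::new(t.init()) }
        }
    }

    fn main() {
        let lock = Lock::new(42u32);
        // Toy only: no locking here, just the constructor shape.
        assert_eq!(unsafe { *lock.data.get() }, 42);
    }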
@@ -240,6 +245,31 @@ pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
|
||||
|
||||
cb()
|
||||
}
|
||||
|
||||
/// Returns a pinned mutable reference to the protected data.
|
||||
///
|
||||
/// The guard implements [`DerefMut`] when `T: Unpin`, so for [`Unpin`]
|
||||
/// types [`DerefMut`] should be used instead of this function.
|
||||
///
|
||||
/// [`DerefMut`]: core::ops::DerefMut
|
||||
/// [`Unpin`]: core::marker::Unpin
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use kernel::sync::{Mutex, MutexGuard};
|
||||
/// # use core::{pin::Pin, marker::PhantomPinned};
|
||||
/// struct Data(PhantomPinned);
|
||||
///
|
||||
/// fn example(mutex: &Mutex<Data>) {
|
||||
/// let mut data: MutexGuard<'_, Data> = mutex.lock();
|
||||
/// let mut data: Pin<&mut Data> = data.as_mut();
|
||||
/// }
|
||||
/// ```
|
||||
pub fn as_mut(&mut self) -> Pin<&mut T> {
|
||||
// SAFETY: `self.lock.data` is structurally pinned.
|
||||
unsafe { Pin::new_unchecked(&mut *self.lock.data.get()) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
|
||||
@@ -251,7 +281,10 @@ fn deref(&self) -> &Self::Target {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
|
||||
impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
|
||||
where
|
||||
T: Unpin,
|
||||
{
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
|
||||
unsafe { &mut *self.lock.data.get() }
|
||||
|
||||
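This bound (and the matching GlobalGuard one below) is also why Reader for Mutex<T> above gained `T: Unpin`: writing the parsed value through the guard uses DerefMut, which is now only offered for Unpin payloads; !Unpin data is reached via as_mut() as Pin<&mut T> instead. A minimal userspace sketch of a guard with that shape (Guard here is illustrative, not the kernel type):

    use std::ops::{Deref, DerefMut};

    struct Guard<'a, T: ?Sized>(&'a mut T);

    impl<T: ?Sized> Deref for Guard<'_, T> {
        type Target = T;
        fn deref(&self) -> &T {
            self.0
        }
    }

    // Mutable access only for Unpin payloads: `*guard = ...` could move
    // the value, which must never happen to pinned (!Unpin) data.
    impl<T: ?Sized + Unpin> DerefMut for Guard<'_, T> {
        fn deref_mut(&mut self) -> &mut T {
            self.0
        }
    }

    fn main() {
        let mut x = 5i32; // i32 is Unpin, so DerefMut is available
        let mut g = Guard(&mut x);
        *g = 6;
        assert_eq!(*g, 6);
    }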
@@ -106,7 +106,10 @@ fn deref(&self) -> &Self::Target {
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B> {
|
||||
impl<B: GlobalLockBackend> core::ops::DerefMut for GlobalGuard<B>
|
||||
where
|
||||
B::Item: Unpin,
|
||||
{
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.inner
|
||||
}
|
||||
|
||||