rust: block: convert block::mq to use Refcount
`block::mq` currently implements its own reference counting on top of `AtomicU64`, a Rust atomic type that does not exist on some 32-bit architectures. Simply switching to 32-bit atomics is not an option, because that would make the counter vulnerable to refcount overflow. Switch it to the kernel refcount type, `kernel::sync::Refcount`, instead.

One operation needed by `block::mq`, atomically decreasing the refcount from 2 to 0, is not available through refcount.h, so expose `Refcount::as_atomic`, which allows accessing the underlying atomic counter directly.

[boqun: Adopt the LKMM atomic API]

Signed-off-by: Gary Guo <gary@garyguo.net>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Benno Lossin <lossin@kernel.org>
Reviewed-by: Elle Rhumsaa <elle@weathered-steel.dev>
Acked-by: Andreas Hindborg <a.hindborg@kernel.org>
Tested-by: David Gow <davidgow@google.com>
Link: https://lore.kernel.org/r/20250723233312.3304339-5-gary@kernel.org
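To make the overflow concern concrete, here is a minimal user-space sketch, in plain Rust with std atomics, of the saturation behaviour that refcount_t provides and a bare AtomicU32/AtomicU64 does not. It is an illustration only, not kernel code; the constant is modelled on REFCOUNT_SATURATED (INT_MIN / 2) from refcount.h.

    use std::sync::atomic::{AtomicI32, Ordering};

    // Loosely modelled on refcount_t: instead of letting the counter wrap
    // back through zero (which would let a live object be freed), pin it at
    // a negative "saturated" poison value once it overflows.
    const SATURATED: i32 = i32::MIN / 2;

    fn saturating_inc(r: &AtomicI32) {
        let old = r.fetch_add(1, Ordering::Relaxed);
        if old < 0 || old == i32::MAX {
            // Overflow, or an already-saturated counter: poison, never wrap.
            r.store(SATURATED, Ordering::Relaxed);
        }
    }

    fn main() {
        let r = AtomicI32::new(1);
        saturating_inc(&r);
        assert_eq!(r.load(Ordering::Relaxed), 2);
    }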
--- a/rust/kernel/block/mq/operations.rs
+++ b/rust/kernel/block/mq/operations.rs
@@ -10,9 +10,10 @@
     block::mq::Request,
     error::{from_result, Result},
     prelude::*,
+    sync::Refcount,
     types::ARef,
 };
-use core::{marker::PhantomData, sync::atomic::AtomicU64, sync::atomic::Ordering};
+use core::marker::PhantomData;
 
 /// Implement this trait to interface blk-mq as block devices.
 ///
@@ -78,7 +79,7 @@ impl<T: Operations> OperationsVTable<T> {
         let request = unsafe { &*(*bd).rq.cast::<Request<T>>() };
 
         // One refcount for the ARef, one for being in flight
-        request.wrapper_ref().refcount().store(2, Ordering::Relaxed);
+        request.wrapper_ref().refcount().set(2);
 
         // SAFETY:
         // - We own a refcount that we took above. We pass that to `ARef`.
@@ -187,7 +188,7 @@ impl<T: Operations> OperationsVTable<T> {
 
             // SAFETY: The refcount field is allocated but not initialized, so
             // it is valid for writes.
-            unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(AtomicU64::new(0)) };
+            unsafe { RequestDataWrapper::refcount_ptr(pdu.as_ptr()).write(Refcount::new(0)) };
 
             Ok(0)
         })
--- a/rust/kernel/block/mq/request.rs
+++ b/rust/kernel/block/mq/request.rs
@@ -8,13 +8,10 @@
     bindings,
     block::mq::Operations,
     error::Result,
+    sync::{atomic::Relaxed, Refcount},
     types::{ARef, AlwaysRefCounted, Opaque},
 };
-use core::{
-    marker::PhantomData,
-    ptr::NonNull,
-    sync::atomic::{AtomicU64, Ordering},
-};
+use core::{marker::PhantomData, ptr::NonNull};
 
 /// A wrapper around a blk-mq [`struct request`]. This represents an IO request.
 ///
@@ -37,6 +34,9 @@
 /// We need to track 3 and 4 to ensure that it is safe to end the request and hand
 /// back ownership to the block layer.
 ///
+/// Note that the driver can still obtain a new `ARef`, even if there are no `ARef`s in
+/// existence, by using `tag_to_rq`, hence the need to distinguish B and C.
+///
 /// The states are tracked through the private `refcount` field of
 /// `RequestDataWrapper`. This structure lives in the private data area of the C
 /// [`struct request`].
@@ -98,13 +98,16 @@ pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
     ///
     /// [`struct request`]: srctree/include/linux/blk-mq.h
     fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
-        // We can race with `TagSet::tag_to_rq`
-        if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
-            2,
-            0,
-            Ordering::Relaxed,
-            Ordering::Relaxed,
-        ) {
+        // To hand back the ownership, we need the current refcount to be 2.
+        // Since we can race with `TagSet::tag_to_rq`, this needs to atomically reduce
+        // refcount to 0. `Refcount` does not provide a way to do this, so use the underlying
+        // atomics directly.
+        if let Err(_old) = this
+            .wrapper_ref()
+            .refcount()
+            .as_atomic()
+            .cmpxchg(2, 0, Relaxed)
+        {
             return Err(this);
         }
 
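The comment above is the heart of the patch: because `TagSet::tag_to_rq` can observe and increment the count concurrently, handing the request back must be a single atomic 2 -> 0 transition rather than a load, check, and store. A plain-Rust sketch of the same semantics, with a std atomic standing in for the kernel's `Atomic<i32>`:

    use std::sync::atomic::{AtomicI32, Ordering};

    // Succeeds only if the count is exactly 2 (the caller's ARef plus the
    // "in flight" reference) and drops both in one atomic step, mirroring
    // refcount().as_atomic().cmpxchg(2, 0, Relaxed) in the patch.
    fn try_hand_back(refcount: &AtomicI32) -> bool {
        refcount
            .compare_exchange(2, 0, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
    }

    fn main() {
        let rc = AtomicI32::new(2);
        assert!(try_hand_back(&rc)); // 2 -> 0: ownership goes back to C
        let rc = AtomicI32::new(3);
        assert!(!try_hand_back(&rc)); // extra ARef in flight: must not end
    }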
@@ -173,13 +176,13 @@ pub(crate) struct RequestDataWrapper {
     /// - 0: The request is owned by C block layer.
     /// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it.
     /// - 2+: There are [`ARef`] references to the request.
-    refcount: AtomicU64,
+    refcount: Refcount,
 }
 
 impl RequestDataWrapper {
     /// Return a reference to the refcount of the request that is embedding
     /// `self`.
-    pub(crate) fn refcount(&self) -> &AtomicU64 {
+    pub(crate) fn refcount(&self) -> &Refcount {
         &self.refcount
     }
 
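The three documented counter values trace a request's lifetime. A hypothetical walkthrough with a plain std atomic, following the doc comment above (illustration, not kernel code):

    use std::sync::atomic::{AtomicI32, Ordering};

    fn main() {
        let refcount = AtomicI32::new(0); // 0: owned by the C block layer
        refcount.store(2, Ordering::Relaxed); // queued: one ARef + in flight
        refcount.fetch_add(1, Ordering::Relaxed); // driver clones an ARef: 3
        refcount.fetch_sub(1, Ordering::Relaxed); // clone dropped: back to 2
        // Ending the request is the atomic 2 -> 0 step sketched earlier.
        assert!(refcount
            .compare_exchange(2, 0, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok());
    }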
@@ -189,7 +192,7 @@ pub(crate) fn refcount(&self) -> &AtomicU64 {
     /// # Safety
     ///
    /// - `this` must point to a live allocation of at least the size of `Self`.
-    pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
+    pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut Refcount {
         // SAFETY: Because of the safety requirements of this function, the
         // field projection is safe.
         unsafe { &raw mut (*this).refcount }
@@ -205,47 +208,13 @@ unsafe impl<T: Operations> Send for Request<T> {}
 // mutate `self` are internally synchronized`
 unsafe impl<T: Operations> Sync for Request<T> {}
 
-/// Store the result of `op(target.load())` in target, returning new value of
-/// target.
-fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
-    let old = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| Some(op(x)));
-
-    // SAFETY: Because the operation passed to `fetch_update` above always
-    // return `Some`, `old` will always be `Ok`.
-    let old = unsafe { old.unwrap_unchecked() };
-
-    op(old)
-}
-
-/// Store the result of `op(target.load)` in `target` if `target.load() !=
-/// pred`, returning [`true`] if the target was updated.
-fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
-    target
-        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
-            if x == pred {
-                None
-            } else {
-                Some(op(x))
-            }
-        })
-        .is_ok()
-}
-
 // SAFETY: All instances of `Request<T>` are reference counted. This
 // implementation of `AlwaysRefCounted` ensure that increments to the ref count
 // keeps the object alive in memory at least until a matching reference count
 // decrement is executed.
 unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
     fn inc_ref(&self) {
-        let refcount = &self.wrapper_ref().refcount();
-
-        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
-        let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);
-
-        #[cfg(CONFIG_DEBUG_MISC)]
-        if !updated {
-            panic!("Request refcount zero on clone")
-        }
+        self.wrapper_ref().refcount().inc();
     }
 
     unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
@@ -257,10 +226,10 @@ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
         let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };
 
-        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
-        let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);
+        let is_zero = refcount.dec_and_test();
 
         #[cfg(CONFIG_DEBUG_MISC)]
-        if new_refcount == 0 {
+        if is_zero {
             panic!("Request reached refcount zero in Rust abstractions");
         }
     }
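With the conversion, the two hand-rolled helpers above become dead code: `Refcount::inc` and `Refcount::dec_and_test` map to C's refcount_inc and refcount_dec_and_test, which already refuse to increment from zero and report the final decrement. For reference, a sketch of the usual dec-and-test shape in plain Rust; the C version's ordering is modelled here as a release decrement plus an acquire fence on the final drop (illustration only):

    use std::sync::atomic::{fence, AtomicI32, Ordering};

    // Returns true iff this call released the last reference. The Release
    // decrement orders this thread's prior writes before the drop; the
    // Acquire fence makes them visible to whoever frees the object.
    fn dec_and_test_sketch(r: &AtomicI32) -> bool {
        if r.fetch_sub(1, Ordering::Release) == 1 {
            fence(Ordering::Acquire);
            return true;
        }
        false
    }

    fn main() {
        let r = AtomicI32::new(2);
        assert!(!dec_and_test_sketch(&r)); // 2 -> 1: others still hold refs
        assert!(dec_and_test_sketch(&r)); // 1 -> 0: last reference released
    }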
--- a/rust/kernel/sync/refcount.rs
+++ b/rust/kernel/sync/refcount.rs
@@ -5,6 +5,7 @@
 //! C header: [`include/linux/refcount.h`](srctree/include/linux/refcount.h)
 
 use crate::build_assert;
+use crate::sync::atomic::Atomic;
 use crate::types::Opaque;
 
 /// Atomic reference counter.
@@ -34,6 +35,20 @@ fn as_ptr(&self) -> *mut bindings::refcount_t {
         self.0.get()
     }
 
+    /// Get the underlying atomic counter that backs the refcount.
+    ///
+    /// NOTE: Usage of this function is discouraged as it can circumvent the protections offered by
+    /// `refcount.h`. If there is no way to achieve the result using APIs in `refcount.h`, then
+    /// this function can be used. Otherwise consider adding a binding for the required API.
+    #[inline]
+    pub fn as_atomic(&self) -> &Atomic<i32> {
+        let ptr = self.0.get().cast();
+        // SAFETY: `refcount_t` is a transparent wrapper of `atomic_t`, which is an atomic 32-bit
+        // integer that is layout-wise compatible with `Atomic<i32>`. All values are valid for
+        // `refcount_t`, despite some of the values being considered saturated and "bad".
+        unsafe { &*ptr }
+    }
+
     /// Set a refcount's value.
     #[inline]
     pub fn set(&self, value: i32) {
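Putting the new escape hatch together with its one intended user, here is a kernel-crate usage sketch in the shape of `try_set_end` above. It is shown for illustration only and does not build outside the kernel tree; every call used (`set`, `inc`, `dec_and_test`, `as_atomic`, `cmpxchg`) appears in the diff itself.

    use kernel::sync::{atomic::Relaxed, Refcount};

    fn demo(rc: &Refcount) {
        rc.set(2); // one ARef + in flight, as in queue_rq
        rc.inc(); // an extra ARef appears...
        let _ = rc.dec_and_test(); // ...and is dropped again
        // refcount.h offers no "decrease from 2 to 0", hence as_atomic():
        if rc.as_atomic().cmpxchg(2, 0, Relaxed).is_ok() {
            // Both counts released atomically; ownership is back with C.
        }
    }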