Diffstat (limited to 'rust/kernel/sync')
-rw-r--r--  rust/kernel/sync/aref.rs              |   3
-rw-r--r--  rust/kernel/sync/atomic/internal.rs   | 114
-rw-r--r--  rust/kernel/sync/atomic/predefine.rs  |  55
-rw-r--r--  rust/kernel/sync/lock.rs              |   7
-rw-r--r--  rust/kernel/sync/lock/global.rs       |   2
-rw-r--r--  rust/kernel/sync/lock/mutex.rs        |   5
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs     |   5
-rw-r--r--  rust/kernel/sync/set_once.rs          |   8
8 files changed, 172 insertions, 27 deletions
diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs
index 0d24a0432015..0616c0353c2b 100644
--- a/rust/kernel/sync/aref.rs
+++ b/rust/kernel/sync/aref.rs
@@ -83,6 +83,9 @@ unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
// example, when the reference count reaches zero and `T` is dropped.
unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
+// Even if `T` is pinned, pointers to `T` can still move.
+impl<T: AlwaysRefCounted> Unpin for ARef<T> {}
+
impl<T: AlwaysRefCounted> ARef<T> {
/// Creates a new instance of [`ARef`].
///
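For context on the new `Unpin` impl: a minimal standalone sketch (plain Rust, simplified names; not the kernel type) of why a pointer wrapper can be `Unpin` regardless of `T`. Moving the wrapper moves only the pointer, never the pinned pointee.

use core::marker::{PhantomData, PhantomPinned};
use core::pin::Pin;

// Illustrative stand-in for `ARef<T>`: a raw pointer plus `PhantomData<T>`.
struct Ptr<T> {
    raw: *const T,
    _p: PhantomData<T>,
}

// Moving a `Ptr<T>` never moves the `T` it points to, so it is `Unpin`
// even when `T` is not.
impl<T> Unpin for Ptr<T> {}

fn main() {
    let target = Box::pin(PhantomPinned); // the target itself is `!Unpin`
    let ptr = Ptr { raw: &*target as *const PhantomPinned, _p: PhantomData };
    let pinned_ptr: Pin<Box<Ptr<PhantomPinned>>> = Box::pin(ptr);
    // `Pin::into_inner` is only available because `Ptr<_>` is `Unpin`.
    let _movable: Ptr<PhantomPinned> = *Pin::into_inner(pinned_ptr);
}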
diff --git a/rust/kernel/sync/atomic/internal.rs b/rust/kernel/sync/atomic/internal.rs
index 6fdd8e59f45b..0dac58bca2b3 100644
--- a/rust/kernel/sync/atomic/internal.rs
+++ b/rust/kernel/sync/atomic/internal.rs
@@ -13,17 +13,22 @@ mod private {
pub trait Sealed {}
}
-// `i32` and `i64` are only supported atomic implementations.
+// The C side supports atomic primitives only for `i32` and `i64` (`atomic_t` and
+// `atomic64_t`), while the Rust side also provides atomic support for `i8` and `i16`,
+// layered on top of the lower-level C primitives.
+impl private::Sealed for i8 {}
+impl private::Sealed for i16 {}
impl private::Sealed for i32 {}
impl private::Sealed for i64 {}
/// A marker trait for types that implement atomic operations with C side primitives.
///
-/// This trait is sealed, and only types that have directly mapping to the C side atomics should
-/// impl this:
+/// This trait is sealed, and only types that map directly to the C side atomics
+/// or can be implemented with lower-level C primitives are allowed to implement this:
///
-/// - `i32` maps to `atomic_t`.
-/// - `i64` maps to `atomic64_t`.
+/// - `i8` and `i16` are implemented with lower-level C primitives.
+/// - `i32` maps to `atomic_t`.
+/// - `i64` maps to `atomic64_t`.
pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
/// The type of the delta in arithmetic or logical operations.
///
@@ -32,6 +37,20 @@ pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
type Delta;
}
+// The current load/store helpers use `{WRITE,READ}_ONCE()`, hence atomicity against
+// read-modify-write operations is only guaranteed if the architecture supports native atomic RmW.
+#[cfg(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW)]
+impl AtomicImpl for i8 {
+ type Delta = Self;
+}
+
+// The current load/store helpers use `{WRITE,READ}_ONCE()`, hence atomicity against
+// read-modify-write operations is only guaranteed if the architecture supports native atomic RmW.
+#[cfg(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW)]
+impl AtomicImpl for i16 {
+ type Delta = Self;
+}
+
// `atomic_t` implements atomic operations on `i32`.
impl AtomicImpl for i32 {
type Delta = Self;
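As an aside, a minimal standalone sketch of the sealed-trait pattern used by `AtomicImpl` (illustrative names, not the kernel API): outside code can name and use the trait, but cannot implement it, because the `Sealed` supertrait is unreachable.

mod private {
    pub trait Sealed {}
}

// Only types sealed above may implement this trait.
pub trait AtomicImpl: Sized + Copy + private::Sealed {
    type Delta;
}

impl private::Sealed for i32 {}
impl AtomicImpl for i32 {
    type Delta = i32;
}

// Generic code can rely on the bound without being able to extend the set.
fn takes_delta<T: AtomicImpl>(_delta: T::Delta) {}

fn main() {
    takes_delta::<i32>(5);
}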
@@ -156,16 +175,17 @@ macro_rules! impl_atomic_method {
}
}
-// Delcares $ops trait with methods and implements the trait for `i32` and `i64`.
-macro_rules! declare_and_impl_atomic_methods {
- ($(#[$attr:meta])* $pub:vis trait $ops:ident {
- $(
- $(#[doc=$doc:expr])*
- fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
- $unsafe:tt { bindings::#call($($arg:tt)*) }
- }
- )*
- }) => {
+macro_rules! declare_atomic_ops_trait {
+ (
+ $(#[$attr:meta])* $pub:vis trait $ops:ident {
+ $(
+ $(#[doc=$doc:expr])*
+ fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+ $unsafe:tt { bindings::#call($($arg:tt)*) }
+ }
+ )*
+ }
+ ) => {
$(#[$attr])*
$pub trait $ops: AtomicImpl {
$(
@@ -175,21 +195,25 @@ macro_rules! declare_and_impl_atomic_methods {
);
)*
}
+ }
+}
- impl $ops for i32 {
+macro_rules! impl_atomic_ops_for_one {
+ (
+ $ty:ty => $ctype:ident,
+ $(#[$attr:meta])* $pub:vis trait $ops:ident {
$(
- impl_atomic_method!(
- (atomic) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
- $unsafe { call($($arg)*) }
- }
- );
+ $(#[doc=$doc:expr])*
+ fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+ $unsafe:tt { bindings::#call($($arg:tt)*) }
+ }
)*
}
-
- impl $ops for i64 {
+ ) => {
+ impl $ops for $ty {
$(
impl_atomic_method!(
- (atomic64) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
+ ($ctype) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
$unsafe { call($($arg)*) }
}
);
@@ -198,7 +222,47 @@ macro_rules! declare_and_impl_atomic_methods {
}
}
+// Declares the `$ops` trait with the given methods and implements it for every type in
+// the `[$ty => $ctype]` map.
+macro_rules! declare_and_impl_atomic_methods {
+ (
+ [ $($map:tt)* ]
+ $(#[$attr:meta])* $pub:vis trait $ops:ident { $($body:tt)* }
+ ) => {
+ declare_and_impl_atomic_methods!(
+ @with_ops_def
+ [ $($map)* ]
+ ( $(#[$attr])* $pub trait $ops { $($body)* } )
+ );
+ };
+
+ (@with_ops_def [ $($map:tt)* ] ( $($ops_def:tt)* )) => {
+ declare_atomic_ops_trait!( $($ops_def)* );
+
+ declare_and_impl_atomic_methods!(
+ @munch
+ [ $($map)* ]
+ ( $($ops_def)* )
+ );
+ };
+
+ (@munch [] ( $($ops_def:tt)* )) => {};
+
+ (@munch [ $ty:ty => $ctype:ident $(, $($rest:tt)*)? ] ( $($ops_def:tt)* )) => {
+ impl_atomic_ops_for_one!(
+ $ty => $ctype,
+ $($ops_def)*
+ );
+
+ declare_and_impl_atomic_methods!(
+ @munch
+ [ $($($rest)*)? ]
+ ( $($ops_def)* )
+ );
+ };
+}
+
declare_and_impl_atomic_methods!(
+ [ i8 => atomic_i8, i16 => atomic_i16, i32 => atomic, i64 => atomic64 ]
/// Basic atomic operations
pub trait AtomicBasicOps {
/// Atomic read (load).
@@ -216,6 +280,7 @@ declare_and_impl_atomic_methods!(
);
declare_and_impl_atomic_methods!(
+ [ i8 => atomic_i8, i16 => atomic_i16, i32 => atomic, i64 => atomic64 ]
/// Exchange and compare-and-exchange atomic operations
pub trait AtomicExchangeOps {
/// Atomic exchange.
@@ -243,6 +308,7 @@ declare_and_impl_atomic_methods!(
);
declare_and_impl_atomic_methods!(
+ [ i32 => atomic, i64 => atomic64 ]
/// Atomic arithmetic operations
pub trait AtomicArithmeticOps {
/// Atomic add (wrapping).
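The refactor above splits the old all-in-one macro into `declare_atomic_ops_trait!` plus a recursive "muncher" that walks the `[$ty => $ctype]` map and invokes `impl_atomic_ops_for_one!` once per entry. A minimal standalone sketch of that munching technique (illustrative names only, not the kernel macros):

trait Describe {
    fn describe() -> &'static str;
}

// Recursively consume a `[ty => name]` list, emitting one impl per entry.
macro_rules! impl_describe {
    ([]) => {};
    ([ $ty:ty => $name:ident $(, $($rest:tt)*)? ]) => {
        impl Describe for $ty {
            fn describe() -> &'static str {
                stringify!($name)
            }
        }
        impl_describe!([ $($($rest)*)? ]);
    };
}

impl_describe!([ i32 => atomic, i64 => atomic64 ]);

fn main() {
    assert_eq!(<i32 as Describe>::describe(), "atomic");
    assert_eq!(<i64 as Describe>::describe(), "atomic64");
}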
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
index 0fca1ba3c2db..67a0406d3ea4 100644
--- a/rust/kernel/sync/atomic/predefine.rs
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -5,6 +5,29 @@
use crate::static_assert;
use core::mem::{align_of, size_of};
+// Statically check that `bool` and `i8` agree in size and alignment.
+static_assert!(size_of::<bool>() == size_of::<i8>());
+static_assert!(align_of::<bool>() == align_of::<i8>());
+
+// SAFETY: `bool` has the same size and alignment as `i8`, and Rust guarantees that `bool` has
+// only two valid bit patterns: 0 (false) and 1 (true). Those are valid `i8` values, so `bool` is
+// round-trip transmutable to `i8`.
+unsafe impl super::AtomicType for bool {
+ type Repr = i8;
+}
+
+// SAFETY: `i8` has the same size and alignment as itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i8 {
+ type Repr = i8;
+}
+
+// SAFETY: `i16` has the same size and alignment as itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i16 {
+ type Repr = i16;
+}
+
// SAFETY: `i32` has the same size and alignment as itself, and is round-trip transmutable to
// itself.
unsafe impl super::AtomicType for i32 {
@@ -129,7 +152,7 @@ mod tests {
#[test]
fn atomic_basic_tests() {
- for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
let x = Atomic::new(v);
assert_eq!(v, x.load(Relaxed));
@@ -137,8 +160,18 @@ mod tests {
}
#[test]
+ fn atomic_acquire_release_tests() {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+ let x = Atomic::new(0);
+
+ x.store(v, Release);
+ assert_eq!(v, x.load(Acquire));
+ });
+ }
+
+ #[test]
fn atomic_xchg_tests() {
- for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
let x = Atomic::new(v);
let old = v;
@@ -151,7 +184,7 @@ mod tests {
#[test]
fn atomic_cmpxchg_tests() {
- for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
let x = Atomic::new(v);
let old = v;
@@ -177,4 +210,20 @@ mod tests {
assert_eq!(v + 25, x.load(Relaxed));
});
}
+
+ #[test]
+ fn atomic_bool_tests() {
+ let x = Atomic::new(false);
+
+ assert_eq!(false, x.load(Relaxed));
+ x.store(true, Relaxed);
+ assert_eq!(true, x.load(Relaxed));
+
+ assert_eq!(true, x.xchg(false, Relaxed));
+ assert_eq!(false, x.load(Relaxed));
+
+ assert_eq!(Err(false), x.cmpxchg(true, true, Relaxed));
+ assert_eq!(false, x.load(Relaxed));
+ assert_eq!(Ok(false), x.cmpxchg(false, true, Full));
+ }
}
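The SAFETY comments above hinge on `bool` being round-trip transmutable to `i8`. A minimal plain-Rust illustration of that property (not kernel code; the cast-based helpers are hypothetical):

// Every valid `bool` bit pattern (0 or 1) is a valid `i8`, and converting
// back yields the original value.
fn to_repr(b: bool) -> i8 {
    b as i8
}

fn from_repr(r: i8) -> bool {
    debug_assert!(r == 0 || r == 1); // only patterns produced by `to_repr`
    r != 0
}

fn main() {
    assert!(from_repr(to_repr(true)));
    assert!(!from_repr(to_repr(false)));
}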
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 46a57d1fc309..10b6b5e9b024 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -156,6 +156,7 @@ impl<B: Backend> Lock<(), B> {
/// the whole lifetime of `'a`.
///
/// [`State`]: Backend::State
+ #[inline]
pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
// SAFETY:
// - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
@@ -169,6 +170,7 @@ impl<B: Backend> Lock<(), B> {
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
+ #[inline]
pub fn lock(&self) -> Guard<'_, T, B> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -182,6 +184,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Returns a guard that can be used to access the data protected by the lock if successful.
// `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
#[must_use = "if unused, the lock will be immediately unlocked"]
+ #[inline]
pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -275,6 +278,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
type Target = T;
+ #[inline]
fn deref(&self) -> &Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &*self.lock.data.get() }
@@ -285,6 +289,7 @@ impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
where
T: Unpin,
{
+ #[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &mut *self.lock.data.get() }
@@ -292,6 +297,7 @@ where
}
impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
+ #[inline]
fn drop(&mut self) {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -304,6 +310,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
+ #[inline]
pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
// SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
unsafe { B::assert_is_held(lock.state.get()) };
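The `Guard` pattern above (Deref to the protected data, unlock in Drop) mirrors the standard-library guard idiom. A plain-Rust analogue for illustration (std types, not the kernel API):

use std::sync::Mutex;

fn increment(counter: &Mutex<i32>) {
    // The guard derefs to the protected data, like `Guard`'s Deref impls.
    let mut guard = counter.lock().unwrap();
    *guard += 1;
} // guard dropped here: the lock is released, like `B::unlock` in `Guard::drop`.

fn main() {
    let counter = Mutex::new(0);
    increment(&counter);
    assert_eq!(*counter.lock().unwrap(), 1);
}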
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
index eab48108a4ae..aecbdc34738f 100644
--- a/rust/kernel/sync/lock/global.rs
+++ b/rust/kernel/sync/lock/global.rs
@@ -77,6 +77,7 @@ impl<B: GlobalLockBackend> GlobalLock<B> {
}
/// Lock this global lock.
+ #[inline]
pub fn lock(&'static self) -> GlobalGuard<B> {
GlobalGuard {
inner: self.inner.lock(),
@@ -84,6 +85,7 @@ impl<B: GlobalLockBackend> GlobalLock<B> {
}
/// Try to lock this global lock.
+ #[inline]
pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
Some(GlobalGuard {
inner: self.inner.try_lock()?,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 581cee7ab842..cda0203efefb 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -102,6 +102,7 @@ unsafe impl super::Backend for MutexBackend {
type State = bindings::mutex;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -112,18 +113,21 @@ unsafe impl super::Backend for MutexBackend {
unsafe { bindings::__mutex_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::mutex_lock(ptr) };
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the mutex.
unsafe { bindings::mutex_unlock(ptr) };
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::mutex_trylock(ptr) };
@@ -135,6 +139,7 @@ unsafe impl super::Backend for MutexBackend {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::mutex_assert_is_held(ptr) }
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index d7be38ccbdc7..ef76fa07ca3a 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -101,6 +101,7 @@ unsafe impl super::Backend for SpinLockBackend {
type State = bindings::spinlock_t;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -111,18 +112,21 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe { bindings::__spin_lock_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::spin_lock(ptr) }
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::spin_trylock(ptr) };
@@ -134,6 +138,7 @@ unsafe impl super::Backend for SpinLockBackend {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::spin_assert_is_held(ptr) }
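Both backends follow the same shape: an unsafe trait whose pointer-based init/lock/unlock hooks wrap raw locking primitives. A minimal standalone sketch of that shape (illustrative; a toy spin loop stands in for the C bindings):

use std::sync::atomic::{AtomicBool, Ordering};

// Illustrative stand-in for `Backend`; callers must uphold the same
// pointer-validity contract the kernel trait documents.
unsafe trait Backend {
    type State;
    unsafe fn lock(state: *mut Self::State);
    unsafe fn unlock(state: *mut Self::State);
}

struct ToySpinBackend;

unsafe impl Backend for ToySpinBackend {
    type State = AtomicBool;

    #[inline]
    unsafe fn lock(state: *mut Self::State) {
        // SAFETY (illustrative): the caller guarantees `state` is valid.
        let state = unsafe { &*state };
        while state.swap(true, Ordering::Acquire) {
            std::hint::spin_loop();
        }
    }

    #[inline]
    unsafe fn unlock(state: *mut Self::State) {
        // SAFETY (illustrative): the caller guarantees `state` is valid.
        let state = unsafe { &*state };
        state.store(false, Ordering::Release);
    }
}

fn main() {
    let mut state = AtomicBool::new(false);
    let ptr = &mut state as *mut AtomicBool;
    // SAFETY: `ptr` points to a live `AtomicBool` across both calls.
    unsafe { ToySpinBackend::lock(ptr) };
    unsafe { ToySpinBackend::unlock(ptr) };
}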
diff --git a/rust/kernel/sync/set_once.rs b/rust/kernel/sync/set_once.rs
index bdba601807d8..139cef05e935 100644
--- a/rust/kernel/sync/set_once.rs
+++ b/rust/kernel/sync/set_once.rs
@@ -123,3 +123,11 @@ impl<T> Drop for SetOnce<T> {
}
}
}
+
+// SAFETY: `SetOnce` can be transferred across thread boundaries iff the data it contains can.
+unsafe impl<T: Send> Send for SetOnce<T> {}
+
+// SAFETY: `SetOnce` synchronises access to the inner value via atomic operations,
+// so shared references are safe when `T: Sync`. Since the inner `T` may be dropped
+// on any thread, we also require `T: Send`.
+unsafe impl<T: Send + Sync> Sync for SetOnce<T> {}
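To see why the `Sync` impl also demands `T: Send`, consider a minimal plain-Rust model of the same bounds (illustrative type, not the kernel one); the commented-out line fails to compile because `Rc` is `!Send`, even though shared access alone looks harmless:

use std::cell::UnsafeCell;

struct SetOnceLike<T>(UnsafeCell<Option<T>>);

// Mirrors the bounds above: `Send` needs `T: Send`; `Sync` needs both,
// because a thread holding only `&SetOnceLike<T>` may still end up
// dropping the inner `T` on its own thread.
unsafe impl<T: Send> Send for SetOnceLike<T> {}
unsafe impl<T: Send + Sync> Sync for SetOnceLike<T> {}

fn assert_sync<T: Sync>() {}

fn main() {
    assert_sync::<SetOnceLike<i32>>(); // ok: i32 is Send + Sync
    // assert_sync::<SetOnceLike<std::rc::Rc<i32>>>(); // error: Rc<i32> is !Send
}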