summaryrefslogtreecommitdiff
path: root/rust/kernel/sync/atomic
diff options
context:
space:
mode:
authorBoqun Feng <boqun.feng@gmail.com>2026-03-03 12:16:55 -0800
committerPeter Zijlstra <peterz@infradead.org>2026-03-08 11:06:49 +0100
commitac8f06ade38a49f7725cc219fc6e90d1d4708d2b (patch)
treef7989c5aabf221a67ef19b28b11fc00f489b39e5 /rust/kernel/sync/atomic
parent553c02fb588d4310193eba80f75b43b20befd1d2 (diff)
rust: sync: atomic: Add Atomic<*{mut,const} T> support
Atomic pointer support is an important building block of synchronization algorithms, e.g. RCU, hence provide support for it. Note that instead of relying on atomic_long or the implementation of `Atomic<usize>`, a new set of helpers (atomic_ptr_*) is introduced for atomic pointers specifically; this is because ptr2int casting would lose the provenance of a pointer, and even though in theory there are a few tricks by which the provenance can be restored, it is still a simpler implementation if C can provide atomic pointers directly. The side effects of this approach are: we don't have the arithmetic and logical operations for pointers yet, and the current implementation only works on ARCH_SUPPORTS_ATOMIC_RMW architectures, but these are implementation issues and can be addressed later. Signed-off-by: Boqun Feng <boqun.feng@gmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Gary Guo <gary@garyguo.net> Reviewed-by: FUJITA Tomonori <fujita.tomonori@gmail.com> Link: https://patch.msgid.link/20260120140503.62804-3-boqun.feng@gmail.com Link: https://patch.msgid.link/20260303201701.12204-8-boqun@kernel.org
Diffstat (limited to 'rust/kernel/sync/atomic')
-rw-r--r--rust/kernel/sync/atomic/internal.rs24
-rw-r--r--rust/kernel/sync/atomic/predefine.rs46
2 files changed, 61 insertions, 9 deletions
diff --git a/rust/kernel/sync/atomic/internal.rs b/rust/kernel/sync/atomic/internal.rs
index ef516bcb02ee..e301db4eaf91 100644
--- a/rust/kernel/sync/atomic/internal.rs
+++ b/rust/kernel/sync/atomic/internal.rs
@@ -7,6 +7,7 @@
use crate::bindings;
use crate::macros::paste;
use core::cell::UnsafeCell;
+use ffi::c_void;
mod private {
/// Sealed trait marker to disable customized impls on atomic implementation traits.
@@ -14,10 +15,11 @@ mod private {
}
// The C side supports atomic primitives only for `i32` and `i64` (`atomic_t` and `atomic64_t`),
-// while the Rust side also layers provides atomic support for `i8` and `i16`
-// on top of lower-level C primitives.
+// while the Rust side also provides atomic support for `i8`, `i16` and `*const c_void` on top of
+// lower-level C primitives.
impl private::Sealed for i8 {}
impl private::Sealed for i16 {}
+impl private::Sealed for *const c_void {}
impl private::Sealed for i32 {}
impl private::Sealed for i64 {}
@@ -26,10 +28,10 @@ impl private::Sealed for i64 {}
/// This trait is sealed, and only types that map directly to the C side atomics
/// or can be implemented with lower-level C primitives are allowed to implement this:
///
-/// - `i8` and `i16` are implemented with lower-level C primitives.
+/// - `i8`, `i16` and `*const c_void` are implemented with lower-level C primitives.
/// - `i32` map to `atomic_t`
/// - `i64` map to `atomic64_t`
-pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
+pub trait AtomicImpl: Sized + Copy + private::Sealed {
/// The type of the delta in arithmetic or logical operations.
///
/// For example, in `atomic_add(ptr, v)`, it's the type of `v`. Usually it's the same type of
@@ -37,9 +39,9 @@ pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
type Delta;
}
-// The current helpers of load/store of atomic `i8` and `i16` use `{WRITE,READ}_ONCE()` hence the
-// atomicity is only guaranteed against read-modify-write operations if the architecture supports
-// native atomic RmW.
+// The current helpers of load/store of atomic `i8`, `i16` and pointers use `{WRITE,READ}_ONCE()`
+// hence the atomicity is only guaranteed against read-modify-write operations if the architecture
+// supports native atomic RmW.
//
// In the future when a CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=n architecture plans to support Rust, the
// load/store helpers that guarantee atomicity against RmW operations (usually via a lock) need to
@@ -58,6 +60,10 @@ impl AtomicImpl for i16 {
type Delta = Self;
}
+impl AtomicImpl for *const c_void {
+ type Delta = isize;
+}
+
// `atomic_t` implements atomic operations on `i32`.
impl AtomicImpl for i32 {
type Delta = Self;
@@ -269,7 +275,7 @@ macro_rules! declare_and_impl_atomic_methods {
}
declare_and_impl_atomic_methods!(
- [ i8 => atomic_i8, i16 => atomic_i16, i32 => atomic, i64 => atomic64 ]
+ [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ]
/// Basic atomic operations
pub trait AtomicBasicOps {
/// Atomic read (load).
@@ -287,7 +293,7 @@ declare_and_impl_atomic_methods!(
);
declare_and_impl_atomic_methods!(
- [ i8 => atomic_i8, i16 => atomic_i16, i32 => atomic, i64 => atomic64 ]
+ [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ]
/// Exchange and compare-and-exchange atomic operations
pub trait AtomicExchangeOps {
/// Atomic exchange.
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
index 67a0406d3ea4..6f2c60529b64 100644
--- a/rust/kernel/sync/atomic/predefine.rs
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -4,6 +4,7 @@
use crate::static_assert;
use core::mem::{align_of, size_of};
+use ffi::c_void;
// Ensure size and alignment requirements are checked.
static_assert!(size_of::<bool>() == size_of::<i8>());
@@ -28,6 +29,26 @@ unsafe impl super::AtomicType for i16 {
type Repr = i16;
}
+// SAFETY:
+//
+// - `*mut T` has the same size and alignment with `*const c_void`, and is round-trip
+// transmutable to `*const c_void`.
+// - `*mut T` is safe to transfer between execution contexts. See the safety requirement of
+// [`AtomicType`].
+unsafe impl<T: Sized> super::AtomicType for *mut T {
+ type Repr = *const c_void;
+}
+
+// SAFETY:
+//
+// - `*const T` has the same size and alignment with `*const c_void`, and is round-trip
+// transmutable to `*const c_void`.
+// - `*const T` is safe to transfer between execution contexts. See the safety requirement of
+// [`AtomicType`].
+unsafe impl<T: Sized> super::AtomicType for *const T {
+ type Repr = *const c_void;
+}
+
// SAFETY: `i32` has the same size and alignment with itself, and is round-trip transmutable to
// itself.
unsafe impl super::AtomicType for i32 {
@@ -226,4 +247,29 @@ mod tests {
assert_eq!(false, x.load(Relaxed));
assert_eq!(Ok(false), x.cmpxchg(false, true, Full));
}
+
+ #[test]
+ fn atomic_ptr_tests() {
+ let mut v = 42;
+ let mut u = 43;
+ let x = Atomic::new(&raw mut v);
+
+ assert_eq!(x.load(Acquire), &raw mut v);
+ assert_eq!(x.cmpxchg(&raw mut u, &raw mut u, Relaxed), Err(&raw mut v));
+ assert_eq!(x.cmpxchg(&raw mut v, &raw mut u, Relaxed), Ok(&raw mut v));
+ assert_eq!(x.load(Relaxed), &raw mut u);
+
+ let x = Atomic::new(&raw const v);
+
+ assert_eq!(x.load(Acquire), &raw const v);
+ assert_eq!(
+ x.cmpxchg(&raw const u, &raw const u, Relaxed),
+ Err(&raw const v)
+ );
+ assert_eq!(
+ x.cmpxchg(&raw const v, &raw const u, Relaxed),
+ Ok(&raw const v)
+ );
+ assert_eq!(x.load(Relaxed), &raw const u);
+ }
}