Diffstat (limited to 'rust/kernel')
-rw-r--r--  rust/kernel/auxiliary.rs | 71
-rw-r--r--  rust/kernel/bits.rs | 6
-rw-r--r--  rust/kernel/block/mq/gen_disk.rs | 3
-rw-r--r--  rust/kernel/block/mq/tag_set.rs | 4
-rw-r--r--  rust/kernel/bug.rs | 20
-rw-r--r--  rust/kernel/build_assert.rs | 7
-rw-r--r--  rust/kernel/clk.rs | 8
-rw-r--r--  rust/kernel/configfs.rs | 9
-rw-r--r--  rust/kernel/cpufreq.rs | 5
-rw-r--r--  rust/kernel/cpumask.rs | 10
-rw-r--r--  rust/kernel/debugfs.rs | 86
-rw-r--r--  rust/kernel/debugfs/callback_adapters.rs | 21
-rw-r--r--  rust/kernel/debugfs/entry.rs | 16
-rw-r--r--  rust/kernel/debugfs/file_ops.rs | 43
-rw-r--r--  rust/kernel/debugfs/traits.rs | 43
-rw-r--r--  rust/kernel/device.rs | 59
-rw-r--r--  rust/kernel/device/property.rs | 11
-rw-r--r--  rust/kernel/device_id.rs | 2
-rw-r--r--  rust/kernel/devres.rs | 197
-rw-r--r--  rust/kernel/dma.rs | 24
-rw-r--r--  rust/kernel/driver.rs | 110
-rw-r--r--  rust/kernel/drm/driver.rs | 6
-rw-r--r--  rust/kernel/drm/gem/mod.rs | 8
-rw-r--r--  rust/kernel/faux.rs | 13
-rw-r--r--  rust/kernel/fmt.rs | 2
-rw-r--r--  rust/kernel/i2c.rs | 39
-rw-r--r--  rust/kernel/impl_flags.rs | 272
-rw-r--r--  rust/kernel/init.rs | 40
-rw-r--r--  rust/kernel/io.rs | 488
-rw-r--r--  rust/kernel/io/mem.rs | 33
-rw-r--r--  rust/kernel/io/poll.rs | 16
-rw-r--r--  rust/kernel/io/resource.rs | 2
-rw-r--r--  rust/kernel/iommu/mod.rs | 5
-rw-r--r--  rust/kernel/iommu/pgtable.rs | 279
-rw-r--r--  rust/kernel/irq/flags.rs | 2
-rw-r--r--  rust/kernel/irq/request.rs | 6
-rw-r--r--  rust/kernel/kunit.rs | 39
-rw-r--r--  rust/kernel/lib.rs | 6
-rw-r--r--  rust/kernel/list/arc.rs | 14
-rw-r--r--  rust/kernel/maple_tree.rs | 11
-rw-r--r--  rust/kernel/miscdevice.rs | 10
-rw-r--r--  rust/kernel/net/phy.rs | 6
-rw-r--r--  rust/kernel/num/bounded.rs | 49
-rw-r--r--  rust/kernel/page.rs | 36
-rw-r--r--  rust/kernel/pci.rs | 38
-rw-r--r--  rust/kernel/pci/id.rs | 3
-rw-r--r--  rust/kernel/pci/io.rs | 214
-rw-r--r--  rust/kernel/platform.rs | 73
-rw-r--r--  rust/kernel/print.rs | 153
-rw-r--r--  rust/kernel/ptr.rs | 12
-rw-r--r--  rust/kernel/pwm.rs | 124
-rw-r--r--  rust/kernel/rbtree.rs | 31
-rw-r--r--  rust/kernel/regulator.rs | 9
-rw-r--r--  rust/kernel/safety.rs | 53
-rw-r--r--  rust/kernel/scatterlist.rs | 3
-rw-r--r--  rust/kernel/seq_file.rs | 4
-rw-r--r--  rust/kernel/soc.rs | 135
-rw-r--r--  rust/kernel/sync.rs | 73
-rw-r--r--  rust/kernel/sync/arc.rs | 3
-rw-r--r--  rust/kernel/sync/aref.rs | 3
-rw-r--r--  rust/kernel/sync/atomic/internal.rs | 114
-rw-r--r--  rust/kernel/sync/atomic/predefine.rs | 66
-rw-r--r--  rust/kernel/sync/lock.rs | 7
-rw-r--r--  rust/kernel/sync/lock/global.rs | 2
-rw-r--r--  rust/kernel/sync/lock/mutex.rs | 5
-rw-r--r--  rust/kernel/sync/lock/spinlock.rs | 5
-rw-r--r--  rust/kernel/sync/refcount.rs | 3
-rw-r--r--  rust/kernel/sync/set_once.rs | 8
-rw-r--r--  rust/kernel/task.rs | 24
-rw-r--r--  rust/kernel/transmute.rs | 8
-rw-r--r--  rust/kernel/usb.rs | 48
71 files changed, 2616 insertions, 742 deletions
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
index 56f3c180e8f6..93c0db1f6655 100644
--- a/rust/kernel/auxiliary.rs
+++ b/rust/kernel/auxiliary.rs
@@ -5,31 +5,51 @@
//! C header: [`include/linux/auxiliary_bus.h`](srctree/include/linux/auxiliary_bus.h)
use crate::{
- bindings, container_of, device,
- device_id::{RawDeviceId, RawDeviceIdIndex},
+ bindings,
+ container_of,
+ device,
+ device_id::{
+ RawDeviceId,
+ RawDeviceIdIndex, //
+ },
devres::Devres,
driver,
- error::{from_result, to_result, Result},
+ error::{
+ from_result,
+ to_result, //
+ },
prelude::*,
types::Opaque,
- ThisModule,
+ ThisModule, //
};
use core::{
marker::PhantomData,
mem::offset_of,
- ptr::{addr_of_mut, NonNull},
+ ptr::{
+ addr_of_mut,
+ NonNull, //
+ },
};
/// An adapter for the registration of auxiliary drivers.
pub struct Adapter<T: Driver>(T);
-// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// SAFETY:
+// - `bindings::auxiliary_driver` is a C type declared as `repr(C)`.
+// - `T` is the type of the driver's device private data.
+// - `struct auxiliary_driver` embeds a `struct device_driver`.
+// - `DEVICE_DRIVER_OFFSET` is the correct byte offset to the embedded `struct device_driver`.
+unsafe impl<T: Driver + 'static> driver::DriverLayout for Adapter<T> {
+ type DriverType = bindings::auxiliary_driver;
+ type DriverData = T;
+ const DEVICE_DRIVER_OFFSET: usize = core::mem::offset_of!(Self::DriverType, driver);
+}
+
+// SAFETY: A call to `unregister` for a given instance of `DriverType` is guaranteed to be valid if
// a preceding call to `register` has been successful.
unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
- type RegType = bindings::auxiliary_driver;
-
unsafe fn register(
- adrv: &Opaque<Self::RegType>,
+ adrv: &Opaque<Self::DriverType>,
name: &'static CStr,
module: &'static ThisModule,
) -> Result {
@@ -41,14 +61,14 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
(*adrv.get()).id_table = T::ID_TABLE.as_ptr();
}
- // SAFETY: `adrv` is guaranteed to be a valid `RegType`.
+ // SAFETY: `adrv` is guaranteed to be a valid `DriverType`.
to_result(unsafe {
bindings::__auxiliary_driver_register(adrv.get(), module.0, name.as_char_ptr())
})
}
- unsafe fn unregister(adrv: &Opaque<Self::RegType>) {
- // SAFETY: `adrv` is guaranteed to be a valid `RegType`.
+ unsafe fn unregister(adrv: &Opaque<Self::DriverType>) {
+ // SAFETY: `adrv` is guaranteed to be a valid `DriverType`.
unsafe { bindings::auxiliary_driver_unregister(adrv.get()) }
}
}
@@ -81,13 +101,15 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: The auxiliary bus only ever calls the probe callback with a valid pointer to a
// `struct auxiliary_device`.
//
- // INVARIANT: `adev` is valid for the duration of `probe_callback()`.
+ // INVARIANT: `adev` is valid for the duration of `remove_callback()`.
let adev = unsafe { &*adev.cast::<Device<device::CoreInternal>>() };
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- drop(unsafe { adev.as_ref().drvdata_obtain::<T>() });
+ let data = unsafe { adev.as_ref().drvdata_borrow::<T>() };
+
+ T::unbind(adev, data);
}
}
@@ -110,12 +132,7 @@ impl DeviceId {
let name = name.to_bytes_with_nul();
let modname = modname.to_bytes_with_nul();
- // TODO: Replace with `bindings::auxiliary_device_id::default()` once stabilized for
- // `const`.
- //
- // SAFETY: FFI type is valid to be zero-initialized.
- let mut id: bindings::auxiliary_device_id = unsafe { core::mem::zeroed() };
-
+ let mut id: bindings::auxiliary_device_id = pin_init::zeroed();
let mut i = 0;
while i < modname.len() {
id.name[i] = modname[i];
@@ -187,6 +204,20 @@ pub trait Driver {
///
/// Called when an auxiliary device matches a corresponding driver.
fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> impl PinInit<Self, Error>;
+
+ /// Auxiliary driver unbind.
+ ///
+ /// Called when a [`Device`] is unbound from its bound [`Driver`]. Implementing this callback
+ /// is optional.
+ ///
+ /// This callback serves as a place for drivers to perform teardown operations that require a
+ /// `&Device<Core>` or `&Device<Bound>` reference. For instance, drivers may try to perform I/O
+ /// operations to gracefully tear down the device.
+ ///
+ /// Otherwise, release operations for driver resources should be performed in `Self::drop`.
+ fn unbind(dev: &Device<device::Core>, this: Pin<&Self>) {
+ let _ = (dev, this);
+ }
}
/// The auxiliary device representation.
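
As an outline of how a driver might use the new optional `unbind()` hook: `MyDriver` is hypothetical and the remaining `auxiliary::Driver` items are elided, so this is a sketch rather than a complete driver.

use kernel::{auxiliary, device, prelude::*};

struct MyDriver;

impl auxiliary::Driver for MyDriver {
    // `IdInfo`, `ID_TABLE` and `probe()` elided; they are implemented as usual.

    fn unbind(dev: &auxiliary::Device<device::Core>, this: Pin<&Self>) {
        // The device is still bound here, so I/O teardown (quiescing queues,
        // masking interrupts) is allowed. Releasing driver-owned resources
        // still belongs in `Drop`.
        let _ = (dev, this);
    }
}
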
diff --git a/rust/kernel/bits.rs b/rust/kernel/bits.rs
index 553d50265883..2daead125626 100644
--- a/rust/kernel/bits.rs
+++ b/rust/kernel/bits.rs
@@ -27,7 +27,8 @@ macro_rules! impl_bit_fn {
///
/// This version is the default and should be used if `n` is known at
/// compile time.
- #[inline]
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
pub const fn [<bit_ $ty>](n: u32) -> $ty {
build_assert!(n < <$ty>::BITS);
(1 as $ty) << n
@@ -75,7 +76,8 @@ macro_rules! impl_genmask_fn {
/// This version is the default and should be used if the range is known
/// at compile time.
$(#[$genmask_ex])*
- #[inline]
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
pub const fn [<genmask_ $ty>](range: RangeInclusive<u32>) -> $ty {
let start = *range.start();
let end = *range.end();
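
A minimal usage sketch for the generated helpers, assuming they are exported as `kernel::bits::{bit_u32, genmask_u32}`; because both are `#[inline(always)]`, the `build_assert!` error path is optimized out even when the argument flows in from a caller.

use kernel::bits::{bit_u32, genmask_u32};

// Bit 3 set: 0b0000_1000.
const IRQ_ENABLE: u32 = bit_u32(3);
// Bits 4..=7 set: 0b1111_0000.
const FIELD_MASK: u32 = genmask_u32(4..=7);

#[inline(always)]
fn field(reg: u32) -> u32 {
    (reg & FIELD_MASK) >> 4
}
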
diff --git a/rust/kernel/block/mq/gen_disk.rs b/rust/kernel/block/mq/gen_disk.rs
index 1ce815c8cdab..c8b0ecb17082 100644
--- a/rust/kernel/block/mq/gen_disk.rs
+++ b/rust/kernel/block/mq/gen_disk.rs
@@ -107,8 +107,7 @@ impl GenDiskBuilder {
drop(unsafe { T::QueueData::from_foreign(data) });
});
- // SAFETY: `bindings::queue_limits` contain only fields that are valid when zeroed.
- let mut lim: bindings::queue_limits = unsafe { core::mem::zeroed() };
+ let mut lim: bindings::queue_limits = pin_init::zeroed();
lim.logical_block_size = self.logical_block_size;
lim.physical_block_size = self.physical_block_size;
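
The same replacement recurs throughout this series: for a C struct whose all-zero bit pattern is valid, the open-coded `unsafe { core::mem::zeroed() }` plus a per-call-site SAFETY comment becomes a safe `pin_init::zeroed()` call, bounded by `Zeroable`. A sketch, assuming `bindings::queue_limits` implements `Zeroable` as this hunk implies:

fn default_limits(logical_block_size: u32) -> bindings::queue_limits {
    // The `Zeroable` bound on `pin_init::zeroed()` carries the proof that an
    // all-zero `queue_limits` is a valid value, so no `unsafe` is needed.
    let mut lim: bindings::queue_limits = pin_init::zeroed();
    lim.logical_block_size = logical_block_size;
    lim
}
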
diff --git a/rust/kernel/block/mq/tag_set.rs b/rust/kernel/block/mq/tag_set.rs
index c3cf56d52bee..dae9df408a86 100644
--- a/rust/kernel/block/mq/tag_set.rs
+++ b/rust/kernel/block/mq/tag_set.rs
@@ -38,9 +38,7 @@ impl<T: Operations> TagSet<T> {
num_tags: u32,
num_maps: u32,
) -> impl PinInit<Self, error::Error> {
- // SAFETY: `blk_mq_tag_set` only contains integers and pointers, which
- // all are allowed to be 0.
- let tag_set: bindings::blk_mq_tag_set = unsafe { core::mem::zeroed() };
+ let tag_set: bindings::blk_mq_tag_set = pin_init::zeroed();
let tag_set: Result<_> = core::mem::size_of::<RequestDataWrapper>()
.try_into()
.map(|cmd_size| {
diff --git a/rust/kernel/bug.rs b/rust/kernel/bug.rs
index 36aef43e5ebe..ed943960f851 100644
--- a/rust/kernel/bug.rs
+++ b/rust/kernel/bug.rs
@@ -11,9 +11,9 @@
#[cfg(all(CONFIG_BUG, not(CONFIG_UML), not(CONFIG_LOONGARCH), not(CONFIG_ARM)))]
#[cfg(CONFIG_DEBUG_BUGVERBOSE)]
macro_rules! warn_flags {
- ($flags:expr) => {
+ ($file:expr, $flags:expr) => {
const FLAGS: u32 = $crate::bindings::BUGFLAG_WARNING | $flags;
- const _FILE: &[u8] = file!().as_bytes();
+ const _FILE: &[u8] = $file.as_bytes();
// Plus one for null-terminator.
static FILE: [u8; _FILE.len() + 1] = {
let mut bytes = [0; _FILE.len() + 1];
@@ -50,7 +50,7 @@ macro_rules! warn_flags {
#[cfg(all(CONFIG_BUG, not(CONFIG_UML), not(CONFIG_LOONGARCH), not(CONFIG_ARM)))]
#[cfg(not(CONFIG_DEBUG_BUGVERBOSE))]
macro_rules! warn_flags {
- ($flags:expr) => {
+ ($file:expr, $flags:expr) => {
const FLAGS: u32 = $crate::bindings::BUGFLAG_WARNING | $flags;
// SAFETY:
@@ -75,7 +75,7 @@ macro_rules! warn_flags {
#[doc(hidden)]
#[cfg(all(CONFIG_BUG, CONFIG_UML))]
macro_rules! warn_flags {
- ($flags:expr) => {
+ ($file:expr, $flags:expr) => {
// SAFETY: It is always safe to call `warn_slowpath_fmt()`
// with a valid null-terminated string.
unsafe {
@@ -93,7 +93,7 @@ macro_rules! warn_flags {
#[doc(hidden)]
#[cfg(all(CONFIG_BUG, any(CONFIG_LOONGARCH, CONFIG_ARM)))]
macro_rules! warn_flags {
- ($flags:expr) => {
+ ($file:expr, $flags:expr) => {
// SAFETY: It is always safe to call `WARN_ON()`.
unsafe { $crate::bindings::WARN_ON(true) }
};
@@ -103,7 +103,7 @@ macro_rules! warn_flags {
#[doc(hidden)]
#[cfg(not(CONFIG_BUG))]
macro_rules! warn_flags {
- ($flags:expr) => {};
+ ($file:expr, $flags:expr) => {};
}
#[doc(hidden)]
@@ -116,10 +116,16 @@ pub const fn bugflag_taint(value: u32) -> u32 {
macro_rules! warn_on {
($cond:expr) => {{
let cond = $cond;
+
+ #[cfg(CONFIG_DEBUG_BUGVERBOSE_DETAILED)]
+ const _COND_STR: &str = concat!("[", stringify!($cond), "] ", file!());
+ #[cfg(not(CONFIG_DEBUG_BUGVERBOSE_DETAILED))]
+ const _COND_STR: &str = file!();
+
if cond {
const WARN_ON_FLAGS: u32 = $crate::bug::bugflag_taint($crate::bindings::TAINT_WARN);
- $crate::warn_flags!(WARN_ON_FLAGS);
+ $crate::warn_flags!(_COND_STR, WARN_ON_FLAGS);
}
cond
}};
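
A usage sketch for the reworked macro, assuming `warn_on!` stays exported at the crate root: the file string (and, with `CONFIG_DEBUG_BUGVERBOSE_DETAILED`, the stringified condition) is now captured at the call site and handed to `warn_flags!` through the new `$file` parameter.

use kernel::prelude::*;

fn submit(len: usize) -> Result {
    // Like C's WARN_ON(), `warn_on!` evaluates and returns the condition.
    if kernel::warn_on!(len > kernel::page::PAGE_SIZE) {
        return Err(EINVAL);
    }
    Ok(())
}
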
diff --git a/rust/kernel/build_assert.rs b/rust/kernel/build_assert.rs
index 6331b15d7c4d..f8124dbc663f 100644
--- a/rust/kernel/build_assert.rs
+++ b/rust/kernel/build_assert.rs
@@ -61,8 +61,13 @@ macro_rules! build_error {
/// build_assert!(N > 1); // Build-time check
/// assert!(N > 1); // Run-time check
/// }
+/// ```
///
-/// #[inline]
+/// When a condition depends on a function argument, the function must be annotated with
+/// `#[inline(always)]`. Without this attribute, the compiler may choose to not inline the
+/// function, preventing it from optimizing out the error path.
+/// ```
+/// #[inline(always)]
/// fn bar(n: usize) {
/// // `static_assert!(n > 1);` is not allowed
/// build_assert!(n > 1); // Build-time check
diff --git a/rust/kernel/clk.rs b/rust/kernel/clk.rs
index c1cfaeaa36a2..4059aff34d09 100644
--- a/rust/kernel/clk.rs
+++ b/rust/kernel/clk.rs
@@ -94,7 +94,7 @@ mod common_clk {
/// # Invariants
///
/// A [`Clk`] instance holds either a pointer to a valid [`struct clk`] created by the C
- /// portion of the kernel or a NULL pointer.
+ /// portion of the kernel or a `NULL` pointer.
///
/// Instances of this type are reference-counted. Calling [`Clk::get`] ensures that the
/// allocation remains valid for the lifetime of the [`Clk`].
@@ -104,13 +104,12 @@ mod common_clk {
/// The following example demonstrates how to obtain and configure a clock for a device.
///
/// ```
- /// use kernel::c_str;
/// use kernel::clk::{Clk, Hertz};
/// use kernel::device::Device;
/// use kernel::error::Result;
///
/// fn configure_clk(dev: &Device) -> Result {
- /// let clk = Clk::get(dev, Some(c_str!("apb_clk")))?;
+ /// let clk = Clk::get(dev, Some(c"apb_clk"))?;
///
/// clk.prepare_enable()?;
///
@@ -272,13 +271,12 @@ mod common_clk {
/// device. The code functions correctly whether or not the clock is available.
///
/// ```
- /// use kernel::c_str;
/// use kernel::clk::{OptionalClk, Hertz};
/// use kernel::device::Device;
/// use kernel::error::Result;
///
/// fn configure_clk(dev: &Device) -> Result {
- /// let clk = OptionalClk::get(dev, Some(c_str!("apb_clk")))?;
+ /// let clk = OptionalClk::get(dev, Some(c"apb_clk"))?;
///
/// clk.prepare_enable()?;
///
diff --git a/rust/kernel/configfs.rs b/rust/kernel/configfs.rs
index 466fb7f40762..2339c6467325 100644
--- a/rust/kernel/configfs.rs
+++ b/rust/kernel/configfs.rs
@@ -21,7 +21,6 @@
//!
//! ```ignore
//! use kernel::alloc::flags;
-//! use kernel::c_str;
//! use kernel::configfs_attrs;
//! use kernel::configfs;
//! use kernel::new_mutex;
@@ -50,7 +49,7 @@
//!
//! try_pin_init!(Self {
//! config <- configfs::Subsystem::new(
-//! c_str!("rust_configfs"), item_type, Configuration::new()
+//! c"rust_configfs", item_type, Configuration::new()
//! ),
//! })
//! }
@@ -66,7 +65,7 @@
//! impl Configuration {
//! fn new() -> impl PinInit<Self, Error> {
//! try_pin_init!(Self {
-//! message: c_str!("Hello World\n"),
+//! message: c"Hello World\n",
//! bar <- new_mutex!((KBox::new([0; PAGE_SIZE], flags::GFP_KERNEL)?, 0)),
//! })
//! }
@@ -1000,7 +999,9 @@ macro_rules! configfs_attrs {
static [< $data:upper _ $name:upper _ATTR >]:
$crate::configfs::Attribute<$attr, $data, $data> =
unsafe {
- $crate::configfs::Attribute::new(c_str!(::core::stringify!($name)))
+ $crate::configfs::Attribute::new(
+ $crate::c_str!(::core::stringify!($name)),
+ )
};
)*
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index f968fbd22890..76faa1ac8501 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -840,7 +840,6 @@ pub trait Driver {
/// ```
/// use kernel::{
/// cpufreq,
-/// c_str,
/// device::{Core, Device},
/// macros::vtable,
/// of, platform,
@@ -853,7 +852,7 @@ pub trait Driver {
///
/// #[vtable]
/// impl cpufreq::Driver for SampleDriver {
-/// const NAME: &'static CStr = c_str!("cpufreq-sample");
+/// const NAME: &'static CStr = c"cpufreq-sample";
/// const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV;
/// const BOOST_ENABLED: bool = true;
///
@@ -1015,6 +1014,8 @@ impl<T: Driver> Registration<T> {
..pin_init::zeroed()
};
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
const fn copy_name(name: &'static CStr) -> [c_char; CPUFREQ_NAME_LEN] {
let src = name.to_bytes_with_nul();
let mut dst = [0; CPUFREQ_NAME_LEN];
diff --git a/rust/kernel/cpumask.rs b/rust/kernel/cpumask.rs
index c1d17826ae7b..44bb36636ee3 100644
--- a/rust/kernel/cpumask.rs
+++ b/rust/kernel/cpumask.rs
@@ -39,7 +39,7 @@ use core::ops::{Deref, DerefMut};
/// fn set_clear_cpu(ptr: *mut bindings::cpumask, set_cpu: CpuId, clear_cpu: CpuId) {
/// // SAFETY: The `ptr` is valid for writing and remains valid for the lifetime of the
/// // returned reference.
-/// let mask = unsafe { Cpumask::as_mut_ref(ptr) };
+/// let mask = unsafe { Cpumask::from_raw_mut(ptr) };
///
/// mask.set(set_cpu);
/// mask.clear(clear_cpu);
@@ -49,13 +49,13 @@ use core::ops::{Deref, DerefMut};
pub struct Cpumask(Opaque<bindings::cpumask>);
impl Cpumask {
- /// Creates a mutable reference to an existing `struct cpumask` pointer.
+ /// Creates a mutable reference from an existing `struct cpumask` pointer.
///
/// # Safety
///
/// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime
/// of the returned reference.
- pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask) -> &'a mut Self {
+ pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpumask) -> &'a mut Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the
@@ -63,13 +63,13 @@ impl Cpumask {
unsafe { &mut *ptr.cast() }
}
- /// Creates a reference to an existing `struct cpumask` pointer.
+ /// Creates a reference from an existing `struct cpumask` pointer.
///
/// # Safety
///
/// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime
/// of the returned reference.
- pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask) -> &'a Self {
+ pub unsafe fn from_raw<'a>(ptr: *const bindings::cpumask) -> &'a Self {
// SAFETY: Guaranteed by the safety requirements of the function.
//
// INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the
diff --git a/rust/kernel/debugfs.rs b/rust/kernel/debugfs.rs
index facad81e8290..d7b8014a6474 100644
--- a/rust/kernel/debugfs.rs
+++ b/rust/kernel/debugfs.rs
@@ -8,28 +8,52 @@
// When DebugFS is disabled, many parameters are dead. Linting for this isn't helpful.
#![cfg_attr(not(CONFIG_DEBUG_FS), allow(unused_variables))]
-use crate::fmt;
-use crate::prelude::*;
-use crate::str::CStr;
#[cfg(CONFIG_DEBUG_FS)]
use crate::sync::Arc;
-use crate::uaccess::UserSliceReader;
-use core::marker::PhantomData;
-use core::marker::PhantomPinned;
+use crate::{
+ fmt,
+ prelude::*,
+ str::CStr,
+ uaccess::UserSliceReader, //
+};
+
#[cfg(CONFIG_DEBUG_FS)]
use core::mem::ManuallyDrop;
-use core::ops::Deref;
+use core::{
+ marker::{
+ PhantomData,
+ PhantomPinned, //
+ },
+ ops::Deref,
+};
mod traits;
-pub use traits::{BinaryReader, BinaryReaderMut, BinaryWriter, Reader, Writer};
+pub use traits::{
+ BinaryReader,
+ BinaryReaderMut,
+ BinaryWriter,
+ Reader,
+ Writer, //
+};
mod callback_adapters;
-use callback_adapters::{FormatAdapter, NoWriter, WritableAdapter};
+use callback_adapters::{
+ FormatAdapter,
+ NoWriter,
+ WritableAdapter, //
+};
+
mod file_ops;
use file_ops::{
- BinaryReadFile, BinaryReadWriteFile, BinaryWriteFile, FileOps, ReadFile, ReadWriteFile,
- WriteFile,
+ BinaryReadFile,
+ BinaryReadWriteFile,
+ BinaryWriteFile,
+ FileOps,
+ ReadFile,
+ ReadWriteFile,
+ WriteFile, //
};
+
#[cfg(CONFIG_DEBUG_FS)]
mod entry;
#[cfg(CONFIG_DEBUG_FS)]
@@ -102,9 +126,8 @@ impl Dir {
/// # Examples
///
/// ```
- /// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
- /// let debugfs = Dir::new(c_str!("parent"));
+ /// let debugfs = Dir::new(c"parent");
/// ```
pub fn new(name: &CStr) -> Self {
Dir::create(name, None)
@@ -115,10 +138,9 @@ impl Dir {
/// # Examples
///
/// ```
- /// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
- /// let parent = Dir::new(c_str!("parent"));
- /// let child = parent.subdir(c_str!("child"));
+ /// let parent = Dir::new(c"parent");
+ /// let child = parent.subdir(c"child");
/// ```
pub fn subdir(&self, name: &CStr) -> Self {
Dir::create(name, Some(self))
@@ -132,11 +154,10 @@ impl Dir {
/// # Examples
///
/// ```
- /// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
/// # use kernel::prelude::*;
- /// # let dir = Dir::new(c_str!("my_debugfs_dir"));
- /// let file = KBox::pin_init(dir.read_only_file(c_str!("foo"), 200), GFP_KERNEL)?;
+ /// # let dir = Dir::new(c"my_debugfs_dir");
+ /// let file = KBox::pin_init(dir.read_only_file(c"foo", 200), GFP_KERNEL)?;
/// // "my_debugfs_dir/foo" now contains the number 200.
/// // The file is removed when `file` is dropped.
/// # Ok::<(), Error>(())
@@ -161,11 +182,10 @@ impl Dir {
/// # Examples
///
/// ```
- /// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
/// # use kernel::prelude::*;
- /// # let dir = Dir::new(c_str!("my_debugfs_dir"));
- /// let file = KBox::pin_init(dir.read_binary_file(c_str!("foo"), [0x1, 0x2]), GFP_KERNEL)?;
+ /// # let dir = Dir::new(c"my_debugfs_dir");
+ /// let file = KBox::pin_init(dir.read_binary_file(c"foo", [0x1, 0x2]), GFP_KERNEL)?;
/// # Ok::<(), Error>(())
/// ```
pub fn read_binary_file<'a, T, E: 'a>(
@@ -187,21 +207,25 @@ impl Dir {
/// # Examples
///
/// ```
- /// # use core::sync::atomic::{AtomicU32, Ordering};
- /// # use kernel::c_str;
- /// # use kernel::debugfs::Dir;
- /// # use kernel::prelude::*;
- /// # let dir = Dir::new(c_str!("foo"));
+ /// # use kernel::{
+ /// # debugfs::Dir,
+ /// # prelude::*,
+ /// # sync::atomic::{
+ /// # Atomic,
+ /// # Relaxed,
+ /// # },
+ /// # };
+ /// # let dir = Dir::new(c"foo");
/// let file = KBox::pin_init(
- /// dir.read_callback_file(c_str!("bar"),
- /// AtomicU32::new(3),
+ /// dir.read_callback_file(c"bar",
+ /// Atomic::<u32>::new(3),
/// &|val, f| {
- /// let out = val.load(Ordering::Relaxed);
+ /// let out = val.load(Relaxed);
/// writeln!(f, "{out:#010x}")
/// }),
/// GFP_KERNEL)?;
/// // Reading "foo/bar" will show "0x00000003".
- /// file.store(10, Ordering::Relaxed);
+ /// file.store(10, Relaxed);
/// // Reading "foo/bar" will now show "0x0000000a".
/// # Ok::<(), Error>(())
/// ```
diff --git a/rust/kernel/debugfs/callback_adapters.rs b/rust/kernel/debugfs/callback_adapters.rs
index a260d8dee051..dee7d021e18c 100644
--- a/rust/kernel/debugfs/callback_adapters.rs
+++ b/rust/kernel/debugfs/callback_adapters.rs
@@ -4,12 +4,21 @@
//! Adapters which allow the user to supply a write or read implementation as a value rather
//! than a trait implementation. If provided, it will override the trait implementation.
-use super::{Reader, Writer};
-use crate::fmt;
-use crate::prelude::*;
-use crate::uaccess::UserSliceReader;
-use core::marker::PhantomData;
-use core::ops::Deref;
+use super::{
+ Reader,
+ Writer, //
+};
+
+use crate::{
+ fmt,
+ prelude::*,
+ uaccess::UserSliceReader, //
+};
+
+use core::{
+ marker::PhantomData,
+ ops::Deref, //
+};
/// # Safety
///
diff --git a/rust/kernel/debugfs/entry.rs b/rust/kernel/debugfs/entry.rs
index 706cb7f73d6c..46aad64896ec 100644
--- a/rust/kernel/debugfs/entry.rs
+++ b/rust/kernel/debugfs/entry.rs
@@ -1,10 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
-use crate::debugfs::file_ops::FileOps;
-use crate::ffi::c_void;
-use crate::str::{CStr, CStrExt as _};
-use crate::sync::Arc;
+use crate::{
+ debugfs::file_ops::FileOps,
+ prelude::*,
+ str::{
+ CStr,
+ CStrExt as _, //
+ },
+ sync::Arc,
+};
+
use core::marker::PhantomData;
/// Owning handle to a DebugFS entry.
@@ -148,7 +154,7 @@ impl Entry<'_> {
/// # Guarantees
///
/// Due to the type invariant, the value returned from this function will always be an error
- /// code, NULL, or a live DebugFS directory. If it is live, it will remain live at least as
+ /// code, `NULL`, or a live DebugFS directory. If it is live, it will remain live at least as
/// long as this entry lives.
pub(crate) fn as_ptr(&self) -> *mut bindings::dentry {
self.entry
diff --git a/rust/kernel/debugfs/file_ops.rs b/rust/kernel/debugfs/file_ops.rs
index 8a0442d6dd7a..f15908f71c4a 100644
--- a/rust/kernel/debugfs/file_ops.rs
+++ b/rust/kernel/debugfs/file_ops.rs
@@ -1,14 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
-use super::{BinaryReader, BinaryWriter, Reader, Writer};
-use crate::debugfs::callback_adapters::Adapter;
-use crate::fmt;
-use crate::fs::file;
-use crate::prelude::*;
-use crate::seq_file::SeqFile;
-use crate::seq_print;
-use crate::uaccess::UserSlice;
+use super::{
+ BinaryReader,
+ BinaryWriter,
+ Reader,
+ Writer, //
+};
+
+use crate::{
+ debugfs::callback_adapters::Adapter,
+ fmt,
+ fs::file,
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ uaccess::UserSlice, //
+};
+
use core::marker::PhantomData;
#[cfg(CONFIG_DEBUG_FS)]
@@ -126,8 +135,7 @@ impl<T: Writer + Sync> ReadFile<T> for T {
llseek: Some(bindings::seq_lseek),
release: Some(bindings::single_release),
open: Some(writer_open::<Self>),
- // SAFETY: `file_operations` supports zeroes in all fields.
- ..unsafe { core::mem::zeroed() }
+ ..pin_init::zeroed()
};
// SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`.
// `open`'s only requirement beyond what is provided to all open functions is that the
@@ -179,8 +187,7 @@ impl<T: Writer + Reader + Sync> ReadWriteFile<T> for T {
write: Some(write::<T>),
llseek: Some(bindings::seq_lseek),
release: Some(bindings::single_release),
- // SAFETY: `file_operations` supports zeroes in all fields.
- ..unsafe { core::mem::zeroed() }
+ ..pin_init::zeroed()
};
// SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`
// and `write`.
@@ -235,8 +242,7 @@ impl<T: Reader + Sync> WriteFile<T> for T {
open: Some(write_only_open),
write: Some(write_only_write::<T>),
llseek: Some(bindings::noop_llseek),
- // SAFETY: `file_operations` supports zeroes in all fields.
- ..unsafe { core::mem::zeroed() }
+ ..pin_init::zeroed()
};
// SAFETY:
// * `write_only_open` populates the file private data with the inode private data
@@ -288,8 +294,7 @@ impl<T: BinaryWriter + Sync> BinaryReadFile<T> for T {
read: Some(blob_read::<T>),
llseek: Some(bindings::default_llseek),
open: Some(bindings::simple_open),
- // SAFETY: `file_operations` supports zeroes in all fields.
- ..unsafe { core::mem::zeroed() }
+ ..pin_init::zeroed()
};
// SAFETY:
@@ -343,8 +348,7 @@ impl<T: BinaryReader + Sync> BinaryWriteFile<T> for T {
write: Some(blob_write::<T>),
llseek: Some(bindings::default_llseek),
open: Some(bindings::simple_open),
- // SAFETY: `file_operations` supports zeroes in all fields.
- ..unsafe { core::mem::zeroed() }
+ ..pin_init::zeroed()
};
// SAFETY:
@@ -369,8 +373,7 @@ impl<T: BinaryWriter + BinaryReader + Sync> BinaryReadWriteFile<T> for T {
write: Some(blob_write::<T>),
llseek: Some(bindings::default_llseek),
open: Some(bindings::simple_open),
- // SAFETY: `file_operations` supports zeroes in all fields.
- ..unsafe { core::mem::zeroed() }
+ ..pin_init::zeroed()
};
// SAFETY:
diff --git a/rust/kernel/debugfs/traits.rs b/rust/kernel/debugfs/traits.rs
index 3eee60463fd5..8c39524b6a99 100644
--- a/rust/kernel/debugfs/traits.rs
+++ b/rust/kernel/debugfs/traits.rs
@@ -3,17 +3,38 @@
//! Traits for rendering or updating values exported to DebugFS.
-use crate::alloc::Allocator;
-use crate::fmt;
-use crate::fs::file;
-use crate::prelude::*;
-use crate::sync::atomic::{Atomic, AtomicBasicOps, AtomicType, Relaxed};
-use crate::sync::Arc;
-use crate::sync::Mutex;
-use crate::transmute::{AsBytes, FromBytes};
-use crate::uaccess::{UserSliceReader, UserSliceWriter};
-use core::ops::{Deref, DerefMut};
-use core::str::FromStr;
+use crate::{
+ alloc::Allocator,
+ fmt,
+ fs::file,
+ prelude::*,
+ sync::{
+ atomic::{
+ Atomic,
+ AtomicBasicOps,
+ AtomicType,
+ Relaxed, //
+ },
+ Arc,
+ Mutex, //
+ },
+ transmute::{
+ AsBytes,
+ FromBytes, //
+ },
+ uaccess::{
+ UserSliceReader,
+ UserSliceWriter, //
+ },
+};
+
+use core::{
+ ops::{
+ Deref,
+ DerefMut, //
+ },
+ str::FromStr,
+};
/// A trait for types that can be written into a string.
///
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index c79be2e2bfe3..94e0548e7687 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -5,16 +5,20 @@
//! C header: [`include/linux/device.h`](srctree/include/linux/device.h)
use crate::{
- bindings, fmt,
+ bindings,
+ fmt,
prelude::*,
sync::aref::ARef,
- types::{ForeignOwnable, Opaque},
+ types::{
+ ForeignOwnable,
+ Opaque, //
+ }, //
+};
+use core::{
+ any::TypeId,
+ marker::PhantomData,
+ ptr, //
};
-use core::{any::TypeId, marker::PhantomData, ptr};
-
-#[cfg(CONFIG_PRINTK)]
-use crate::c_str;
-use crate::str::CStrExt as _;
pub mod property;
@@ -67,8 +71,9 @@ static_assert!(core::mem::size_of::<bindings::driver_type>() >= core::mem::size_
///
/// # Implementing Bus Devices
///
-/// This section provides a guideline to implement bus specific devices, such as [`pci::Device`] or
-/// [`platform::Device`].
+/// This section provides a guideline to implement bus specific devices, such as:
+#[cfg_attr(CONFIG_PCI, doc = "* [`pci::Device`](kernel::pci::Device)")]
+/// * [`platform::Device`]
///
/// A bus specific device should be defined as follows.
///
@@ -158,9 +163,8 @@ static_assert!(core::mem::size_of::<bindings::driver_type>() >= core::mem::size_
/// `bindings::device::release` is valid to be called from any thread, hence `ARef<Device>` can be
/// dropped from any thread.
///
-/// [`AlwaysRefCounted`]: kernel::types::AlwaysRefCounted
+/// [`AlwaysRefCounted`]: kernel::sync::aref::AlwaysRefCounted
/// [`impl_device_context_deref`]: kernel::impl_device_context_deref
-/// [`pci::Device`]: kernel::pci::Device
/// [`platform::Device`]: kernel::platform::Device
#[repr(transparent)]
pub struct Device<Ctx: DeviceContext = Normal>(Opaque<bindings::device>, PhantomData<Ctx>);
@@ -233,30 +237,32 @@ impl Device<CoreInternal> {
///
/// # Safety
///
- /// - Must only be called once after a preceding call to [`Device::set_drvdata`].
/// - The type `T` must match the type of the `ForeignOwnable` previously stored by
/// [`Device::set_drvdata`].
- pub unsafe fn drvdata_obtain<T: 'static>(&self) -> Pin<KBox<T>> {
+ pub(crate) unsafe fn drvdata_obtain<T: 'static>(&self) -> Option<Pin<KBox<T>>> {
// SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
let ptr = unsafe { bindings::dev_get_drvdata(self.as_raw()) };
// SAFETY: By the type invariants, `self.as_raw()` is a valid pointer to a `struct device`.
unsafe { bindings::dev_set_drvdata(self.as_raw(), core::ptr::null_mut()) };
+ if ptr.is_null() {
+ return None;
+ }
+
// SAFETY:
- // - By the safety requirements of this function, `ptr` comes from a previous call to
- // `into_foreign()`.
+ // - If `ptr` is not NULL, it comes from a previous call to `into_foreign()`.
// - `dev_get_drvdata()` guarantees to return the same pointer given to `dev_set_drvdata()`
// in `into_foreign()`.
- unsafe { Pin::<KBox<T>>::from_foreign(ptr.cast()) }
+ Some(unsafe { Pin::<KBox<T>>::from_foreign(ptr.cast()) })
}
/// Borrow the driver's private data bound to this [`Device`].
///
/// # Safety
///
- /// - Must only be called after a preceding call to [`Device::set_drvdata`] and before
- /// [`Device::drvdata_obtain`].
+ /// - Must only be called after a preceding call to [`Device::set_drvdata`] and before the
+ /// device is fully unbound.
/// - The type `T` must match the type of the `ForeignOwnable` previously stored by
/// [`Device::set_drvdata`].
pub unsafe fn drvdata_borrow<T: 'static>(&self) -> Pin<&T> {
@@ -272,7 +278,7 @@ impl Device<Bound> {
/// # Safety
///
/// - Must only be called after a preceding call to [`Device::set_drvdata`] and before
- /// [`Device::drvdata_obtain`].
+ /// the device is fully unbound.
/// - The type `T` must match the type of the `ForeignOwnable` previously stored by
/// [`Device::set_drvdata`].
unsafe fn drvdata_unchecked<T: 'static>(&self) -> Pin<&T> {
@@ -321,7 +327,7 @@ impl Device<Bound> {
// SAFETY:
// - The above check of `dev_get_drvdata()` guarantees that we are called after
- // `set_drvdata()` and before `drvdata_obtain()`.
+ // `set_drvdata()`.
// - We've just checked that the type of the driver's private data is in fact `T`.
Ok(unsafe { self.drvdata_unchecked() })
}
@@ -463,7 +469,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
bindings::_dev_printk(
klevel.as_ptr().cast::<crate::ffi::c_char>(),
self.as_raw(),
- c_str!("%pA").as_char_ptr(),
+ c"%pA".as_char_ptr(),
core::ptr::from_ref(&msg).cast::<crate::ffi::c_void>(),
)
};
@@ -540,7 +546,7 @@ pub trait DeviceContext: private::Sealed {}
/// [`Device<Normal>`]. It is the only [`DeviceContext`] for which it is valid to implement
/// [`AlwaysRefCounted`] for.
///
-/// [`AlwaysRefCounted`]: kernel::types::AlwaysRefCounted
+/// [`AlwaysRefCounted`]: kernel::sync::aref::AlwaysRefCounted
pub struct Normal;
/// The [`Core`] context is the context of a bus specific device when it appears as argument of
@@ -595,6 +601,13 @@ impl DeviceContext for Core {}
impl DeviceContext for CoreInternal {}
impl DeviceContext for Normal {}
+impl<Ctx: DeviceContext> AsRef<Device<Ctx>> for Device<Ctx> {
+ #[inline]
+ fn as_ref(&self) -> &Device<Ctx> {
+ self
+ }
+}
+
/// Convert device references to bus device references.
///
/// Bus devices can implement this trait to allow abstractions to provide the bus device in
@@ -714,7 +727,7 @@ macro_rules! impl_device_context_into_aref {
macro_rules! dev_printk {
($method:ident, $dev:expr, $($f:tt)*) => {
{
- ($dev).$method($crate::prelude::fmt!($($f)*));
+ $crate::device::Device::$method($dev.as_ref(), $crate::prelude::fmt!($($f)*))
}
}
}
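
What the identity `AsRef` impl buys: `dev_printk!` now resolves through `Device::$method($dev.as_ref(), ...)`, so the `dev_*!` macros accept anything that is `AsRef<Device>`, whether a plain `&Device` or a bus-specific device. A hypothetical generic helper as a sketch:

use kernel::{dev_info, device::Device};

fn report<D: AsRef<Device>>(dev: &D) {
    // Expands to `Device::dev_info(dev.as_ref(), ...)`; for a plain `&Device`
    // the new identity `AsRef` impl makes the conversion a no-op.
    dev_info!(dev, "device is alive\n");
}
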
diff --git a/rust/kernel/device/property.rs b/rust/kernel/device/property.rs
index 3a332a8c53a9..5aead835fbbc 100644
--- a/rust/kernel/device/property.rs
+++ b/rust/kernel/device/property.rs
@@ -14,7 +14,8 @@ use crate::{
fmt,
prelude::*,
str::{CStr, CString},
- types::{ARef, Opaque},
+ sync::aref::ARef,
+ types::Opaque,
};
/// A reference-counted fwnode_handle.
@@ -178,11 +179,11 @@ impl FwNode {
/// # Examples
///
/// ```
- /// # use kernel::{c_str, device::{Device, property::FwNode}, str::CString};
+ /// # use kernel::{device::{Device, property::FwNode}, str::CString};
/// fn examples(dev: &Device) -> Result {
/// let fwnode = dev.fwnode().ok_or(ENOENT)?;
- /// let b: u32 = fwnode.property_read(c_str!("some-number")).required_by(dev)?;
- /// if let Some(s) = fwnode.property_read::<CString>(c_str!("some-str")).optional() {
+ /// let b: u32 = fwnode.property_read(c"some-number").required_by(dev)?;
+ /// if let Some(s) = fwnode.property_read::<CString>(c"some-str").optional() {
/// // ...
/// }
/// Ok(())
@@ -359,7 +360,7 @@ impl fmt::Debug for FwNodeReferenceArgs {
}
// SAFETY: Instances of `FwNode` are always reference-counted.
-unsafe impl crate::types::AlwaysRefCounted for FwNode {
+unsafe impl crate::sync::aref::AlwaysRefCounted for FwNode {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the
// refcount is non-zero.
diff --git a/rust/kernel/device_id.rs b/rust/kernel/device_id.rs
index 62c42da12e9d..8e9721446014 100644
--- a/rust/kernel/device_id.rs
+++ b/rust/kernel/device_id.rs
@@ -15,7 +15,7 @@ use core::mem::MaybeUninit;
/// # Safety
///
/// Implementers must ensure that `Self` is layout-compatible with [`RawDeviceId::RawType`];
-/// i.e. it's safe to transmute to `RawDeviceId`.
+/// i.e. it's safe to transmute to `RawType`.
///
/// This requirement is needed so `IdArray::new` can convert `Self` to `RawType` when building
/// the ID table.
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index 835d9c11948e..6afe196be42c 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -8,30 +8,24 @@
use crate::{
alloc::Flags,
bindings,
- device::{Bound, Device},
- error::{to_result, Error, Result},
- ffi::c_void,
+ device::{
+ Bound,
+ Device, //
+ },
+ error::to_result,
prelude::*,
- revocable::{Revocable, RevocableGuard},
- sync::{aref::ARef, rcu, Completion},
- types::{ForeignOwnable, Opaque, ScopeGuard},
+ revocable::{
+ Revocable,
+ RevocableGuard, //
+ },
+ sync::{
+ aref::ARef,
+ rcu,
+ Arc, //
+ },
+ types::ForeignOwnable,
};
-use pin_init::Wrapper;
-
-/// [`Devres`] inner data accessed from [`Devres::callback`].
-#[pin_data]
-struct Inner<T: Send> {
- #[pin]
- data: Revocable<T>,
- /// Tracks whether [`Devres::callback`] has been completed.
- #[pin]
- devm: Completion,
- /// Tracks whether revoking [`Self::data`] has been completed.
- #[pin]
- revoke: Completion,
-}
-
/// This abstraction is meant to be used by subsystems to containerize [`Device`] bound resources to
/// manage their lifetime.
///
@@ -61,14 +55,17 @@ struct Inner<T: Send> {
/// devres::Devres,
/// io::{
/// Io,
-/// IoRaw,
-/// PhysAddr,
+/// IoKnownSize,
+/// Mmio,
+/// MmioRaw,
+/// PhysAddr, //
/// },
+/// prelude::*,
/// };
/// use core::ops::Deref;
///
/// // See also [`pci::Bar`] for a real example.
-/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+/// struct IoMem<const SIZE: usize>(MmioRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
/// /// # Safety
@@ -83,7 +80,7 @@ struct Inner<T: Send> {
/// return Err(ENOMEM);
/// }
///
-/// Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
+/// Ok(IoMem(MmioRaw::new(addr as usize, SIZE)?))
/// }
/// }
///
@@ -95,28 +92,23 @@ struct Inner<T: Send> {
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
-/// type Target = Io<SIZE>;
+/// type Target = Mmio<SIZE>;
///
/// fn deref(&self) -> &Self::Target {
/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
-/// unsafe { Io::from_raw(&self.0) }
+/// unsafe { Mmio::from_raw(&self.0) }
/// }
/// }
/// # fn no_run(dev: &Device<Bound>) -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
-/// let devres = KBox::pin_init(Devres::new(dev, iomem), GFP_KERNEL)?;
+/// let devres = Devres::new(dev, iomem)?;
///
/// let res = devres.try_access().ok_or(ENXIO)?;
/// res.write8(0x42, 0x0);
/// # Ok(())
/// # }
/// ```
-///
-/// # Invariants
-///
-/// `Self::inner` is guaranteed to be initialized and is always accessed read-only.
-#[pin_data(PinnedDrop)]
pub struct Devres<T: Send> {
dev: ARef<Device>,
/// Pointer to [`Self::devres_callback`].
@@ -124,14 +116,7 @@ pub struct Devres<T: Send> {
/// Has to be stored, since Rust does not guarantee to always return the same address for a
/// function. However, the C API uses the address as a key.
callback: unsafe extern "C" fn(*mut c_void),
- /// Contains all the fields shared with [`Self::callback`].
- // TODO: Replace with `UnsafePinned`, once available.
- //
- // Subsequently, the `drop_in_place()` in `Devres::drop` and `Devres::new` as well as the
- // explicit `Send` and `Sync' impls can be removed.
- #[pin]
- inner: Opaque<Inner<T>>,
- _add_action: (),
+ data: Arc<Revocable<T>>,
}
impl<T: Send> Devres<T> {
@@ -139,74 +124,48 @@ impl<T: Send> Devres<T> {
///
/// The `data` encapsulated within the returned `Devres` instance will be
/// [revoked](Revocable) once the device is detached.
- pub fn new<'a, E>(
- dev: &'a Device<Bound>,
- data: impl PinInit<T, E> + 'a,
- ) -> impl PinInit<Self, Error> + 'a
+ pub fn new<E>(dev: &Device<Bound>, data: impl PinInit<T, E>) -> Result<Self>
where
- T: 'a,
Error: From<E>,
{
- try_pin_init!(&this in Self {
- dev: dev.into(),
- callback: Self::devres_callback,
- // INVARIANT: `inner` is properly initialized.
- inner <- Opaque::pin_init(try_pin_init!(Inner {
- devm <- Completion::new(),
- revoke <- Completion::new(),
- data <- Revocable::new(data),
- })),
- // TODO: Replace with "initializer code blocks" [1] once available.
- //
- // [1] https://github.com/Rust-for-Linux/pin-init/pull/69
- _add_action: {
- // SAFETY: `this` is a valid pointer to uninitialized memory.
- let inner = unsafe { &raw mut (*this.as_ptr()).inner };
+ let callback = Self::devres_callback;
+ let data = Arc::pin_init(Revocable::new(data), GFP_KERNEL)?;
+ let devres_data = data.clone();
- // SAFETY:
- // - `dev.as_raw()` is a pointer to a valid bound device.
- // - `inner` is guaranteed to be a valid for the duration of the lifetime of `Self`.
- // - `devm_add_action()` is guaranteed not to call `callback` until `this` has been
- // properly initialized, because we require `dev` (i.e. the *bound* device) to
- // live at least as long as the returned `impl PinInit<Self, Error>`.
- to_result(unsafe {
- bindings::devm_add_action(dev.as_raw(), Some(*callback), inner.cast())
- }).inspect_err(|_| {
- let inner = Opaque::cast_into(inner);
+ // SAFETY:
+ // - `dev.as_raw()` is a pointer to a valid bound device.
+ // - `data` is guaranteed to be valid for the duration of the lifetime of `Self`.
+ // - `devm_add_action()` is guaranteed not to call `callback` for the entire lifetime of
+ // `dev`.
+ to_result(unsafe {
+ bindings::devm_add_action(
+ dev.as_raw(),
+ Some(callback),
+ Arc::as_ptr(&data).cast_mut().cast(),
+ )
+ })?;
- // SAFETY: `inner` is a valid pointer to an `Inner<T>` and valid for both reads
- // and writes.
- unsafe { core::ptr::drop_in_place(inner) };
- })?;
- },
- })
- }
+ // `devm_add_action()` was successful and has consumed the reference count.
+ core::mem::forget(devres_data);
- fn inner(&self) -> &Inner<T> {
- // SAFETY: By the type invairants of `Self`, `inner` is properly initialized and always
- // accessed read-only.
- unsafe { &*self.inner.get() }
+ Ok(Self {
+ dev: dev.into(),
+ callback,
+ data,
+ })
}
fn data(&self) -> &Revocable<T> {
- &self.inner().data
+ &self.data
}
#[allow(clippy::missing_safety_doc)]
unsafe extern "C" fn devres_callback(ptr: *mut kernel::ffi::c_void) {
- // SAFETY: In `Self::new` we've passed a valid pointer to `Inner` to `devm_add_action()`,
- // hence `ptr` must be a valid pointer to `Inner`.
- let inner = unsafe { &*ptr.cast::<Inner<T>>() };
-
- // Ensure that `inner` can't be used anymore after we signal completion of this callback.
- let inner = ScopeGuard::new_with_data(inner, |inner| inner.devm.complete_all());
+ // SAFETY: In `Self::new` we've passed a valid pointer of `Revocable<T>` to
+ // `devm_add_action()`, hence `ptr` must be a valid pointer to `Revocable<T>`.
+ let data = unsafe { Arc::from_raw(ptr.cast::<Revocable<T>>()) };
- if !inner.data.revoke() {
- // If `revoke()` returns false, it means that `Devres::drop` already started revoking
- // `data` for us. Hence we have to wait until `Devres::drop` signals that it
- // completed revoking `data`.
- inner.revoke.wait_for_completion();
- }
+ data.revoke();
}
fn remove_action(&self) -> bool {
@@ -218,7 +177,7 @@ impl<T: Send> Devres<T> {
bindings::devm_remove_action_nowarn(
self.dev.as_raw(),
Some(self.callback),
- core::ptr::from_ref(self.inner()).cast_mut().cast(),
+ core::ptr::from_ref(self.data()).cast_mut().cast(),
)
} == 0)
}
@@ -241,8 +200,16 @@ impl<T: Send> Devres<T> {
/// # Examples
///
/// ```no_run
- /// # #![cfg(CONFIG_PCI)]
- /// # use kernel::{device::Core, devres::Devres, pci};
+ /// #![cfg(CONFIG_PCI)]
+ /// use kernel::{
+ /// device::Core,
+ /// devres::Devres,
+ /// io::{
+ /// Io,
+ /// IoKnownSize, //
+ /// },
+ /// pci, //
+ /// };
///
/// fn from_core(dev: &pci::Device<Core>, devres: Devres<pci::Bar<0x4>>) -> Result {
/// let bar = devres.access(dev.as_ref())?;
@@ -289,31 +256,19 @@ unsafe impl<T: Send> Send for Devres<T> {}
// SAFETY: `Devres` can be shared with any task, if `T: Sync`.
unsafe impl<T: Send + Sync> Sync for Devres<T> {}
-#[pinned_drop]
-impl<T: Send> PinnedDrop for Devres<T> {
- fn drop(self: Pin<&mut Self>) {
+impl<T: Send> Drop for Devres<T> {
+ fn drop(&mut self) {
// SAFETY: When `drop` runs, it is guaranteed that nobody is accessing the revocable data
// anymore, hence it is safe not to wait for the grace period to finish.
if unsafe { self.data().revoke_nosync() } {
// We revoked `self.data` before the devres action did, hence try to remove it.
- if !self.remove_action() {
- // We could not remove the devres action, which means that it now runs concurrently,
- // hence signal that `self.data` has been revoked by us successfully.
- self.inner().revoke.complete_all();
-
- // Wait for `Self::devres_callback` to be done using this object.
- self.inner().devm.wait_for_completion();
+ if self.remove_action() {
+ // SAFETY: In `Self::new` we have taken an additional reference count of `self.data`
+ // for `devm_add_action()`. Since `remove_action()` was successful, we have to drop
+ // this additional reference count.
+ drop(unsafe { Arc::from_raw(Arc::as_ptr(&self.data)) });
}
- } else {
- // `Self::devres_callback` revokes `self.data` for us, hence wait for it to be done
- // using this object.
- self.inner().devm.wait_for_completion();
}
-
- // INVARIANT: At this point it is guaranteed that `inner` can't be accessed any more.
- //
- // SAFETY: `inner` is valid for dropping.
- unsafe { core::ptr::drop_in_place(self.inner.get()) };
}
}
@@ -345,7 +300,13 @@ where
/// # Examples
///
/// ```no_run
-/// use kernel::{device::{Bound, Device}, devres};
+/// use kernel::{
+/// device::{
+/// Bound,
+/// Device, //
+/// },
+/// devres, //
+/// };
///
/// /// Registration of e.g. a class device, IRQ, etc.
/// struct Registration;
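
The ownership handoff that the reworked `Devres::new()` relies on, shown as a generic sketch (not `Devres` itself): one extra `Arc` reference is leaked to the C side when the callback is registered, and exactly one of two paths reclaims it with `Arc::from_raw()`, either the devres callback or `Drop` after a successful `devm_remove_action_nowarn()`.

use kernel::sync::Arc;

/// Leak one reference to the C side; the returned cookie is what gets registered.
fn hand_over<T>(data: &Arc<T>) -> *mut core::ffi::c_void {
    let extra = data.clone();
    let ptr = Arc::as_ptr(&extra).cast_mut().cast();
    // The callee now logically owns this reference count.
    core::mem::forget(extra);
    ptr
}

/// # Safety
///
/// `ptr` must originate from [`hand_over`] and must be consumed at most once.
unsafe fn reclaim<T>(ptr: *mut core::ffi::c_void) -> Arc<T> {
    // SAFETY: per this function's safety contract, `ptr` carries the reference
    // count forgotten in `hand_over()`.
    unsafe { Arc::from_raw(ptr.cast::<T>()) }
}
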
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index 84d3c67269e8..909d56fd5118 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -27,8 +27,9 @@ pub type DmaAddress = bindings::dma_addr_t;
/// Trait to be implemented by DMA capable bus devices.
///
/// The [`dma::Device`](Device) trait should be implemented by bus specific device representations,
-/// where the underlying bus is DMA capable, such as [`pci::Device`](::kernel::pci::Device) or
-/// [`platform::Device`](::kernel::platform::Device).
+/// where the underlying bus is DMA capable, such as:
+#[cfg_attr(CONFIG_PCI, doc = "* [`pci::Device`](kernel::pci::Device)")]
+/// * [`platform::Device`](::kernel::platform::Device)
pub trait Device: AsRef<device::Device<Core>> {
/// Set up the device's DMA streaming addressing capabilities.
///
@@ -84,6 +85,23 @@ pub trait Device: AsRef<device::Device<Core>> {
bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
})
}
+
+ /// Set the maximum size of a single DMA segment the device may request.
+ ///
+ /// This method is usually called once from `probe()` as soon as the device capabilities are
+ /// known.
+ ///
+ /// # Safety
+ ///
+ /// This method must not be called concurrently with any DMA allocation or mapping primitives,
+ /// such as [`CoherentAllocation::alloc_attrs`].
+ unsafe fn dma_set_max_seg_size(&self, size: u32) {
+ // SAFETY:
+ // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
+ // - The safety requirement of this function guarantees that there are no concurrent calls
+ // to DMA allocation and mapping primitives using this parameter.
+ unsafe { bindings::dma_set_max_seg_size(self.as_ref().as_raw(), size) }
+ }
}
/// A DMA mask that holds a bitmask with the lowest `n` bits set.
@@ -532,8 +550,6 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
///
/// # Safety
///
- /// * Callers must ensure that the device does not read/write to/from memory while the returned
- /// slice is live.
/// * Callers must ensure that this call does not race with a read or write to the same region
/// that overlaps with this write.
///
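
A sketch of the intended call site for the new `dma_set_max_seg_size()`: called once from `probe()`, before any DMA allocations or mappings exist, which is what makes the safety requirement about concurrent DMA primitives easy to uphold. `MAX_SEG_SIZE` is a made-up device limit.

fn init_dma(dev: &impl kernel::dma::Device) {
    const MAX_SEG_SIZE: u32 = 64 * 1024;

    // SAFETY: called from probe() before any DMA allocation or mapping
    // primitives are used for this device, so nothing can race with the
    // segment size update.
    unsafe { dev.dma_set_max_seg_size(MAX_SEG_SIZE) };
}
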
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
index 9beae2e3d57e..36de8098754d 100644
--- a/rust/kernel/driver.rs
+++ b/rust/kernel/driver.rs
@@ -33,7 +33,14 @@
//! }
//! ```
//!
-//! For specific examples see [`auxiliary::Driver`], [`pci::Driver`] and [`platform::Driver`].
+//! For specific examples see:
+//!
+//! * [`platform::Driver`](kernel::platform::Driver)
+#![cfg_attr(
+ CONFIG_AUXILIARY_BUS,
+ doc = "* [`auxiliary::Driver`](kernel::auxiliary::Driver)"
+)]
+#![cfg_attr(CONFIG_PCI, doc = "* [`pci::Driver`](kernel::pci::Driver)")]
//!
//! The `probe()` callback should return a `impl PinInit<Self, Error>`, i.e. the driver's private
//! data. The bus abstraction should store the pointer in the corresponding bus device. The generic
@@ -79,7 +86,6 @@
//!
//! For this purpose the generic infrastructure in [`device_id`] should be used.
//!
-//! [`auxiliary::Driver`]: kernel::auxiliary::Driver
//! [`Core`]: device::Core
//! [`Device`]: device::Device
//! [`Device<Core>`]: device::Device<device::Core>
@@ -87,31 +93,53 @@
//! [`DeviceContext`]: device::DeviceContext
//! [`device_id`]: kernel::device_id
//! [`module_driver`]: kernel::module_driver
-//! [`pci::Driver`]: kernel::pci::Driver
-//! [`platform::Driver`]: kernel::platform::Driver
-use crate::error::{Error, Result};
-use crate::{acpi, device, of, str::CStr, try_pin_init, types::Opaque, ThisModule};
-use core::pin::Pin;
-use pin_init::{pin_data, pinned_drop, PinInit};
+use crate::{
+ acpi,
+ device,
+ of,
+ prelude::*,
+ types::Opaque,
+ ThisModule, //
+};
+
+/// Trait describing the layout of a specific device driver.
+///
+/// This trait describes the layout of a specific driver structure, such as `struct pci_driver` or
+/// `struct platform_driver`.
+///
+/// # Safety
+///
+/// Implementors must guarantee that:
+/// - `DriverType` is `repr(C)`,
+/// - `DriverData` is the type of the driver's device private data.
+/// - `DriverType` embeds a valid `struct device_driver` at byte offset `DEVICE_DRIVER_OFFSET`.
+pub unsafe trait DriverLayout {
+ /// The specific driver type embedding a `struct device_driver`.
+ type DriverType: Default;
+
+ /// The type of the driver's device private data.
+ type DriverData;
+
+ /// Byte offset of the embedded `struct device_driver` within `DriverType`.
+ ///
+ /// This must correspond exactly to the location of the embedded `struct device_driver` field.
+ const DEVICE_DRIVER_OFFSET: usize;
+}
/// The [`RegistrationOps`] trait serves as generic interface for subsystems (e.g., PCI, Platform,
/// Amba, etc.) to provide the corresponding subsystem specific implementation to register /
-/// unregister a driver of the particular type (`RegType`).
+/// unregister a driver of the particular type (`DriverType`).
///
-/// For instance, the PCI subsystem would set `RegType` to `bindings::pci_driver` and call
+/// For instance, the PCI subsystem would set `DriverType` to `bindings::pci_driver` and call
/// `bindings::__pci_register_driver` from `RegistrationOps::register` and
/// `bindings::pci_unregister_driver` from `RegistrationOps::unregister`.
///
/// # Safety
///
-/// A call to [`RegistrationOps::unregister`] for a given instance of `RegType` is only valid if a
-/// preceding call to [`RegistrationOps::register`] has been successful.
-pub unsafe trait RegistrationOps {
- /// The type that holds information about the registration. This is typically a struct defined
- /// by the C portion of the kernel.
- type RegType: Default;
-
+/// A call to [`RegistrationOps::unregister`] for a given instance of `DriverType` is only valid if
+/// a preceding call to [`RegistrationOps::register`] has been successful.
+pub unsafe trait RegistrationOps: DriverLayout {
/// Registers a driver.
///
/// # Safety
@@ -119,7 +147,7 @@ pub unsafe trait RegistrationOps {
/// On success, `reg` must remain pinned and valid until the matching call to
/// [`RegistrationOps::unregister`].
unsafe fn register(
- reg: &Opaque<Self::RegType>,
+ reg: &Opaque<Self::DriverType>,
name: &'static CStr,
module: &'static ThisModule,
) -> Result;
@@ -130,7 +158,7 @@ pub unsafe trait RegistrationOps {
///
/// Must only be called after a preceding successful call to [`RegistrationOps::register`] for
/// the same `reg`.
- unsafe fn unregister(reg: &Opaque<Self::RegType>);
+ unsafe fn unregister(reg: &Opaque<Self::DriverType>);
}
/// A [`Registration`] is a generic type that represents the registration of some driver type (e.g.
@@ -142,7 +170,7 @@ pub unsafe trait RegistrationOps {
#[pin_data(PinnedDrop)]
pub struct Registration<T: RegistrationOps> {
#[pin]
- reg: Opaque<T::RegType>,
+ reg: Opaque<T::DriverType>,
}
// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe to
@@ -153,17 +181,51 @@ unsafe impl<T: RegistrationOps> Sync for Registration<T> {}
// any thread, so `Registration` is `Send`.
unsafe impl<T: RegistrationOps> Send for Registration<T> {}
-impl<T: RegistrationOps> Registration<T> {
+impl<T: RegistrationOps + 'static> Registration<T> {
+ extern "C" fn post_unbind_callback(dev: *mut bindings::device) {
+ // SAFETY: The driver core only ever calls the post unbind callback with a valid pointer to
+ // a `struct device`.
+ //
+ // INVARIANT: `dev` is valid for the duration of the `post_unbind_callback()`.
+ let dev = unsafe { &*dev.cast::<device::Device<device::CoreInternal>>() };
+
+ // `remove()` and all devres callbacks have been completed at this point, hence drop the
+ // driver's device private data.
+ //
+ // SAFETY: By the safety requirements of the `DriverLayout` trait, `T::DriverData` is the
+ // driver's device private data type.
+ drop(unsafe { dev.drvdata_obtain::<T::DriverData>() });
+ }
+
+ /// Attach generic `struct device_driver` callbacks.
+ fn callbacks_attach(drv: &Opaque<T::DriverType>) {
+ let ptr = drv.get().cast::<u8>();
+
+ // SAFETY:
+ // - `drv.get()` yields a valid pointer to `Self::DriverType`.
+ // - Adding `DEVICE_DRIVER_OFFSET` yields the address of the embedded `struct device_driver`
+ // as guaranteed by the safety requirements of the `DriverLayout` trait.
+ let base = unsafe { ptr.add(T::DEVICE_DRIVER_OFFSET) };
+
+ // CAST: `base` points to the offset of the embedded `struct device_driver`.
+ let base = base.cast::<bindings::device_driver>();
+
+ // SAFETY: It is safe to set the fields of `struct device_driver` on initialization.
+ unsafe { (*base).p_cb.post_unbind_rust = Some(Self::post_unbind_callback) };
+ }
+
/// Creates a new instance of the registration object.
pub fn new(name: &'static CStr, module: &'static ThisModule) -> impl PinInit<Self, Error> {
try_pin_init!(Self {
- reg <- Opaque::try_ffi_init(|ptr: *mut T::RegType| {
+ reg <- Opaque::try_ffi_init(|ptr: *mut T::DriverType| {
// SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write.
- unsafe { ptr.write(T::RegType::default()) };
+ unsafe { ptr.write(T::DriverType::default()) };
// SAFETY: `try_ffi_init` guarantees that `ptr` is valid for write, and it has
// just been initialised above, so it's also valid for read.
- let drv = unsafe { &*(ptr as *const Opaque<T::RegType>) };
+ let drv = unsafe { &*(ptr as *const Opaque<T::DriverType>) };
+
+ Self::callbacks_attach(drv);
// SAFETY: `drv` is guaranteed to be pinned until `T::unregister`.
unsafe { T::register(drv, name, module) }
diff --git a/rust/kernel/drm/driver.rs b/rust/kernel/drm/driver.rs
index f30ee4c6245c..e09f977b5b51 100644
--- a/rust/kernel/drm/driver.rs
+++ b/rust/kernel/drm/driver.rs
@@ -121,7 +121,6 @@ pub trait Driver {
pub struct Registration<T: Driver>(ARef<drm::Device<T>>);
impl<T: Driver> Registration<T> {
- /// Creates a new [`Registration`] and registers it.
fn new(drm: &drm::Device<T>, flags: usize) -> Result<Self> {
// SAFETY: `drm.as_raw()` is valid by the invariants of `drm::Device`.
to_result(unsafe { bindings::drm_dev_register(drm.as_raw(), flags) })?;
@@ -129,8 +128,9 @@ impl<T: Driver> Registration<T> {
Ok(Self(drm.into()))
}
- /// Same as [`Registration::new`}, but transfers ownership of the [`Registration`] to
- /// [`devres::register`].
+ /// Registers a new [`Device`](drm::Device) with userspace.
+ ///
+ /// Ownership of the [`Registration`] object is passed to [`devres::register`].
pub fn new_foreign_owned(
drm: &drm::Device<T>,
dev: &device::Device<device::Bound>,
diff --git a/rust/kernel/drm/gem/mod.rs b/rust/kernel/drm/gem/mod.rs
index a7f682e95c01..d49a9ba02635 100644
--- a/rust/kernel/drm/gem/mod.rs
+++ b/rust/kernel/drm/gem/mod.rs
@@ -210,7 +210,7 @@ impl<T: DriverObject> Object<T> {
// SAFETY: The arguments are all valid per the type invariants.
to_result(unsafe { bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size) })?;
- // SAFETY: We never move out of `Self`.
+ // SAFETY: We will never move out of `Self` as `ARef<Self>` is always treated as pinned.
let ptr = KBox::into_raw(unsafe { Pin::into_inner_unchecked(obj) });
// SAFETY: `ptr` comes from `KBox::into_raw` and hence can't be NULL.
@@ -253,7 +253,7 @@ impl<T: DriverObject> Object<T> {
}
// SAFETY: Instances of `Object<T>` are always reference-counted.
-unsafe impl<T: DriverObject> crate::types::AlwaysRefCounted for Object<T> {
+unsafe impl<T: DriverObject> crate::sync::aref::AlwaysRefCounted for Object<T> {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::drm_gem_object_get(self.as_raw()) };
@@ -293,9 +293,7 @@ impl<T: DriverObject> AllocImpl for Object<T> {
}
pub(super) const fn create_fops() -> bindings::file_operations {
- // SAFETY: As by the type invariant, it is safe to initialize `bindings::file_operations`
- // zeroed.
- let mut fops: bindings::file_operations = unsafe { core::mem::zeroed() };
+ let mut fops: bindings::file_operations = pin_init::zeroed();
fops.owner = core::ptr::null_mut();
fops.open = Some(bindings::drm_open);
diff --git a/rust/kernel/faux.rs b/rust/kernel/faux.rs
index 7fe2dd197e37..43b4974f48cd 100644
--- a/rust/kernel/faux.rs
+++ b/rust/kernel/faux.rs
@@ -6,8 +6,17 @@
//!
//! C header: [`include/linux/device/faux.h`](srctree/include/linux/device/faux.h)
-use crate::{bindings, device, error::code::*, prelude::*};
-use core::ptr::{addr_of_mut, null, null_mut, NonNull};
+use crate::{
+ bindings,
+ device,
+ prelude::*, //
+};
+use core::ptr::{
+ addr_of_mut,
+ null,
+ null_mut,
+ NonNull, //
+};
/// The registration of a faux device.
///
diff --git a/rust/kernel/fmt.rs b/rust/kernel/fmt.rs
index 84d634201d90..1e8725eb44ed 100644
--- a/rust/kernel/fmt.rs
+++ b/rust/kernel/fmt.rs
@@ -6,7 +6,7 @@
pub use core::fmt::{Arguments, Debug, Error, Formatter, Result, Write};
-/// Internal adapter used to route allow implementations of formatting traits for foreign types.
+/// Internal adapter used to route and allow implementations of formatting traits for foreign types.
///
/// It is inserted automatically by the [`fmt!`] macro and is not meant to be used directly.
///
diff --git a/rust/kernel/i2c.rs b/rust/kernel/i2c.rs
index 491e6cc25cf4..bb5b830f48c3 100644
--- a/rust/kernel/i2c.rs
+++ b/rust/kernel/i2c.rs
@@ -92,13 +92,22 @@ macro_rules! i2c_device_table {
/// An adapter for the registration of I2C drivers.
pub struct Adapter<T: Driver>(T);
-// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// SAFETY:
+// - `bindings::i2c_driver` is a C type declared as `repr(C)`.
+// - `T` is the type of the driver's device private data.
+// - `struct i2c_driver` embeds a `struct device_driver`.
+// - `DEVICE_DRIVER_OFFSET` is the correct byte offset to the embedded `struct device_driver`.
+unsafe impl<T: Driver + 'static> driver::DriverLayout for Adapter<T> {
+ type DriverType = bindings::i2c_driver;
+ type DriverData = T;
+ const DEVICE_DRIVER_OFFSET: usize = core::mem::offset_of!(Self::DriverType, driver);
+}
+
+// SAFETY: A call to `unregister` for a given instance of `DriverType` is guaranteed to be valid if
// a preceding call to `register` has been successful.
unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
- type RegType = bindings::i2c_driver;
-
unsafe fn register(
- idrv: &Opaque<Self::RegType>,
+ idrv: &Opaque<Self::DriverType>,
name: &'static CStr,
module: &'static ThisModule,
) -> Result {
@@ -133,12 +142,12 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
(*idrv.get()).driver.acpi_match_table = acpi_table;
}
- // SAFETY: `idrv` is guaranteed to be a valid `RegType`.
+ // SAFETY: `idrv` is guaranteed to be a valid `DriverType`.
to_result(unsafe { bindings::i2c_register_driver(module.0, idrv.get()) })
}
- unsafe fn unregister(idrv: &Opaque<Self::RegType>) {
- // SAFETY: `idrv` is guaranteed to be a valid `RegType`.
+ unsafe fn unregister(idrv: &Opaque<Self::DriverType>) {
+ // SAFETY: `idrv` is guaranteed to be a valid `DriverType`.
unsafe { bindings::i2c_del_driver(idrv.get()) }
}
}
@@ -169,9 +178,9 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `I2cClient::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { idev.as_ref().drvdata_obtain::<T>() };
+ let data = unsafe { idev.as_ref().drvdata_borrow::<T>() };
- T::unbind(idev, data.as_ref());
+ T::unbind(idev, data);
}
extern "C" fn shutdown_callback(idev: *mut bindings::i2c_client) {
@@ -181,9 +190,9 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `shutdown_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { idev.as_ref().drvdata_obtain::<T>() };
+ let data = unsafe { idev.as_ref().drvdata_borrow::<T>() };
- T::shutdown(idev, data.as_ref());
+ T::shutdown(idev, data);
}
/// The [`i2c::IdTable`] of the corresponding driver.
@@ -253,7 +262,7 @@ macro_rules! module_i2c_driver {
/// # Example
///
///```
-/// # use kernel::{acpi, bindings, c_str, device::Core, i2c, of};
+/// # use kernel::{acpi, bindings, device::Core, i2c, of};
///
/// struct MyDriver;
///
@@ -262,7 +271,7 @@ macro_rules! module_i2c_driver {
/// MODULE_ACPI_TABLE,
/// <MyDriver as i2c::Driver>::IdInfo,
/// [
-/// (acpi::DeviceId::new(c_str!("LNUXBEEF")), ())
+/// (acpi::DeviceId::new(c"LNUXBEEF"), ())
/// ]
/// );
///
@@ -271,7 +280,7 @@ macro_rules! module_i2c_driver {
/// MODULE_I2C_TABLE,
/// <MyDriver as i2c::Driver>::IdInfo,
/// [
-/// (i2c::DeviceId::new(c_str!("rust_driver_i2c")), ())
+/// (i2c::DeviceId::new(c"rust_driver_i2c"), ())
/// ]
/// );
///
@@ -280,7 +289,7 @@ macro_rules! module_i2c_driver {
/// MODULE_OF_TABLE,
/// <MyDriver as i2c::Driver>::IdInfo,
/// [
-/// (of::DeviceId::new(c_str!("test,device")), ())
+/// (of::DeviceId::new(c"test,device"), ())
/// ]
/// );
///
diff --git a/rust/kernel/impl_flags.rs b/rust/kernel/impl_flags.rs
new file mode 100644
index 000000000000..e2bd7639da12
--- /dev/null
+++ b/rust/kernel/impl_flags.rs
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Bitflag type generator.
+
+/// Common helper for declaring bitflag and bitmask types.
+///
+/// This macro takes as input:
+/// - A struct declaration representing a bitmask type
+/// (e.g., `pub struct Permissions(u32)`).
+/// - An enumeration declaration representing individual bit flags
+/// (e.g., `pub enum Permission { ... }`).
+///
+/// And generates:
+/// - The struct and enum types with appropriate `#[repr]` attributes.
+/// - Implementations of common bitflag operators
+/// ([`::core::ops::BitOr`], [`::core::ops::BitAnd`], etc.).
+/// - Utility methods such as `.contains()` to check flags.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::impl_flags;
+///
+/// impl_flags!(
+/// /// Represents multiple permissions.
+/// #[derive(Debug, Clone, Default, Copy, PartialEq, Eq)]
+/// pub struct Permissions(u32);
+///
+/// /// Represents a single permission.
+/// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+/// pub enum Permission {
+/// /// Read permission.
+/// Read = 1 << 0,
+///
+/// /// Write permission.
+/// Write = 1 << 1,
+///
+/// /// Execute permission.
+/// Execute = 1 << 2,
+/// }
+/// );
+///
+/// // Combine multiple permissions using the bitwise OR (`|`) operator.
+/// let mut read_write: Permissions = Permission::Read | Permission::Write;
+/// assert!(read_write.contains(Permission::Read));
+/// assert!(read_write.contains(Permission::Write));
+/// assert!(!read_write.contains(Permission::Execute));
+/// assert!(read_write.contains_any(Permission::Read | Permission::Execute));
+/// assert!(read_write.contains_all(Permission::Read | Permission::Write));
+///
+/// // Using the bitwise OR assignment (`|=`) operator.
+/// read_write |= Permission::Execute;
+/// assert!(read_write.contains(Permission::Execute));
+///
+/// // Masking a permission with the bitwise AND (`&`) operator.
+/// let read_only: Permissions = read_write & Permission::Read;
+/// assert!(read_only.contains(Permission::Read));
+/// assert!(!read_only.contains(Permission::Write));
+///
+/// // Toggling permissions with the bitwise XOR (`^`) operator.
+/// let toggled: Permissions = read_only ^ Permission::Read;
+/// assert!(!toggled.contains(Permission::Read));
+///
+/// // Inverting permissions with the bitwise NOT (`!`) operator.
+/// let negated = !read_only;
+/// assert!(negated.contains(Permission::Write));
+/// assert!(!negated.contains(Permission::Read));
+/// ```
+#[macro_export]
+macro_rules! impl_flags {
+ (
+ $(#[$outer_flags:meta])*
+ $vis_flags:vis struct $flags:ident($ty:ty);
+
+ $(#[$outer_flag:meta])*
+ $vis_flag:vis enum $flag:ident {
+ $(
+ $(#[$inner_flag:meta])*
+ $name:ident = $value:expr
+ ),+ $( , )?
+ }
+ ) => {
+ $(#[$outer_flags])*
+ #[repr(transparent)]
+ $vis_flags struct $flags($ty);
+
+ $(#[$outer_flag])*
+ #[repr($ty)]
+ $vis_flag enum $flag {
+ $(
+ $(#[$inner_flag])*
+ $name = $value
+ ),+
+ }
+
+ impl ::core::convert::From<$flag> for $flags {
+ #[inline]
+ fn from(value: $flag) -> Self {
+ Self(value as $ty)
+ }
+ }
+
+ impl ::core::convert::From<$flags> for $ty {
+ #[inline]
+ fn from(value: $flags) -> Self {
+ value.0
+ }
+ }
+
+ impl ::core::ops::BitOr for $flags {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: Self) -> Self::Output {
+ Self(self.0 | rhs.0)
+ }
+ }
+
+ impl ::core::ops::BitOrAssign for $flags {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ *self = *self | rhs;
+ }
+ }
+
+ impl ::core::ops::BitOr<$flag> for $flags {
+ type Output = Self;
+ #[inline]
+ fn bitor(self, rhs: $flag) -> Self::Output {
+ self | Self::from(rhs)
+ }
+ }
+
+ impl ::core::ops::BitOrAssign<$flag> for $flags {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: $flag) {
+ *self = *self | rhs;
+ }
+ }
+
+ impl ::core::ops::BitAnd for $flags {
+ type Output = Self;
+ #[inline]
+ fn bitand(self, rhs: Self) -> Self::Output {
+ Self(self.0 & rhs.0)
+ }
+ }
+
+ impl ::core::ops::BitAndAssign for $flags {
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Self) {
+ *self = *self & rhs;
+ }
+ }
+
+ impl ::core::ops::BitAnd<$flag> for $flags {
+ type Output = Self;
+ #[inline]
+ fn bitand(self, rhs: $flag) -> Self::Output {
+ self & Self::from(rhs)
+ }
+ }
+
+ impl ::core::ops::BitAndAssign<$flag> for $flags {
+ #[inline]
+ fn bitand_assign(&mut self, rhs: $flag) {
+ *self = *self & rhs;
+ }
+ }
+
+ impl ::core::ops::BitXor for $flags {
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ Self((self.0 ^ rhs.0) & Self::all_bits())
+ }
+ }
+
+ impl ::core::ops::BitXorAssign for $flags {
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Self) {
+ *self = *self ^ rhs;
+ }
+ }
+
+ impl ::core::ops::BitXor<$flag> for $flags {
+ type Output = Self;
+ #[inline]
+ fn bitxor(self, rhs: $flag) -> Self::Output {
+ self ^ Self::from(rhs)
+ }
+ }
+
+ impl ::core::ops::BitXorAssign<$flag> for $flags {
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: $flag) {
+ *self = *self ^ rhs;
+ }
+ }
+
+ impl ::core::ops::Not for $flags {
+ type Output = Self;
+ #[inline]
+ fn not(self) -> Self::Output {
+ Self((!self.0) & Self::all_bits())
+ }
+ }
+
+ impl ::core::ops::BitOr for $flag {
+ type Output = $flags;
+ #[inline]
+ fn bitor(self, rhs: Self) -> Self::Output {
+ $flags(self as $ty | rhs as $ty)
+ }
+ }
+
+ impl ::core::ops::BitAnd for $flag {
+ type Output = $flags;
+ #[inline]
+ fn bitand(self, rhs: Self) -> Self::Output {
+ $flags(self as $ty & rhs as $ty)
+ }
+ }
+
+ impl ::core::ops::BitXor for $flag {
+ type Output = $flags;
+ #[inline]
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ $flags((self as $ty ^ rhs as $ty) & $flags::all_bits())
+ }
+ }
+
+ impl ::core::ops::Not for $flag {
+ type Output = $flags;
+ #[inline]
+ fn not(self) -> Self::Output {
+ $flags((!(self as $ty)) & $flags::all_bits())
+ }
+ }
+
+ impl $flags {
+ /// Returns an empty instance where no flags are set.
+ #[inline]
+ pub const fn empty() -> Self {
+ Self(0)
+ }
+
+ /// Returns a mask containing all valid flag bits.
+ #[inline]
+ pub const fn all_bits() -> $ty {
+ 0 $( | $value )+
+ }
+
+ /// Checks if a specific flag is set.
+ #[inline]
+ pub fn contains(self, flag: $flag) -> bool {
+ (self.0 & flag as $ty) == flag as $ty
+ }
+
+ /// Checks if at least one of the provided flags is set.
+ #[inline]
+ pub fn contains_any(self, flags: $flags) -> bool {
+ (self.0 & flags.0) != 0
+ }
+
+ /// Checks if all of the provided flags are set.
+ #[inline]
+ pub fn contains_all(self, flags: $flags) -> bool {
+ (self.0 & flags.0) == flags.0
+ }
+ }
+ };
+}
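As a supplementary sketch (reusing the `Permissions`/`Permission` types from the macro's doc test above), here are the generated constructors that the example does not exercise:

```
// `empty()` starts with no flags set.
let none = Permissions::empty();
assert!(!none.contains(Permission::Read));

// `all_bits()` is the raw mask of every declared flag: 0b111 for this enum.
assert_eq!(Permissions::all_bits(), 0b111);

// `Not` masks with `all_bits()`, so negating the empty set yields exactly the
// declared flags and nothing more.
let all = !Permissions::empty();
assert!(all.contains_all(Permission::Read | Permission::Write | Permission::Execute));
```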
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 899b9a962762..7a0d4559d7b5 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -219,20 +219,12 @@ pub trait InPlaceInit<T>: Sized {
/// [`Error`]: crate::error::Error
#[macro_export]
macro_rules! try_init {
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }) => {
- ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
- $($fields)*
- }? $crate::error::Error)
- };
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }? $err:ty) => {
- ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
- $($fields)*
- }? $err)
- };
+ ($($args:tt)*) => {
+ ::pin_init::init!(
+ #[default_error($crate::error::Error)]
+ $($args)*
+ )
+ }
}
/// Construct an in-place, fallible pinned initializer for `struct`s.
@@ -279,18 +271,10 @@ macro_rules! try_init {
/// [`Error`]: crate::error::Error
#[macro_export]
macro_rules! try_pin_init {
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }) => {
- ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
- $($fields)*
- }? $crate::error::Error)
- };
- ($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
- $($fields:tt)*
- }? $err:ty) => {
- ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
- $($fields)*
- }? $err)
- };
+ ($($args:tt)*) => {
+ ::pin_init::pin_init!(
+ #[default_error($crate::error::Error)]
+ $($args)*
+ )
+ }
}
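For illustration, a minimal sketch (with a hypothetical `Foo` type) of how the simplified wrapper is used; the error type now defaults to `kernel::error::Error` without spelling out `? Error`:

```
use kernel::prelude::*;

#[pin_data]
struct Foo {
    a: u32,
}

impl Foo {
    fn new() -> impl PinInit<Self, Error> {
        // The `#[default_error(...)]` injected by the wrapper supplies `Error`.
        try_pin_init!(Self { a: 42 })
    }
}
```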
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index 98e8b84e68d1..c1cca7b438c3 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -32,16 +32,16 @@ pub type ResourceSize = bindings::resource_size_t;
/// By itself, the existence of an instance of this structure does not provide any guarantees that
/// the represented MMIO region does exist or is properly mapped.
///
-/// Instead, the bus specific MMIO implementation must convert this raw representation into an `Io`
-/// instance providing the actual memory accessors. Only by the conversion into an `Io` structure
-/// any guarantees are given.
-pub struct IoRaw<const SIZE: usize = 0> {
+/// Instead, the bus specific MMIO implementation must convert this raw representation into an
+/// `Mmio` instance providing the actual memory accessors. Only the conversion into an `Mmio`
+/// structure provides any guarantees.
+pub struct MmioRaw<const SIZE: usize = 0> {
addr: usize,
maxsize: usize,
}
-impl<const SIZE: usize> IoRaw<SIZE> {
- /// Returns a new `IoRaw` instance on success, an error otherwise.
+impl<const SIZE: usize> MmioRaw<SIZE> {
+ /// Returns a new `MmioRaw` instance on success, an error otherwise.
pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
if maxsize < SIZE {
return Err(EINVAL);
@@ -81,14 +81,16 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// ffi::c_void,
/// io::{
/// Io,
-/// IoRaw,
+/// IoKnownSize,
+/// Mmio,
+/// MmioRaw,
/// PhysAddr,
/// },
/// };
/// use core::ops::Deref;
///
-/// // See also [`pci::Bar`] for a real example.
-/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
+/// // See also `pci::Bar` for a real example.
+/// struct IoMem<const SIZE: usize>(MmioRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
/// /// # Safety
@@ -103,7 +105,7 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// return Err(ENOMEM);
/// }
///
-/// Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
+/// Ok(IoMem(MmioRaw::new(addr as usize, SIZE)?))
/// }
/// }
///
@@ -115,11 +117,11 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
-/// type Target = Io<SIZE>;
+/// type Target = Mmio<SIZE>;
///
/// fn deref(&self) -> &Self::Target {
/// // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
-/// unsafe { Io::from_raw(&self.0) }
+/// unsafe { Mmio::from_raw(&self.0) }
/// }
/// }
///
@@ -133,104 +135,183 @@ impl<const SIZE: usize> IoRaw<SIZE> {
/// # }
/// ```
#[repr(transparent)]
-pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);
+pub struct Mmio<const SIZE: usize = 0>(MmioRaw<SIZE>);
+
+/// Internal helper macro used to invoke C MMIO read functions.
+///
+/// This macro is intended to be used by higher-level MMIO access macros (`define_read`) and provides
+/// a unified expansion for infallible vs. fallible read semantics. It emits a direct call into the
+/// corresponding C helper and performs the required cast to the Rust return type.
+///
+/// # Parameters
+///
+/// * `$c_fn` – The C function performing the MMIO read.
+/// * `$self` – The I/O backend object.
+/// * `$ty` – The type of the value to be read.
+/// * `$addr` – The MMIO address to read.
+///
+/// This macro does not perform any validation; all invariants must be upheld by the higher-level
+/// abstraction invoking it.
+macro_rules! call_mmio_read {
+ (infallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$c_fn($addr as *const c_void) as $type }
+ };
+
+ (fallible, $c_fn:ident, $self:ident, $type:ty, $addr:expr) => {{
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ Ok(unsafe { bindings::$c_fn($addr as *const c_void) as $type })
+ }};
+}
+
+/// Internal helper macro used to invoke C MMIO write functions.
+///
+/// This macro is intended to be used by higher-level MMIO access macros (`define_write`) and provides
+/// a unified expansion for infallible vs. fallible write semantics. It emits a direct call into the
+/// corresponding C helper and performs the required cast to the Rust return type.
+///
+/// # Parameters
+///
+/// * `$c_fn` – The C function performing the MMIO write.
+/// * `$self` – The I/O backend object.
+/// * `$ty` – The type of the written value.
+/// * `$addr` – The MMIO address to write to.
+/// * `$value` – The value to write.
+///
+/// This macro does not perform any validation; all invariants must be upheld by the higher-level
+/// abstraction invoking it.
+macro_rules! call_mmio_write {
+ (infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$c_fn($value, $addr as *mut c_void) }
+ };
+
+ (fallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {{
+ // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
+ unsafe { bindings::$c_fn($value, $addr as *mut c_void) };
+ Ok(())
+ }};
+}
macro_rules! define_read {
- ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
+ (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) ->
+ $type_name:ty) => {
/// Read IO data from a given offset known at compile time.
///
/// Bound checks are performed on compile time, hence if the offset is not known at compile
/// time, the build will fail.
$(#[$attr])*
- #[inline]
- pub fn $name(&self, offset: usize) -> $type_name {
+ // Always inline to optimize out error path of `io_addr_assert`.
+ #[inline(always)]
+ $vis fn $name(&self, offset: usize) -> $type_name {
let addr = self.io_addr_assert::<$type_name>(offset);
- // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(addr as *const c_void) }
+ // SAFETY: By the type invariant `addr` is a valid address for IO operations.
+ $call_macro!(infallible, $c_fn, self, $type_name, addr)
}
+ };
+ (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) ->
+ $type_name:ty) => {
/// Read IO data from a given offset.
///
/// Bound checks are performed on runtime, it fails if the offset (plus the type size) is
/// out of bounds.
$(#[$attr])*
- pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
+ $vis fn $try_name(&self, offset: usize) -> Result<$type_name> {
let addr = self.io_addr::<$type_name>(offset)?;
- // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
+ // SAFETY: By the type invariant `addr` is a valid address for IO operations.
+ $call_macro!(fallible, $c_fn, self, $type_name, addr)
}
};
}
+pub(crate) use define_read;
macro_rules! define_write {
- ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
+ (infallible, $(#[$attr:meta])* $vis:vis $name:ident, $call_macro:ident($c_fn:ident) <-
+ $type_name:ty) => {
/// Write IO data from a given offset known at compile time.
///
/// Bound checks are performed on compile time, hence if the offset is not known at compile
/// time, the build will fail.
$(#[$attr])*
- #[inline]
- pub fn $name(&self, value: $type_name, offset: usize) {
+ // Always inline to optimize out error path of `io_addr_assert`.
+ #[inline(always)]
+ $vis fn $name(&self, value: $type_name, offset: usize) {
let addr = self.io_addr_assert::<$type_name>(offset);
- // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(value, addr as *mut c_void) }
+ $call_macro!(infallible, $c_fn, self, $type_name, addr, value);
}
+ };
+ (fallible, $(#[$attr:meta])* $vis:vis $try_name:ident, $call_macro:ident($c_fn:ident) <-
+ $type_name:ty) => {
/// Write IO data from a given offset.
///
/// Bound checks are performed on runtime, it fails if the offset (plus the type size) is
/// out of bounds.
$(#[$attr])*
- pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
+ $vis fn $try_name(&self, value: $type_name, offset: usize) -> Result {
let addr = self.io_addr::<$type_name>(offset)?;
- // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
- unsafe { bindings::$c_fn(value, addr as *mut c_void) }
- Ok(())
+ $call_macro!(fallible, $c_fn, self, $type_name, addr, value)
}
};
}
-
-impl<const SIZE: usize> Io<SIZE> {
- /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
- ///
- /// # Safety
- ///
- /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
- /// `maxsize`.
- pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
- // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
- unsafe { &*core::ptr::from_ref(raw).cast() }
+pub(crate) use define_write;
+
+/// Checks whether an access of type `U` at the given `offset`
+/// is valid within a region of `size` bytes.
+#[inline]
+const fn offset_valid<U>(offset: usize, size: usize) -> bool {
+ let type_size = core::mem::size_of::<U>();
+ if let Some(end) = offset.checked_add(type_size) {
+ end <= size && offset % type_size == 0
+ } else {
+ false
}
+}
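To make the check concrete, a small standalone sketch mirroring `offset_valid` (the helper itself is crate-private): an access is valid only if it fits within the region and the offset is naturally aligned for the access width.

```
fn offset_valid_sketch(offset: usize, type_size: usize, size: usize) -> bool {
    match offset.checked_add(type_size) {
        // The access must end within the region and start on a naturally aligned offset.
        Some(end) => end <= size && offset % type_size == 0,
        // `offset + type_size` overflowed `usize`; certainly out of bounds.
        None => false,
    }
}

// A 4-byte access at offset 4 fits an 8-byte region and is aligned.
assert!(offset_valid_sketch(4, 4, 8));
// Offset 6 is misaligned for a 4-byte access (and would also overrun).
assert!(!offset_valid_sketch(6, 4, 8));
// An 8-byte access cannot fit in a 4-byte region at all.
assert!(!offset_valid_sketch(0, 8, 4));
```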
+
+/// Marker trait indicating that an I/O backend supports operations of a certain type.
+///
+/// Different I/O backends can implement this trait to expose only the operations they support.
+///
+/// For example, a PCI configuration space may implement `IoCapable<u8>`, `IoCapable<u16>`,
+/// and `IoCapable<u32>`, but not `IoCapable<u64>`, while an MMIO region on a 64-bit
+/// system might implement all four.
+pub trait IoCapable<T> {}
+/// Types implementing this trait (e.g. MMIO BARs or PCI config regions)
+/// can perform I/O operations on regions of memory.
+///
+/// This is an abstract representation to be implemented by arbitrary I/O
+/// backends (e.g. MMIO, PCI config space, etc.).
+///
+/// The [`Io`] trait provides:
+/// - Base address and size information
+/// - Helper methods for offset validation and address calculation
+/// - Fallible (runtime checked) accessors for different data widths
+///
+/// Which I/O methods are available depends on which [`IoCapable<T>`] traits
+/// are implemented for the type.
+///
+/// # Examples
+///
+/// For MMIO regions, all widths (u8, u16, u32, and u64 on 64-bit systems) are typically
+/// supported. For PCI configuration space, u8, u16, and u32 are supported but u64 is not.
+pub trait Io {
/// Returns the base address of this mapping.
- #[inline]
- pub fn addr(&self) -> usize {
- self.0.addr()
- }
+ fn addr(&self) -> usize;
/// Returns the maximum size of this mapping.
- #[inline]
- pub fn maxsize(&self) -> usize {
- self.0.maxsize()
- }
-
- #[inline]
- const fn offset_valid<U>(offset: usize, size: usize) -> bool {
- let type_size = core::mem::size_of::<U>();
- if let Some(end) = offset.checked_add(type_size) {
- end <= size && offset % type_size == 0
- } else {
- false
- }
- }
+ fn maxsize(&self) -> usize;
+ /// Returns the absolute I/O address for a given `offset`,
+ /// performing runtime bound checks.
#[inline]
fn io_addr<U>(&self, offset: usize) -> Result<usize> {
- if !Self::offset_valid::<U>(offset, self.maxsize()) {
+ if !offset_valid::<U>(offset, self.maxsize()) {
return Err(EINVAL);
}
@@ -239,50 +320,289 @@ impl<const SIZE: usize> Io<SIZE> {
self.addr().checked_add(offset).ok_or(EINVAL)
}
- #[inline]
+ /// Fallible 8-bit read with runtime bounds check.
+ #[inline(always)]
+ fn try_read8(&self, _offset: usize) -> Result<u8>
+ where
+ Self: IoCapable<u8>,
+ {
+ build_error!("Backend does not support fallible 8-bit read")
+ }
+
+ /// Fallible 16-bit read with runtime bounds check.
+ #[inline(always)]
+ fn try_read16(&self, _offset: usize) -> Result<u16>
+ where
+ Self: IoCapable<u16>,
+ {
+ build_error!("Backend does not support fallible 16-bit read")
+ }
+
+ /// Fallible 32-bit read with runtime bounds check.
+ #[inline(always)]
+ fn try_read32(&self, _offset: usize) -> Result<u32>
+ where
+ Self: IoCapable<u32>,
+ {
+ build_error!("Backend does not support fallible 32-bit read")
+ }
+
+ /// Fallible 64-bit read with runtime bounds check.
+ #[inline(always)]
+ fn try_read64(&self, _offset: usize) -> Result<u64>
+ where
+ Self: IoCapable<u64>,
+ {
+ build_error!("Backend does not support fallible 64-bit read")
+ }
+
+ /// Fallible 8-bit write with runtime bounds check.
+ #[inline(always)]
+ fn try_write8(&self, _value: u8, _offset: usize) -> Result
+ where
+ Self: IoCapable<u8>,
+ {
+ build_error!("Backend does not support fallible 8-bit write")
+ }
+
+ /// Fallible 16-bit write with runtime bounds check.
+ #[inline(always)]
+ fn try_write16(&self, _value: u16, _offset: usize) -> Result
+ where
+ Self: IoCapable<u16>,
+ {
+ build_error!("Backend does not support fallible 16-bit write")
+ }
+
+ /// Fallible 32-bit write with runtime bounds check.
+ #[inline(always)]
+ fn try_write32(&self, _value: u32, _offset: usize) -> Result
+ where
+ Self: IoCapable<u32>,
+ {
+ build_error!("Backend does not support fallible 32-bit write")
+ }
+
+ /// Fallible 64-bit write with runtime bounds check.
+ #[inline(always)]
+ fn try_write64(&self, _value: u64, _offset: usize) -> Result
+ where
+ Self: IoCapable<u64>,
+ {
+ build_error!("Backend does not support fallible 64-bit write")
+ }
+
+ /// Infallible 8-bit read with compile-time bounds check.
+ #[inline(always)]
+ fn read8(&self, _offset: usize) -> u8
+ where
+ Self: IoKnownSize + IoCapable<u8>,
+ {
+ build_error!("Backend does not support infallible 8-bit read")
+ }
+
+ /// Infallible 16-bit read with compile-time bounds check.
+ #[inline(always)]
+ fn read16(&self, _offset: usize) -> u16
+ where
+ Self: IoKnownSize + IoCapable<u16>,
+ {
+ build_error!("Backend does not support infallible 16-bit read")
+ }
+
+ /// Infallible 32-bit read with compile-time bounds check.
+ #[inline(always)]
+ fn read32(&self, _offset: usize) -> u32
+ where
+ Self: IoKnownSize + IoCapable<u32>,
+ {
+ build_error!("Backend does not support infallible 32-bit read")
+ }
+
+ /// Infallible 64-bit read with compile-time bounds check.
+ #[inline(always)]
+ fn read64(&self, _offset: usize) -> u64
+ where
+ Self: IoKnownSize + IoCapable<u64>,
+ {
+ build_error!("Backend does not support infallible 64-bit read")
+ }
+
+ /// Infallible 8-bit write with compile-time bounds check.
+ #[inline(always)]
+ fn write8(&self, _value: u8, _offset: usize)
+ where
+ Self: IoKnownSize + IoCapable<u8>,
+ {
+ build_error!("Backend does not support infallible 8-bit write")
+ }
+
+ /// Infallible 16-bit write with compile-time bounds check.
+ #[inline(always)]
+ fn write16(&self, _value: u16, _offset: usize)
+ where
+ Self: IoKnownSize + IoCapable<u16>,
+ {
+ build_error!("Backend does not support infallible 16-bit write")
+ }
+
+ /// Infallible 32-bit write with compile-time bounds check.
+ #[inline(always)]
+ fn write32(&self, _value: u32, _offset: usize)
+ where
+ Self: IoKnownSize + IoCapable<u32>,
+ {
+ build_error!("Backend does not support infallible 32-bit write")
+ }
+
+ /// Infallible 64-bit write with compile-time bounds check.
+ #[inline(always)]
+ fn write64(&self, _value: u64, _offset: usize)
+ where
+ Self: IoKnownSize + IoCapable<u64>,
+ {
+ build_error!("Backend does not support infallible 64-bit write")
+ }
+}
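As a hedged usage sketch of the trait split: backend-generic helpers only need to demand the access widths they actually use. The register offset and function below are hypothetical.

```
use kernel::io::{Io, IoCapable};
use kernel::prelude::*;

/// Reads a hypothetical 32-bit status register at offset 0x10 from any backend
/// that supports 32-bit accesses (an MMIO BAR, PCI config space, ...).
fn read_status<T: Io + IoCapable<u32>>(io: &T) -> Result<u32> {
    io.try_read32(0x10)
}
```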
+
+/// Trait for types with a known size at compile time.
+///
+/// This trait is implemented by I/O backends that have a compile-time known size,
+/// enabling the use of infallible I/O accessors with compile-time bounds checking.
+///
+/// Types implementing this trait can use the infallible methods of the [`Io`] trait
+/// (e.g., `read8`, `write32`), which require the `Self: IoKnownSize` bound.
+pub trait IoKnownSize: Io {
+ /// Minimum usable size of this region.
+ const MIN_SIZE: usize;
+
+ /// Returns the absolute I/O address for a given `offset`,
+ /// performing compile-time bound checks.
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
fn io_addr_assert<U>(&self, offset: usize) -> usize {
- build_assert!(Self::offset_valid::<U>(offset, SIZE));
+ build_assert!(offset_valid::<U>(offset, Self::MIN_SIZE));
self.addr() + offset
}
+}
+
+// MMIO regions support 8, 16, and 32-bit accesses.
+impl<const SIZE: usize> IoCapable<u8> for Mmio<SIZE> {}
+impl<const SIZE: usize> IoCapable<u16> for Mmio<SIZE> {}
+impl<const SIZE: usize> IoCapable<u32> for Mmio<SIZE> {}
+
+// MMIO regions on 64-bit systems also support 64-bit accesses.
+#[cfg(CONFIG_64BIT)]
+impl<const SIZE: usize> IoCapable<u64> for Mmio<SIZE> {}
+
+impl<const SIZE: usize> Io for Mmio<SIZE> {
+ /// Returns the base address of this mapping.
+ #[inline]
+ fn addr(&self) -> usize {
+ self.0.addr()
+ }
+
+ /// Returns the maximum size of this mapping.
+ #[inline]
+ fn maxsize(&self) -> usize {
+ self.0.maxsize()
+ }
- define_read!(read8, try_read8, readb -> u8);
- define_read!(read16, try_read16, readw -> u16);
- define_read!(read32, try_read32, readl -> u32);
+ define_read!(fallible, try_read8, call_mmio_read(readb) -> u8);
+ define_read!(fallible, try_read16, call_mmio_read(readw) -> u16);
+ define_read!(fallible, try_read32, call_mmio_read(readl) -> u32);
define_read!(
+ fallible,
#[cfg(CONFIG_64BIT)]
- read64,
try_read64,
- readq -> u64
+ call_mmio_read(readq) -> u64
+ );
+
+ define_write!(fallible, try_write8, call_mmio_write(writeb) <- u8);
+ define_write!(fallible, try_write16, call_mmio_write(writew) <- u16);
+ define_write!(fallible, try_write32, call_mmio_write(writel) <- u32);
+ define_write!(
+ fallible,
+ #[cfg(CONFIG_64BIT)]
+ try_write64,
+ call_mmio_write(writeq) <- u64
);
- define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
- define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
- define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
+ define_read!(infallible, read8, call_mmio_read(readb) -> u8);
+ define_read!(infallible, read16, call_mmio_read(readw) -> u16);
+ define_read!(infallible, read32, call_mmio_read(readl) -> u32);
define_read!(
+ infallible,
#[cfg(CONFIG_64BIT)]
- read64_relaxed,
- try_read64_relaxed,
- readq_relaxed -> u64
+ read64,
+ call_mmio_read(readq) -> u64
);
- define_write!(write8, try_write8, writeb <- u8);
- define_write!(write16, try_write16, writew <- u16);
- define_write!(write32, try_write32, writel <- u32);
+ define_write!(infallible, write8, call_mmio_write(writeb) <- u8);
+ define_write!(infallible, write16, call_mmio_write(writew) <- u16);
+ define_write!(infallible, write32, call_mmio_write(writel) <- u32);
define_write!(
+ infallible,
#[cfg(CONFIG_64BIT)]
write64,
- try_write64,
- writeq <- u64
+ call_mmio_write(writeq) <- u64
+ );
+}
+
+impl<const SIZE: usize> IoKnownSize for Mmio<SIZE> {
+ const MIN_SIZE: usize = SIZE;
+}
+
+impl<const SIZE: usize> Mmio<SIZE> {
+ /// Converts an `MmioRaw` into an `Mmio` instance, providing the accessors to the MMIO mapping.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `addr` is the start of a valid I/O mapped memory region of size
+ /// `maxsize`.
+ pub unsafe fn from_raw(raw: &MmioRaw<SIZE>) -> &Self {
+ // SAFETY: `Mmio` is a transparent wrapper around `MmioRaw`.
+ unsafe { &*core::ptr::from_ref(raw).cast() }
+ }
+
+ define_read!(infallible, pub read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
+ define_read!(infallible, pub read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
+ define_read!(infallible, pub read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
+ define_read!(
+ infallible,
+ #[cfg(CONFIG_64BIT)]
+ pub read64_relaxed,
+ call_mmio_read(readq_relaxed) -> u64
+ );
+
+ define_read!(fallible, pub try_read8_relaxed, call_mmio_read(readb_relaxed) -> u8);
+ define_read!(fallible, pub try_read16_relaxed, call_mmio_read(readw_relaxed) -> u16);
+ define_read!(fallible, pub try_read32_relaxed, call_mmio_read(readl_relaxed) -> u32);
+ define_read!(
+ fallible,
+ #[cfg(CONFIG_64BIT)]
+ pub try_read64_relaxed,
+ call_mmio_read(readq_relaxed) -> u64
+ );
+
+ define_write!(infallible, pub write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
+ define_write!(infallible, pub write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
+ define_write!(infallible, pub write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
+ define_write!(
+ infallible,
+ #[cfg(CONFIG_64BIT)]
+ pub write64_relaxed,
+ call_mmio_write(writeq_relaxed) <- u64
);
- define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
- define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
- define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
+ define_write!(fallible, pub try_write8_relaxed, call_mmio_write(writeb_relaxed) <- u8);
+ define_write!(fallible, pub try_write16_relaxed, call_mmio_write(writew_relaxed) <- u16);
+ define_write!(fallible, pub try_write32_relaxed, call_mmio_write(writel_relaxed) <- u32);
define_write!(
+ fallible,
#[cfg(CONFIG_64BIT)]
- write64_relaxed,
- try_write64_relaxed,
- writeq_relaxed <- u64
+ pub try_write64_relaxed,
+ call_mmio_write(writeq_relaxed) <- u64
);
}
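A short sketch of the resulting `Mmio` API, assuming a hypothetical 0x100-byte mapping and register layout: the infallible accessor is bounds-checked at compile time against `IoKnownSize::MIN_SIZE`, the fallible one at runtime.

```
use kernel::io::{Io, Mmio};
use kernel::prelude::*;

fn read_regs(bar: &Mmio<0x100>) -> Result<(u32, u32)> {
    // Offset known at compile time: checked via `build_assert!` against `MIN_SIZE`.
    let id = bar.read32(0x0);
    // Runtime-checked variant for offsets that are not compile-time constants.
    let status = bar.try_read32(0x10)?;
    Ok((id, status))
}
```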
diff --git a/rust/kernel/io/mem.rs b/rust/kernel/io/mem.rs
index b03b82cd531b..620022cff401 100644
--- a/rust/kernel/io/mem.rs
+++ b/rust/kernel/io/mem.rs
@@ -5,7 +5,6 @@
use core::ops::Deref;
use crate::{
- c_str,
device::{
Bound,
Device, //
@@ -17,8 +16,8 @@ use crate::{
Region,
Resource, //
},
- Io,
- IoRaw, //
+ Mmio,
+ MmioRaw, //
},
prelude::*,
};
@@ -52,7 +51,12 @@ impl<'a> IoRequest<'a> {
/// illustration purposes.
///
/// ```no_run
- /// use kernel::{bindings, c_str, platform, of, device::Core};
+ /// use kernel::{
+ /// bindings,
+ /// device::Core,
+ /// of,
+ /// platform,
+ /// };
/// struct SampleDriver;
///
/// impl platform::Driver for SampleDriver {
@@ -110,7 +114,12 @@ impl<'a> IoRequest<'a> {
/// illustration purposes.
///
/// ```no_run
- /// use kernel::{bindings, c_str, platform, of, device::Core};
+ /// use kernel::{
+ /// bindings,
+ /// device::Core,
+ /// of,
+ /// platform,
+ /// };
/// struct SampleDriver;
///
/// impl platform::Driver for SampleDriver {
@@ -172,7 +181,7 @@ impl<const SIZE: usize> ExclusiveIoMem<SIZE> {
fn ioremap(resource: &Resource) -> Result<Self> {
let start = resource.start();
let size = resource.size();
- let name = resource.name().unwrap_or(c_str!(""));
+ let name = resource.name().unwrap_or_default();
let region = resource
.request_region(
@@ -203,7 +212,7 @@ impl<const SIZE: usize> ExclusiveIoMem<SIZE> {
}
impl<const SIZE: usize> Deref for ExclusiveIoMem<SIZE> {
- type Target = Io<SIZE>;
+ type Target = Mmio<SIZE>;
fn deref(&self) -> &Self::Target {
&self.iomem
@@ -217,10 +226,10 @@ impl<const SIZE: usize> Deref for ExclusiveIoMem<SIZE> {
///
/// # Invariants
///
-/// [`IoMem`] always holds an [`IoRaw`] instance that holds a valid pointer to the
+/// [`IoMem`] always holds an [`MmioRaw`] instance that holds a valid pointer to the
/// start of the I/O memory mapped region.
pub struct IoMem<const SIZE: usize = 0> {
- io: IoRaw<SIZE>,
+ io: MmioRaw<SIZE>,
}
impl<const SIZE: usize> IoMem<SIZE> {
@@ -255,7 +264,7 @@ impl<const SIZE: usize> IoMem<SIZE> {
return Err(ENOMEM);
}
- let io = IoRaw::new(addr as usize, size)?;
+ let io = MmioRaw::new(addr as usize, size)?;
let io = IoMem { io };
Ok(io)
@@ -278,10 +287,10 @@ impl<const SIZE: usize> Drop for IoMem<SIZE> {
}
impl<const SIZE: usize> Deref for IoMem<SIZE> {
- type Target = Io<SIZE>;
+ type Target = Mmio<SIZE>;
fn deref(&self) -> &Self::Target {
// SAFETY: Safe as by the invariant of `IoMem`.
- unsafe { Io::from_raw(&self.io) }
+ unsafe { Mmio::from_raw(&self.io) }
}
}
diff --git a/rust/kernel/io/poll.rs b/rust/kernel/io/poll.rs
index b1a2570364f4..75d1b3e8596c 100644
--- a/rust/kernel/io/poll.rs
+++ b/rust/kernel/io/poll.rs
@@ -45,12 +45,16 @@ use crate::{
/// # Examples
///
/// ```no_run
-/// use kernel::io::{Io, poll::read_poll_timeout};
+/// use kernel::io::{
+/// Io,
+/// Mmio,
+/// poll::read_poll_timeout, //
+/// };
/// use kernel::time::Delta;
///
/// const HW_READY: u16 = 0x01;
///
-/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result {
+/// fn wait_for_hardware<const SIZE: usize>(io: &Mmio<SIZE>) -> Result {
/// read_poll_timeout(
/// // The `op` closure reads the value of a specific status register.
/// || io.try_read16(0x1000),
@@ -128,12 +132,16 @@ where
/// # Examples
///
/// ```no_run
-/// use kernel::io::{poll::read_poll_timeout_atomic, Io};
+/// use kernel::io::{
+/// Io,
+/// Mmio,
+/// poll::read_poll_timeout_atomic, //
+/// };
/// use kernel::time::Delta;
///
/// const HW_READY: u16 = 0x01;
///
-/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result {
+/// fn wait_for_hardware<const SIZE: usize>(io: &Mmio<SIZE>) -> Result {
/// read_poll_timeout_atomic(
/// // The `op` closure reads the value of a specific status register.
/// || io.try_read16(0x1000),
diff --git a/rust/kernel/io/resource.rs b/rust/kernel/io/resource.rs
index 56cfde97ce87..b7ac9faf141d 100644
--- a/rust/kernel/io/resource.rs
+++ b/rust/kernel/io/resource.rs
@@ -226,6 +226,8 @@ impl Flags {
/// Resource represents a memory region that must be ioremaped using `ioremap_np`.
pub const IORESOURCE_MEM_NONPOSTED: Flags = Flags::new(bindings::IORESOURCE_MEM_NONPOSTED);
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
const fn new(value: u32) -> Self {
crate::build_assert!(value as u64 <= c_ulong::MAX as u64);
Flags(value as c_ulong)
diff --git a/rust/kernel/iommu/mod.rs b/rust/kernel/iommu/mod.rs
new file mode 100644
index 000000000000..1423d7b19b57
--- /dev/null
+++ b/rust/kernel/iommu/mod.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust support related to IOMMU.
+
+pub mod pgtable;
diff --git a/rust/kernel/iommu/pgtable.rs b/rust/kernel/iommu/pgtable.rs
new file mode 100644
index 000000000000..c88e38fd938a
--- /dev/null
+++ b/rust/kernel/iommu/pgtable.rs
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IOMMU page table management.
+//!
+//! C header: [`include/linux/io-pgtable.h`](srctree/include/linux/io-pgtable.h)
+
+use core::{
+ marker::PhantomData,
+ ptr::NonNull, //
+};
+
+use crate::{
+ alloc,
+ bindings,
+ device::{
+ Bound,
+ Device, //
+ },
+ devres::Devres,
+ error::to_result,
+ io::PhysAddr,
+ prelude::*, //
+};
+
+use bindings::io_pgtable_fmt;
+
+/// Protection flags used with IOMMU mappings.
+pub mod prot {
+ /// Read access.
+ pub const READ: u32 = bindings::IOMMU_READ;
+ /// Write access.
+ pub const WRITE: u32 = bindings::IOMMU_WRITE;
+ /// Request cache coherency.
+ pub const CACHE: u32 = bindings::IOMMU_CACHE;
+ /// Request no-execute permission.
+ pub const NOEXEC: u32 = bindings::IOMMU_NOEXEC;
+ /// MMIO peripheral mapping.
+ pub const MMIO: u32 = bindings::IOMMU_MMIO;
+ /// Privileged mapping.
+ pub const PRIVILEGED: u32 = bindings::IOMMU_PRIV;
+}
+
+/// Represents a requested `io_pgtable` configuration.
+pub struct Config {
+ /// Quirk bitmask (type-specific).
+ pub quirks: usize,
+ /// Valid page sizes, as a bitmask of powers of two.
+ pub pgsize_bitmap: usize,
+ /// Input address space size in bits.
+ pub ias: u32,
+ /// Output address space size in bits.
+ pub oas: u32,
+ /// IOMMU uses coherent accesses for page table walks.
+ pub coherent_walk: bool,
+}
+
+/// An io page table using a specific format.
+///
+/// # Invariants
+///
+/// The pointer references a valid io page table.
+pub struct IoPageTable<F: IoPageTableFmt> {
+ ptr: NonNull<bindings::io_pgtable_ops>,
+ _marker: PhantomData<F>,
+}
+
+// SAFETY: `struct io_pgtable_ops` is not restricted to a single thread.
+unsafe impl<F: IoPageTableFmt> Send for IoPageTable<F> {}
+// SAFETY: `struct io_pgtable_ops` may be accessed concurrently.
+unsafe impl<F: IoPageTableFmt> Sync for IoPageTable<F> {}
+
+/// The format used by this page table.
+pub trait IoPageTableFmt: 'static {
+ /// The value representing this format.
+ const FORMAT: io_pgtable_fmt;
+}
+
+impl<F: IoPageTableFmt> IoPageTable<F> {
+ /// Create a new `IoPageTable` as a device resource.
+ #[inline]
+ pub fn new(
+ dev: &Device<Bound>,
+ config: Config,
+ ) -> impl PinInit<Devres<IoPageTable<F>>, Error> + '_ {
+ // SAFETY: Devres ensures that the value is dropped during device unbind.
+ Devres::new(dev, unsafe { Self::new_raw(dev, config) })
+ }
+
+ /// Create a new `IoPageTable`.
+ ///
+ /// # Safety
+ ///
+ /// If successful, then the returned `IoPageTable` must be dropped before the device is
+ /// unbound.
+ #[inline]
+ pub unsafe fn new_raw(dev: &Device<Bound>, config: Config) -> Result<IoPageTable<F>> {
+ let mut raw_cfg = bindings::io_pgtable_cfg {
+ quirks: config.quirks,
+ pgsize_bitmap: config.pgsize_bitmap,
+ ias: config.ias,
+ oas: config.oas,
+ coherent_walk: config.coherent_walk,
+ tlb: &raw const NOOP_FLUSH_OPS,
+ iommu_dev: dev.as_raw(),
+ // SAFETY: All zeroes is a valid value for `struct io_pgtable_cfg`.
+ ..unsafe { core::mem::zeroed() }
+ };
+
+ // SAFETY:
+ // * The raw_cfg pointer is valid for the duration of this call.
+ // * The provided `NOOP_FLUSH_OPS` contains valid function pointers that accept a null pointer
+ // as cookie.
+ // * The caller ensures that the io pgtable does not outlive the device.
+ let ops = unsafe {
+ bindings::alloc_io_pgtable_ops(F::FORMAT, &mut raw_cfg, core::ptr::null_mut())
+ };
+
+ // INVARIANT: We successfully created a valid page table.
+ Ok(IoPageTable {
+ ptr: NonNull::new(ops).ok_or(ENOMEM)?,
+ _marker: PhantomData,
+ })
+ }
+
+ /// Obtain a raw pointer to the underlying `struct io_pgtable_ops`.
+ #[inline]
+ pub fn raw_ops(&self) -> *mut bindings::io_pgtable_ops {
+ self.ptr.as_ptr()
+ }
+
+ /// Obtain a raw pointer to the underlying `struct io_pgtable`.
+ #[inline]
+ pub fn raw_pgtable(&self) -> *mut bindings::io_pgtable {
+ // SAFETY: The `io_pgtable_ops` of an io-pgtable is always the `ops` field of an `io_pgtable`.
+ unsafe { kernel::container_of!(self.raw_ops(), bindings::io_pgtable, ops) }
+ }
+
+ /// Obtain a raw pointer to the underlying `struct io_pgtable_cfg`.
+ #[inline]
+ pub fn raw_cfg(&self) -> *mut bindings::io_pgtable_cfg {
+ // SAFETY: The `raw_pgtable()` method returns a valid pointer.
+ unsafe { &raw mut (*self.raw_pgtable()).cfg }
+ }
+
+ /// Map a physically contiguous range of pages of the same size.
+ ///
+ /// Even if successful, this operation may not map the entire range. In that case, only a
+ /// prefix of the range is mapped, and the returned integer indicates its length in bytes. The
+ /// caller will usually call `map_pages` again for the remaining range.
+ ///
+ /// The returned [`Result`] indicates whether an error was encountered while mapping pages.
+ /// Note that this may return a non-zero length even if an error was encountered. The caller
+ /// will usually [unmap the relevant pages](Self::unmap_pages) on error.
+ ///
+ /// The caller must flush the TLB before using the pgtable to access the newly created mapping.
+ ///
+ /// # Safety
+ ///
+ /// * No other io-pgtable operation may access the range `iova .. iova+pgsize*pgcount` while
+ /// this `map_pages` operation executes.
+ /// * This page table must not contain any mapping that overlaps with the mapping created by
+ /// this call.
+ /// * If this page table is live, then the caller must ensure that it's okay to access the
+ /// physical address being mapped for the duration in which it is mapped.
+ #[inline]
+ pub unsafe fn map_pages(
+ &self,
+ iova: usize,
+ paddr: PhysAddr,
+ pgsize: usize,
+ pgcount: usize,
+ prot: u32,
+ flags: alloc::Flags,
+ ) -> (usize, Result) {
+ let mut mapped: usize = 0;
+
+ // SAFETY: The `map_pages` function in `io_pgtable_ops` is never null.
+ let map_pages = unsafe { (*self.raw_ops()).map_pages.unwrap_unchecked() };
+
+ // SAFETY: The safety requirements of this method are sufficient to call `map_pages`.
+ let ret = to_result(unsafe {
+ (map_pages)(
+ self.raw_ops(),
+ iova,
+ paddr,
+ pgsize,
+ pgcount,
+ prot as i32,
+ flags.as_raw(),
+ &mut mapped,
+ )
+ });
+
+ (mapped, ret)
+ }
+
+ /// Unmap a range of virtually contiguous pages of the same size.
+ ///
+ /// This may not unmap the entire range; it returns the length in bytes of the prefix that was
+ /// actually unmapped.
+ ///
+ /// # Safety
+ ///
+ /// * No other io-pgtable operation may access the range `iova .. iova+pgsize*pgcount` while
+ /// this `unmap_pages` operation executes.
+ /// * This page table must contain one or more consecutive mappings starting at `iova` whose
+ /// total size is `pgcount * pgsize`.
+ #[inline]
+ #[must_use]
+ pub unsafe fn unmap_pages(&self, iova: usize, pgsize: usize, pgcount: usize) -> usize {
+ // SAFETY: The `unmap_pages` function in `io_pgtable_ops` is never null.
+ let unmap_pages = unsafe { (*self.raw_ops()).unmap_pages.unwrap_unchecked() };
+
+ // SAFETY: The safety requirements of this method are sufficient to call `unmap_pages`.
+ unsafe { (unmap_pages)(self.raw_ops(), iova, pgsize, pgcount, core::ptr::null_mut()) }
+ }
+}
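A hedged end-to-end sketch (the configuration values, IOVA and page size are assumptions): allocate a table for a bound device and map a single 4 KiB page. The `unsafe` blocks restate the contracts documented above.

```
use kernel::device::{Bound, Device};
use kernel::io::PhysAddr;
use kernel::iommu::pgtable::{prot, Config, IoPageTable, IoPageTableFmt};
use kernel::prelude::*;

fn map_one_page<F: IoPageTableFmt>(dev: &Device<Bound>, paddr: PhysAddr) -> Result {
    let config = Config {
        quirks: 0,
        // Hypothetical: only 4 KiB pages are supported.
        pgsize_bitmap: 1 << 12,
        ias: 39,
        oas: 40,
        coherent_walk: true,
    };

    // SAFETY: for this sketch, assume the table is dropped before the device is unbound;
    // `IoPageTable::new()` plus `Devres` would enforce this automatically.
    let pgtable = unsafe { IoPageTable::<F>::new_raw(dev, config) }?;

    // SAFETY: assume nothing else touches IOVA 0..4096, the range is not yet mapped, and
    // `paddr` stays accessible for as long as the mapping exists.
    let (_mapped, ret) = unsafe {
        pgtable.map_pages(0, paddr, 4096, 1, prot::READ | prot::WRITE, GFP_KERNEL)
    };
    ret
}
```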
+
+// For the initial users of these rust bindings, the GPU FW is managing the IOTLB and performs all
+// required invalidations using a range. There is no need for it to get ARM-style invalidation
+// instructions from the page table code.
+//
+// Support for flushing the TLB with ARM style invalidation instructions may be added in the
+// future.
+static NOOP_FLUSH_OPS: bindings::iommu_flush_ops = bindings::iommu_flush_ops {
+ tlb_flush_all: Some(rust_tlb_flush_all_noop),
+ tlb_flush_walk: Some(rust_tlb_flush_walk_noop),
+ tlb_add_page: None,
+};
+
+#[no_mangle]
+extern "C" fn rust_tlb_flush_all_noop(_cookie: *mut core::ffi::c_void) {}
+
+#[no_mangle]
+extern "C" fn rust_tlb_flush_walk_noop(
+ _iova: usize,
+ _size: usize,
+ _granule: usize,
+ _cookie: *mut core::ffi::c_void,
+) {
+}
+
+impl<F: IoPageTableFmt> Drop for IoPageTable<F> {
+ fn drop(&mut self) {
+ // SAFETY: The caller of `Self::ttbr()` promised that the page table is not live when this
+ // destructor runs.
+ unsafe { bindings::free_io_pgtable_ops(self.raw_ops()) };
+ }
+}
+
+/// The `ARM_64_LPAE_S1` page table format.
+pub enum ARM64LPAES1 {}
+
+impl IoPageTableFmt for ARM64LPAES1 {
+ const FORMAT: io_pgtable_fmt = bindings::io_pgtable_fmt_ARM_64_LPAE_S1 as io_pgtable_fmt;
+}
+
+impl IoPageTable<ARM64LPAES1> {
+ /// Access the `ttbr` field of the configuration.
+ ///
+ /// This is the physical address of the page table, which may be passed to the device that
+ /// needs to use it.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the device stops using the page table before dropping it.
+ #[inline]
+ pub unsafe fn ttbr(&self) -> u64 {
+ // SAFETY: `arm_lpae_s1_cfg` is the right cfg type for `ARM64LPAES1`.
+ unsafe { (*self.raw_cfg()).__bindgen_anon_1.arm_lpae_s1_cfg.ttbr }
+ }
+
+ /// Access the `mair` field of the configuration.
+ #[inline]
+ pub fn mair(&self) -> u64 {
+ // SAFETY: `arm_lpae_s1_cfg` is the right cfg type for `ARM64LPAES1`.
+ unsafe { (*self.raw_cfg()).__bindgen_anon_1.arm_lpae_s1_cfg.mair }
+ }
+}
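And a final hedged sketch of consuming the stage-1 accessors; `program_gpu_context` is a stand-in for whatever device-specific mechanism actually receives these values:

```
use kernel::iommu::pgtable::{IoPageTable, ARM64LPAES1};

fn program_gpu_context(_ttbr: u64, _mair: u64) {
    // Hypothetical hook: e.g. hand the values to device firmware.
}

fn hand_off(pgtable: &IoPageTable<ARM64LPAES1>) {
    // SAFETY: assume the device is told to stop walking the table before it is dropped,
    // as `ttbr()` requires.
    let ttbr = unsafe { pgtable.ttbr() };
    let mair = pgtable.mair();
    program_gpu_context(ttbr, mair);
}
```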
diff --git a/rust/kernel/irq/flags.rs b/rust/kernel/irq/flags.rs
index adfde96ec47c..d26e25af06ee 100644
--- a/rust/kernel/irq/flags.rs
+++ b/rust/kernel/irq/flags.rs
@@ -96,6 +96,8 @@ impl Flags {
self.0
}
+ // Always inline to optimize out error path of `build_assert`.
+ #[inline(always)]
const fn new(value: u32) -> Self {
build_assert!(value as u64 <= c_ulong::MAX as u64);
Self(value as c_ulong)
diff --git a/rust/kernel/irq/request.rs b/rust/kernel/irq/request.rs
index b150563fdef8..67769800117c 100644
--- a/rust/kernel/irq/request.rs
+++ b/rust/kernel/irq/request.rs
@@ -139,7 +139,6 @@ impl<'a> IrqRequest<'a> {
/// [`Completion::wait_for_completion()`]: kernel::sync::Completion::wait_for_completion
///
/// ```
-/// use kernel::c_str;
/// use kernel::device::{Bound, Device};
/// use kernel::irq::{self, Flags, IrqRequest, IrqReturn, Registration};
/// use kernel::prelude::*;
@@ -167,7 +166,7 @@ impl<'a> IrqRequest<'a> {
/// handler: impl PinInit<Data, Error>,
/// request: IrqRequest<'_>,
/// ) -> Result<Arc<Registration<Data>>> {
-/// let registration = Registration::new(request, Flags::SHARED, c_str!("my_device"), handler);
+/// let registration = Registration::new(request, Flags::SHARED, c"my_device", handler);
///
/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
///
@@ -340,7 +339,6 @@ impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
/// [`Mutex`](kernel::sync::Mutex) to provide interior mutability.
///
/// ```
-/// use kernel::c_str;
/// use kernel::device::{Bound, Device};
/// use kernel::irq::{
/// self, Flags, IrqRequest, IrqReturn, ThreadedHandler, ThreadedIrqReturn,
@@ -381,7 +379,7 @@ impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
/// request: IrqRequest<'_>,
/// ) -> Result<Arc<ThreadedRegistration<Data>>> {
/// let registration =
-/// ThreadedRegistration::new(request, Flags::SHARED, c_str!("my_device"), handler);
+/// ThreadedRegistration::new(request, Flags::SHARED, c"my_device", handler);
///
/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
///
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
index 79436509dd73..f93f24a60bdd 100644
--- a/rust/kernel/kunit.rs
+++ b/rust/kernel/kunit.rs
@@ -9,9 +9,6 @@
use crate::fmt;
use crate::prelude::*;
-#[cfg(CONFIG_PRINTK)]
-use crate::c_str;
-
/// Prints a KUnit error-level message.
///
/// Public but hidden since it should only be used from KUnit generated code.
@@ -22,7 +19,7 @@ pub fn err(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- c_str!("\x013%pA").as_char_ptr(),
+ c"\x013%pA".as_char_ptr(),
core::ptr::from_ref(&args).cast::<c_void>(),
);
}
@@ -38,7 +35,7 @@ pub fn info(args: fmt::Arguments<'_>) {
#[cfg(CONFIG_PRINTK)]
unsafe {
bindings::_printk(
- c_str!("\x016%pA").as_char_ptr(),
+ c"\x016%pA".as_char_ptr(),
core::ptr::from_ref(&args).cast::<c_void>(),
);
}
@@ -60,7 +57,7 @@ macro_rules! kunit_assert {
break 'out;
}
- static FILE: &'static $crate::str::CStr = $crate::c_str!($file);
+ static FILE: &'static $crate::str::CStr = $file;
static LINE: i32 = ::core::line!() as i32 - $diff;
static CONDITION: &'static $crate::str::CStr = $crate::c_str!(stringify!($condition));
@@ -192,9 +189,6 @@ pub fn is_test_result_ok(t: impl TestResult) -> bool {
}
/// Represents an individual test case.
-///
-/// The [`kunit_unsafe_test_suite!`] macro expects a NULL-terminated list of valid test cases.
-/// Use [`kunit_case_null`] to generate such a delimiter.
#[doc(hidden)]
pub const fn kunit_case(
name: &'static kernel::str::CStr,
@@ -215,32 +209,11 @@ pub const fn kunit_case(
}
}
-/// Represents the NULL test case delimiter.
-///
-/// The [`kunit_unsafe_test_suite!`] macro expects a NULL-terminated list of test cases. This
-/// function returns such a delimiter.
-#[doc(hidden)]
-pub const fn kunit_case_null() -> kernel::bindings::kunit_case {
- kernel::bindings::kunit_case {
- run_case: None,
- name: core::ptr::null_mut(),
- generate_params: None,
- attr: kernel::bindings::kunit_attributes {
- speed: kernel::bindings::kunit_speed_KUNIT_SPEED_NORMAL,
- },
- status: kernel::bindings::kunit_status_KUNIT_SUCCESS,
- module_name: core::ptr::null_mut(),
- log: core::ptr::null_mut(),
- param_init: None,
- param_exit: None,
- }
-}
-
/// Registers a KUnit test suite.
///
/// # Safety
///
-/// `test_cases` must be a NULL terminated array of valid test cases,
+/// `test_cases` must be a `NULL` terminated array of valid test cases,
/// whose lifetime is at least that of the test suite (i.e., static).
///
/// # Examples
@@ -253,8 +226,8 @@ pub const fn kunit_case_null() -> kernel::bindings::kunit_case {
/// }
///
/// static mut KUNIT_TEST_CASES: [kernel::bindings::kunit_case; 2] = [
-/// kernel::kunit::kunit_case(kernel::c_str!("name"), test_fn),
-/// kernel::kunit::kunit_case_null(),
+/// kernel::kunit::kunit_case(c"name", test_fn),
+/// pin_init::zeroed(),
/// ];
/// kernel::kunit_unsafe_test_suite!(suite_name, KUNIT_TEST_CASES);
/// ```
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index f812cf120042..3da92f18f4ee 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -100,9 +100,12 @@ pub mod fs;
#[cfg(CONFIG_I2C = "y")]
pub mod i2c;
pub mod id_pool;
+#[doc(hidden)]
+pub mod impl_flags;
pub mod init;
pub mod io;
pub mod ioctl;
+pub mod iommu;
pub mod iov;
pub mod irq;
pub mod jump_label;
@@ -133,11 +136,14 @@ pub mod pwm;
pub mod rbtree;
pub mod regulator;
pub mod revocable;
+pub mod safety;
pub mod scatterlist;
pub mod security;
pub mod seq_file;
pub mod sizes;
pub mod slice;
+#[cfg(CONFIG_SOC_BUS)]
+pub mod soc;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs
index d92bcf665c89..2282f33913ee 100644
--- a/rust/kernel/list/arc.rs
+++ b/rust/kernel/list/arc.rs
@@ -6,11 +6,11 @@
use crate::alloc::{AllocError, Flags};
use crate::prelude::*;
+use crate::sync::atomic::{ordering, Atomic};
use crate::sync::{Arc, ArcBorrow, UniqueArc};
use core::marker::PhantomPinned;
use core::ops::Deref;
use core::pin::Pin;
-use core::sync::atomic::{AtomicBool, Ordering};
/// Declares that this type has some way to ensure that there is exactly one `ListArc` instance for
/// this id.
@@ -469,7 +469,7 @@ where
/// If the boolean is `false`, then there is no [`ListArc`] for this value.
#[repr(transparent)]
pub struct AtomicTracker<const ID: u64 = 0> {
- inner: AtomicBool,
+ inner: Atomic<bool>,
// This value needs to be pinned to justify the INVARIANT: comment in `AtomicTracker::new`.
_pin: PhantomPinned,
}
@@ -480,12 +480,12 @@ impl<const ID: u64> AtomicTracker<ID> {
// INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
// not be constructed in an `Arc` that already has a `ListArc`.
Self {
- inner: AtomicBool::new(false),
+ inner: Atomic::new(false),
_pin: PhantomPinned,
}
}
- fn project_inner(self: Pin<&mut Self>) -> &mut AtomicBool {
+ fn project_inner(self: Pin<&mut Self>) -> &mut Atomic<bool> {
// SAFETY: The `inner` field is not structurally pinned, so we may obtain a mutable
// reference to it even if we only have a pinned reference to `self`.
unsafe { &mut Pin::into_inner_unchecked(self).inner }
@@ -500,7 +500,7 @@ impl<const ID: u64> ListArcSafe<ID> for AtomicTracker<ID> {
unsafe fn on_drop_list_arc(&self) {
// INVARIANT: We just dropped a ListArc, so the boolean should be false.
- self.inner.store(false, Ordering::Release);
+ self.inner.store(false, ordering::Release);
}
}
@@ -514,8 +514,6 @@ unsafe impl<const ID: u64> TryNewListArc<ID> for AtomicTracker<ID> {
fn try_new_list_arc(&self) -> bool {
// INVARIANT: If this method returns true, then the boolean used to be false, and is no
// longer false, so it is okay for the caller to create a new [`ListArc`].
- self.inner
- .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
- .is_ok()
+ self.inner.cmpxchg(false, true, ordering::Acquire).is_ok()
}
}
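The same `Atomic<bool>` claim/release pattern in isolation, as a minimal sketch built only from the operations used above (`new`, `cmpxchg`, `store`):

```
use kernel::sync::atomic::{ordering, Atomic};

let taken = Atomic::new(false);
// The first claim observes `false`, flips the flag, and succeeds...
assert!(taken.cmpxchg(false, true, ordering::Acquire).is_ok());
// ...a second claim observes `true` and fails.
assert!(taken.cmpxchg(false, true, ordering::Acquire).is_err());
// Dropping the claim releases the flag again.
taken.store(false, ordering::Release);
```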
diff --git a/rust/kernel/maple_tree.rs b/rust/kernel/maple_tree.rs
index e72eec56bf57..265d6396a78a 100644
--- a/rust/kernel/maple_tree.rs
+++ b/rust/kernel/maple_tree.rs
@@ -265,7 +265,16 @@ impl<T: ForeignOwnable> MapleTree<T> {
loop {
// This uses the raw accessor because we're destroying pointers without removing them
// from the maple tree, which is only valid because this is the destructor.
- let ptr = ma_state.mas_find_raw(usize::MAX);
+ //
+ // Take the rcu lock because mas_find_raw() requires that you hold either the spinlock
+ // or the rcu read lock. This is only really required if memory reclaim might
+ // reallocate entries in the tree, as we otherwise have exclusive access. That feature
+ // doesn't exist yet, so for now, taking the rcu lock only serves the purpose of
+ // silencing lockdep.
+ let ptr = {
+ let _rcu = kernel::sync::rcu::Guard::new();
+ ma_state.mas_find_raw(usize::MAX)
+ };
if ptr.is_null() {
break;
}
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index d698cddcb4a5..c3c2052c9206 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -20,7 +20,7 @@ use crate::{
seq_file::SeqFile,
types::{ForeignOwnable, Opaque},
};
-use core::{marker::PhantomData, mem::MaybeUninit, pin::Pin};
+use core::{marker::PhantomData, pin::Pin};
/// Options for creating a misc device.
#[derive(Copy, Clone)]
@@ -32,8 +32,7 @@ pub struct MiscDeviceOptions {
impl MiscDeviceOptions {
/// Create a raw `struct miscdev` ready for registration.
pub const fn into_raw<T: MiscDevice>(self) -> bindings::miscdevice {
- // SAFETY: All zeros is valid for this C type.
- let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
+ let mut result: bindings::miscdevice = pin_init::zeroed();
result.minor = bindings::MISC_DYNAMIC_MINOR as ffi::c_int;
result.name = crate::str::as_char_ptr_in_const_context(self.name);
result.fops = MiscdeviceVTable::<T>::build();
@@ -411,7 +410,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
compat_ioctl: if T::HAS_COMPAT_IOCTL {
Some(Self::compat_ioctl)
} else if T::HAS_IOCTL {
- Some(bindings::compat_ptr_ioctl)
+ bindings::compat_ptr_ioctl
} else {
None
},
@@ -420,8 +419,7 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
} else {
None
},
- // SAFETY: All zeros is a valid value for `bindings::file_operations`.
- ..unsafe { MaybeUninit::zeroed().assume_init() }
+ ..pin_init::zeroed()
};
const fn build() -> &'static bindings::file_operations {
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index bf6272d87a7b..3ca99db5cccf 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -777,7 +777,6 @@ impl DeviceMask {
///
/// ```
/// # mod module_phy_driver_sample {
-/// use kernel::c_str;
/// use kernel::net::phy::{self, DeviceId};
/// use kernel::prelude::*;
///
@@ -796,7 +795,7 @@ impl DeviceMask {
///
/// #[vtable]
/// impl phy::Driver for PhySample {
-/// const NAME: &'static CStr = c_str!("PhySample");
+/// const NAME: &'static CStr = c"PhySample";
/// const PHY_DEVICE_ID: phy::DeviceId = phy::DeviceId::new_with_exact_mask(0x00000001);
/// }
/// # }
@@ -805,7 +804,6 @@ impl DeviceMask {
/// This expands to the following code:
///
/// ```ignore
-/// use kernel::c_str;
/// use kernel::net::phy::{self, DeviceId};
/// use kernel::prelude::*;
///
@@ -825,7 +823,7 @@ impl DeviceMask {
///
/// #[vtable]
/// impl phy::Driver for PhySample {
-/// const NAME: &'static CStr = c_str!("PhySample");
+/// const NAME: &'static CStr = c"PhySample";
/// const PHY_DEVICE_ID: phy::DeviceId = phy::DeviceId::new_with_exact_mask(0x00000001);
/// }
///
diff --git a/rust/kernel/num/bounded.rs b/rust/kernel/num/bounded.rs
index f870080af8ac..fa81acbdc8c2 100644
--- a/rust/kernel/num/bounded.rs
+++ b/rust/kernel/num/bounded.rs
@@ -40,11 +40,11 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
fits_within!(value, T, num_bits)
}
-/// An integer value that requires only the `N` less significant bits of the wrapped type to be
+/// An integer value that requires only the `N` least significant bits of the wrapped type to be
/// encoded.
///
/// This limits the number of usable bits in the wrapped integer type, and thus the stored value to
-/// a narrower range, which provides guarantees that can be useful when working with in e.g.
+/// a narrower range, which provides guarantees that can be useful when working within e.g.
/// bitfields.
///
/// # Invariants
@@ -56,7 +56,7 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
/// # Examples
///
/// The preferred way to create values is through constants and the [`Bounded::new`] family of
-/// constructors, as they trigger a build error if the type invariants cannot be withheld.
+/// constructors, as they trigger a build error if the type invariants cannot be upheld.
///
/// ```
/// use kernel::num::Bounded;
@@ -82,7 +82,7 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
/// ```
/// use kernel::num::Bounded;
///
-/// // This succeeds because `15` can be represented with 4 unsigned bits.
+/// // This succeeds because `15` can be represented with 4 unsigned bits.
/// assert!(Bounded::<u8, 4>::try_new(15).is_some());
///
/// // This fails because `16` cannot be represented with 4 unsigned bits.
@@ -221,7 +221,7 @@ fn fits_within<T: Integer>(value: T, num_bits: u32) -> bool {
/// let v: Option<Bounded<u16, 8>> = 128u32.try_into_bounded();
/// assert_eq!(v.as_deref().copied(), Some(128));
///
-/// // Fails because `128` doesn't fits into 6 bits.
+/// // Fails because `128` doesn't fit into 6 bits.
/// let v: Option<Bounded<u16, 6>> = 128u32.try_into_bounded();
/// assert_eq!(v, None);
/// ```
@@ -259,9 +259,9 @@ macro_rules! impl_const_new {
assert!(fits_within!(VALUE, $type, N));
}
- // INVARIANT: `fits_within` confirmed that `VALUE` can be represented within
+ // SAFETY: `fits_within` confirmed that `VALUE` can be represented within
// `N` bits.
- Self::__new(VALUE)
+ unsafe { Self::__new(VALUE) }
}
}
)*
@@ -282,9 +282,10 @@ where
/// All instances of [`Bounded`] must be created through this method as it enforces most of the
/// type invariants.
///
- /// The caller remains responsible for checking, either statically or dynamically, that `value`
- /// can be represented as a `T` using at most `N` bits.
- const fn __new(value: T) -> Self {
+ /// # Safety
+ ///
+ /// The caller must ensure that `value` can be represented within `N` bits.
+ const unsafe fn __new(value: T) -> Self {
// Enforce the type invariants.
const {
// `N` cannot be zero.
@@ -293,6 +294,7 @@ where
assert!(N <= T::BITS);
}
+ // INVARIANT: The caller ensures `value` fits within `N` bits.
Self(value)
}
@@ -328,8 +330,8 @@ where
/// ```
pub fn try_new(value: T) -> Option<Self> {
fits_within(value, N).then(|| {
- // INVARIANT: `fits_within` confirmed that `value` can be represented within `N` bits.
- Self::__new(value)
+ // SAFETY: `fits_within` confirmed that `value` can be represented within `N` bits.
+ unsafe { Self::__new(value) }
})
}
@@ -363,6 +365,7 @@ where
/// assert_eq!(Bounded::<u8, 1>::from_expr(1).get(), 1);
/// assert_eq!(Bounded::<u16, 8>::from_expr(0xff).get(), 0xff);
/// ```
+ // Always inline to optimize out the error path of `build_assert`.
#[inline(always)]
pub fn from_expr(expr: T) -> Self {
crate::build_assert!(
@@ -370,8 +373,8 @@ where
"Requested value larger than maximal representable value."
);
- // INVARIANT: `fits_within` confirmed that `expr` can be represented within `N` bits.
- Self::__new(expr)
+ // SAFETY: `fits_within` confirmed that `expr` can be represented within `N` bits.
+ unsafe { Self::__new(expr) }
}
/// Returns the wrapped value as the backing type.
@@ -410,9 +413,9 @@ where
);
}
- // INVARIANT: The value did fit within `N` bits, so it will all the more fit within
+ // SAFETY: The value did fit within `N` bits, so it will all the more fit within
// the larger `M` bits.
- Bounded::__new(self.0)
+ unsafe { Bounded::__new(self.0) }
}
/// Attempts to shrink the number of bits usable for `self`.
@@ -466,9 +469,9 @@ where
// `U` and `T` have the same sign, hence this conversion cannot fail.
let value = unsafe { U::try_from(self.get()).unwrap_unchecked() };
- // INVARIANT: Although the backing type has changed, the value is still represented within
+ // SAFETY: Although the backing type has changed, the value is still represented within
// `N` bits, and with the same signedness.
- Bounded::__new(value)
+ unsafe { Bounded::__new(value) }
}
}
@@ -501,7 +504,7 @@ where
/// let v: Option<Bounded<u16, 8>> = 128u32.try_into_bounded();
/// assert_eq!(v.as_deref().copied(), Some(128));
///
-/// // Fails because `128` doesn't fits into 6 bits.
+/// // Fails because `128` doesn't fit into 6 bits.
/// let v: Option<Bounded<u16, 6>> = 128u32.try_into_bounded();
/// assert_eq!(v, None);
/// ```
@@ -944,9 +947,9 @@ macro_rules! impl_from_primitive {
Self: AtLeastXBits<{ <$type as Integer>::BITS as usize }>,
{
fn from(value: $type) -> Self {
- // INVARIANT: The trait bound on `Self` guarantees that `N` bits is
+ // SAFETY: The trait bound on `Self` guarantees that `N` bits is
// enough to hold any value of the source type.
- Self::__new(T::from(value))
+ unsafe { Self::__new(T::from(value)) }
}
}
)*
@@ -1051,8 +1054,8 @@ where
T: Integer + From<bool>,
{
fn from(value: bool) -> Self {
- // INVARIANT: A boolean can be represented using a single bit, and thus fits within any
+ // SAFETY: A boolean can be represented using a single bit, and thus fits within any
// integer type for any `N` > 0.
- Self::__new(T::from(value))
+ unsafe { Self::__new(T::from(value)) }
}
}
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index 432fc0297d4a..adecb200c654 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -25,14 +25,36 @@ pub const PAGE_SIZE: usize = bindings::PAGE_SIZE;
/// A bitmask that gives the page containing a given address.
pub const PAGE_MASK: usize = !(PAGE_SIZE - 1);
-/// Round up the given number to the next multiple of [`PAGE_SIZE`].
+/// Rounds up to the next multiple of [`PAGE_SIZE`].
///
-/// It is incorrect to pass an address where the next multiple of [`PAGE_SIZE`] doesn't fit in a
-/// [`usize`].
-pub const fn page_align(addr: usize) -> usize {
- // Parentheses around `PAGE_SIZE - 1` to avoid triggering overflow sanitizers in the wrong
- // cases.
- (addr + (PAGE_SIZE - 1)) & PAGE_MASK
+/// Returns [`None`] on integer overflow.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::page::{
+/// page_align,
+/// PAGE_SIZE,
+/// };
+///
+/// // Requested address is already aligned.
+/// assert_eq!(page_align(0x0), Some(0x0));
+/// assert_eq!(page_align(PAGE_SIZE), Some(PAGE_SIZE));
+///
+/// // Requested address needs alignment up.
+/// assert_eq!(page_align(0x1), Some(PAGE_SIZE));
+/// assert_eq!(page_align(PAGE_SIZE + 1), Some(2 * PAGE_SIZE));
+///
+/// // Requested address causes overflow (returns `None`).
+/// let overflow_addr = usize::MAX - (PAGE_SIZE / 2);
+/// assert_eq!(page_align(overflow_addr), None);
+/// ```
+#[inline(always)]
+pub const fn page_align(addr: usize) -> Option<usize> {
+ let Some(sum) = addr.checked_add(PAGE_SIZE - 1) else {
+ return None;
+ };
+ Some(sum & PAGE_MASK)
}
/// Representation of a non-owning reference to a [`Page`].
diff --git a/rust/kernel/pci.rs b/rust/kernel/pci.rs
index 82e128431f08..af74ddff6114 100644
--- a/rust/kernel/pci.rs
+++ b/rust/kernel/pci.rs
@@ -40,7 +40,14 @@ pub use self::id::{
ClassMask,
Vendor, //
};
-pub use self::io::Bar;
+pub use self::io::{
+ Bar,
+ ConfigSpace,
+ ConfigSpaceKind,
+ ConfigSpaceSize,
+ Extended,
+ Normal, //
+};
pub use self::irq::{
IrqType,
IrqTypes,
@@ -50,13 +57,22 @@ pub use self::irq::{
/// An adapter for the registration of PCI drivers.
pub struct Adapter<T: Driver>(T);
-// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// SAFETY:
+// - `bindings::pci_driver` is a C type declared as `repr(C)`.
+// - `T` is the type of the driver's device private data.
+// - `struct pci_driver` embeds a `struct device_driver`.
+// - `DEVICE_DRIVER_OFFSET` is the correct byte offset to the embedded `struct device_driver`.
+unsafe impl<T: Driver + 'static> driver::DriverLayout for Adapter<T> {
+ type DriverType = bindings::pci_driver;
+ type DriverData = T;
+ const DEVICE_DRIVER_OFFSET: usize = core::mem::offset_of!(Self::DriverType, driver);
+}
+
+// SAFETY: A call to `unregister` for a given instance of `DriverType` is guaranteed to be valid if
// a preceding call to `register` has been successful.
unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
- type RegType = bindings::pci_driver;
-
unsafe fn register(
- pdrv: &Opaque<Self::RegType>,
+ pdrv: &Opaque<Self::DriverType>,
name: &'static CStr,
module: &'static ThisModule,
) -> Result {
@@ -68,14 +84,14 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
(*pdrv.get()).id_table = T::ID_TABLE.as_ptr();
}
- // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ // SAFETY: `pdrv` is guaranteed to be a valid `DriverType`.
to_result(unsafe {
bindings::__pci_register_driver(pdrv.get(), module.0, name.as_char_ptr())
})
}
- unsafe fn unregister(pdrv: &Opaque<Self::RegType>) {
- // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ unsafe fn unregister(pdrv: &Opaque<Self::DriverType>) {
+ // SAFETY: `pdrv` is guaranteed to be a valid `DriverType`.
unsafe { bindings::pci_unregister_driver(pdrv.get()) }
}
}
@@ -114,9 +130,9 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { pdev.as_ref().drvdata_obtain::<T>() };
+ let data = unsafe { pdev.as_ref().drvdata_borrow::<T>() };
- T::unbind(pdev, data.as_ref());
+ T::unbind(pdev, data);
}
}
@@ -342,7 +358,7 @@ impl Device {
/// // Get an instance of `Vendor`.
/// let vendor = pdev.vendor_id();
/// dev_info!(
- /// pdev.as_ref(),
+ /// pdev,
/// "Device: Vendor={}, Device=0x{:x}\n",
/// vendor,
/// pdev.device_id()
diff --git a/rust/kernel/pci/id.rs b/rust/kernel/pci/id.rs
index c09125946d9e..50005d176561 100644
--- a/rust/kernel/pci/id.rs
+++ b/rust/kernel/pci/id.rs
@@ -22,7 +22,7 @@ use crate::{
/// fn probe_device(pdev: &pci::Device<Core>) -> Result {
/// let pci_class = pdev.pci_class();
/// dev_info!(
-/// pdev.as_ref(),
+/// pdev,
/// "Detected PCI class: {}\n",
/// pci_class
/// );
@@ -416,7 +416,6 @@ define_all_pci_vendors! {
MICROSEMI = bindings::PCI_VENDOR_ID_MICROSEMI, // 0x11f8
RP = bindings::PCI_VENDOR_ID_RP, // 0x11fe
CYCLADES = bindings::PCI_VENDOR_ID_CYCLADES, // 0x120e
- ESSENTIAL = bindings::PCI_VENDOR_ID_ESSENTIAL, // 0x120f
O2 = bindings::PCI_VENDOR_ID_O2, // 0x1217
THREEDX = bindings::PCI_VENDOR_ID_3DFX, // 0x121a
AVM = bindings::PCI_VENDOR_ID_AVM, // 0x1244
diff --git a/rust/kernel/pci/io.rs b/rust/kernel/pci/io.rs
index 0d55c3139b6f..6ca4cf75594c 100644
--- a/rust/kernel/pci/io.rs
+++ b/rust/kernel/pci/io.rs
@@ -8,23 +8,186 @@ use crate::{
device,
devres::Devres,
io::{
+ define_read,
+ define_write,
Io,
- IoRaw, //
+ IoCapable,
+ IoKnownSize,
+ Mmio,
+ MmioRaw, //
},
prelude::*,
sync::aref::ARef, //
};
-use core::ops::Deref;
+use core::{
+ marker::PhantomData,
+ ops::Deref, //
+};
+
+/// Represents the size of a PCI configuration space.
+///
+/// PCI devices can have either a *normal* (legacy) configuration space of 256 bytes,
+/// or an *extended* configuration space of 4096 bytes as defined in the PCI Express
+/// specification.
+#[repr(usize)]
+#[derive(Eq, PartialEq)]
+pub enum ConfigSpaceSize {
+ /// 256-byte legacy PCI configuration space.
+ Normal = 256,
+
+ /// 4096-byte PCIe extended configuration space.
+ Extended = 4096,
+}
+
+impl ConfigSpaceSize {
+ /// Get the raw value of this enum.
+ #[inline(always)]
+ pub const fn into_raw(self) -> usize {
+ // CAST: PCI configuration space size is at most 4096 bytes, so the value always fits
+ // within `usize` without truncation or sign change.
+ self as usize
+ }
+}
+
+/// Marker type for normal (256-byte) PCI configuration space.
+pub struct Normal;
+
+/// Marker type for extended (4096-byte) PCIe configuration space.
+pub struct Extended;
+
+/// Trait for PCI configuration space size markers.
+///
+/// This trait is implemented by [`Normal`] and [`Extended`] to provide
+/// compile-time knowledge of the configuration space size.
+pub trait ConfigSpaceKind {
+ /// The size of this configuration space in bytes.
+ const SIZE: usize;
+}
+
+impl ConfigSpaceKind for Normal {
+ const SIZE: usize = 256;
+}
+
+impl ConfigSpaceKind for Extended {
+ const SIZE: usize = 4096;
+}
+
+/// The PCI configuration space of a device.
+///
+/// Provides typed read and write accessors for configuration registers
+/// using the standard `pci_read_config_*` and `pci_write_config_*` helpers.
+///
+/// The generic parameter `S` indicates the maximum size of the configuration space.
+/// Use [`Normal`] for 256-byte legacy configuration space or [`Extended`] for
+/// 4096-byte PCIe extended configuration space (default).
+pub struct ConfigSpace<'a, S: ConfigSpaceKind = Extended> {
+ pub(crate) pdev: &'a Device<device::Bound>,
+ _marker: PhantomData<S>,
+}
+
+/// Internal helper macro used to invoke the C PCI configuration space read functions.
+///
+/// This macro is intended to be used by higher-level PCI configuration space access macros
+/// (such as `define_read!`) and provides a unified expansion for infallible vs. fallible read
+/// semantics. It emits a direct call into the corresponding C helper and performs the required
+/// cast of the offset to the type expected by the C API.
+///
+/// # Parameters
+///
+/// * `$c_fn` – The C function performing the PCI configuration space read.
+/// * `$self` – The I/O backend object.
+/// * `$ty` – The type of the value to read.
+/// * `$addr` – The PCI configuration space offset to read.
+///
+/// This macro does not perform any validation; all invariants must be upheld by the higher-level
+/// abstraction invoking it.
+macro_rules! call_config_read {
+ (infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr) => {{
+ let mut val: $ty = 0;
+ // SAFETY: By the type invariant `$self.pdev` is a valid address.
+ // CAST: The offset is cast to `i32` because the C functions expect a 32-bit signed offset
+ // parameter. PCI configuration space size is at most 4096 bytes, so the value always fits
+ // within `i32` without truncation or sign change.
+ // Return value from C function is ignored in infallible accessors.
+ let _ret = unsafe { bindings::$c_fn($self.pdev.as_raw(), $addr as i32, &mut val) };
+ val
+ }};
+}
+
+/// Internal helper macro used to invoke the C PCI configuration space write functions.
+///
+/// This macro is intended to be used by higher-level PCI configuration space access macros
+/// (such as `define_write!`) and provides a unified expansion for infallible vs. fallible write
+/// semantics. It emits a direct call into the corresponding C helper and performs the required
+/// cast of the offset to the type expected by the C API.
+///
+/// # Parameters
+///
+/// * `$c_fn` – The C function performing the PCI configuration space write.
+/// * `$self` – The I/O backend object.
+/// * `$ty` – The type of the written value.
+/// * `$addr` – The configuration space offset to write.
+/// * `$value` – The value to write.
+///
+/// This macro does not perform any validation; all invariants must be upheld by the higher-level
+/// abstraction invoking it.
+macro_rules! call_config_write {
+ (infallible, $c_fn:ident, $self:ident, $ty:ty, $addr:expr, $value:expr) => {
+ // SAFETY: By the type invariant `$self.pdev` is a valid address.
+ // CAST: The offset is cast to `i32` because the C functions expect a 32-bit signed offset
+ // parameter. PCI configuration space size is at most 4096 bytes, so the value always fits
+ // within `i32` without truncation or sign change.
+ // Return value from C function is ignored in infallible accessors.
+ let _ret = unsafe { bindings::$c_fn($self.pdev.as_raw(), $addr as i32, $value) };
+ };
+}
+
+// PCI configuration space supports 8, 16, and 32-bit accesses.
+impl<'a, S: ConfigSpaceKind> IoCapable<u8> for ConfigSpace<'a, S> {}
+impl<'a, S: ConfigSpaceKind> IoCapable<u16> for ConfigSpace<'a, S> {}
+impl<'a, S: ConfigSpaceKind> IoCapable<u32> for ConfigSpace<'a, S> {}
+
+impl<'a, S: ConfigSpaceKind> Io for ConfigSpace<'a, S> {
+ /// Returns the base address of the I/O region. It is always 0 for configuration space.
+ #[inline]
+ fn addr(&self) -> usize {
+ 0
+ }
+
+ /// Returns the maximum size of the configuration space.
+ #[inline]
+ fn maxsize(&self) -> usize {
+ self.pdev.cfg_size().into_raw()
+ }
+
+ // PCI configuration space does not support fallible operations.
+ // The default implementations from the Io trait are not used.
+
+ define_read!(infallible, read8, call_config_read(pci_read_config_byte) -> u8);
+ define_read!(infallible, read16, call_config_read(pci_read_config_word) -> u16);
+ define_read!(infallible, read32, call_config_read(pci_read_config_dword) -> u32);
+
+ define_write!(infallible, write8, call_config_write(pci_write_config_byte) <- u8);
+ define_write!(infallible, write16, call_config_write(pci_write_config_word) <- u16);
+ define_write!(infallible, write32, call_config_write(pci_write_config_dword) <- u32);
+}
+
+impl<'a, S: ConfigSpaceKind> IoKnownSize for ConfigSpace<'a, S> {
+ const MIN_SIZE: usize = S::SIZE;
+}
/// A PCI BAR to perform I/O-Operations on.
///
+/// The I/O backend assumes that the device is little-endian and will automatically
+/// convert from little-endian to CPU endianness.
+///
/// # Invariants
///
-/// `Bar` always holds an `IoRaw` inststance that holds a valid pointer to the start of the I/O
+/// `Bar` always holds an `MmioRaw` instance with a valid pointer to the start of the I/O
/// memory mapped PCI BAR and its size.
pub struct Bar<const SIZE: usize = 0> {
pdev: ARef<Device>,
- io: IoRaw<SIZE>,
+ io: MmioRaw<SIZE>,
num: i32,
}
@@ -54,13 +217,13 @@ impl<const SIZE: usize> Bar<SIZE> {
let ioptr: usize = unsafe { bindings::pci_iomap(pdev.as_raw(), num, 0) } as usize;
if ioptr == 0 {
// SAFETY:
- // `pdev` valid by the invariants of `Device`.
+ // `pdev` is valid by the invariants of `Device`.
// `num` is checked for validity by a previous call to `Device::resource_len`.
unsafe { bindings::pci_release_region(pdev.as_raw(), num) };
return Err(ENOMEM);
}
- let io = match IoRaw::new(ioptr, len as usize) {
+ let io = match MmioRaw::new(ioptr, len as usize) {
Ok(io) => io,
Err(err) => {
// SAFETY:
@@ -114,11 +277,11 @@ impl<const SIZE: usize> Drop for Bar<SIZE> {
}
impl<const SIZE: usize> Deref for Bar<SIZE> {
- type Target = Io<SIZE>;
+ type Target = Mmio<SIZE>;
fn deref(&self) -> &Self::Target {
// SAFETY: By the type invariant of `Self`, the MMIO range in `self.io` is properly mapped.
- unsafe { Io::from_raw(&self.io) }
+ unsafe { Mmio::from_raw(&self.io) }
}
}
@@ -141,4 +304,39 @@ impl Device<device::Bound> {
) -> impl PinInit<Devres<Bar>, Error> + 'a {
self.iomap_region_sized::<0>(bar, name)
}
+
+ /// Returns the size of configuration space.
+ pub fn cfg_size(&self) -> ConfigSpaceSize {
+ // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
+ let size = unsafe { (*self.as_raw()).cfg_size };
+ match size {
+ 256 => ConfigSpaceSize::Normal,
+ 4096 => ConfigSpaceSize::Extended,
+ _ => {
+ // PANIC: The PCI core only ever reports the configuration space size as either 256
+ // (`ConfigSpaceSize::Normal`) or 4096 (`ConfigSpaceSize::Extended`) bytes.
+ unreachable!();
+ }
+ }
+ }
+
+ /// Return an initialized normal (256-byte) config space object.
+ pub fn config_space<'a>(&'a self) -> ConfigSpace<'a, Normal> {
+ ConfigSpace {
+ pdev: self,
+ _marker: PhantomData,
+ }
+ }
+
+ /// Return an initialized extended (4096-byte) config space object.
+ pub fn config_space_extended<'a>(&'a self) -> Result<ConfigSpace<'a, Extended>> {
+ if self.cfg_size() != ConfigSpaceSize::Extended {
+ return Err(EINVAL);
+ }
+
+ Ok(ConfigSpace {
+ pdev: self,
+ _marker: PhantomData,
+ })
+ }
}
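Putting the new pieces together, driver code can read and write configuration registers through a `ConfigSpace` handle obtained from the bound device. A minimal sketch; the register offsets are illustrative literals, and the `read16(offset)`/`read32(offset)` accessor shape is assumed to match the `Io` read helpers generated by `define_read!`:

```rust
use kernel::{device, pci, prelude::*};

/// Illustrative helper that dumps a couple of configuration space registers.
fn dump_config(pdev: &pci::Device<device::Bound>) -> Result {
    // The 256-byte (normal) configuration space is always available.
    let cfg = pdev.config_space();
    let vendor = cfg.read16(0x00); // PCI vendor ID register, offset given as a literal.
    dev_info!(pdev, "vendor id from config space: {:#x}\n", vendor);

    // The 4096-byte (extended) space is only available if the device reports it;
    // `config_space_extended()` returns `EINVAL` otherwise.
    if let Ok(ext) = pdev.config_space_extended() {
        let _first_extended_dword = ext.read32(0x100);
    }

    Ok(())
}
```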
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
index ed889f079cab..8917d4ee499f 100644
--- a/rust/kernel/platform.rs
+++ b/rust/kernel/platform.rs
@@ -5,34 +5,60 @@
//! C header: [`include/linux/platform_device.h`](srctree/include/linux/platform_device.h)
use crate::{
- acpi, bindings, container_of,
- device::{self, Bound},
+ acpi,
+ bindings,
+ container_of,
+ device::{
+ self,
+ Bound, //
+ },
driver,
- error::{from_result, to_result, Result},
- io::{mem::IoRequest, Resource},
- irq::{self, IrqRequest},
+ error::{
+ from_result,
+ to_result, //
+ },
+ io::{
+ mem::IoRequest,
+ Resource, //
+ },
+ irq::{
+ self,
+ IrqRequest, //
+ },
of,
prelude::*,
types::Opaque,
- ThisModule,
+ ThisModule, //
};
use core::{
marker::PhantomData,
mem::offset_of,
- ptr::{addr_of_mut, NonNull},
+ ptr::{
+ addr_of_mut,
+ NonNull, //
+ },
};
/// An adapter for the registration of platform drivers.
pub struct Adapter<T: Driver>(T);
-// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// SAFETY:
+// - `bindings::platform_driver` is a C type declared as `repr(C)`.
+// - `T` is the type of the driver's device private data.
+// - `struct platform_driver` embeds a `struct device_driver`.
+// - `DEVICE_DRIVER_OFFSET` is the correct byte offset to the embedded `struct device_driver`.
+unsafe impl<T: Driver + 'static> driver::DriverLayout for Adapter<T> {
+ type DriverType = bindings::platform_driver;
+ type DriverData = T;
+ const DEVICE_DRIVER_OFFSET: usize = core::mem::offset_of!(Self::DriverType, driver);
+}
+
+// SAFETY: A call to `unregister` for a given instance of `DriverType` is guaranteed to be valid if
// a preceding call to `register` has been successful.
unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
- type RegType = bindings::platform_driver;
-
unsafe fn register(
- pdrv: &Opaque<Self::RegType>,
+ pdrv: &Opaque<Self::DriverType>,
name: &'static CStr,
module: &'static ThisModule,
) -> Result {
@@ -55,12 +81,12 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
(*pdrv.get()).driver.acpi_match_table = acpi_table;
}
- // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ // SAFETY: `pdrv` is guaranteed to be a valid `DriverType`.
to_result(unsafe { bindings::__platform_driver_register(pdrv.get(), module.0) })
}
- unsafe fn unregister(pdrv: &Opaque<Self::RegType>) {
- // SAFETY: `pdrv` is guaranteed to be a valid `RegType`.
+ unsafe fn unregister(pdrv: &Opaque<Self::DriverType>) {
+ // SAFETY: `pdrv` is guaranteed to be a valid `DriverType`.
unsafe { bindings::platform_driver_unregister(pdrv.get()) };
}
}
@@ -86,15 +112,15 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: The platform bus only ever calls the remove callback with a valid pointer to a
// `struct platform_device`.
//
- // INVARIANT: `pdev` is valid for the duration of `probe_callback()`.
+ // INVARIANT: `pdev` is valid for the duration of `remove_callback()`.
let pdev = unsafe { &*pdev.cast::<Device<device::CoreInternal>>() };
// SAFETY: `remove_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { pdev.as_ref().drvdata_obtain::<T>() };
+ let data = unsafe { pdev.as_ref().drvdata_borrow::<T>() };
- T::unbind(pdev, data.as_ref());
+ T::unbind(pdev, data);
}
}
@@ -137,8 +163,13 @@ macro_rules! module_platform_driver {
/// # Examples
///
///```
-/// # use kernel::{acpi, bindings, c_str, device::Core, of, platform};
-///
+/// # use kernel::{
+/// # acpi,
+/// # bindings,
+/// # device::Core,
+/// # of,
+/// # platform,
+/// # };
/// struct MyDriver;
///
/// kernel::of_device_table!(
@@ -146,7 +177,7 @@ macro_rules! module_platform_driver {
/// MODULE_OF_TABLE,
/// <MyDriver as platform::Driver>::IdInfo,
/// [
-/// (of::DeviceId::new(c_str!("test,device")), ())
+/// (of::DeviceId::new(c"test,device"), ())
/// ]
/// );
///
@@ -155,7 +186,7 @@ macro_rules! module_platform_driver {
/// MODULE_ACPI_TABLE,
/// <MyDriver as platform::Driver>::IdInfo,
/// [
-/// (acpi::DeviceId::new(c_str!("LNUXBEEF")), ())
+/// (acpi::DeviceId::new(c"LNUXBEEF"), ())
/// ]
/// );
///
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 2d743d78d220..6fd84389a858 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -11,6 +11,11 @@ use crate::{
fmt,
prelude::*,
str::RawFormatter,
+ sync::atomic::{
+ Atomic,
+ AtomicType,
+ Relaxed, //
+ },
};
// Called from `vsprintf` with format specifier `%pA`.
@@ -423,3 +428,151 @@ macro_rules! pr_cont (
$crate::print_macro!($crate::print::format_strings::CONT, true, $($arg)*)
)
);
+
+/// A lightweight `call_once` primitive.
+///
+/// This structure provides the Rust equivalent of the kernel's `DO_ONCE_LITE` macro.
+/// While it would be possible to implement the feature entirely as a Rust macro,
+/// the functionality that can be implemented as regular functions has been
+/// extracted and implemented as the `OnceLite` struct for better code maintainability.
+pub struct OnceLite(Atomic<State>);
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[repr(i32)]
+enum State {
+ Incomplete = 0,
+ Complete = 1,
+}
+
+// SAFETY: `State` and `i32` have the same size and alignment, and `State` is round-trip
+// transmutable to `i32`.
+unsafe impl AtomicType for State {
+ type Repr = i32;
+}
+
+impl OnceLite {
+ /// Creates a new [`OnceLite`] in the incomplete state.
+ #[inline(always)]
+ #[allow(clippy::new_without_default)]
+ pub const fn new() -> Self {
+ OnceLite(Atomic::new(State::Incomplete))
+ }
+
+ /// Calls the provided function exactly once.
+ ///
+ /// There is no other synchronization between two `call_once()`s
+ /// except that only one will execute `f`, in other words, callers
+ /// should not use a failed `call_once()` as a proof that another
+ /// `call_once()` has already finished and the effect is observable
+ /// to this thread.
+ pub fn call_once<F>(&self, f: F) -> bool
+ where
+ F: FnOnce(),
+ {
+ // Avoid the expensive xchg if already completed.
+ // ORDERING: `Relaxed` is used here since no synchronization is required.
+ let old = self.0.load(Relaxed);
+ if old == State::Complete {
+ return false;
+ }
+
+ // ORDERING: `Relaxed` is used here since no synchronization is required.
+ let old = self.0.xchg(State::Complete, Relaxed);
+ if old == State::Complete {
+ return false;
+ }
+
+ f();
+ true
+ }
+}
+
+/// Run the given function exactly once.
+///
+/// This is equivalent to the kernel's `DO_ONCE_LITE` macro.
+///
+/// # Examples
+///
+/// ```
+/// kernel::do_once_lite! {
+/// kernel::pr_info!("This will be printed only once\n");
+/// };
+/// ```
+#[macro_export]
+macro_rules! do_once_lite {
+ { $($e:tt)* } => {{
+ #[link_section = ".data..once"]
+ static ONCE: $crate::print::OnceLite = $crate::print::OnceLite::new();
+ ONCE.call_once(|| { $($e)* });
+ }};
+}
+
+/// Prints an emergency-level message (level 0) only once.
+///
+/// Equivalent to the kernel's `pr_emerg_once` macro.
+#[macro_export]
+macro_rules! pr_emerg_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_emerg!($($arg)*) }
+ )
+);
+
+/// Prints an alert-level message (level 1) only once.
+///
+/// Equivalent to the kernel's `pr_alert_once` macro.
+#[macro_export]
+macro_rules! pr_alert_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_alert!($($arg)*) }
+ )
+);
+
+/// Prints a critical-level message (level 2) only once.
+///
+/// Equivalent to the kernel's `pr_crit_once` macro.
+#[macro_export]
+macro_rules! pr_crit_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_crit!($($arg)*) }
+ )
+);
+
+/// Prints an error-level message (level 3) only once.
+///
+/// Equivalent to the kernel's `pr_err_once` macro.
+#[macro_export]
+macro_rules! pr_err_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_err!($($arg)*) }
+ )
+);
+
+/// Prints a warning-level message (level 4) only once.
+///
+/// Equivalent to the kernel's `pr_warn_once` macro.
+#[macro_export]
+macro_rules! pr_warn_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_warn!($($arg)*) }
+ )
+);
+
+/// Prints a notice-level message (level 5) only once.
+///
+/// Equivalent to the kernel's `pr_notice_once` macro.
+#[macro_export]
+macro_rules! pr_notice_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_notice!($($arg)*) }
+ )
+);
+
+/// Prints an info-level message (level 6) only once.
+///
+/// Equivalent to the kernel's `pr_info_once` macro.
+#[macro_export]
+macro_rules! pr_info_once (
+ ($($arg:tt)*) => (
+ $crate::do_once_lite! { $crate::pr_info!($($arg)*) }
+ )
+);
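Besides the `pr_*_once` wrappers and `do_once_lite!`, the underlying `OnceLite` struct can also be used directly as a named static; a minimal sketch, with the static name and message chosen for illustration:

```rust
use kernel::print::OnceLite;

static SLOW_PATH_WARNED: OnceLite = OnceLite::new();

fn hit_slow_path() {
    // Exactly one caller runs the closure; every other call returns `false`.
    // Note that a `false` return does not imply the winning closure has already finished.
    SLOW_PATH_WARNED.call_once(|| {
        kernel::pr_warn!("slow path taken; further occurrences will not be logged\n");
    });
}
```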
diff --git a/rust/kernel/ptr.rs b/rust/kernel/ptr.rs
index e3893ed04049..5b6a382637fe 100644
--- a/rust/kernel/ptr.rs
+++ b/rust/kernel/ptr.rs
@@ -5,8 +5,6 @@
use core::mem::align_of;
use core::num::NonZero;
-use crate::build_assert;
-
/// Type representing an alignment, which is always a power of two.
///
/// It is used to validate that a given value is a valid alignment, and to perform masking and
@@ -40,10 +38,12 @@ impl Alignment {
/// ```
#[inline(always)]
pub const fn new<const ALIGN: usize>() -> Self {
- build_assert!(
- ALIGN.is_power_of_two(),
- "Provided alignment is not a power of two."
- );
+ const {
+ assert!(
+ ALIGN.is_power_of_two(),
+ "Provided alignment is not a power of two."
+ );
+ }
// INVARIANT: `align` is a power of two.
// SAFETY: `align` is a power of two, and thus non-zero.
diff --git a/rust/kernel/pwm.rs b/rust/kernel/pwm.rs
index cb00f8a8765c..6c9d667009ef 100644
--- a/rust/kernel/pwm.rs
+++ b/rust/kernel/pwm.rs
@@ -13,9 +13,14 @@ use crate::{
devres,
error::{self, to_result},
prelude::*,
- types::{ARef, AlwaysRefCounted, Opaque}, //
+ sync::aref::{ARef, AlwaysRefCounted},
+ types::Opaque, //
+};
+use core::{
+ marker::PhantomData,
+ ops::Deref,
+ ptr::NonNull, //
};
-use core::{marker::PhantomData, ptr::NonNull};
/// Represents a PWM waveform configuration.
/// Mirrors struct [`struct pwm_waveform`](srctree/include/linux/pwm.h).
@@ -124,8 +129,7 @@ impl Device {
// SAFETY: `self.as_raw()` provides a valid `*mut pwm_device` pointer.
// `&c_wf` is a valid pointer to a `pwm_waveform` struct. The C function
// handles all necessary internal locking.
- let ret = unsafe { bindings::pwm_set_waveform_might_sleep(self.as_raw(), &c_wf, exact) };
- to_result(ret)
+ to_result(unsafe { bindings::pwm_set_waveform_might_sleep(self.as_raw(), &c_wf, exact) })
}
/// Queries the hardware for the configuration it would apply for a given
@@ -155,9 +159,7 @@ impl Device {
// SAFETY: `self.as_raw()` is a valid pointer. We provide a valid pointer
// to a stack-allocated `pwm_waveform` struct for the kernel to fill.
- let ret = unsafe { bindings::pwm_get_waveform_might_sleep(self.as_raw(), &mut c_wf) };
-
- to_result(ret)?;
+ to_result(unsafe { bindings::pwm_get_waveform_might_sleep(self.as_raw(), &mut c_wf) })?;
Ok(Waveform::from(c_wf))
}
@@ -173,7 +175,7 @@ pub struct RoundedWaveform<WfHw> {
}
/// Trait defining the operations for a PWM driver.
-pub trait PwmOps: 'static + Sized {
+pub trait PwmOps: 'static + Send + Sync + Sized {
/// The driver-specific hardware representation of a waveform.
///
/// This type must be [`Copy`], [`Default`], and fit within `PWM_WFHWSIZE`.
@@ -258,8 +260,8 @@ impl<T: PwmOps> Adapter<T> {
core::ptr::from_ref::<T::WfHw>(wfhw).cast::<u8>(),
wfhw_ptr.cast::<u8>(),
size,
- );
- }
+ )
+ };
Ok(())
}
@@ -279,8 +281,8 @@ impl<T: PwmOps> Adapter<T> {
wfhw_ptr.cast::<u8>(),
core::ptr::from_mut::<T::WfHw>(&mut wfhw).cast::<u8>(),
size,
- );
- }
+ )
+ };
Ok(wfhw)
}
@@ -306,9 +308,7 @@ impl<T: PwmOps> Adapter<T> {
// Now, call the original release function to free the `pwm_chip` itself.
// SAFETY: `dev` is the valid pointer passed into this callback, which is
// the expected argument for `pwmchip_release`.
- unsafe {
- bindings::pwmchip_release(dev);
- }
+ unsafe { bindings::pwmchip_release(dev) };
}
/// # Safety
@@ -408,9 +408,7 @@ impl<T: PwmOps> Adapter<T> {
match T::round_waveform_fromhw(chip, pwm, &wfhw, &mut rust_wf) {
Ok(()) => {
// SAFETY: `wf_ptr` is guaranteed valid by the C caller.
- unsafe {
- *wf_ptr = rust_wf.into();
- };
+ unsafe { *wf_ptr = rust_wf.into() };
0
}
Err(e) => e.to_errno(),
@@ -584,11 +582,12 @@ impl<T: PwmOps> Chip<T> {
///
/// Returns an [`ARef<Chip>`] managing the chip's lifetime via refcounting
/// on its embedded `struct device`.
- pub fn new(
- parent_dev: &device::Device,
+ #[allow(clippy::new_ret_no_self)]
+ pub fn new<'a>(
+ parent_dev: &'a device::Device<Bound>,
num_channels: u32,
data: impl pin_init::PinInit<T, Error>,
- ) -> Result<ARef<Self>> {
+ ) -> Result<UnregisteredChip<'a, T>> {
let sizeof_priv = core::mem::size_of::<T>();
// SAFETY: `pwmchip_alloc` allocates memory for the C struct and our private data.
let c_chip_ptr_raw =
@@ -601,19 +600,19 @@ impl<T: PwmOps> Chip<T> {
let drvdata_ptr = unsafe { bindings::pwmchip_get_drvdata(c_chip_ptr) };
// SAFETY: We construct the `T` object in-place in the allocated private memory.
- unsafe { data.__pinned_init(drvdata_ptr.cast())? };
+ unsafe { data.__pinned_init(drvdata_ptr.cast()) }.inspect_err(|_| {
+ // SAFETY: It is safe to call `pwmchip_put()` with a valid pointer obtained
+ // from `pwmchip_alloc()`. We will not use the pointer after this.
+ unsafe { bindings::pwmchip_put(c_chip_ptr) }
+ })?;
// SAFETY: `c_chip_ptr` points to a valid chip.
- unsafe {
- (*c_chip_ptr).dev.release = Some(Adapter::<T>::release_callback);
- }
+ unsafe { (*c_chip_ptr).dev.release = Some(Adapter::<T>::release_callback) };
// SAFETY: `c_chip_ptr` points to a valid chip.
// The `Adapter`'s `VTABLE` has a 'static lifetime, so the pointer
// returned by `as_raw()` is always valid.
- unsafe {
- (*c_chip_ptr).ops = Adapter::<T>::VTABLE.as_raw();
- }
+ unsafe { (*c_chip_ptr).ops = Adapter::<T>::VTABLE.as_raw() };
// Cast the `*mut bindings::pwm_chip` to `*mut Chip`. This is valid because
// `Chip` is `repr(transparent)` over `Opaque<bindings::pwm_chip>`, and
@@ -623,7 +622,9 @@ impl<T: PwmOps> Chip<T> {
// SAFETY: `chip_ptr_as_self` points to a valid `Chip` (layout-compatible with
// `bindings::pwm_chip`) whose embedded device has refcount 1.
// `ARef::from_raw` takes this pointer and manages it via `AlwaysRefCounted`.
- Ok(unsafe { ARef::from_raw(NonNull::new_unchecked(chip_ptr_as_self)) })
+ let chip = unsafe { ARef::from_raw(NonNull::new_unchecked(chip_ptr_as_self)) };
+
+ Ok(UnregisteredChip { chip, parent_dev })
}
}
@@ -633,9 +634,7 @@ unsafe impl<T: PwmOps> AlwaysRefCounted for Chip<T> {
fn inc_ref(&self) {
// SAFETY: `self.0.get()` points to a valid `pwm_chip` because `self` exists.
// The embedded `dev` is valid. `get_device` increments its refcount.
- unsafe {
- bindings::get_device(&raw mut (*self.0.get()).dev);
- }
+ unsafe { bindings::get_device(&raw mut (*self.0.get()).dev) };
}
#[inline]
@@ -644,9 +643,7 @@ unsafe impl<T: PwmOps> AlwaysRefCounted for Chip<T> {
// SAFETY: `obj` is a valid pointer to a `Chip` (and thus `bindings::pwm_chip`)
// with a non-zero refcount. `put_device` handles decrement and final release.
- unsafe {
- bindings::put_device(&raw mut (*c_chip_ptr).dev);
- }
+ unsafe { bindings::put_device(&raw mut (*c_chip_ptr).dev) };
}
}
@@ -654,50 +651,61 @@ unsafe impl<T: PwmOps> AlwaysRefCounted for Chip<T> {
// structure's state is managed and synchronized by the kernel's device model
// and PWM core locking mechanisms. Therefore, it is safe to move the `Chip`
// wrapper (and the pointer it contains) across threads.
-unsafe impl<T: PwmOps + Send> Send for Chip<T> {}
+unsafe impl<T: PwmOps> Send for Chip<T> {}
// SAFETY: It is safe for multiple threads to have shared access (`&Chip`) because
// the `Chip` data is immutable from the Rust side without holding the appropriate
// kernel locks, which the C core is responsible for. Any interior mutability is
// handled and synchronized by the C kernel code.
-unsafe impl<T: PwmOps + Sync> Sync for Chip<T> {}
+unsafe impl<T: PwmOps> Sync for Chip<T> {}
-/// A resource guard that ensures `pwmchip_remove` is called on drop.
-///
-/// This struct is intended to be managed by the `devres` framework by transferring its ownership
-/// via [`devres::register`]. This ties the lifetime of the PWM chip registration
-/// to the lifetime of the underlying device.
-pub struct Registration<T: PwmOps> {
+/// A wrapper around `ARef<Chip<T>>` that ensures that `register` can only be called once.
+pub struct UnregisteredChip<'a, T: PwmOps> {
chip: ARef<Chip<T>>,
+ parent_dev: &'a device::Device<Bound>,
}
-impl<T: 'static + PwmOps + Send + Sync> Registration<T> {
+impl<T: PwmOps> UnregisteredChip<'_, T> {
/// Registers a PWM chip with the PWM subsystem.
///
/// Transfers its ownership to the `devres` framework, which ties its lifetime
/// to the parent device.
/// On unbind of the parent device, the `devres` entry will be dropped, automatically
/// calling `pwmchip_remove`. This function should be called from the driver's `probe`.
- pub fn register(dev: &device::Device<Bound>, chip: ARef<Chip<T>>) -> Result {
- let chip_parent = chip.device().parent().ok_or(EINVAL)?;
- if dev.as_raw() != chip_parent.as_raw() {
- return Err(EINVAL);
- }
-
- let c_chip_ptr = chip.as_raw();
+ pub fn register(self) -> Result<ARef<Chip<T>>> {
+ let c_chip_ptr = self.chip.as_raw();
// SAFETY: `c_chip_ptr` points to a valid chip with its ops initialized.
// `__pwmchip_add` is the C function to register the chip with the PWM core.
- unsafe {
- to_result(bindings::__pwmchip_add(c_chip_ptr, core::ptr::null_mut()))?;
- }
+ to_result(unsafe { bindings::__pwmchip_add(c_chip_ptr, core::ptr::null_mut()) })?;
+
+ let registration = Registration {
+ chip: ARef::clone(&self.chip),
+ };
+
+ devres::register(self.parent_dev, registration, GFP_KERNEL)?;
+
+ Ok(self.chip)
+ }
+}
- let registration = Registration { chip };
+impl<T: PwmOps> Deref for UnregisteredChip<'_, T> {
+ type Target = Chip<T>;
- devres::register(dev, registration, GFP_KERNEL)
+ fn deref(&self) -> &Self::Target {
+ &self.chip
}
}
+/// A resource guard that ensures `pwmchip_remove` is called on drop.
+///
+/// This struct is intended to be managed by the `devres` framework by transferring its ownership
+/// via [`devres::register`]. This ties the lifetime of the PWM chip registration
+/// to the lifetime of the underlying device.
+struct Registration<T: PwmOps> {
+ chip: ARef<Chip<T>>,
+}
+
impl<T: PwmOps> Drop for Registration<T> {
fn drop(&mut self) {
let chip_raw = self.chip.as_raw();
@@ -705,9 +713,7 @@ impl<T: PwmOps> Drop for Registration<T> {
// SAFETY: `chip_raw` points to a chip that was successfully registered.
// `bindings::pwmchip_remove` is the correct C function to unregister it.
// This `drop` implementation is called automatically by `devres` on driver unbind.
- unsafe {
- bindings::pwmchip_remove(chip_raw);
- }
+ unsafe { bindings::pwmchip_remove(chip_raw) };
}
}
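With `Chip::new()` now returning an `UnregisteredChip` that borrows the bound parent device, the probe-time flow becomes a straightforward allocate-then-register sequence. A minimal sketch of that flow under the signatures shown above; the helper name and channel count are illustrative:

```rust
use kernel::{device, prelude::*, pwm, sync::aref::ARef};

/// Illustrative probe-time helper: allocate a PWM chip and hand its teardown to devres.
fn register_chip<T: pwm::PwmOps>(
    dev: &device::Device<device::Bound>,
    data: impl pin_init::PinInit<T, Error>,
) -> Result<ARef<pwm::Chip<T>>> {
    // Allocate the chip; nothing is visible to the PWM core yet, and the returned
    // `UnregisteredChip` remembers `dev` as the parent for the devres registration.
    let chip = pwm::Chip::<T>::new(dev, /* num_channels */ 1, data)?;

    // `register()` consumes the wrapper, calls `__pwmchip_add()` and ties `pwmchip_remove()`
    // to `dev` via devres; the returned `ARef` keeps the chip alive for the caller.
    chip.register()
}
```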
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
index 4729eb56827a..6fbd579d4a43 100644
--- a/rust/kernel/rbtree.rs
+++ b/rust/kernel/rbtree.rs
@@ -414,14 +414,17 @@ where
// SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self`
// point to the links field of `Node<K, V>` objects.
let this = unsafe { container_of!(node, Node<K, V>, links) };
+
// SAFETY: `this` is a non-null node so it is valid by the type invariants.
- node = match key.cmp(unsafe { &(*this).key }) {
- // SAFETY: `node` is a non-null node so it is valid by the type invariants.
- Ordering::Less => unsafe { (*node).rb_left },
- // SAFETY: `node` is a non-null node so it is valid by the type invariants.
- Ordering::Greater => unsafe { (*node).rb_right },
- // SAFETY: `node` is a non-null node so it is valid by the type invariants.
- Ordering::Equal => return Some(unsafe { &(*this).value }),
+ let this_ref = unsafe { &*this };
+
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ let node_ref = unsafe { &*node };
+
+ node = match key.cmp(&this_ref.key) {
+ Ordering::Less => node_ref.rb_left,
+ Ordering::Greater => node_ref.rb_right,
+ Ordering::Equal => return Some(&this_ref.value),
}
}
None
@@ -498,10 +501,10 @@ where
let this = unsafe { container_of!(node, Node<K, V>, links) };
// SAFETY: `this` is a non-null node so it is valid by the type invariants.
let this_key = unsafe { &(*this).key };
+
// SAFETY: `node` is a non-null node so it is valid by the type invariants.
- let left_child = unsafe { (*node).rb_left };
- // SAFETY: `node` is a non-null node so it is valid by the type invariants.
- let right_child = unsafe { (*node).rb_right };
+ let node_ref = unsafe { &*node };
+
match key.cmp(this_key) {
Ordering::Equal => {
// SAFETY: `this` is a non-null node so it is valid by the type invariants.
@@ -509,7 +512,7 @@ where
break;
}
Ordering::Greater => {
- node = right_child;
+ node = node_ref.rb_right;
}
Ordering::Less => {
let is_better_match = match best_key {
@@ -521,7 +524,7 @@ where
// SAFETY: `this` is a non-null node so it is valid by the type invariants.
best_links = Some(unsafe { NonNull::new_unchecked(&mut (*this).links) });
}
- node = left_child;
+ node = node_ref.rb_left;
}
};
}
@@ -985,7 +988,7 @@ impl<'a, K, V> CursorMut<'a, K, V> {
self.peek(Direction::Prev)
}
- /// Access the previous node without moving the cursor.
+ /// Access the next node without moving the cursor.
pub fn peek_next(&self) -> Option<(&K, &V)> {
self.peek(Direction::Next)
}
@@ -1130,7 +1133,7 @@ pub struct IterMut<'a, K, V> {
}
// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`.
-// The iterator only gives out immutable references to the keys, but since the iterator has excusive access to those same
+// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access to those same
// keys, `Send` is sufficient. `Sync` would be okay, but it is more restrictive to the user.
unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
diff --git a/rust/kernel/regulator.rs b/rust/kernel/regulator.rs
index 2c44827ad0b7..4f7837c7e53a 100644
--- a/rust/kernel/regulator.rs
+++ b/rust/kernel/regulator.rs
@@ -122,12 +122,11 @@ pub fn devm_enable_optional(dev: &Device<Bound>, name: &CStr) -> Result {
///
/// ```
/// # use kernel::prelude::*;
-/// # use kernel::c_str;
/// # use kernel::device::Device;
/// # use kernel::regulator::{Voltage, Regulator, Disabled, Enabled};
/// fn enable(dev: &Device, min_voltage: Voltage, max_voltage: Voltage) -> Result {
/// // Obtain a reference to a (fictitious) regulator.
-/// let regulator: Regulator<Disabled> = Regulator::<Disabled>::get(dev, c_str!("vcc"))?;
+/// let regulator: Regulator<Disabled> = Regulator::<Disabled>::get(dev, c"vcc")?;
///
/// // The voltage can be set before enabling the regulator if needed, e.g.:
/// regulator.set_voltage(min_voltage, max_voltage)?;
@@ -166,12 +165,11 @@ pub fn devm_enable_optional(dev: &Device<Bound>, name: &CStr) -> Result {
///
/// ```
/// # use kernel::prelude::*;
-/// # use kernel::c_str;
/// # use kernel::device::Device;
/// # use kernel::regulator::{Voltage, Regulator, Enabled};
/// fn enable(dev: &Device) -> Result {
/// // Obtain a reference to a (fictitious) regulator and enable it.
-/// let regulator: Regulator<Enabled> = Regulator::<Enabled>::get(dev, c_str!("vcc"))?;
+/// let regulator: Regulator<Enabled> = Regulator::<Enabled>::get(dev, c"vcc")?;
///
/// // Dropping an enabled regulator will disable it. The refcount will be
/// // decremented.
@@ -193,13 +191,12 @@ pub fn devm_enable_optional(dev: &Device<Bound>, name: &CStr) -> Result {
///
/// ```
/// # use kernel::prelude::*;
-/// # use kernel::c_str;
/// # use kernel::device::{Bound, Device};
/// # use kernel::regulator;
/// fn enable(dev: &Device<Bound>) -> Result {
/// // Obtain a reference to a (fictitious) regulator and enable it. This
/// // call only returns whether the operation succeeded.
-/// regulator::devm_enable(dev, c_str!("vcc"))?;
+/// regulator::devm_enable(dev, c"vcc")?;
///
/// // The regulator will be disabled and put when `dev` is unbound.
/// Ok(())
diff --git a/rust/kernel/safety.rs b/rust/kernel/safety.rs
new file mode 100644
index 000000000000..c1c6bd0fa2cc
--- /dev/null
+++ b/rust/kernel/safety.rs
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Safety related APIs.
+
+/// Checks that a precondition of an unsafe function is followed.
+///
+/// The check is enabled at runtime if debug assertions (`CONFIG_RUST_DEBUG_ASSERTIONS`)
+/// are enabled. Otherwise, this macro is a no-op.
+///
+/// # Examples
+///
+/// ```no_run
+/// use kernel::unsafe_precondition_assert;
+///
+/// struct RawBuffer<T: Copy, const N: usize> {
+/// data: [T; N],
+/// }
+///
+/// impl<T: Copy, const N: usize> RawBuffer<T, N> {
+/// /// # Safety
+/// ///
+/// /// The caller must ensure that `index` is less than `N`.
+/// unsafe fn set_unchecked(&mut self, index: usize, value: T) {
+/// unsafe_precondition_assert!(
+/// index < N,
+/// "RawBuffer::set_unchecked() requires index ({index}) < N ({N})"
+/// );
+///
+/// // SAFETY: By the safety requirements of this function, `index` is valid.
+/// unsafe {
+/// *self.data.get_unchecked_mut(index) = value;
+/// }
+/// }
+/// }
+/// ```
+///
+/// # Panics
+///
+/// Panics if the expression evaluates to [`false`] at runtime.
+#[macro_export]
+macro_rules! unsafe_precondition_assert {
+ ($cond:expr $(,)?) => {
+ $crate::unsafe_precondition_assert!(@inner $cond, ::core::stringify!($cond))
+ };
+
+ ($cond:expr, $($arg:tt)+) => {
+ $crate::unsafe_precondition_assert!(@inner $cond, $crate::prelude::fmt!($($arg)+))
+ };
+
+ (@inner $cond:expr, $msg:expr) => {
+ ::core::debug_assert!($cond, "unsafe precondition violated: {}", $msg)
+ };
+}
diff --git a/rust/kernel/scatterlist.rs b/rust/kernel/scatterlist.rs
index 196fdb9a75e7..b83c468b5c63 100644
--- a/rust/kernel/scatterlist.rs
+++ b/rust/kernel/scatterlist.rs
@@ -38,7 +38,8 @@ use crate::{
io::ResourceSize,
page,
prelude::*,
- types::{ARef, Opaque},
+ sync::aref::ARef,
+ types::Opaque,
};
use core::{ops::Deref, ptr::NonNull};
diff --git a/rust/kernel/seq_file.rs b/rust/kernel/seq_file.rs
index 855e533813a6..518265558d66 100644
--- a/rust/kernel/seq_file.rs
+++ b/rust/kernel/seq_file.rs
@@ -4,7 +4,7 @@
//!
//! C header: [`include/linux/seq_file.h`](srctree/include/linux/seq_file.h)
-use crate::{bindings, c_str, fmt, str::CStrExt as _, types::NotThreadSafe, types::Opaque};
+use crate::{bindings, fmt, str::CStrExt as _, types::NotThreadSafe, types::Opaque};
/// A utility for generating the contents of a seq file.
#[repr(transparent)]
@@ -36,7 +36,7 @@ impl SeqFile {
unsafe {
bindings::seq_printf(
self.inner.get(),
- c_str!("%pA").as_char_ptr(),
+ c"%pA".as_char_ptr(),
core::ptr::from_ref(&args).cast::<crate::ffi::c_void>(),
);
}
diff --git a/rust/kernel/soc.rs b/rust/kernel/soc.rs
new file mode 100644
index 000000000000..0d6a36c83cb6
--- /dev/null
+++ b/rust/kernel/soc.rs
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! SoC Driver Abstraction.
+//!
+//! C header: [`include/linux/sys_soc.h`](srctree/include/linux/sys_soc.h)
+
+use crate::{
+ bindings,
+ error,
+ prelude::*,
+ str::CString,
+ types::Opaque, //
+};
+use core::ptr::NonNull;
+
+/// Attributes for a SoC device.
+///
+/// These are both exported to userspace under /sys/devices/socX and provided to other drivers to
+/// match against via `soc_device_match` (not yet available in Rust) to enable quirks or
+/// device-specific support where necessary.
+///
+/// All fields are freeform - they have no specific formatting, just defined meanings.
+/// For example, the [`machine`](`Attributes::machine`) field could be "DB8500" or
+/// "Qualcomm Technologies, Inc. SM8560 HDK", but regardless it should identify a board or product.
+pub struct Attributes {
+ /// Should generally be a board ID or product ID. Examples
+ /// include DB8500 (ST-Ericsson) or "Qualcomm Technologies, Inc. SM8560 HDK".
+ ///
+ /// If this field is not populated, the SoC infrastructure will try to populate it from
+ /// `/model` in the device tree.
+ pub machine: Option<CString>,
+ /// The broader class this SoC belongs to. Examples include ux500
+ /// (for DB8500) or Snapdragon (for SM8650).
+ ///
+ /// On chips with ARM firmware supporting SMCCC v1.2+, this may be a JEDEC JEP106 manufacturer
+ /// identification.
+ pub family: Option<CString>,
+ /// The manufacturing revision of the part. Frequently this is MAJOR.MINOR, but not always.
+ pub revision: Option<CString>,
+ /// Serial Number - uniquely identifies this specific SoC instance, if present (buying a
+ /// replacement part should change it). This field cannot be matched on and is solely
+ /// present to export through /sys.
+ pub serial_number: Option<CString>,
+ /// SoC ID - identifies a specific SoC kind in question, sometimes more specifically than
+ /// `machine` if the same SoC is used in multiple products. Some devices use this to specify a
+ /// SoC name, e.g. "I.MX??", and others just print an ID number (e.g. Tegra and Qualcomm).
+ ///
+ /// On chips with ARM firmware supporting SMCCC v1.2+, this may be a JEDEC JEP106 manufacturer
+ /// identification (the family value) followed by a colon and then a 4-digit ID value.
+ pub soc_id: Option<CString>,
+}
+
+struct BuiltAttributes {
+ // While `inner` holds pointers into `_backing`, they point to the interior of the
+ // `CString`s, not to `_backing` itself, so `_backing` does not need to be pinned.
+ _backing: Attributes,
+ // `Opaque` makes us `!Unpin`, as the registration holds a pointer to `inner` when used.
+ inner: Opaque<bindings::soc_device_attribute>,
+}
+
+fn cstring_to_c(mcs: &Option<CString>) -> *const kernel::ffi::c_char {
+ mcs.as_ref()
+ .map(|cs| cs.as_char_ptr())
+ .unwrap_or(core::ptr::null())
+}
+
+impl BuiltAttributes {
+ fn as_mut_ptr(&self) -> *mut bindings::soc_device_attribute {
+ self.inner.get()
+ }
+}
+
+impl Attributes {
+ fn build(self) -> BuiltAttributes {
+ BuiltAttributes {
+ inner: Opaque::new(bindings::soc_device_attribute {
+ machine: cstring_to_c(&self.machine),
+ family: cstring_to_c(&self.family),
+ revision: cstring_to_c(&self.revision),
+ serial_number: cstring_to_c(&self.serial_number),
+ soc_id: cstring_to_c(&self.soc_id),
+ data: core::ptr::null(),
+ custom_attr_group: core::ptr::null(),
+ }),
+ _backing: self,
+ }
+ }
+}
+
+/// Registration handle for a SoC device. If you let it go out of scope, the SoC device will be
+/// unregistered.
+#[pin_data(PinnedDrop)]
+pub struct Registration {
+ #[pin]
+ attr: BuiltAttributes,
+ soc_dev: NonNull<bindings::soc_device>,
+}
+
+// SAFETY: We provide no operations through `&Registration`.
+unsafe impl Sync for Registration {}
+
+// SAFETY: All pointers are normal allocations, not thread-specific.
+unsafe impl Send for Registration {}
+
+#[pinned_drop]
+impl PinnedDrop for Registration {
+ fn drop(self: Pin<&mut Self>) {
+ // SAFETY: `Registration` always holds a live pointer to a registered `soc_device`, so it
+ // can be unregistered here.
+ unsafe { bindings::soc_device_unregister(self.soc_dev.as_ptr()) }
+ }
+}
+
+impl Registration {
+ /// Registers a new SoC device.
+ pub fn new(attr: Attributes) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ attr: attr.build(),
+ soc_dev: {
+ // SAFETY:
+ // * The struct provided through `attr` is backed by pinned data next to it,
+ // so as long as `attr` lives, the strings pointed to by the struct will too.
+ // * `attr` is pinned, so the pinned data won't move.
+ // * If a device is returned, others may read this data; by the drop order of
+ // `Registration`, `attr` is not released until the device has been unregistered.
+ let raw_soc = error::from_err_ptr(unsafe {
+ bindings::soc_device_register(attr.as_mut_ptr())
+ })?;
+
+ NonNull::new(raw_soc).ok_or(EINVAL)?
+ },
+ }? Error)
+ }
+}
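A minimal registration sketch for the new abstraction, pin-allocating the `Registration` on the heap; the board and family strings are illustrative, and a real caller would keep the returned handle alive in its module or driver state:

```rust
use kernel::{prelude::*, soc, str::CString};

/// Illustrative helper: register a SoC device with a couple of attributes populated.
fn register_example_soc() -> Result<Pin<KBox<soc::Registration>>> {
    let attrs = soc::Attributes {
        machine: Some(CString::try_from_fmt(fmt!("Example Board"))?),
        family: Some(CString::try_from_fmt(fmt!("Example Family"))?),
        revision: None,
        serial_number: None,
        soc_id: None,
    };

    // Dropping the returned registration unregisters the SoC device, so keep it alive
    // for as long as the /sys/devices/socX entry should exist.
    KBox::pin_init(soc::Registration::new(attrs), GFP_KERNEL)
}
```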
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
index 5df87e2bd212..993dbf2caa0e 100644
--- a/rust/kernel/sync.rs
+++ b/rust/kernel/sync.rs
@@ -32,7 +32,9 @@ pub use locked_by::LockedBy;
pub use refcount::Refcount;
pub use set_once::SetOnce;
-/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
+/// Represents a lockdep class.
+///
+/// Wraps the kernel's `struct lock_class_key`.
#[repr(transparent)]
#[pin_data(PinnedDrop)]
pub struct LockClassKey {
@@ -40,20 +42,42 @@ pub struct LockClassKey {
inner: Opaque<bindings::lock_class_key>,
}
+// SAFETY: Unregistering a lock class key from a different thread than where it was registered is
+// allowed.
+unsafe impl Send for LockClassKey {}
+
// SAFETY: `bindings::lock_class_key` is designed to be used concurrently from multiple threads and
// provides its own synchronization.
unsafe impl Sync for LockClassKey {}
impl LockClassKey {
- /// Initializes a dynamically allocated lock class key. In the common case of using a
- /// statically allocated lock class key, the static_lock_class! macro should be used instead.
+ /// Initializes a statically allocated lock class key.
+ ///
+ /// This is usually used indirectly through the [`static_lock_class!`] macro. See its
+ /// documentation for more information.
+ ///
+ /// # Safety
+ ///
+ /// * Before using the returned value, it must be pinned in a static memory location.
+ /// * The destructor must never run on the returned `LockClassKey`.
+ pub const unsafe fn new_static() -> Self {
+ LockClassKey {
+ inner: Opaque::uninit(),
+ }
+ }
+
+ /// Initializes a dynamically allocated lock class key.
+ ///
+ /// In the common case of using a statically allocated lock class key, the
+ /// [`static_lock_class!`] macro should be used instead.
///
/// # Examples
+ ///
/// ```
- /// # use kernel::alloc::KBox;
- /// # use kernel::types::ForeignOwnable;
- /// # use kernel::sync::{LockClassKey, SpinLock};
- /// # use pin_init::stack_pin_init;
+ /// use kernel::alloc::KBox;
+ /// use kernel::types::ForeignOwnable;
+ /// use kernel::sync::{LockClassKey, SpinLock};
+ /// use pin_init::stack_pin_init;
///
/// let key = KBox::pin_init(LockClassKey::new_dynamic(), GFP_KERNEL)?;
/// let key_ptr = key.into_foreign();
@@ -71,7 +95,6 @@ impl LockClassKey {
/// // SAFETY: We dropped `num`, the only use of the key, so the result of the previous
/// // `borrow` has also been dropped. Thus, it's safe to use from_foreign.
/// unsafe { drop(<Pin<KBox<LockClassKey>> as ForeignOwnable>::from_foreign(key_ptr)) };
- ///
/// # Ok::<(), Error>(())
/// ```
pub fn new_dynamic() -> impl PinInit<Self> {
@@ -81,7 +104,10 @@ impl LockClassKey {
})
}
- pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key {
+ /// Returns a raw pointer to the inner C struct.
+ ///
+ /// It is up to the caller to use the raw pointer correctly.
+ pub fn as_ptr(&self) -> *mut bindings::lock_class_key {
self.inner.get()
}
}
@@ -89,27 +115,38 @@ impl LockClassKey {
#[pinned_drop]
impl PinnedDrop for LockClassKey {
fn drop(self: Pin<&mut Self>) {
- // SAFETY: self.as_ptr was registered with lockdep and self is pinned, so the address
- // hasn't changed. Thus, it's safe to pass to unregister.
+ // SAFETY: `self.as_ptr()` was registered with lockdep and `self` is pinned, so the address
+ // hasn't changed. Thus, it's safe to pass it to unregister.
unsafe { bindings::lockdep_unregister_key(self.as_ptr()) }
}
}
/// Defines a new static lock class and returns a pointer to it.
-#[doc(hidden)]
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::{static_lock_class, Arc, SpinLock};
+///
+/// fn new_locked_int() -> Result<Arc<SpinLock<u32>>> {
+/// Arc::pin_init(SpinLock::new(
+/// 42,
+/// c"new_locked_int",
+/// static_lock_class!(),
+/// ), GFP_KERNEL)
+/// }
+/// ```
#[macro_export]
macro_rules! static_lock_class {
() => {{
static CLASS: $crate::sync::LockClassKey =
- // Lockdep expects uninitialized memory when it's handed a statically allocated `struct
- // lock_class_key`.
- //
- // SAFETY: `LockClassKey` transparently wraps `Opaque` which permits uninitialized
- // memory.
- unsafe { ::core::mem::MaybeUninit::uninit().assume_init() };
+ // SAFETY: The returned `LockClassKey` is stored in static memory and we pin it. Drop
+ // never runs on a static global.
+ unsafe { $crate::sync::LockClassKey::new_static() };
$crate::prelude::Pin::static_ref(&CLASS)
}};
}
+pub use static_lock_class;
/// Returns the given string, if one is provided, otherwise generates one based on the source code
/// location.
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 289f77abf415..921e19333b89 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -240,6 +240,9 @@ impl<T> Arc<T> {
// `Arc` object.
Ok(unsafe { Self::from_inner(inner) })
}
+
+    /// The byte offset at which the value is stored within the `Arc` allocation.
+ pub const DATA_OFFSET: usize = core::mem::offset_of!(ArcInner<T>, data);
}
impl<T: ?Sized> Arc<T> {
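A hedged illustration of what the new constant enables (the helper below is hypothetical, not part of this patch): given a raw pointer to the payload of an `Arc<T>`, `DATA_OFFSET` allows unsafe code to compute the start of the underlying allocation.

```
use kernel::sync::Arc;

/// Computes the start of the `Arc<T>` allocation from a pointer to its payload.
///
/// Pure pointer arithmetic; any actual use must uphold the usual `Arc`
/// raw-pointer invariants (the pointer must originate from an `Arc<T>`).
fn arc_allocation_start<T>(data: *const T) -> *const u8 {
    data.cast::<u8>().wrapping_sub(Arc::<T>::DATA_OFFSET)
}
```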
diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs
index 0d24a0432015..0616c0353c2b 100644
--- a/rust/kernel/sync/aref.rs
+++ b/rust/kernel/sync/aref.rs
@@ -83,6 +83,9 @@ unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {}
// example, when the reference count reaches zero and `T` is dropped.
unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {}
+// Even if `T` is pinned, an `ARef<T>` is just a pointer to it and may itself be moved freely.
+impl<T: AlwaysRefCounted> Unpin for ARef<T> {}
+
impl<T: AlwaysRefCounted> ARef<T> {
/// Creates a new instance of [`ARef`].
///
diff --git a/rust/kernel/sync/atomic/internal.rs b/rust/kernel/sync/atomic/internal.rs
index 6fdd8e59f45b..0dac58bca2b3 100644
--- a/rust/kernel/sync/atomic/internal.rs
+++ b/rust/kernel/sync/atomic/internal.rs
@@ -13,17 +13,22 @@ mod private {
pub trait Sealed {}
}
-// `i32` and `i64` are only supported atomic implementations.
+// The C side provides atomic primitives only for `i32` and `i64` (`atomic_t` and `atomic64_t`),
+// while the Rust side additionally layers atomic support for `i8` and `i16` on top of
+// lower-level C primitives.
+impl private::Sealed for i8 {}
+impl private::Sealed for i16 {}
impl private::Sealed for i32 {}
impl private::Sealed for i64 {}
/// A marker trait for types that implement atomic operations with C side primitives.
///
-/// This trait is sealed, and only types that have directly mapping to the C side atomics should
-/// impl this:
+/// This trait is sealed, and only types that map directly to the C side atomics
+/// or can be implemented with lower-level C primitives are allowed to implement this:
///
-/// - `i32` maps to `atomic_t`.
-/// - `i64` maps to `atomic64_t`.
+/// - `i8` and `i16` are implemented with lower-level C primitives.
+/// - `i32` maps to `atomic_t`.
+/// - `i64` maps to `atomic64_t`.
pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
/// The type of the delta in arithmetic or logical operations.
///
@@ -32,6 +37,20 @@ pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
type Delta;
}
+// The current load/store helpers use `{WRITE,READ}_ONCE()`, hence atomicity with respect to
+// read-modify-write operations is only guaranteed if the architecture supports native atomic RMW.
+#[cfg(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW)]
+impl AtomicImpl for i8 {
+ type Delta = Self;
+}
+
+// The current load/store helpers use `{WRITE,READ}_ONCE()`, hence atomicity with respect to
+// read-modify-write operations is only guaranteed if the architecture supports native atomic RMW.
+#[cfg(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW)]
+impl AtomicImpl for i16 {
+ type Delta = Self;
+}
+
// `atomic_t` implements atomic operations on `i32`.
impl AtomicImpl for i32 {
type Delta = Self;
@@ -156,16 +175,17 @@ macro_rules! impl_atomic_method {
}
}
-// Delcares $ops trait with methods and implements the trait for `i32` and `i64`.
-macro_rules! declare_and_impl_atomic_methods {
- ($(#[$attr:meta])* $pub:vis trait $ops:ident {
- $(
- $(#[doc=$doc:expr])*
- fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
- $unsafe:tt { bindings::#call($($arg:tt)*) }
- }
- )*
- }) => {
+macro_rules! declare_atomic_ops_trait {
+ (
+ $(#[$attr:meta])* $pub:vis trait $ops:ident {
+ $(
+ $(#[doc=$doc:expr])*
+ fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+ $unsafe:tt { bindings::#call($($arg:tt)*) }
+ }
+ )*
+ }
+ ) => {
$(#[$attr])*
$pub trait $ops: AtomicImpl {
$(
@@ -175,21 +195,25 @@ macro_rules! declare_and_impl_atomic_methods {
);
)*
}
+ }
+}
- impl $ops for i32 {
+macro_rules! impl_atomic_ops_for_one {
+ (
+ $ty:ty => $ctype:ident,
+ $(#[$attr:meta])* $pub:vis trait $ops:ident {
$(
- impl_atomic_method!(
- (atomic) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
- $unsafe { call($($arg)*) }
- }
- );
+ $(#[doc=$doc:expr])*
+ fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+ $unsafe:tt { bindings::#call($($arg:tt)*) }
+ }
)*
}
-
- impl $ops for i64 {
+ ) => {
+ impl $ops for $ty {
$(
impl_atomic_method!(
- (atomic64) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
+ ($ctype) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
$unsafe { call($($arg)*) }
}
);
@@ -198,7 +222,47 @@ macro_rules! declare_and_impl_atomic_methods {
}
}
+// Declares the `$ops` trait with its methods and implements it for each `$ty => $ctype` pair.
+macro_rules! declare_and_impl_atomic_methods {
+ (
+ [ $($map:tt)* ]
+ $(#[$attr:meta])* $pub:vis trait $ops:ident { $($body:tt)* }
+ ) => {
+ declare_and_impl_atomic_methods!(
+ @with_ops_def
+ [ $($map)* ]
+ ( $(#[$attr])* $pub trait $ops { $($body)* } )
+ );
+ };
+
+ (@with_ops_def [ $($map:tt)* ] ( $($ops_def:tt)* )) => {
+ declare_atomic_ops_trait!( $($ops_def)* );
+
+ declare_and_impl_atomic_methods!(
+ @munch
+ [ $($map)* ]
+ ( $($ops_def)* )
+ );
+ };
+
+ (@munch [] ( $($ops_def:tt)* )) => {};
+
+ (@munch [ $ty:ty => $ctype:ident $(, $($rest:tt)*)? ] ( $($ops_def:tt)* )) => {
+ impl_atomic_ops_for_one!(
+ $ty => $ctype,
+ $($ops_def)*
+ );
+
+ declare_and_impl_atomic_methods!(
+ @munch
+ [ $($($rest)*)? ]
+ ( $($ops_def)* )
+ );
+ };
+}
+
declare_and_impl_atomic_methods!(
+ [ i8 => atomic_i8, i16 => atomic_i16, i32 => atomic, i64 => atomic64 ]
/// Basic atomic operations
pub trait AtomicBasicOps {
/// Atomic read (load).
@@ -216,6 +280,7 @@ declare_and_impl_atomic_methods!(
);
declare_and_impl_atomic_methods!(
+ [ i8 => atomic_i8, i16 => atomic_i16, i32 => atomic, i64 => atomic64 ]
/// Exchange and compare-and-exchange atomic operations
pub trait AtomicExchangeOps {
/// Atomic exchange.
@@ -243,6 +308,7 @@ declare_and_impl_atomic_methods!(
);
declare_and_impl_atomic_methods!(
+ [ i32 => atomic, i64 => atomic64 ]
/// Atomic arithmetic operations
pub trait AtomicArithmeticOps {
/// Atomic add (wrapping).
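The reworked macros above use a "tt muncher": `declare_and_impl_atomic_methods!` first declares the trait, then recursively peels one `$ty => $ctype` pair off the bracketed list and emits one impl per pair via `impl_atomic_ops_for_one!`. A standalone, simplified sketch of that recursion pattern (illustrative only, not the kernel macro):

```
// Simplified tt-muncher: one trait impl is emitted per `type => name` pair.
macro_rules! impl_for_each {
    // Base case: empty list, nothing left to emit.
    ([] $trait_:ident) => {};
    // Recursive case: emit an impl for the first pair, then recurse on the rest.
    ([ $ty:ty => $name:ident $(, $($rest:tt)*)? ] $trait_:ident) => {
        impl $trait_ for $ty {
            fn c_name() -> &'static str {
                stringify!($name)
            }
        }
        impl_for_each!([ $($($rest)*)? ] $trait_);
    };
}

trait CName {
    fn c_name() -> &'static str;
}

// Expands to `impl CName for i32 { ... }` and `impl CName for i64 { ... }`.
impl_for_each!([ i32 => atomic, i64 => atomic64 ] CName);
```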
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
index 45a17985cda4..67a0406d3ea4 100644
--- a/rust/kernel/sync/atomic/predefine.rs
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -5,6 +5,29 @@
use crate::static_assert;
use core::mem::{align_of, size_of};
+// Ensure size and alignment requirements are checked.
+static_assert!(size_of::<bool>() == size_of::<i8>());
+static_assert!(align_of::<bool>() == align_of::<i8>());
+
+// SAFETY: `bool` has the same size and alignment as `i8`, and Rust guarantees that `bool` has
+// only two valid bit patterns: 0 (false) and 1 (true). Those are valid `i8` values, so `bool` is
+// round-trip transmutable to `i8`.
+unsafe impl super::AtomicType for bool {
+ type Repr = i8;
+}
+
+// SAFETY: `i8` has the same size and alignment as itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i8 {
+ type Repr = i8;
+}
+
+// SAFETY: `i16` has the same size and alignment as itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i16 {
+ type Repr = i16;
+}
+
// SAFETY: `i32` has the same size and alignment with itself, and is round-trip transmutable to
// itself.
unsafe impl super::AtomicType for i32 {
@@ -35,12 +58,23 @@ unsafe impl super::AtomicAdd<i64> for i64 {
// as `isize` and `usize`, and `isize` and `usize` are always bi-directional transmutable to
// `isize_atomic_repr`, which also always implements `AtomicImpl`.
#[allow(non_camel_case_types)]
+#[cfg(not(testlib))]
#[cfg(not(CONFIG_64BIT))]
type isize_atomic_repr = i32;
#[allow(non_camel_case_types)]
+#[cfg(not(testlib))]
#[cfg(CONFIG_64BIT)]
type isize_atomic_repr = i64;
+#[allow(non_camel_case_types)]
+#[cfg(testlib)]
+#[cfg(target_pointer_width = "32")]
+type isize_atomic_repr = i32;
+#[allow(non_camel_case_types)]
+#[cfg(testlib)]
+#[cfg(target_pointer_width = "64")]
+type isize_atomic_repr = i64;
+
// Ensure size and alignment requirements are checked.
static_assert!(size_of::<isize>() == size_of::<isize_atomic_repr>());
static_assert!(align_of::<isize>() == align_of::<isize_atomic_repr>());
@@ -118,7 +152,7 @@ mod tests {
#[test]
fn atomic_basic_tests() {
- for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
let x = Atomic::new(v);
assert_eq!(v, x.load(Relaxed));
@@ -126,8 +160,18 @@ mod tests {
}
#[test]
+ fn atomic_acquire_release_tests() {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+ let x = Atomic::new(0);
+
+ x.store(v, Release);
+ assert_eq!(v, x.load(Acquire));
+ });
+ }
+
+ #[test]
fn atomic_xchg_tests() {
- for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
let x = Atomic::new(v);
let old = v;
@@ -140,7 +184,7 @@ mod tests {
#[test]
fn atomic_cmpxchg_tests() {
- for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+ for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
let x = Atomic::new(v);
let old = v;
@@ -166,4 +210,20 @@ mod tests {
assert_eq!(v + 25, x.load(Relaxed));
});
}
+
+ #[test]
+ fn atomic_bool_tests() {
+ let x = Atomic::new(false);
+
+ assert_eq!(false, x.load(Relaxed));
+ x.store(true, Relaxed);
+ assert_eq!(true, x.load(Relaxed));
+
+ assert_eq!(true, x.xchg(false, Relaxed));
+ assert_eq!(false, x.load(Relaxed));
+
+ assert_eq!(Err(false), x.cmpxchg(true, true, Relaxed));
+ assert_eq!(false, x.load(Relaxed));
+ assert_eq!(Ok(false), x.cmpxchg(false, true, Full));
+ }
}
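With `bool` now an `AtomicType`, an `Atomic<bool>` can act as a simple publication flag using the same release/acquire pairing exercised in the tests above. A sketch (the surrounding struct and function names are illustrative; on configurations without `CONFIG_ARCH_SUPPORTS_ATOMIC_RMW` the 8-bit backing, and hence `Atomic<bool>`, is unavailable per the impls in internal.rs):

```
use kernel::sync::atomic::{ordering::{Acquire, Release}, Atomic};

struct Shared {
    ready: Atomic<bool>,
}

fn publish(s: &Shared) {
    // Pairs with the `Acquire` load below: writes made before this store are
    // visible to a reader that observes `true`.
    s.ready.store(true, Release);
}

fn is_ready(s: &Shared) -> bool {
    s.ready.load(Acquire)
}
```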
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 46a57d1fc309..10b6b5e9b024 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -156,6 +156,7 @@ impl<B: Backend> Lock<(), B> {
/// the whole lifetime of `'a`.
///
/// [`State`]: Backend::State
+ #[inline]
pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
// SAFETY:
// - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
@@ -169,6 +170,7 @@ impl<B: Backend> Lock<(), B> {
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
+ #[inline]
pub fn lock(&self) -> Guard<'_, T, B> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -182,6 +184,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Returns a guard that can be used to access the data protected by the lock if successful.
// `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
#[must_use = "if unused, the lock will be immediately unlocked"]
+ #[inline]
pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -275,6 +278,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
type Target = T;
+ #[inline]
fn deref(&self) -> &Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &*self.lock.data.get() }
@@ -285,6 +289,7 @@ impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
where
T: Unpin,
{
+ #[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &mut *self.lock.data.get() }
@@ -292,6 +297,7 @@ where
}
impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
+ #[inline]
fn drop(&mut self) {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -304,6 +310,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
+ #[inline]
pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
// SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
unsafe { B::assert_is_held(lock.state.get()) };
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
index eab48108a4ae..aecbdc34738f 100644
--- a/rust/kernel/sync/lock/global.rs
+++ b/rust/kernel/sync/lock/global.rs
@@ -77,6 +77,7 @@ impl<B: GlobalLockBackend> GlobalLock<B> {
}
/// Lock this global lock.
+ #[inline]
pub fn lock(&'static self) -> GlobalGuard<B> {
GlobalGuard {
inner: self.inner.lock(),
@@ -84,6 +85,7 @@ impl<B: GlobalLockBackend> GlobalLock<B> {
}
/// Try to lock this global lock.
+ #[inline]
pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
Some(GlobalGuard {
inner: self.inner.try_lock()?,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 581cee7ab842..cda0203efefb 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -102,6 +102,7 @@ unsafe impl super::Backend for MutexBackend {
type State = bindings::mutex;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -112,18 +113,21 @@ unsafe impl super::Backend for MutexBackend {
unsafe { bindings::__mutex_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::mutex_lock(ptr) };
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the mutex.
unsafe { bindings::mutex_unlock(ptr) };
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::mutex_trylock(ptr) };
@@ -135,6 +139,7 @@ unsafe impl super::Backend for MutexBackend {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::mutex_assert_is_held(ptr) }
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index d7be38ccbdc7..ef76fa07ca3a 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -101,6 +101,7 @@ unsafe impl super::Backend for SpinLockBackend {
type State = bindings::spinlock_t;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -111,18 +112,21 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe { bindings::__spin_lock_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::spin_lock(ptr) }
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::spin_trylock(ptr) };
@@ -134,6 +138,7 @@ unsafe impl super::Backend for SpinLockBackend {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::spin_assert_is_held(ptr) }
diff --git a/rust/kernel/sync/refcount.rs b/rust/kernel/sync/refcount.rs
index 19236a5bccde..6c7ae8b05a0b 100644
--- a/rust/kernel/sync/refcount.rs
+++ b/rust/kernel/sync/refcount.rs
@@ -23,7 +23,8 @@ impl Refcount {
/// Construct a new [`Refcount`] from an initial value.
///
/// The initial value should be non-saturated.
- #[inline]
+    // Always inline to optimize out the error path of `build_assert!()`.
+ #[inline(always)]
pub fn new(value: i32) -> Self {
build_assert!(value >= 0, "initial value saturated");
// SAFETY: There are no safety requirements for this FFI call.
diff --git a/rust/kernel/sync/set_once.rs b/rust/kernel/sync/set_once.rs
index bdba601807d8..139cef05e935 100644
--- a/rust/kernel/sync/set_once.rs
+++ b/rust/kernel/sync/set_once.rs
@@ -123,3 +123,11 @@ impl<T> Drop for SetOnce<T> {
}
}
}
+
+// SAFETY: `SetOnce` can be transferred across thread boundaries iff the data it contains can.
+unsafe impl<T: Send> Send for SetOnce<T> {}
+
+// SAFETY: `SetOnce` synchronises access to the inner value via atomic operations,
+// so shared references are safe when `T: Sync`. Since the inner `T` may be dropped
+// on any thread, we also require `T: Send`.
+unsafe impl<T: Send + Sync> Sync for SetOnce<T> {}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
index 49fad6de0674..cc907fb531bc 100644
--- a/rust/kernel/task.rs
+++ b/rust/kernel/task.rs
@@ -204,18 +204,6 @@ impl Task {
self.0.get()
}
- /// Returns the group leader of the given task.
- pub fn group_leader(&self) -> &Task {
- // SAFETY: The group leader of a task never changes after initialization, so reading this
- // field is not a data race.
- let ptr = unsafe { *ptr::addr_of!((*self.as_ptr()).group_leader) };
-
- // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
- // and given that a task has a reference to its group leader, we know it must be valid for
- // the lifetime of the returned task reference.
- unsafe { &*ptr.cast() }
- }
-
/// Returns the PID of the given task.
pub fn pid(&self) -> Pid {
// SAFETY: The pid of a task never changes after initialization, so reading this field is
@@ -345,6 +333,18 @@ impl CurrentTask {
// `release_task()` call.
Some(unsafe { PidNamespace::from_ptr(active_ns) })
}
+
+ /// Returns the group leader of the current task.
+ pub fn group_leader(&self) -> &Task {
+        // SAFETY: The group leader of a task never changes while the task is running, and `self`
+        // is the current task, which is guaranteed to be running.
+ let ptr = unsafe { (*self.as_ptr()).group_leader };
+
+ // SAFETY: `current->group_leader` stays valid for at least the duration in which `current`
+ // is running, and the signature of this function ensures that the returned `&Task` can
+ // only be used while `current` is still valid, thus still running.
+ unsafe { &*ptr.cast() }
+ }
}
// SAFETY: The type invariants guarantee that `Task` is always refcounted.
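With this move, `group_leader()` is only reachable through `CurrentTask`, typically via the `current!()` macro. A small sketch (the helper name is made up):

```
use kernel::task::Pid;

/// Returns the PID of the current task's group leader.
fn current_group_leader_pid() -> Pid {
    // `current!()` yields a `&CurrentTask`, which now exposes `group_leader()`.
    kernel::current!().group_leader().pid()
}
```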
diff --git a/rust/kernel/transmute.rs b/rust/kernel/transmute.rs
index be5dbf3829e2..5711580c9f9b 100644
--- a/rust/kernel/transmute.rs
+++ b/rust/kernel/transmute.rs
@@ -170,6 +170,10 @@ macro_rules! impl_frombytes {
}
impl_frombytes! {
+ // SAFETY: Inhabited ZSTs only have one possible bit pattern, and these two have no invariant.
+ (),
+ {<T>} core::marker::PhantomData<T>,
+
// SAFETY: All bit patterns are acceptable values of the types below.
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize,
@@ -230,6 +234,10 @@ macro_rules! impl_asbytes {
}
impl_asbytes! {
+ // SAFETY: Inhabited ZSTs only have one possible bit pattern, and these two have no invariant.
+ (),
+ {<T>} core::marker::PhantomData<T>,
+
// SAFETY: Instances of the following types have no uninitialized portions.
u8, u16, u32, u64, usize,
i8, i16, i32, i64, isize,
diff --git a/rust/kernel/usb.rs b/rust/kernel/usb.rs
index d10b65e9fb6a..0e1b9a88f4f1 100644
--- a/rust/kernel/usb.rs
+++ b/rust/kernel/usb.rs
@@ -6,14 +6,23 @@
//! C header: [`include/linux/usb.h`](srctree/include/linux/usb.h)
use crate::{
- bindings, device,
- device_id::{RawDeviceId, RawDeviceIdIndex},
+ bindings,
+ device,
+ device_id::{
+ RawDeviceId,
+ RawDeviceIdIndex, //
+ },
driver,
- error::{from_result, to_result, Result},
+ error::{
+ from_result,
+ to_result, //
+ },
prelude::*,
- str::CStr,
- types::{AlwaysRefCounted, Opaque},
- ThisModule,
+ types::{
+ AlwaysRefCounted,
+ Opaque, //
+ },
+ ThisModule, //
};
use core::{
marker::PhantomData,
@@ -27,13 +36,22 @@ use core::{
/// An adapter for the registration of USB drivers.
pub struct Adapter<T: Driver>(T);
-// SAFETY: A call to `unregister` for a given instance of `RegType` is guaranteed to be valid if
+// SAFETY:
+// - `bindings::usb_driver` is a C type declared as `repr(C)`.
+// - `T` is the type of the driver's device private data.
+// - `struct usb_driver` embeds a `struct device_driver`.
+// - `DEVICE_DRIVER_OFFSET` is the correct byte offset to the embedded `struct device_driver`.
+unsafe impl<T: Driver + 'static> driver::DriverLayout for Adapter<T> {
+ type DriverType = bindings::usb_driver;
+ type DriverData = T;
+ const DEVICE_DRIVER_OFFSET: usize = core::mem::offset_of!(Self::DriverType, driver);
+}
+
+// SAFETY: A call to `unregister` for a given instance of `DriverType` is guaranteed to be valid if
// a preceding call to `register` has been successful.
unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
- type RegType = bindings::usb_driver;
-
unsafe fn register(
- udrv: &Opaque<Self::RegType>,
+ udrv: &Opaque<Self::DriverType>,
name: &'static CStr,
module: &'static ThisModule,
) -> Result {
@@ -45,14 +63,14 @@ unsafe impl<T: Driver + 'static> driver::RegistrationOps for Adapter<T> {
(*udrv.get()).id_table = T::ID_TABLE.as_ptr();
}
- // SAFETY: `udrv` is guaranteed to be a valid `RegType`.
+ // SAFETY: `udrv` is guaranteed to be a valid `DriverType`.
to_result(unsafe {
bindings::usb_register_driver(udrv.get(), module.0, name.as_char_ptr())
})
}
- unsafe fn unregister(udrv: &Opaque<Self::RegType>) {
- // SAFETY: `udrv` is guaranteed to be a valid `RegType`.
+ unsafe fn unregister(udrv: &Opaque<Self::DriverType>) {
+ // SAFETY: `udrv` is guaranteed to be a valid `DriverType`.
unsafe { bindings::usb_deregister(udrv.get()) };
}
}
@@ -94,9 +112,9 @@ impl<T: Driver + 'static> Adapter<T> {
// SAFETY: `disconnect_callback` is only ever called after a successful call to
// `probe_callback`, hence it's guaranteed that `Device::set_drvdata()` has been called
// and stored a `Pin<KBox<T>>`.
- let data = unsafe { dev.drvdata_obtain::<T>() };
+ let data = unsafe { dev.drvdata_borrow::<T>() };
- T::disconnect(intf, data.as_ref());
+ T::disconnect(intf, data);
}
}