author    Miguel Ojeda <ojeda@kernel.org>    2026-04-08 09:46:01 +0200
committer Miguel Ojeda <ojeda@kernel.org>    2026-04-08 10:44:11 +0200
commit    b06b348e855383ed80e041299f3925cdd7dff3da (patch)
tree      711ca958418fe1b8253929f163fc4b279498111a /rust/kernel
parent    f4231eb25cd215882d4a5da9110d3772c2644b6c (diff)
parent    ddb1444d3335129ae87d9796ab1debf41c0ee51b (diff)
Merge tag 'rust-timekeeping-for-v7.1' of https://github.com/Rust-for-Linux/linux into rust-next
Pull timekeeping updates from Andreas Hindborg:
- Expand the example section in the 'HrTimer' documentation.
- Mark the 'ClockSource' trait as unsafe, so that implementations must
  guarantee that 'ktime_get()' returns valid values.
- Add 'Delta::from_nanos()' (a short usage sketch follows this list).
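To make the API changes concrete, here is a minimal caller-side sketch
against the tree after this pull. It only uses items visible in the
time.rs hunk further below ('Delta::from_nanos()' and the 'Monotonic'
clock source); the 'example()' wrapper itself is purely illustrative:

    use kernel::time::{ClockSource, Delta, Monotonic};

    fn example() {
        // New constructor: build a `Delta` directly from a nanosecond count.
        let timeout = Delta::from_nanos(1_500_000); // 1.5 ms

        // `ClockSource` is now an `unsafe trait`: the `unsafe impl` for
        // `Monotonic` carries the guarantee that `ktime_get()` stays in
        // `0..=KTIME_MAX`, so callers may rely on the value being
        // non-negative.
        let now = Monotonic::ktime_get();
        let _ = (timeout, now);
    }
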
This is a back merge since the pull request has a newer base -- we will
avoid that in the future.
And, given it is a back merge, it happens to resolve the "subtle" conflict
around '--remap-path-{prefix,scope}' that I discussed in linux-next [1],
plus a few other common conflicts. The result matches what we did for
next-20260407.
The actual diffstat (i.e. using a temporary merge of upstream first) is:
rust/kernel/time.rs | 32 ++++-
rust/kernel/time/hrtimer.rs | 336 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 362 insertions(+), 6 deletions(-)
Link: https://lore.kernel.org/linux-next/CANiq72kdxB=W3_CV1U44oOK3SssztPo2wLDZt6LP94TEO+Kj4g@mail.gmail.com/ [1]
* tag 'rust-timekeeping-for-v7.1' of https://github.com/Rust-for-Linux/linux:
hrtimer: add usage examples to documentation
rust: time: make ClockSource unsafe trait
rust/time: Add Delta::from_nanos()
Diffstat (limited to 'rust/kernel')

 rust/kernel/cpufreq.rs        |   1
 rust/kernel/dma.rs            | 114
 rust/kernel/ptr.rs            |  30
 rust/kernel/ptr/projection.rs | 305
 rust/kernel/str.rs            |   4
 rust/kernel/time.rs           |  32
 rust/kernel/time/hrtimer.rs   | 336
 7 files changed, 749 insertions(+), 73 deletions(-)
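Before the raw diff: the dma.rs and ptr.rs hunks below replace the
index-based 'dma_read!'/'dma_write!' forms with projection-based ones
built on the new 'kernel::ptr::project!' macro. A condensed before/after
sketch; 'MyStruct' and 'demo()' are illustrative stand-ins modeled on the
doc examples inside the diff:

    use kernel::prelude::*;

    struct MyStruct {
        field: u32,
    }
    // SAFETY: `MyStruct` is a single integer, so any byte pattern is a valid
    // value, as in the doc examples in the diff.
    unsafe impl kernel::transmute::FromBytes for MyStruct {}
    // SAFETY: Likewise, `MyStruct` has no padding bytes.
    unsafe impl kernel::transmute::AsBytes for MyStruct {}

    fn demo(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
        // Old form (removed below): kernel::dma_read!(alloc[1].field)
        // New form: the allocation and the projection are separate arguments,
        // and `[i]?` makes the bounds check explicitly fallible.
        let _field: u32 = kernel::dma_read!(alloc, [1]?.field);

        // Writes follow the same pattern, with the value as a third argument.
        kernel::dma_write!(alloc, [1]?.field, 0xf);

        // The projection language also works on bare raw pointers; a plain
        // `[index]` is bounds-checked at build time.
        let ptr: *const [u8; 32] = core::ptr::dangling();
        let _elem: *const u8 = kernel::ptr::project!(ptr, [1]);
        Ok(())
    }
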
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index 76faa1ac8501..f5adee48d40c 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -401,6 +401,7 @@ impl TableBuilder {
 /// ```
 /// use kernel::cpufreq::{DEFAULT_TRANSITION_LATENCY_NS, Policy};
 ///
+/// #[allow(clippy::double_parens, reason = "False positive before 1.92.0")]
 /// fn update_policy(policy: &mut Policy) {
 ///     policy
 ///         .set_dvfs_possible_from_any_cpu(true)
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index 909d56fd5118..a396f8435739 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -461,6 +461,19 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
         self.count * core::mem::size_of::<T>()
     }
 
+    /// Returns the raw pointer to the allocated region in the CPU's virtual address space.
+    #[inline]
+    pub fn as_ptr(&self) -> *const [T] {
+        core::ptr::slice_from_raw_parts(self.cpu_addr.as_ptr(), self.count)
+    }
+
+    /// Returns the raw pointer to the allocated region in the CPU's virtual address space as
+    /// a mutable pointer.
+    #[inline]
+    pub fn as_mut_ptr(&self) -> *mut [T] {
+        core::ptr::slice_from_raw_parts_mut(self.cpu_addr.as_ptr(), self.count)
+    }
+
     /// Returns the base address to the allocated region in the CPU's virtual address space.
     pub fn start_ptr(&self) -> *const T {
         self.cpu_addr.as_ptr()
@@ -581,23 +594,6 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
         Ok(())
     }
 
-    /// Returns a pointer to an element from the region with bounds checking. `offset` is in
-    /// units of `T`, not the number of bytes.
-    ///
-    /// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros.
-    #[doc(hidden)]
-    pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
-        if offset >= self.count {
-            return Err(EINVAL);
-        }
-        // SAFETY:
-        // - The pointer is valid due to type invariant on `CoherentAllocation`
-        //   and we've just checked that the range and index is within bounds.
-        // - `offset` can't overflow since it is smaller than `self.count` and we've checked
-        //   that `self.count` won't overflow early in the constructor.
-        Ok(unsafe { self.cpu_addr.as_ptr().add(offset) })
-    }
-
     /// Reads the value of `field` and ensures that its type is [`FromBytes`].
     ///
     /// # Safety
@@ -670,6 +666,9 @@ unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
 
 /// Reads a field of an item from an allocated region of structs.
 ///
+/// The syntax is of the form `kernel::dma_read!(dma, proj)` where `dma` is an expression evaluating
+/// to a [`CoherentAllocation`] and `proj` is a [projection specification](kernel::ptr::project!).
+///
 /// # Examples
 ///
 /// ```
@@ -684,36 +683,29 @@ unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
 /// unsafe impl kernel::transmute::AsBytes for MyStruct{};
 ///
 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
-/// let whole = kernel::dma_read!(alloc[2]);
-/// let field = kernel::dma_read!(alloc[1].field);
+/// let whole = kernel::dma_read!(alloc, [2]?);
+/// let field = kernel::dma_read!(alloc, [1]?.field);
 /// # Ok::<(), Error>(()) }
 /// ```
 #[macro_export]
 macro_rules! dma_read {
-    ($dma:expr, $idx: expr, $($field:tt)*) => {{
-        (|| -> ::core::result::Result<_, $crate::error::Error> {
-            let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
-            // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
-            // dereferenced. The compiler also further validates the expression on whether `field`
-            // is a member of `item` when expanded by the macro.
-            unsafe {
-                let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
-                ::core::result::Result::Ok(
-                    $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
-                )
-            }
-        })()
+    ($dma:expr, $($proj:tt)*) => {{
+        let dma = &$dma;
+        let ptr = $crate::ptr::project!(
+            $crate::dma::CoherentAllocation::as_ptr(dma), $($proj)*
+        );
+        // SAFETY: The pointer created by the projection is within the DMA region.
+        unsafe { $crate::dma::CoherentAllocation::field_read(dma, ptr) }
     }};
-    ($dma:ident [ $idx:expr ] $($field:tt)* ) => {
-        $crate::dma_read!($dma, $idx, $($field)*)
-    };
-    ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
-        $crate::dma_read!($($dma).*, $idx, $($field)*)
-    };
 }
 
 /// Writes to a field of an item from an allocated region of structs.
 ///
+/// The syntax is of the form `kernel::dma_write!(dma, proj, val)` where `dma` is an expression
+/// evaluating to a [`CoherentAllocation`], `proj` is a
+/// [projection specification](kernel::ptr::project!), and `val` is the value to be written to the
+/// projected location.
+///
 /// # Examples
 ///
 /// ```
@@ -728,37 +720,31 @@ macro_rules! dma_read {
 /// unsafe impl kernel::transmute::AsBytes for MyStruct{};
 ///
 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
-/// kernel::dma_write!(alloc[2].member = 0xf);
-/// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf });
+/// kernel::dma_write!(alloc, [2]?.member, 0xf);
+/// kernel::dma_write!(alloc, [1]?, MyStruct { member: 0xf });
 /// # Ok::<(), Error>(()) }
 /// ```
 #[macro_export]
 macro_rules! dma_write {
-    ($dma:ident [ $idx:expr ] $($field:tt)*) => {{
-        $crate::dma_write!($dma, $idx, $($field)*)
-    }};
-    ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
-        $crate::dma_write!($($dma).*, $idx, $($field)*)
+    (@parse [$dma:expr] [$($proj:tt)*] [, $val:expr]) => {{
+        let dma = &$dma;
+        let ptr = $crate::ptr::project!(
+            mut $crate::dma::CoherentAllocation::as_mut_ptr(dma), $($proj)*
+        );
+        let val = $val;
+        // SAFETY: The pointer created by the projection is within the DMA region.
+        unsafe { $crate::dma::CoherentAllocation::field_write(dma, ptr, val) }
     }};
-    ($dma:expr, $idx: expr, = $val:expr) => {
-        (|| -> ::core::result::Result<_, $crate::error::Error> {
-            let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
-            // SAFETY: `item_from_index` ensures that `item` is always a valid item.
-            unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
-            ::core::result::Result::Ok(())
-        })()
+    (@parse [$dma:expr] [$($proj:tt)*] [.$field:tt $($rest:tt)*]) => {
+        $crate::dma_write!(@parse [$dma] [$($proj)* .$field] [$($rest)*])
+    };
+    (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr]? $($rest:tt)*]) => {
+        $crate::dma_write!(@parse [$dma] [$($proj)* [$index]?] [$($rest)*])
+    };
+    (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr] $($rest:tt)*]) => {
+        $crate::dma_write!(@parse [$dma] [$($proj)* [$index]] [$($rest)*])
     };
-    ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
-        (|| -> ::core::result::Result<_, $crate::error::Error> {
-            let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
-            // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
-            // dereferenced. The compiler also further validates the expression on whether `field`
-            // is a member of `item` when expanded by the macro.
-            unsafe {
-                let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
-                $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
-            }
-            ::core::result::Result::Ok(())
-        })()
+    ($dma:expr, $($rest:tt)*) => {
+        $crate::dma_write!(@parse [$dma] [] [$($rest)*])
     };
 }
diff --git a/rust/kernel/ptr.rs b/rust/kernel/ptr.rs
index 91811f5e27de..3f3e529e9f58 100644
--- a/rust/kernel/ptr.rs
+++ b/rust/kernel/ptr.rs
@@ -2,7 +2,13 @@
 
 //! Types and functions to work with pointers and addresses.
 
-use core::mem::align_of;
+pub mod projection;
+pub use crate::project_pointer as project;
+
+use core::mem::{
+    align_of,
+    size_of, //
+};
 use core::num::NonZero;
 
 use crate::const_assert;
@@ -225,6 +231,28 @@ macro_rules! impl_alignable_uint {
 
 impl_alignable_uint!(u8, u16, u32, u64, usize);
 
+/// Trait to represent compile-time known size information.
+///
+/// This is a generalization of [`size_of`] that works for dynamically sized types.
+pub trait KnownSize {
+    /// Get the size of an object of this type in bytes, with the metadata of the given pointer.
+    fn size(p: *const Self) -> usize;
+}
+
+impl<T> KnownSize for T {
+    #[inline(always)]
+    fn size(_: *const Self) -> usize {
+        size_of::<T>()
+    }
+}
+
+impl<T> KnownSize for [T] {
+    #[inline(always)]
+    fn size(p: *const Self) -> usize {
+        p.len() * size_of::<T>()
+    }
+}
+
 /// Aligns `value` up to `align`.
 ///
 /// This is the const-compatible equivalent of [`Alignable::align_up`].
diff --git a/rust/kernel/ptr/projection.rs b/rust/kernel/ptr/projection.rs
new file mode 100644
index 000000000000..140ea8e21617
--- /dev/null
+++ b/rust/kernel/ptr/projection.rs
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Infrastructure for handling projections.
+
+use core::{
+    mem::MaybeUninit,
+    ops::Deref, //
+};
+
+use crate::prelude::*;
+
+/// Error raised when a projection is attempted on an array or slice out of bounds.
+pub struct OutOfBound;
+
+impl From<OutOfBound> for Error {
+    #[inline(always)]
+    fn from(_: OutOfBound) -> Self {
+        ERANGE
+    }
+}
+
+/// A helper trait to perform index projection.
+///
+/// This is similar to [`core::slice::SliceIndex`], but operates on raw pointers safely and
+/// fallibly.
+///
+/// # Safety
+///
+/// The implementation of `index` and `get` (if [`Some`] is returned) must ensure that, if provided
+/// input pointer `slice` and returned pointer `output`, then:
+/// - `output` has the same provenance as `slice`;
+/// - `output.byte_offset_from(slice)` is between 0 to
+///   `KnownSize::size(slice) - KnownSize::size(output)`.
+///
+/// This means that if the input pointer is valid, then pointer returned by `get` or `index` is
+/// also valid.
+#[diagnostic::on_unimplemented(message = "`{Self}` cannot be used to index `{T}`")]
+#[doc(hidden)]
+pub unsafe trait ProjectIndex<T: ?Sized>: Sized {
+    type Output: ?Sized;
+
+    /// Returns an index-projected pointer, if in bounds.
+    fn get(self, slice: *mut T) -> Option<*mut Self::Output>;
+
+    /// Returns an index-projected pointer; fail the build if it cannot be proved to be in bounds.
+    #[inline(always)]
+    fn index(self, slice: *mut T) -> *mut Self::Output {
+        Self::get(self, slice).unwrap_or_else(|| build_error!())
+    }
+}
+
+// Forward array impl to slice impl.
+//
+// SAFETY: Safety requirement guaranteed by the forwarded impl.
+unsafe impl<T, I, const N: usize> ProjectIndex<[T; N]> for I
+where
+    I: ProjectIndex<[T]>,
+{
+    type Output = <I as ProjectIndex<[T]>>::Output;
+
+    #[inline(always)]
+    fn get(self, slice: *mut [T; N]) -> Option<*mut Self::Output> {
+        <I as ProjectIndex<[T]>>::get(self, slice)
+    }
+
+    #[inline(always)]
+    fn index(self, slice: *mut [T; N]) -> *mut Self::Output {
+        <I as ProjectIndex<[T]>>::index(self, slice)
+    }
+}
+
+// SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to
+// not exceed the required bound.
+unsafe impl<T> ProjectIndex<[T]> for usize {
+    type Output = T;
+
+    #[inline(always)]
+    fn get(self, slice: *mut [T]) -> Option<*mut T> {
+        if self >= slice.len() {
+            None
+        } else {
+            Some(slice.cast::<T>().wrapping_add(self))
+        }
+    }
+}
+
+// SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to
+// not exceed the required bound.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::Range<usize> {
+    type Output = [T];
+
+    #[inline(always)]
+    fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+        let new_len = self.end.checked_sub(self.start)?;
+        if self.end > slice.len() {
+            return None;
+        }
+        Some(core::ptr::slice_from_raw_parts_mut(
+            slice.cast::<T>().wrapping_add(self.start),
+            new_len,
+        ))
+    }
+}
+
+// SAFETY: Safety requirement guaranteed by the forwarded impl.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeTo<usize> {
+    type Output = [T];
+
+    #[inline(always)]
+    fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+        (0..self.end).get(slice)
+    }
+}
+
+// SAFETY: Safety requirement guaranteed by the forwarded impl.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFrom<usize> {
+    type Output = [T];
+
+    #[inline(always)]
+    fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+        (self.start..slice.len()).get(slice)
+    }
+}
+
+// SAFETY: `get` returned the pointer as is, so it always has the same provenance and offset of 0.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFull {
+    type Output = [T];
+
+    #[inline(always)]
+    fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+        Some(slice)
+    }
+}
+
+/// A helper trait to perform field projection.
+///
+/// This trait has a `DEREF` generic parameter so it can be implemented twice for types that
+/// implement [`Deref`]. This will cause an ambiguity error and thus block [`Deref`] types being
+/// used as base of projection, as they can inject unsoundness. Users therefore must not specify
+/// `DEREF` and should always leave it to be inferred.
+///
+/// # Safety
+///
+/// `proj` may only invoke `f` with a valid allocation, as the documentation of [`Self::proj`]
+/// describes.
+#[doc(hidden)]
+pub unsafe trait ProjectField<const DEREF: bool> {
+    /// Project a pointer to a type to a pointer of a field.
+    ///
+    /// `f` may only be invoked with a valid allocation so it can safely obtain raw pointers to
+    /// fields using `&raw mut`.
+    ///
+    /// This is needed because `base` might not point to a valid allocation, while `&raw mut`
+    /// requires pointers to be in bounds of a valid allocation.
+    ///
+    /// # Safety
+    ///
+    /// `f` must return a pointer in bounds of the provided pointer.
+    unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F;
+}
+
+// NOTE: in theory, this API should work for `T: ?Sized` and `F: ?Sized`, too. However, we cannot
+// currently support that as we need to obtain a valid allocation that `&raw const` can operate on.
+//
+// SAFETY: `proj` invokes `f` with valid allocation.
+unsafe impl<T> ProjectField<false> for T {
+    #[inline(always)]
+    unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F {
+        // Create a valid allocation to start projection, as `base` is not necessarily so. The
+        // memory is never actually used so it will be optimized out, so it should work even for
+        // very large `T` (`memoffset` crate also relies on this). To be extra certain, we also
+        // annotate `f` closure with `#[inline(always)]` in the macro.
+        let mut place = MaybeUninit::uninit();
+        let place_base = place.as_mut_ptr();
+        let field = f(place_base);
+        // SAFETY: `field` is in bounds from `base` per safety requirement.
+        let offset = unsafe { field.byte_offset_from(place_base) };
+        // Use `wrapping_byte_offset` as `base` does not need to be of valid allocation.
+        base.wrapping_byte_offset(offset).cast()
+    }
+}
+
+// SAFETY: Vacuously satisfied.
+unsafe impl<T: Deref> ProjectField<true> for T {
+    #[inline(always)]
+    unsafe fn proj<F>(_: *mut Self, _: impl FnOnce(*mut Self) -> *mut F) -> *mut F {
+        build_error!("this function is a guard against `Deref` impl and is never invoked");
+    }
+}
+
+/// Create a projection from a raw pointer.
+///
+/// The projected pointer is within the memory region marked by the input pointer. There is no
+/// requirement that the input raw pointer needs to be valid, so this macro may be used for
+/// projecting pointers outside normal address space, e.g. I/O pointers. However, if the input
+/// pointer is valid, the projected pointer is also valid.
+///
+/// Supported projections include field projections and index projections.
+/// It is not allowed to project into types that implement custom [`Deref`] or
+/// [`Index`](core::ops::Index).
+///
+/// The macro has basic syntax of `kernel::ptr::project!(ptr, projection)`, where `ptr` is an
+/// expression that evaluates to a raw pointer which serves as the base of projection. `projection`
+/// can be a projection expression of form `.field` (normally identifier, or numeral in case of
+/// tuple structs) or of form `[index]`.
+///
+/// If a mutable pointer is needed, the macro input can be prefixed with the `mut` keyword, i.e.
+/// `kernel::ptr::project!(mut ptr, projection)`. By default, a const pointer is created.
+///
+/// `ptr::project!` macro can perform both fallible indexing and build-time checked indexing.
+/// `[index]` form performs build-time bounds checking; if compiler fails to prove `[index]` is in
+/// bounds, compilation will fail. `[index]?` can be used to perform runtime bounds checking;
+/// `OutOfBound` error is raised via `?` if the index is out of bounds.
+///
+/// # Examples
+///
+/// Field projections are performed with `.field_name`:
+///
+/// ```
+/// struct MyStruct { field: u32, }
+/// let ptr: *const MyStruct = core::ptr::dangling();
+/// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .field);
+///
+/// struct MyTupleStruct(u32, u32);
+///
+/// fn proj(ptr: *const MyTupleStruct) {
+///     let field_ptr: *const u32 = kernel::ptr::project!(ptr, .1);
+/// }
+/// ```
+///
+/// Index projections are performed with `[index]`:
+///
+/// ```
+/// fn proj(ptr: *const [u8; 32]) -> Result {
+///     let field_ptr: *const u8 = kernel::ptr::project!(ptr, [1]);
+///     // The following invocation, if uncommented, would fail the build.
+///     //
+///     // kernel::ptr::project!(ptr, [128]);
+///
+///     // This will raise an `OutOfBound` error (which is convertible to `ERANGE`).
+///     kernel::ptr::project!(ptr, [128]?);
+///     Ok(())
+/// }
+/// ```
+///
+/// If you need to match on the error instead of propagate, put the invocation inside a closure:
+///
+/// ```
+/// let ptr: *const [u8; 32] = core::ptr::dangling();
+/// let field_ptr: Result<*const u8> = (|| -> Result<_> {
+///     Ok(kernel::ptr::project!(ptr, [128]?))
+/// })();
+/// assert!(field_ptr.is_err());
+/// ```
+///
+/// For mutable pointers, put `mut` as the first token in macro invocation.
+///
+/// ```
+/// let ptr: *mut [(u8, u16); 32] = core::ptr::dangling_mut();
+/// let field_ptr: *mut u16 = kernel::ptr::project!(mut ptr, [1].1);
+/// ```
+#[macro_export]
+macro_rules! project_pointer {
+    (@gen $ptr:ident, ) => {};
+    // Field projection. `$field` needs to be `tt` to support tuple index like `.0`.
+    (@gen $ptr:ident, .$field:tt $($rest:tt)*) => {
+        // SAFETY: The provided closure always returns an in-bounds pointer.
+        let $ptr = unsafe {
+            $crate::ptr::projection::ProjectField::proj($ptr, #[inline(always)] |ptr| {
+                // Check unaligned field. Not all users (e.g. DMA) can handle unaligned
+                // projections.
+                if false {
+                    let _ = &(*ptr).$field;
+                }
+                // SAFETY: `$field` is in bounds, and no implicit `Deref` is possible (if the
+                // type implements `Deref`, Rust cannot infer the generic parameter `DEREF`).
+                &raw mut (*ptr).$field
+            })
+        };
+        $crate::ptr::project!(@gen $ptr, $($rest)*)
+    };
+    // Fallible index projection.
+    (@gen $ptr:ident, [$index:expr]? $($rest:tt)*) => {
+        let $ptr = $crate::ptr::projection::ProjectIndex::get($index, $ptr)
+            .ok_or($crate::ptr::projection::OutOfBound)?;
+        $crate::ptr::project!(@gen $ptr, $($rest)*)
+    };
+    // Build-time checked index projection.
+    (@gen $ptr:ident, [$index:expr] $($rest:tt)*) => {
+        let $ptr = $crate::ptr::projection::ProjectIndex::index($index, $ptr);
+        $crate::ptr::project!(@gen $ptr, $($rest)*)
+    };
+    (mut $ptr:expr, $($proj:tt)*) => {{
+        let ptr: *mut _ = $ptr;
+        $crate::ptr::project!(@gen ptr, $($proj)*);
+        ptr
+    }};
+    ($ptr:expr, $($proj:tt)*) => {{
+        let ptr = <*const _>::cast_mut($ptr);
+        // We currently always project using mutable pointer, as it is not decided whether `&raw
+        // const` allows the resulting pointer to be mutated (see documentation of `addr_of!`).
+        $crate::ptr::project!(@gen ptr, $($proj)*);
+        ptr.cast_const()
+    }};
+}
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index 9b89564ae6d8..8311d91549e1 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -680,13 +680,13 @@ impl fmt::Write for Formatter<'_> {
 ///
 /// * The first byte of `buffer` is always zero.
 /// * The length of `buffer` is at least 1.
-pub(crate) struct NullTerminatedFormatter<'a> {
+pub struct NullTerminatedFormatter<'a> {
     buffer: &'a mut [u8],
 }
 
 impl<'a> NullTerminatedFormatter<'a> {
     /// Create a new [`Self`] instance.
-    pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
+    pub fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
         *(buffer.first_mut()?) = 0;
 
         // INVARIANT:
diff --git a/rust/kernel/time.rs b/rust/kernel/time.rs
index 6ea98dfcd027..363e93cbb139 100644
--- a/rust/kernel/time.rs
+++ b/rust/kernel/time.rs
@@ -60,7 +60,13 @@ pub fn msecs_to_jiffies(msecs: Msecs) -> Jiffies {
 /// cases the user of the clock has to decide which clock is best suited for the
 /// purpose. In most scenarios clock [`Monotonic`] is the best choice as it
 /// provides a accurate monotonic notion of time (leap second smearing ignored).
-pub trait ClockSource {
+///
+/// # Safety
+///
+/// Implementers must ensure that `ktime_get()` returns a value in the inclusive range
+/// `0..=KTIME_MAX` (i.e., greater than or equal to 0 and less than or equal to
+/// `KTIME_MAX`, where `KTIME_MAX` equals `i64::MAX`).
+pub unsafe trait ClockSource {
     /// The kernel clock ID associated with this clock source.
     ///
     /// This constant corresponds to the C side `clockid_t` value.
@@ -68,7 +74,7 @@ pub trait ClockSource {
 
     /// Get the current time from the clock source.
     ///
-    /// The function must return a value in the range from 0 to `KTIME_MAX`.
+    /// The function must return a value in the range `0..=KTIME_MAX`.
     fn ktime_get() -> bindings::ktime_t;
 }
 
@@ -85,7 +91,9 @@ pub trait ClockSource {
 /// count time that the system is suspended.
 pub struct Monotonic;
 
-impl ClockSource for Monotonic {
+// SAFETY: The kernel's `ktime_get()` is guaranteed to return a value
+// in `0..=KTIME_MAX`.
+unsafe impl ClockSource for Monotonic {
     const ID: bindings::clockid_t = bindings::CLOCK_MONOTONIC as bindings::clockid_t;
 
     fn ktime_get() -> bindings::ktime_t {
@@ -110,7 +118,9 @@ impl ClockSource for Monotonic {
 /// the clock will experience discontinuity around leap second adjustment.
 pub struct RealTime;
 
-impl ClockSource for RealTime {
+// SAFETY: The kernel's `ktime_get_real()` is guaranteed to return a value
+// in `0..=KTIME_MAX`.
+unsafe impl ClockSource for RealTime {
     const ID: bindings::clockid_t = bindings::CLOCK_REALTIME as bindings::clockid_t;
 
     fn ktime_get() -> bindings::ktime_t {
@@ -128,7 +138,9 @@ impl ClockSource for RealTime {
 /// discontinuities if the time is changed using settimeofday(2) or similar.
 pub struct BootTime;
 
-impl ClockSource for BootTime {
+// SAFETY: The kernel's `ktime_get_boottime()` is guaranteed to return a value
+// in `0..=KTIME_MAX`.
+unsafe impl ClockSource for BootTime {
     const ID: bindings::clockid_t = bindings::CLOCK_BOOTTIME as bindings::clockid_t;
 
     fn ktime_get() -> bindings::ktime_t {
@@ -150,7 +162,9 @@ impl ClockSource for BootTime {
 /// The acronym TAI refers to International Atomic Time.
 pub struct Tai;
 
-impl ClockSource for Tai {
+// SAFETY: The kernel's `ktime_get_clocktai()` is guaranteed to return a value
+// in `0..=KTIME_MAX`.
+unsafe impl ClockSource for Tai {
     const ID: bindings::clockid_t = bindings::CLOCK_TAI as bindings::clockid_t;
 
     fn ktime_get() -> bindings::ktime_t {
@@ -363,6 +377,12 @@ impl Delta {
     /// A span of time equal to zero.
     pub const ZERO: Self = Self { nanos: 0 };
 
+    /// Create a new [`Delta`] from a number of nanoseconds.
+    #[inline]
+    pub const fn from_nanos(nanos: i64) -> Self {
+        Self { nanos }
+    }
+
     /// Create a new [`Delta`] from a number of microseconds.
     ///
     /// The `micros` can range from -9_223_372_036_854_775 to 9_223_372_036_854_775.
diff --git a/rust/kernel/time/hrtimer.rs b/rust/kernel/time/hrtimer.rs
index 856d2d929a00..2d7f1131a813 100644
--- a/rust/kernel/time/hrtimer.rs
+++ b/rust/kernel/time/hrtimer.rs
@@ -66,6 +66,342 @@
 //!
 //! A `restart` operation on a timer in the **stopped** state is equivalent to a
 //! `start` operation.
+//!
+//! When a type implements both `HrTimerPointer` and `Clone`, it is possible to
+//! issue the `start` operation while the timer is in the **started** state. In
+//! this case the `start` operation is equivalent to the `restart` operation.
+//!
+//! # Examples
+//!
+//! ## Using an intrusive timer living in a [`Box`]
+//!
+//! ```
+//! # use kernel::{
+//! #     alloc::flags,
+//! #     impl_has_hr_timer,
+//! #     prelude::*,
+//! #     sync::{
+//! #         atomic::{ordering, Atomic},
+//! #         completion::Completion,
+//! #         Arc,
+//! #     },
+//! #     time::{
+//! #         hrtimer::{
+//! #             RelativeMode, HrTimer, HrTimerCallback, HrTimerPointer,
+//! #             HrTimerRestart, HrTimerCallbackContext
+//! #         },
+//! #         Delta, Monotonic,
+//! #     },
+//! # };
+//!
+//! #[pin_data]
+//! struct Shared {
+//!     #[pin]
+//!     flag: Atomic<u64>,
+//!     #[pin]
+//!     cond: Completion,
+//! }
+//!
+//! impl Shared {
+//!     fn new() -> impl PinInit<Self> {
+//!         pin_init!(Self {
+//!             flag <- Atomic::new(0),
+//!             cond <- Completion::new(),
+//!         })
+//!     }
+//! }
+//!
+//! #[pin_data]
+//! struct BoxIntrusiveHrTimer {
+//!     #[pin]
+//!     timer: HrTimer<Self>,
+//!     shared: Arc<Shared>,
+//! }
+//!
+//! impl BoxIntrusiveHrTimer {
+//!     fn new() -> impl PinInit<Self, kernel::error::Error> {
+//!         try_pin_init!(Self {
+//!             timer <- HrTimer::new(),
+//!             shared: Arc::pin_init(Shared::new(), flags::GFP_KERNEL)?,
+//!         })
+//!     }
+//! }
+//!
+//! impl HrTimerCallback for BoxIntrusiveHrTimer {
+//!     type Pointer<'a> = Pin<KBox<Self>>;
+//!
+//!     fn run(this: Pin<&mut Self>, _ctx: HrTimerCallbackContext<'_, Self>) -> HrTimerRestart {
+//!         pr_info!("Timer called\n");
+//!
+//!         let flag = this.shared.flag.fetch_add(1, ordering::Full);
+//!         this.shared.cond.complete_all();
+//!
+//!         if flag == 4 {
+//!             HrTimerRestart::NoRestart
+//!         } else {
+//!             HrTimerRestart::Restart
+//!         }
+//!     }
+//! }
+//!
+//! impl_has_hr_timer! {
+//!     impl HasHrTimer<Self> for BoxIntrusiveHrTimer {
+//!         mode: RelativeMode<Monotonic>, field: self.timer
+//!     }
+//! }
+//!
+//! let has_timer = Box::pin_init(BoxIntrusiveHrTimer::new(), GFP_KERNEL)?;
+//! let shared = has_timer.shared.clone();
+//! let _handle = has_timer.start(Delta::from_micros(200));
+//!
+//! while shared.flag.load(ordering::Relaxed) != 5 {
+//!     shared.cond.wait_for_completion();
+//! }
+//!
+//! pr_info!("Counted to 5\n");
+//! # Ok::<(), kernel::error::Error>(())
+//! ```
+//!
+//! ## Using an intrusive timer in an [`Arc`]
+//!
+//! ```
+//! # use kernel::{
+//! #     alloc::flags,
+//! #     impl_has_hr_timer,
+//! #     prelude::*,
+//! #     sync::{
+//! #         atomic::{ordering, Atomic},
+//! #         completion::Completion,
+//! #         Arc, ArcBorrow,
+//! #     },
+//! #     time::{
+//! #         hrtimer::{
+//! #             RelativeMode, HrTimer, HrTimerCallback, HrTimerPointer, HrTimerRestart,
+//! #             HasHrTimer, HrTimerCallbackContext
+//! #         },
+//! #         Delta, Monotonic,
+//! #     },
+//! # };
+//!
+//! #[pin_data]
+//! struct ArcIntrusiveHrTimer {
+//!     #[pin]
+//!     timer: HrTimer<Self>,
+//!     #[pin]
+//!     flag: Atomic<u64>,
+//!     #[pin]
+//!     cond: Completion,
+//! }
+//!
+//! impl ArcIntrusiveHrTimer {
+//!     fn new() -> impl PinInit<Self> {
+//!         pin_init!(Self {
+//!             timer <- HrTimer::new(),
+//!             flag <- Atomic::new(0),
+//!             cond <- Completion::new(),
+//!         })
+//!     }
+//! }
+//!
+//! impl HrTimerCallback for ArcIntrusiveHrTimer {
+//!     type Pointer<'a> = Arc<Self>;
+//!
+//!     fn run(
+//!         this: ArcBorrow<'_, Self>,
+//!         _ctx: HrTimerCallbackContext<'_, Self>,
+//!     ) -> HrTimerRestart {
+//!         pr_info!("Timer called\n");
+//!
+//!         let flag = this.flag.fetch_add(1, ordering::Full);
+//!         this.cond.complete_all();
+//!
+//!         if flag == 4 {
+//!             HrTimerRestart::NoRestart
+//!         } else {
+//!             HrTimerRestart::Restart
+//!         }
+//!     }
+//! }
+//!
+//! impl_has_hr_timer! {
+//!     impl HasHrTimer<Self> for ArcIntrusiveHrTimer {
+//!         mode: RelativeMode<Monotonic>, field: self.timer
+//!     }
+//! }
+//!
+//! let has_timer = Arc::pin_init(ArcIntrusiveHrTimer::new(), GFP_KERNEL)?;
+//! let _handle = has_timer.clone().start(Delta::from_micros(200));
+//!
+//! while has_timer.flag.load(ordering::Relaxed) != 5 {
+//!     has_timer.cond.wait_for_completion();
+//! }
+//!
+//! pr_info!("Counted to 5\n");
+//! # Ok::<(), kernel::error::Error>(())
+//! ```
+//!
+//! ## Using a stack-based timer
+//!
+//! ```
+//! # use kernel::{
+//! #     impl_has_hr_timer,
+//! #     prelude::*,
+//! #     sync::{
+//! #         atomic::{ordering, Atomic},
+//! #         completion::Completion,
+//! #     },
+//! #     time::{
+//! #         hrtimer::{
+//! #             ScopedHrTimerPointer, HrTimer, HrTimerCallback, HrTimerPointer, HrTimerRestart,
+//! #             HasHrTimer, RelativeMode, HrTimerCallbackContext
+//! #         },
+//! #         Delta, Monotonic,
+//! #     },
+//! # };
+//! # use pin_init::stack_pin_init;
+//!
+//! #[pin_data]
+//! struct IntrusiveHrTimer {
+//!     #[pin]
+//!     timer: HrTimer<Self>,
+//!     #[pin]
+//!     flag: Atomic<u64>,
+//!     #[pin]
+//!     cond: Completion,
+//! }
+//!
+//! impl IntrusiveHrTimer {
+//!     fn new() -> impl PinInit<Self> {
+//!         pin_init!(Self {
+//!             timer <- HrTimer::new(),
+//!             flag <- Atomic::new(0),
+//!             cond <- Completion::new(),
+//!         })
+//!     }
+//! }
+//!
+//! impl HrTimerCallback for IntrusiveHrTimer {
+//!     type Pointer<'a> = Pin<&'a Self>;
+//!
+//!     fn run(this: Pin<&Self>, _ctx: HrTimerCallbackContext<'_, Self>) -> HrTimerRestart {
+//!         pr_info!("Timer called\n");
+//!
+//!         this.flag.store(1, ordering::Release);
+//!         this.cond.complete_all();
+//!
+//!         HrTimerRestart::NoRestart
+//!     }
+//! }
+//!
+//! impl_has_hr_timer! {
+//!     impl HasHrTimer<Self> for IntrusiveHrTimer {
+//!         mode: RelativeMode<Monotonic>, field: self.timer
+//!     }
+//! }
+//!
+//! stack_pin_init!( let has_timer = IntrusiveHrTimer::new() );
+//! has_timer.as_ref().start_scoped(Delta::from_micros(200), || {
+//!     while has_timer.flag.load(ordering::Relaxed) != 1 {
+//!         has_timer.cond.wait_for_completion();
+//!     }
+//! });
+//!
+//! pr_info!("Flag raised\n");
+//! # Ok::<(), kernel::error::Error>(())
+//! ```
+//!
+//! ## Using a mutable stack-based timer
+//!
+//! ```
+//! # use kernel::{
+//! #     alloc::flags,
+//! #     impl_has_hr_timer,
+//! #     prelude::*,
+//! #     sync::{
+//! #         atomic::{ordering, Atomic},
+//! #         completion::Completion,
+//! #         Arc,
+//! #     },
+//! #     time::{
+//! #         hrtimer::{
+//! #             ScopedHrTimerPointer, HrTimer, HrTimerCallback, HrTimerPointer, HrTimerRestart,
+//! #             HasHrTimer, RelativeMode, HrTimerCallbackContext
+//! #         },
+//! #         Delta, Monotonic,
+//! #     },
+//! # };
+//! # use pin_init::stack_try_pin_init;
+//!
+//! #[pin_data]
+//! struct Shared {
+//!     #[pin]
+//!     flag: Atomic<u64>,
+//!     #[pin]
+//!     cond: Completion,
+//! }
+//!
+//! impl Shared {
+//!     fn new() -> impl PinInit<Self> {
+//!         pin_init!(Self {
+//!             flag <- Atomic::new(0),
+//!             cond <- Completion::new(),
+//!         })
+//!     }
+//! }
+//!
+//! #[pin_data]
+//! struct IntrusiveHrTimer {
+//!     #[pin]
+//!     timer: HrTimer<Self>,
+//!     shared: Arc<Shared>,
+//! }
+//!
+//! impl IntrusiveHrTimer {
+//!     fn new() -> impl PinInit<Self, kernel::error::Error> {
+//!         try_pin_init!(Self {
+//!             timer <- HrTimer::new(),
+//!             shared: Arc::pin_init(Shared::new(), flags::GFP_KERNEL)?,
+//!         })
+//!     }
+//! }
+//!
+//! impl HrTimerCallback for IntrusiveHrTimer {
+//!     type Pointer<'a> = Pin<&'a mut Self>;
+//!
+//!     fn run(this: Pin<&mut Self>, _ctx: HrTimerCallbackContext<'_, Self>) -> HrTimerRestart {
+//!         pr_info!("Timer called\n");
+//!
+//!         let flag = this.shared.flag.fetch_add(1, ordering::Full);
+//!         this.shared.cond.complete_all();
+//!
+//!         if flag == 4 {
+//!             HrTimerRestart::NoRestart
+//!         } else {
+//!             HrTimerRestart::Restart
+//!         }
+//!     }
+//! }
+//!
+//! impl_has_hr_timer! {
+//!     impl HasHrTimer<Self> for IntrusiveHrTimer {
+//!         mode: RelativeMode<Monotonic>, field: self.timer
+//!     }
+//! }
+//!
+//! stack_try_pin_init!( let has_timer =? IntrusiveHrTimer::new() );
+//! let shared = has_timer.shared.clone();
+//!
+//! has_timer.as_mut().start_scoped(Delta::from_micros(200), || {
+//!     while shared.flag.load(ordering::Relaxed) != 5 {
+//!         shared.cond.wait_for_completion();
+//!     }
+//! });
+//!
+//! pr_info!("Counted to 5\n");
+//! # Ok::<(), kernel::error::Error>(())
+//! ```
+//!
+//! [`Arc`]: kernel::sync::Arc
 
 use super::{ClockSource, Delta, Instant};
 use crate::{prelude::*, types::Opaque};
