diff options
Diffstat (limited to 'rust/kernel/sync')
| -rw-r--r-- | rust/kernel/sync/arc.rs | 24 | ||||
| -rw-r--r-- | rust/kernel/sync/aref.rs | 25 | ||||
| -rw-r--r-- | rust/kernel/sync/atomic.rs | 311 | ||||
| -rw-r--r-- | rust/kernel/sync/atomic/internal.rs | 136 | ||||
| -rw-r--r-- | rust/kernel/sync/atomic/predefine.rs | 175 | ||||
| -rw-r--r-- | rust/kernel/sync/lock.rs | 7 | ||||
| -rw-r--r-- | rust/kernel/sync/lock/global.rs | 2 | ||||
| -rw-r--r-- | rust/kernel/sync/lock/mutex.rs | 5 | ||||
| -rw-r--r-- | rust/kernel/sync/lock/spinlock.rs | 5 | ||||
| -rw-r--r-- | rust/kernel/sync/refcount.rs | 3 | ||||
| -rw-r--r-- | rust/kernel/sync/set_once.rs | 133 |
11 files changed, 766 insertions, 60 deletions
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index 289f77abf415..18d6c0d62ce0 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -128,7 +128,7 @@ mod std_vendor; /// # Ok::<(), Error>(()) /// ``` #[repr(transparent)] -#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))] +#[derive(core::marker::CoercePointee)] pub struct Arc<T: ?Sized> { ptr: NonNull<ArcInner<T>>, // NB: this informs dropck that objects of type `ArcInner<T>` may be used in `<Arc<T> as @@ -182,15 +182,6 @@ impl<T: ?Sized> ArcInner<T> { } } -// This is to allow coercion from `Arc<T>` to `Arc<U>` if `T` can be converted to the -// dynamically-sized type (DST) `U`. -#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))] -impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Arc<U>> for Arc<T> {} - -// This is to allow `Arc<U>` to be dispatched on when `Arc<T>` can be coerced into `Arc<U>`. -#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))] -impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Arc<U>> for Arc<T> {} - // SAFETY: It is safe to send `Arc<T>` to another thread when the underlying `T` is `Sync` because // it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs // `T` to be `Send` because any thread that has an `Arc<T>` may ultimately access `T` using a @@ -240,6 +231,9 @@ impl<T> Arc<T> { // `Arc` object. Ok(unsafe { Self::from_inner(inner) }) } + + /// The offset that the value is stored at. 
+ pub const DATA_OFFSET: usize = core::mem::offset_of!(ArcInner<T>, data); } impl<T: ?Sized> Arc<T> { @@ -544,20 +538,12 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> { /// # Ok::<(), Error>(()) /// ``` #[repr(transparent)] -#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))] +#[derive(core::marker::CoercePointee)] pub struct ArcBorrow<'a, T: ?Sized + 'a> { inner: NonNull<ArcInner<T>>, _p: PhantomData<&'a ()>, } -// This is to allow `ArcBorrow<U>` to be dispatched on when `ArcBorrow<T>` can be coerced into -// `ArcBorrow<U>`. -#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))] -impl<T: ?Sized + core::marker::Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<ArcBorrow<'_, U>> - for ArcBorrow<'_, T> -{ -} - impl<T: ?Sized> Clone for ArcBorrow<'_, T> { fn clone(&self) -> Self { *self diff --git a/rust/kernel/sync/aref.rs b/rust/kernel/sync/aref.rs index 0d24a0432015..9989f56d0605 100644 --- a/rust/kernel/sync/aref.rs +++ b/rust/kernel/sync/aref.rs @@ -83,6 +83,9 @@ unsafe impl<T: AlwaysRefCounted + Sync + Send> Send for ARef<T> {} // example, when the reference count reaches zero and `T` is dropped. unsafe impl<T: AlwaysRefCounted + Sync + Send> Sync for ARef<T> {} +// Even if `T` is pinned, pointers to `T` can still move. +impl<T: AlwaysRefCounted> Unpin for ARef<T> {} + impl<T: AlwaysRefCounted> ARef<T> { /// Creates a new instance of [`ARef`]. 
/// @@ -167,3 +170,25 @@ impl<T: AlwaysRefCounted> Drop for ARef<T> { unsafe { T::dec_ref(self.ptr) }; } } + +impl<T, U> PartialEq<ARef<U>> for ARef<T> +where + T: AlwaysRefCounted + PartialEq<U>, + U: AlwaysRefCounted, +{ + #[inline] + fn eq(&self, other: &ARef<U>) -> bool { + T::eq(&**self, &**other) + } +} +impl<T: AlwaysRefCounted + Eq> Eq for ARef<T> {} + +impl<T, U> PartialEq<&'_ U> for ARef<T> +where + T: AlwaysRefCounted + PartialEq<U>, +{ + #[inline] + fn eq(&self, other: &&U) -> bool { + T::eq(&**self, other) + } +} diff --git a/rust/kernel/sync/atomic.rs b/rust/kernel/sync/atomic.rs index 3afc376be42d..9cd009d57e35 100644 --- a/rust/kernel/sync/atomic.rs +++ b/rust/kernel/sync/atomic.rs @@ -22,6 +22,7 @@ mod predefine; pub use internal::AtomicImpl; pub use ordering::{Acquire, Full, Relaxed, Release}; + pub(crate) use internal::{AtomicArithmeticOps, AtomicBasicOps, AtomicExchangeOps}; use crate::build_error; @@ -50,6 +51,10 @@ use ordering::OrderingType; #[repr(transparent)] pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>); +// SAFETY: `Atomic<T>` is safe to transfer between execution contexts because of the safety +// requirement of `AtomicType`. +unsafe impl<T: AtomicType> Send for Atomic<T> {} + // SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic. unsafe impl<T: AtomicType> Sync for Atomic<T> {} @@ -67,6 +72,11 @@ unsafe impl<T: AtomicType> Sync for Atomic<T> {} /// /// - [`Self`] must have the same size and alignment as [`Self::Repr`]. /// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`]. +/// - [`Self`] must be safe to transfer between execution contexts, if it's [`Send`], this is +/// automatically satisfied. The exception is pointer types that are even though marked as +/// `!Send` (e.g. raw pointers and [`NonNull<T>`]) but requiring `unsafe` to do anything +/// meaningful on them. 
This is because transferring pointer values between execution contexts is +/// safe as long as the actual `unsafe` dereferencing is justified. /// /// Note that this is more relaxed than requiring the bi-directional transmutability (i.e. /// [`transmute()`] is always sound between `U` and `T`) because of the support for atomic @@ -107,7 +117,8 @@ unsafe impl<T: AtomicType> Sync for Atomic<T> {} /// [`transmute()`]: core::mem::transmute /// [round-trip transmutable]: AtomicType#round-trip-transmutability /// [Examples]: AtomicType#examples -pub unsafe trait AtomicType: Sized + Send + Copy { +/// [`NonNull<T>`]: core::ptr::NonNull +pub unsafe trait AtomicType: Sized + Copy { /// The backing atomic implementation type. type Repr: AtomicImpl; } @@ -203,10 +214,7 @@ impl<T: AtomicType> Atomic<T> { /// // no data race. /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release); /// ``` - pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self - where - T: Sync, - { + pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self { // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity. // SAFETY: Per function safety requirement, `ptr` is a valid pointer and the object will // live long enough. It's safe to return a `&Atomic<T>` because function safety requirement @@ -234,6 +242,17 @@ impl<T: AtomicType> Atomic<T> { /// Returns a mutable reference to the underlying atomic `T`. /// /// This is safe because the mutable reference of the atomic `T` guarantees exclusive access. 
+ /// + /// # Examples + /// + /// ``` + /// use kernel::sync::atomic::{Atomic, Relaxed}; + /// + /// let mut atomic_val = Atomic::new(0u32); + /// let val_mut = atomic_val.get_mut(); + /// *val_mut = 101; + /// assert_eq!(101, atomic_val.load(Relaxed)); + /// ``` pub fn get_mut(&mut self) -> &mut T { // CAST: `T` and `T::Repr` has the same size and alignment per the safety requirement of // `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the casting @@ -526,16 +545,14 @@ where /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed}; /// /// let x = Atomic::new(42); - /// /// assert_eq!(42, x.load(Relaxed)); - /// - /// assert_eq!(54, { x.fetch_add(12, Acquire); x.load(Relaxed) }); + /// assert_eq!(42, x.fetch_add(12, Acquire)); + /// assert_eq!(54, x.load(Relaxed)); /// /// let x = Atomic::new(42); - /// /// assert_eq!(42, x.load(Relaxed)); - /// - /// assert_eq!(54, { x.fetch_add(12, Full); x.load(Relaxed) } ); + /// assert_eq!(42, x.fetch_add(12, Full)); + /// assert_eq!(54, x.load(Relaxed)); /// ``` #[inline(always)] pub fn fetch_add<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T @@ -558,4 +575,276 @@ where // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants. unsafe { from_repr(ret) } } + + /// Atomic fetch and subtract. + /// + /// Atomically updates `*self` to `(*self).wrapping_sub(v)`, and returns the value of `*self` + /// before the update. 
+ /// + /// # Examples + /// + /// ``` + /// use kernel::sync::atomic::{Atomic, Acquire, Full, Relaxed}; + /// + /// let x = Atomic::new(42); + /// assert_eq!(42, x.load(Relaxed)); + /// assert_eq!(42, x.fetch_sub(12, Acquire)); + /// assert_eq!(30, x.load(Relaxed)); + /// + /// let x = Atomic::new(42); + /// assert_eq!(42, x.load(Relaxed)); + /// assert_eq!(42, x.fetch_sub(12, Full)); + /// assert_eq!(30, x.load(Relaxed)); + /// ``` + #[inline(always)] + pub fn fetch_sub<Rhs, Ordering: ordering::Ordering>(&self, v: Rhs, _: Ordering) -> T + where + // Types that support addition also support subtraction. + T: AtomicAdd<Rhs>, + { + let v = T::rhs_into_delta(v); + + // INVARIANT: `self.0` is a valid `T` after `atomic_fetch_sub*()` due to safety requirement + // of `AtomicAdd`. + let ret = { + match Ordering::TYPE { + OrderingType::Full => T::Repr::atomic_fetch_sub(&self.0, v), + OrderingType::Acquire => T::Repr::atomic_fetch_sub_acquire(&self.0, v), + OrderingType::Release => T::Repr::atomic_fetch_sub_release(&self.0, v), + OrderingType::Relaxed => T::Repr::atomic_fetch_sub_relaxed(&self.0, v), + } + }; + + // SAFETY: `ret` comes from reading `self.0`, which is a valid `T` per type invariants. + unsafe { from_repr(ret) } + } +} + +#[cfg(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64))] +#[repr(C)] +#[derive(Clone, Copy)] +struct Flag { + bool_field: bool, +} + +/// # Invariants +/// +/// `padding` must be all zeroes. +#[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))] +#[repr(C, align(4))] +#[derive(Clone, Copy)] +struct Flag { + #[cfg(target_endian = "big")] + padding: [u8; 3], + bool_field: bool, + #[cfg(target_endian = "little")] + padding: [u8; 3], +} + +impl Flag { + #[inline(always)] + const fn new(b: bool) -> Self { + // INVARIANT: `padding` is all zeroes. 
+ Self { + bool_field: b, + #[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))] + padding: [0; 3], + } + } +} + +// SAFETY: `Flag` and `Repr` have the same size and alignment, and `Flag` is round-trip +// transmutable to the selected representation (`i8` or `i32`). +unsafe impl AtomicType for Flag { + #[cfg(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64))] + type Repr = i8; + #[cfg(not(any(CONFIG_X86_64, CONFIG_UML, CONFIG_ARM, CONFIG_ARM64)))] + type Repr = i32; +} + +/// An atomic flag type intended to be backed by performance-optimal integer type. +/// +/// The backing integer type is an implementation detail; it may vary by architecture and change +/// in the future. +/// +/// [`AtomicFlag`] is generally preferable to [`Atomic<bool>`] when you need read-modify-write +/// (RMW) operations (e.g. [`Atomic::xchg()`]/[`Atomic::cmpxchg()`]) or when [`Atomic<bool>`] does +/// not save memory due to padding. On some architectures that do not support byte-sized atomic +/// RMW operations, RMW operations on [`Atomic<bool>`] are slower. +/// +/// If you only use [`Atomic::load()`]/[`Atomic::store()`], [`Atomic<bool>`] is fine. +/// +/// # Examples +/// +/// ``` +/// use kernel::sync::atomic::{AtomicFlag, Relaxed}; +/// +/// let flag = AtomicFlag::new(false); +/// assert_eq!(false, flag.load(Relaxed)); +/// flag.store(true, Relaxed); +/// assert_eq!(true, flag.load(Relaxed)); +/// ``` +pub struct AtomicFlag(Atomic<Flag>); + +impl AtomicFlag { + /// Creates a new atomic flag. + #[inline(always)] + pub const fn new(b: bool) -> Self { + Self(Atomic::new(Flag::new(b))) + } + + /// Returns a mutable reference to the underlying flag as a [`bool`]. + /// + /// This is safe because the mutable reference of the atomic flag guarantees exclusive access. 
+ /// + /// # Examples + /// + /// ``` + /// use kernel::sync::atomic::{AtomicFlag, Relaxed}; + /// + /// let mut atomic_flag = AtomicFlag::new(false); + /// assert_eq!(false, atomic_flag.load(Relaxed)); + /// *atomic_flag.get_mut() = true; + /// assert_eq!(true, atomic_flag.load(Relaxed)); + /// ``` + #[inline(always)] + pub fn get_mut(&mut self) -> &mut bool { + &mut self.0.get_mut().bool_field + } + + /// Loads the value from the atomic flag. + #[inline(always)] + pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, o: Ordering) -> bool { + self.0.load(o).bool_field + } + + /// Stores a value to the atomic flag. + #[inline(always)] + pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: bool, o: Ordering) { + self.0.store(Flag::new(v), o); + } + + /// Stores a value to the atomic flag and returns the previous value. + #[inline(always)] + pub fn xchg<Ordering: ordering::Ordering>(&self, new: bool, o: Ordering) -> bool { + self.0.xchg(Flag::new(new), o).bool_field + } + + /// Store a value to the atomic flag if the current value is equal to `old`. + #[inline(always)] + pub fn cmpxchg<Ordering: ordering::Ordering>( + &self, + old: bool, + new: bool, + o: Ordering, + ) -> Result<bool, bool> { + match self.0.cmpxchg(Flag::new(old), Flag::new(new), o) { + Ok(_) => Ok(old), + Err(f) => Err(f.bool_field), + } + } +} + +/// Atomic load over raw pointers. +/// +/// This function provides a short-cut of `Atomic::from_ptr().load(..)`, and can be used to work +/// with C side on synchronizations: +/// +/// - `atomic_load(.., Relaxed)` maps to `READ_ONCE()` when used for inter-thread communication. +/// - `atomic_load(.., Acquire)` maps to `smp_load_acquire()`. +/// +/// # Safety +/// +/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`. +/// - If there is a concurrent store from kernel (C or Rust), it has to be atomic. 
+#[doc(alias("READ_ONCE", "smp_load_acquire"))] +#[inline(always)] +pub unsafe fn atomic_load<T: AtomicType, Ordering: ordering::AcquireOrRelaxed>( + ptr: *mut T, + o: Ordering, +) -> T +where + T::Repr: AtomicBasicOps, +{ + // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to + // `align_of::<T>()`, and all concurrent stores from kernel are atomic, hence no data race per + // LKMM. + unsafe { Atomic::from_ptr(ptr) }.load(o) +} + +/// Atomic store over raw pointers. +/// +/// This function provides a short-cut of `Atomic::from_ptr().store(..)`, and can be used to work +/// with C side on synchronizations: +/// +/// - `atomic_store(.., Relaxed)` maps to `WRITE_ONCE()` when used for inter-thread communication. +/// - `atomic_store(.., Release)` maps to `smp_store_release()`. +/// +/// # Safety +/// +/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`. +/// - If there is a concurrent access from kernel (C or Rust), it has to be atomic. +#[doc(alias("WRITE_ONCE", "smp_store_release"))] +#[inline(always)] +pub unsafe fn atomic_store<T: AtomicType, Ordering: ordering::ReleaseOrRelaxed>( + ptr: *mut T, + v: T, + o: Ordering, +) where + T::Repr: AtomicBasicOps, +{ + // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to + // `align_of::<T>()`, and all concurrent accesses from kernel are atomic, hence no data race + // per LKMM. + unsafe { Atomic::from_ptr(ptr) }.store(v, o); +} + +/// Atomic exchange over raw pointers. +/// +/// This function provides a short-cut of `Atomic::from_ptr().xchg(..)`, and can be used to work +/// with C side on synchronizations. +/// +/// # Safety +/// +/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`. +/// - If there is a concurrent access from kernel (C or Rust), it has to be atomic. 
+#[inline(always)] +pub unsafe fn xchg<T: AtomicType, Ordering: ordering::Ordering>( + ptr: *mut T, + new: T, + o: Ordering, +) -> T +where + T::Repr: AtomicExchangeOps, +{ + // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to + // `align_of::<T>()`, and all concurrent accesses from kernel are atomic, hence no data race + // per LKMM. + unsafe { Atomic::from_ptr(ptr) }.xchg(new, o) +} + +/// Atomic compare and exchange over raw pointers. +/// +/// This function provides a short-cut of `Atomic::from_ptr().cmpxchg(..)`, and can be used to work +/// with C side on synchronizations. +/// +/// # Safety +/// +/// - `ptr` is a valid pointer to `T` and aligned to `align_of::<T>()`. +/// - If there is a concurrent access from kernel (C or Rust), it has to be atomic. +#[doc(alias("try_cmpxchg"))] +#[inline(always)] +pub unsafe fn cmpxchg<T: AtomicType, Ordering: ordering::Ordering>( + ptr: *mut T, + old: T, + new: T, + o: Ordering, +) -> Result<T, T> +where + T::Repr: AtomicExchangeOps, +{ + // SAFETY: Per the function safety requirement, `ptr` is valid and aligned to + // `align_of::<T>()`, and all concurrent accesses from kernel are atomic, hence no data race + // per LKMM. + unsafe { Atomic::from_ptr(ptr) }.cmpxchg(old, new, o) } diff --git a/rust/kernel/sync/atomic/internal.rs b/rust/kernel/sync/atomic/internal.rs index 6fdd8e59f45b..ad810c2172ec 100644 --- a/rust/kernel/sync/atomic/internal.rs +++ b/rust/kernel/sync/atomic/internal.rs @@ -7,24 +7,31 @@ use crate::bindings; use crate::macros::paste; use core::cell::UnsafeCell; +use ffi::c_void; mod private { /// Sealed trait marker to disable customized impls on atomic implementation traits. pub trait Sealed {} } -// `i32` and `i64` are only supported atomic implementations. 
+// The C side supports atomic primitives only for `i32` and `i64` (`atomic_t` and `atomic64_t`), +// while the Rust side also provides atomic support for `i8`, `i16` and `*const c_void` on top of +// lower-level C primitives. +impl private::Sealed for i8 {} +impl private::Sealed for i16 {} +impl private::Sealed for *const c_void {} impl private::Sealed for i32 {} impl private::Sealed for i64 {} /// A marker trait for types that implement atomic operations with C side primitives. /// -/// This trait is sealed, and only types that have directly mapping to the C side atomics should -/// impl this: +/// This trait is sealed, and only types that map directly to the C side atomics +/// or can be implemented with lower-level C primitives are allowed to implement this: /// -/// - `i32` maps to `atomic_t`. -/// - `i64` maps to `atomic64_t`. -pub trait AtomicImpl: Sized + Send + Copy + private::Sealed { +/// - `i8`, `i16` and `*const c_void` are implemented with lower-level C primitives. +/// - `i32` map to `atomic_t` +/// - `i64` map to `atomic64_t` +pub trait AtomicImpl: Sized + Copy + private::Sealed { /// The type of the delta in arithmetic or logical operations. /// /// For example, in `atomic_add(ptr, v)`, it's the type of `v`. Usually it's the same type of @@ -32,6 +39,31 @@ pub trait AtomicImpl: Sized + Send + Copy + private::Sealed { type Delta; } +// The current helpers of load/store of atomic `i8`, `i16` and pointers use `{WRITE,READ}_ONCE()` +// hence the atomicity is only guaranteed against read-modify-write operations if the architecture +// supports native atomic RmW. +// +// In the future when a CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=n architecture plans to support Rust, the +// load/store helpers that guarantee atomicity against RmW operations (usually via a lock) need to +// be added. 
+crate::static_assert!( + cfg!(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW), + "The current implementation of atomic i8/i16/ptr relies on the architecure being \ + ARCH_SUPPORTS_ATOMIC_RMW" +); + +impl AtomicImpl for i8 { + type Delta = Self; +} + +impl AtomicImpl for i16 { + type Delta = Self; +} + +impl AtomicImpl for *const c_void { + type Delta = isize; +} + // `atomic_t` implements atomic operations on `i32`. impl AtomicImpl for i32 { type Delta = Self; @@ -156,16 +188,17 @@ macro_rules! impl_atomic_method { } } -// Delcares $ops trait with methods and implements the trait for `i32` and `i64`. -macro_rules! declare_and_impl_atomic_methods { - ($(#[$attr:meta])* $pub:vis trait $ops:ident { - $( - $(#[doc=$doc:expr])* - fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? { - $unsafe:tt { bindings::#call($($arg:tt)*) } - } - )* - }) => { +macro_rules! declare_atomic_ops_trait { + ( + $(#[$attr:meta])* $pub:vis trait $ops:ident { + $( + $(#[doc=$doc:expr])* + fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? { + $unsafe:tt { bindings::#call($($arg:tt)*) } + } + )* + } + ) => { $(#[$attr])* $pub trait $ops: AtomicImpl { $( @@ -175,21 +208,25 @@ macro_rules! declare_and_impl_atomic_methods { ); )* } + } +} - impl $ops for i32 { +macro_rules! impl_atomic_ops_for_one { + ( + $ty:ty => $ctype:ident, + $(#[$attr:meta])* $pub:vis trait $ops:ident { $( - impl_atomic_method!( - (atomic) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? { - $unsafe { call($($arg)*) } - } - ); + $(#[doc=$doc:expr])* + fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? { + $unsafe:tt { bindings::#call($($arg:tt)*) } + } )* } - - impl $ops for i64 { + ) => { + impl $ops for $ty { $( impl_atomic_method!( - (atomic64) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? { + ($ctype) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? { $unsafe { call($($arg)*) } } ); @@ -198,7 +235,47 @@ macro_rules! 
declare_and_impl_atomic_methods { } } +// Declares $ops trait with methods and implements the trait. +macro_rules! declare_and_impl_atomic_methods { + ( + [ $($map:tt)* ] + $(#[$attr:meta])* $pub:vis trait $ops:ident { $($body:tt)* } + ) => { + declare_and_impl_atomic_methods!( + @with_ops_def + [ $($map)* ] + ( $(#[$attr])* $pub trait $ops { $($body)* } ) + ); + }; + + (@with_ops_def [ $($map:tt)* ] ( $($ops_def:tt)* )) => { + declare_atomic_ops_trait!( $($ops_def)* ); + + declare_and_impl_atomic_methods!( + @munch + [ $($map)* ] + ( $($ops_def)* ) + ); + }; + + (@munch [] ( $($ops_def:tt)* )) => {}; + + (@munch [ $ty:ty => $ctype:ident $(, $($rest:tt)*)? ] ( $($ops_def:tt)* )) => { + impl_atomic_ops_for_one!( + $ty => $ctype, + $($ops_def)* + ); + + declare_and_impl_atomic_methods!( + @munch + [ $($($rest)*)? ] + ( $($ops_def)* ) + ); + }; +} + declare_and_impl_atomic_methods!( + [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ] /// Basic atomic operations pub trait AtomicBasicOps { /// Atomic read (load). @@ -216,6 +293,7 @@ declare_and_impl_atomic_methods!( ); declare_and_impl_atomic_methods!( + [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ] /// Exchange and compare-and-exchange atomic operations pub trait AtomicExchangeOps { /// Atomic exchange. @@ -243,6 +321,7 @@ declare_and_impl_atomic_methods!( ); declare_and_impl_atomic_methods!( + [ i32 => atomic, i64 => atomic64 ] /// Atomic arithmetic operations pub trait AtomicArithmeticOps { /// Atomic add (wrapping). @@ -258,7 +337,12 @@ declare_and_impl_atomic_methods!( /// Atomically updates `*a` to `(*a).wrapping_add(v)`, and returns the value of `*a` /// before the update. fn fetch_add[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self { - // SAFETY: `a.as_ptr()` is valid and properly aligned. + // SAFETY: `a.as_ptr()` guarantees the returned pointer is valid and properly aligned. 
+ unsafe { bindings::#call(v, a.as_ptr().cast()) } + } + + fn fetch_sub[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self { + // SAFETY: `a.as_ptr()` guarantees the returned pointer is valid and properly aligned. unsafe { bindings::#call(v, a.as_ptr().cast()) } } } diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs index 45a17985cda4..1d53834fcb12 100644 --- a/rust/kernel/sync/atomic/predefine.rs +++ b/rust/kernel/sync/atomic/predefine.rs @@ -4,6 +4,50 @@ use crate::static_assert; use core::mem::{align_of, size_of}; +use ffi::c_void; + +// Ensure size and alignment requirements are checked. +static_assert!(size_of::<bool>() == size_of::<i8>()); +static_assert!(align_of::<bool>() == align_of::<i8>()); + +// SAFETY: `bool` has the same size and alignment as `i8`, and Rust guarantees that `bool` has +// only two valid bit patterns: 0 (false) and 1 (true). Those are valid `i8` values, so `bool` is +// round-trip transmutable to `i8`. +unsafe impl super::AtomicType for bool { + type Repr = i8; +} + +// SAFETY: `i8` has the same size and alignment with itself, and is round-trip transmutable to +// itself. +unsafe impl super::AtomicType for i8 { + type Repr = i8; +} + +// SAFETY: `i16` has the same size and alignment with itself, and is round-trip transmutable to +// itself. +unsafe impl super::AtomicType for i16 { + type Repr = i16; +} + +// SAFETY: +// +// - `*mut T` has the same size and alignment with `*const c_void`, and is round-trip +// transmutable to `*const c_void`. +// - `*mut T` is safe to transfer between execution contexts. See the safety requirement of +// [`AtomicType`]. +unsafe impl<T: Sized> super::AtomicType for *mut T { + type Repr = *const c_void; +} + +// SAFETY: +// +// - `*const T` has the same size and alignment with `*const c_void`, and is round-trip +// transmutable to `*const c_void`. +// - `*const T` is safe to transfer between execution contexts. 
See the safety requirement of +// [`AtomicType`]. +unsafe impl<T: Sized> super::AtomicType for *const T { + type Repr = *const c_void; +} // SAFETY: `i32` has the same size and alignment with itself, and is round-trip transmutable to // itself. @@ -35,12 +79,23 @@ unsafe impl super::AtomicAdd<i64> for i64 { // as `isize` and `usize`, and `isize` and `usize` are always bi-directional transmutable to // `isize_atomic_repr`, which also always implements `AtomicImpl`. #[allow(non_camel_case_types)] +#[cfg(not(testlib))] #[cfg(not(CONFIG_64BIT))] type isize_atomic_repr = i32; #[allow(non_camel_case_types)] +#[cfg(not(testlib))] #[cfg(CONFIG_64BIT)] type isize_atomic_repr = i64; +#[allow(non_camel_case_types)] +#[cfg(testlib)] +#[cfg(target_pointer_width = "32")] +type isize_atomic_repr = i32; +#[allow(non_camel_case_types)] +#[cfg(testlib)] +#[cfg(target_pointer_width = "64")] +type isize_atomic_repr = i64; + // Ensure size and alignment requirements are checked. static_assert!(size_of::<isize>() == size_of::<isize_atomic_repr>()); static_assert!(align_of::<isize>() == align_of::<isize_atomic_repr>()); @@ -118,16 +173,45 @@ mod tests { #[test] fn atomic_basic_tests() { - for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| { + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { let x = Atomic::new(v); assert_eq!(v, x.load(Relaxed)); }); + + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { + let x = Atomic::new(v); + let ptr = x.as_ptr(); + + // SAFETY: `ptr` is a valid pointer and no concurrent access. 
+ assert_eq!(v, unsafe { atomic_load(ptr, Relaxed) }); + }); + } + + #[test] + fn atomic_acquire_release_tests() { + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { + let x = Atomic::new(0); + + x.store(v, Release); + assert_eq!(v, x.load(Acquire)); + }); + + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { + let x = Atomic::new(0); + let ptr = x.as_ptr(); + + // SAFETY: `ptr` is a valid pointer and no concurrent access. + unsafe { atomic_store(ptr, v, Release) }; + + // SAFETY: `ptr` is a valid pointer and no concurrent access. + assert_eq!(v, unsafe { atomic_load(ptr, Acquire) }); + }); } #[test] fn atomic_xchg_tests() { - for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| { + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { let x = Atomic::new(v); let old = v; @@ -136,11 +220,23 @@ mod tests { assert_eq!(old, x.xchg(new, Full)); assert_eq!(new, x.load(Relaxed)); }); + + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { + let x = Atomic::new(v); + let ptr = x.as_ptr(); + + let old = v; + let new = v + 1; + + // SAFETY: `ptr` is a valid pointer and no concurrent access. + assert_eq!(old, unsafe { xchg(ptr, new, Full) }); + assert_eq!(new, x.load(Relaxed)); + }); } #[test] fn atomic_cmpxchg_tests() { - for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| { + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { let x = Atomic::new(v); let old = v; @@ -151,6 +247,21 @@ mod tests { assert_eq!(Ok(old), x.cmpxchg(old, new, Relaxed)); assert_eq!(new, x.load(Relaxed)); }); + + for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| { + let x = Atomic::new(v); + let ptr = x.as_ptr(); + + let old = v; + let new = v + 1; + + // SAFETY: `ptr` is a valid pointer and no concurrent access. 
+ assert_eq!(Err(old), unsafe { cmpxchg(ptr, new, new, Full) }); + assert_eq!(old, x.load(Relaxed)); + // SAFETY: `ptr` is a valid pointer and no concurrent access. + assert_eq!(Ok(old), unsafe { cmpxchg(ptr, old, new, Relaxed) }); + assert_eq!(new, x.load(Relaxed)); + }); } #[test] @@ -166,4 +277,62 @@ mod tests { assert_eq!(v + 25, x.load(Relaxed)); }); } + + #[test] + fn atomic_bool_tests() { + let x = Atomic::new(false); + + assert_eq!(false, x.load(Relaxed)); + x.store(true, Relaxed); + assert_eq!(true, x.load(Relaxed)); + + assert_eq!(true, x.xchg(false, Relaxed)); + assert_eq!(false, x.load(Relaxed)); + + assert_eq!(Err(false), x.cmpxchg(true, true, Relaxed)); + assert_eq!(false, x.load(Relaxed)); + assert_eq!(Ok(false), x.cmpxchg(false, true, Full)); + } + + #[test] + fn atomic_ptr_tests() { + let mut v = 42; + let mut u = 43; + let x = Atomic::new(&raw mut v); + + assert_eq!(x.load(Acquire), &raw mut v); + assert_eq!(x.cmpxchg(&raw mut u, &raw mut u, Relaxed), Err(&raw mut v)); + assert_eq!(x.cmpxchg(&raw mut v, &raw mut u, Relaxed), Ok(&raw mut v)); + assert_eq!(x.load(Relaxed), &raw mut u); + + let x = Atomic::new(&raw const v); + + assert_eq!(x.load(Acquire), &raw const v); + assert_eq!( + x.cmpxchg(&raw const u, &raw const u, Relaxed), + Err(&raw const v) + ); + assert_eq!( + x.cmpxchg(&raw const v, &raw const u, Relaxed), + Ok(&raw const v) + ); + assert_eq!(x.load(Relaxed), &raw const u); + } + + #[test] + fn atomic_flag_tests() { + let mut flag = AtomicFlag::new(false); + + assert_eq!(false, flag.load(Relaxed)); + + *flag.get_mut() = true; + assert_eq!(true, flag.load(Relaxed)); + + assert_eq!(true, flag.xchg(false, Relaxed)); |
