Diffstat (limited to 'rust/kernel/sync/atomic')
-rw-r--r--   rust/kernel/sync/atomic/internal.rs   | 136
-rw-r--r--   rust/kernel/sync/atomic/predefine.rs  | 175
2 files changed, 282 insertions, 29 deletions
diff --git a/rust/kernel/sync/atomic/internal.rs b/rust/kernel/sync/atomic/internal.rs
index 6fdd8e59f45b..ad810c2172ec 100644
--- a/rust/kernel/sync/atomic/internal.rs
+++ b/rust/kernel/sync/atomic/internal.rs
@@ -7,24 +7,31 @@
 use crate::bindings;
 use crate::macros::paste;
 use core::cell::UnsafeCell;
+use ffi::c_void;
 
 mod private {
     /// Sealed trait marker to disable customized impls on atomic implementation traits.
     pub trait Sealed {}
 }
 
-// `i32` and `i64` are only supported atomic implementations.
+// The C side supports atomic primitives only for `i32` and `i64` (`atomic_t` and `atomic64_t`),
+// while the Rust side also provides atomic support for `i8`, `i16` and `*const c_void` on top of
+// lower-level C primitives.
+impl private::Sealed for i8 {}
+impl private::Sealed for i16 {}
+impl private::Sealed for *const c_void {}
 impl private::Sealed for i32 {}
 impl private::Sealed for i64 {}
 
 /// A marker trait for types that implement atomic operations with C side primitives.
 ///
-/// This trait is sealed, and only types that have directly mapping to the C side atomics should
-/// impl this:
+/// This trait is sealed, and only types that map directly to the C side atomics
+/// or can be implemented with lower-level C primitives are allowed to implement this:
 ///
-/// - `i32` maps to `atomic_t`.
-/// - `i64` maps to `atomic64_t`.
-pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
+/// - `i8`, `i16` and `*const c_void` are implemented with lower-level C primitives.
+/// - `i32` map to `atomic_t`
+/// - `i64` map to `atomic64_t`
+pub trait AtomicImpl: Sized + Copy + private::Sealed {
     /// The type of the delta in arithmetic or logical operations.
     ///
     /// For example, in `atomic_add(ptr, v)`, it's the type of `v`. Usually it's the same type of
@@ -32,6 +39,31 @@ pub trait AtomicImpl: Sized + Send + Copy + private::Sealed {
     type Delta;
 }
 
+// The current helpers of load/store of atomic `i8`, `i16` and pointers use `{WRITE,READ}_ONCE()`
+// hence the atomicity is only guaranteed against read-modify-write operations if the architecture
+// supports native atomic RmW.
+//
+// In the future when a CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=n architecture plans to support Rust, the
+// load/store helpers that guarantee atomicity against RmW operations (usually via a lock) need to
+// be added.
+crate::static_assert!(
+    cfg!(CONFIG_ARCH_SUPPORTS_ATOMIC_RMW),
+    "The current implementation of atomic i8/i16/ptr relies on the architecure being \
+     ARCH_SUPPORTS_ATOMIC_RMW"
+);
+
+impl AtomicImpl for i8 {
+    type Delta = Self;
+}
+
+impl AtomicImpl for i16 {
+    type Delta = Self;
+}
+
+impl AtomicImpl for *const c_void {
+    type Delta = isize;
+}
+
 // `atomic_t` implements atomic operations on `i32`.
 impl AtomicImpl for i32 {
     type Delta = Self;
@@ -156,16 +188,17 @@ macro_rules! impl_atomic_method {
     }
 }
 
-// Delcares $ops trait with methods and implements the trait for `i32` and `i64`.
-macro_rules! declare_and_impl_atomic_methods {
-    ($(#[$attr:meta])* $pub:vis trait $ops:ident {
-        $(
-            $(#[doc=$doc:expr])*
-            fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
-                $unsafe:tt { bindings::#call($($arg:tt)*) }
-            }
-        )*
-    }) => {
+macro_rules! declare_atomic_ops_trait {
+    (
+        $(#[$attr:meta])* $pub:vis trait $ops:ident {
+            $(
+                $(#[doc=$doc:expr])*
+                fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+                    $unsafe:tt { bindings::#call($($arg:tt)*) }
+                }
+            )*
+        }
+    ) => {
         $(#[$attr])*
         $pub trait $ops: AtomicImpl {
             $(
@@ -175,21 +208,25 @@ macro_rules! declare_and_impl_atomic_methods {
                 );
             )*
         }
+    }
+}
 
-        impl $ops for i32 {
+macro_rules! impl_atomic_ops_for_one {
+    (
+        $ty:ty => $ctype:ident,
+        $(#[$attr:meta])* $pub:vis trait $ops:ident {
             $(
-                impl_atomic_method!(
-                    (atomic) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
-                        $unsafe { call($($arg)*) }
-                    }
-                );
+                $(#[doc=$doc:expr])*
+                fn $func:ident [$($variant:ident),*]($($arg_sig:tt)*) $( -> $ret:ty)? {
+                    $unsafe:tt { bindings::#call($($arg:tt)*) }
+                }
             )*
         }
-
-        impl $ops for i64 {
+    ) => {
+        impl $ops for $ty {
             $(
                 impl_atomic_method!(
-                    (atomic64) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
+                    ($ctype) $func[$($variant)*]($($arg_sig)*) $(-> $ret)? {
                         $unsafe { call($($arg)*) }
                     }
                 );
@@ -198,7 +235,47 @@ macro_rules! declare_and_impl_atomic_methods {
     }
 }
 
+// Declares $ops trait with methods and implements the trait.
+macro_rules! declare_and_impl_atomic_methods {
+    (
+        [ $($map:tt)* ]
+        $(#[$attr:meta])* $pub:vis trait $ops:ident { $($body:tt)* }
+    ) => {
+        declare_and_impl_atomic_methods!(
+            @with_ops_def
+            [ $($map)* ]
+            ( $(#[$attr])* $pub trait $ops { $($body)* } )
+        );
+    };
+
+    (@with_ops_def [ $($map:tt)* ] ( $($ops_def:tt)* )) => {
+        declare_atomic_ops_trait!( $($ops_def)* );
+
+        declare_and_impl_atomic_methods!(
+            @munch
+            [ $($map)* ]
+            ( $($ops_def)* )
+        );
+    };
+
+    (@munch [] ( $($ops_def:tt)* )) => {};
+
+    (@munch [ $ty:ty => $ctype:ident $(, $($rest:tt)*)? ] ( $($ops_def:tt)* )) => {
+        impl_atomic_ops_for_one!(
+            $ty => $ctype,
+            $($ops_def)*
+        );
+
+        declare_and_impl_atomic_methods!(
+            @munch
+            [ $($($rest)*)? ]
+            ( $($ops_def)* )
+        );
+    };
+}
+
 declare_and_impl_atomic_methods!(
+    [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ]
     /// Basic atomic operations
     pub trait AtomicBasicOps {
         /// Atomic read (load).
@@ -216,6 +293,7 @@ declare_and_impl_atomic_methods!(
 );
 
 declare_and_impl_atomic_methods!(
+    [ i8 => atomic_i8, i16 => atomic_i16, *const c_void => atomic_ptr, i32 => atomic, i64 => atomic64 ]
     /// Exchange and compare-and-exchange atomic operations
     pub trait AtomicExchangeOps {
         /// Atomic exchange.
@@ -243,6 +321,7 @@ declare_and_impl_atomic_methods!(
 );
 
 declare_and_impl_atomic_methods!(
+    [ i32 => atomic, i64 => atomic64 ]
     /// Atomic arithmetic operations
    pub trait AtomicArithmeticOps {
         /// Atomic add (wrapping).
@@ -258,7 +337,12 @@ declare_and_impl_atomic_methods!(
         /// Atomically updates `*a` to `(*a).wrapping_add(v)`, and returns the value of `*a`
         /// before the update.
         fn fetch_add[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self {
-            // SAFETY: `a.as_ptr()` is valid and properly aligned.
+            // SAFETY: `a.as_ptr()` guarantees the returned pointer is valid and properly aligned.
+            unsafe { bindings::#call(v, a.as_ptr().cast()) }
+        }
+
+        fn fetch_sub[acquire, release, relaxed](a: &AtomicRepr<Self>, v: Self::Delta) -> Self {
+            // SAFETY: `a.as_ptr()` guarantees the returned pointer is valid and properly aligned.
             unsafe { bindings::#call(v, a.as_ptr().cast()) }
         }
     }
diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs
index 45a17985cda4..1d53834fcb12 100644
--- a/rust/kernel/sync/atomic/predefine.rs
+++ b/rust/kernel/sync/atomic/predefine.rs
@@ -4,6 +4,50 @@
 
 use crate::static_assert;
 use core::mem::{align_of, size_of};
+use ffi::c_void;
+
+// Ensure size and alignment requirements are checked.
+static_assert!(size_of::<bool>() == size_of::<i8>());
+static_assert!(align_of::<bool>() == align_of::<i8>());
+
+// SAFETY: `bool` has the same size and alignment as `i8`, and Rust guarantees that `bool` has
+// only two valid bit patterns: 0 (false) and 1 (true). Those are valid `i8` values, so `bool` is
+// round-trip transmutable to `i8`.
+unsafe impl super::AtomicType for bool {
+    type Repr = i8;
+}
+
+// SAFETY: `i8` has the same size and alignment with itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i8 {
+    type Repr = i8;
+}
+
+// SAFETY: `i16` has the same size and alignment with itself, and is round-trip transmutable to
+// itself.
+unsafe impl super::AtomicType for i16 {
+    type Repr = i16;
+}
+
+// SAFETY:
+//
+// - `*mut T` has the same size and alignment with `*const c_void`, and is round-trip
+//   transmutable to `*const c_void`.
+// - `*mut T` is safe to transfer between execution contexts. See the safety requirement of
+//   [`AtomicType`].
+unsafe impl<T: Sized> super::AtomicType for *mut T {
+    type Repr = *const c_void;
+}
+
+// SAFETY:
+//
+// - `*const T` has the same size and alignment with `*const c_void`, and is round-trip
+//   transmutable to `*const c_void`.
+// - `*const T` is safe to transfer between execution contexts. See the safety requirement of
+//   [`AtomicType`].
+unsafe impl<T: Sized> super::AtomicType for *const T {
+    type Repr = *const c_void;
+}
 
 // SAFETY: `i32` has the same size and alignment with itself, and is round-trip transmutable to
 // itself.
@@ -35,12 +79,23 @@ unsafe impl super::AtomicAdd<i64> for i64 {
 // as `isize` and `usize`, and `isize` and `usize` are always bi-directional transmutable to
 // `isize_atomic_repr`, which also always implements `AtomicImpl`.
 #[allow(non_camel_case_types)]
+#[cfg(not(testlib))]
 #[cfg(not(CONFIG_64BIT))]
 type isize_atomic_repr = i32;
 #[allow(non_camel_case_types)]
+#[cfg(not(testlib))]
 #[cfg(CONFIG_64BIT)]
 type isize_atomic_repr = i64;
+#[allow(non_camel_case_types)]
+#[cfg(testlib)]
+#[cfg(target_pointer_width = "32")]
+type isize_atomic_repr = i32;
+#[allow(non_camel_case_types)]
+#[cfg(testlib)]
+#[cfg(target_pointer_width = "64")]
+type isize_atomic_repr = i64;
+
 // Ensure size and alignment requirements are checked.
 static_assert!(size_of::<isize>() == size_of::<isize_atomic_repr>());
 static_assert!(align_of::<isize>() == align_of::<isize_atomic_repr>());
@@ -118,16 +173,45 @@ mod tests {
 
     #[test]
     fn atomic_basic_tests() {
-        for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
             let x = Atomic::new(v);
 
             assert_eq!(v, x.load(Relaxed));
         });
+
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+            let x = Atomic::new(v);
+            let ptr = x.as_ptr();
+
+            // SAFETY: `ptr` is a valid pointer and no concurrent access.
+            assert_eq!(v, unsafe { atomic_load(ptr, Relaxed) });
+        });
+    }
+
+    #[test]
+    fn atomic_acquire_release_tests() {
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+            let x = Atomic::new(0);
+
+            x.store(v, Release);
+            assert_eq!(v, x.load(Acquire));
+        });
+
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+            let x = Atomic::new(0);
+            let ptr = x.as_ptr();
+
+            // SAFETY: `ptr` is a valid pointer and no concurrent access.
+            unsafe { atomic_store(ptr, v, Release) };
+
+            // SAFETY: `ptr` is a valid pointer and no concurrent access.
+            assert_eq!(v, unsafe { atomic_load(ptr, Acquire) });
+        });
     }
 
     #[test]
     fn atomic_xchg_tests() {
-        for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
             let x = Atomic::new(v);
 
             let old = v;
@@ -136,11 +220,23 @@ mod tests {
             assert_eq!(old, x.xchg(new, Full));
             assert_eq!(new, x.load(Relaxed));
         });
+
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+            let x = Atomic::new(v);
+            let ptr = x.as_ptr();
+
+            let old = v;
+            let new = v + 1;
+
+            // SAFETY: `ptr` is a valid pointer and no concurrent access.
+            assert_eq!(old, unsafe { xchg(ptr, new, Full) });
+            assert_eq!(new, x.load(Relaxed));
+        });
     }
 
     #[test]
     fn atomic_cmpxchg_tests() {
-        for_each_type!(42 in [i32, i64, u32, u64, isize, usize] |v| {
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
             let x = Atomic::new(v);
 
             let old = v;
@@ -151,6 +247,21 @@ mod tests {
             assert_eq!(Ok(old), x.cmpxchg(old, new, Relaxed));
             assert_eq!(new, x.load(Relaxed));
         });
+
+        for_each_type!(42 in [i8, i16, i32, i64, u32, u64, isize, usize] |v| {
+            let x = Atomic::new(v);
+            let ptr = x.as_ptr();
+
+            let old = v;
+            let new = v + 1;
+
+            // SAFETY: `ptr` is a valid pointer and no concurrent access.
+            assert_eq!(Err(old), unsafe { cmpxchg(ptr, new, new, Full) });
+            assert_eq!(old, x.load(Relaxed));
+            // SAFETY: `ptr` is a valid pointer and no concurrent access.
+            assert_eq!(Ok(old), unsafe { cmpxchg(ptr, old, new, Relaxed) });
+            assert_eq!(new, x.load(Relaxed));
+        });
     }
 
     #[test]
@@ -166,4 +277,62 @@ mod tests {
             assert_eq!(v + 25, x.load(Relaxed));
         });
     }
+
+    #[test]
+    fn atomic_bool_tests() {
+        let x = Atomic::new(false);
+
+        assert_eq!(false, x.load(Relaxed));
+        x.store(true, Relaxed);
+        assert_eq!(true, x.load(Relaxed));
+
+        assert_eq!(true, x.xchg(false, Relaxed));
+        assert_eq!(false, x.load(Relaxed));
+
+        assert_eq!(Err(false), x.cmpxchg(true, true, Relaxed));
+        assert_eq!(false, x.load(Relaxed));
+        assert_eq!(Ok(false), x.cmpxchg(false, true, Full));
+    }
+
+    #[test]
+    fn atomic_ptr_tests() {
+        let mut v = 42;
+        let mut u = 43;
+        let x = Atomic::new(&raw mut v);
+
+        assert_eq!(x.load(Acquire), &raw mut v);
+        assert_eq!(x.cmpxchg(&raw mut u, &raw mut u, Relaxed), Err(&raw mut v));
+        assert_eq!(x.cmpxchg(&raw mut v, &raw mut u, Relaxed), Ok(&raw mut v));
+        assert_eq!(x.load(Relaxed), &raw mut u);
+
+        let x = Atomic::new(&raw const v);
+
+        assert_eq!(x.load(Acquire), &raw const v);
+        assert_eq!(
+            x.cmpxchg(&raw const u, &raw const u, Relaxed),
+            Err(&raw const v)
+        );
+        assert_eq!(
+            x.cmpxchg(&raw const v, &raw const u, Relaxed),
+            Ok(&raw const v)
+        );
+        assert_eq!(x.load(Relaxed), &raw const u);
+    }
+
+    #[test]
+    fn atomic_flag_tests() {
+        let mut flag = AtomicFlag::new(false);
+
+        assert_eq!(false, flag.load(Relaxed));
+
+        *flag.get_mut() = true;
+        assert_eq!(true, flag.load(Relaxed));
+
+        assert_eq!(true, flag.xchg(false, Relaxed));
+        assert_eq!(false, flag.load(Relaxed));
+
+        *flag.get_mut() = true;
+        assert_eq!(Ok(true), flag.cmpxchg(true, false, Full));
+        assert_eq!(false, flag.load(Relaxed));
+    }
 }
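
For orientation, here is a minimal usage sketch of the generic Atomic<T> API that the diff above extends to bool, i8, i16 and raw pointers. It mirrors the new tests in predefine.rs; the import path and ordering names are assumptions about the surrounding kernel::sync::atomic module, not something shown in this diff.

// Hypothetical user of the new Atomic<T> instances added above; the `use` path is an
// assumption based on the test module, not part of the patch.
use kernel::sync::atomic::{Acquire, Atomic, Full, Relaxed, Release};

fn publish_flag(ready: &Atomic<bool>) {
    // `bool` is now an `AtomicType` represented by the `i8` implementation.
    ready.store(true, Release);
}

fn consume_flag(ready: &Atomic<bool>) -> bool {
    ready.load(Acquire)
}

fn swap_ptr(slot: &Atomic<*mut u8>, new: *mut u8) -> *mut u8 {
    // Raw pointers are represented by the `*const c_void` implementation; `xchg`
    // returns the previous value, and `cmpxchg(old, new, ordering)` returns
    // Ok(old) on success or Err(current) on failure, as the tests show.
    slot.xchg(new, Full)
}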

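The reworked macros in internal.rs use a small token-muncher to walk the [ $ty => $ctype, ... ] map: the trait is declared once via declare_atomic_ops_trait!, then one impl_atomic_ops_for_one! is emitted per map entry. Below is a standalone toy analogue of that pattern, purely illustrative and not the kernel macro itself; the trait, method and tag strings are invented for the example.

// Toy stand-in for the dispatcher pattern used by declare_and_impl_atomic_methods!:
// declare a trait once, then recursively munch a `[ Type => tag, ... ]` map and
// emit one impl per entry. All names below are invented for illustration.
macro_rules! declare_and_impl_describe {
    ([ $($map:tt)* ]) => {
        pub trait Describe {
            fn describe() -> String;
        }

        // Recurse into the map, one `Type => tag` pair at a time.
        declare_and_impl_describe!(@munch [ $($map)* ]);
    };

    // Empty map: nothing left to implement.
    (@munch []) => {};

    // Take the first `Type => tag` pair, emit an impl, then munch the rest.
    (@munch [ $ty:ty => $tag:literal $(, $($rest:tt)*)? ]) => {
        impl Describe for $ty {
            fn describe() -> String {
                format!("{}: {} byte(s)", $tag, core::mem::size_of::<$ty>())
            }
        }

        declare_and_impl_describe!(@munch [ $($($rest)*)? ]);
    };
}

declare_and_impl_describe!([ i8 => "atomic_i8", i16 => "atomic_i16", i32 => "atomic", i64 => "atomic64" ]);

fn main() {
    assert_eq!(<i8 as Describe>::describe(), "atomic_i8: 1 byte(s)");
    println!("{}", <i64 as Describe>::describe());
}

The kernel version adds one extra step: the trait body is first packaged into a token group (@with_ops_def) so the same definition can be reused both to declare the trait and to generate each per-type impl, which then forwards to the C helper named by $ctype.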