Diffstat (limited to 'rust/kernel/alloc')
-rw-r--r--  rust/kernel/alloc/allocator/iter.rs |   8
-rw-r--r--  rust/kernel/alloc/kbox.rs           |  29
-rw-r--r--  rust/kernel/alloc/kvec.rs           | 216
-rw-r--r--  rust/kernel/alloc/kvec/errors.rs    |   3
4 files changed, 221 insertions(+), 35 deletions(-)
diff --git a/rust/kernel/alloc/allocator/iter.rs b/rust/kernel/alloc/allocator/iter.rs
index 5759f86029b7..e0a70b7a744a 100644
--- a/rust/kernel/alloc/allocator/iter.rs
+++ b/rust/kernel/alloc/allocator/iter.rs
@@ -42,15 +42,9 @@ impl<'a> Iterator for VmallocPageIter<'a> {
return None;
}
- // TODO: Use `NonNull::add()` instead, once the minimum supported compiler version is
- // bumped to 1.80 or later.
- //
// SAFETY: `offset` is in the interval `[0, (self.page_count() - 1) * page::PAGE_SIZE]`,
// hence the resulting pointer is guaranteed to be within the same allocation.
- let ptr = unsafe { self.buf.as_ptr().add(offset) };
-
- // SAFETY: `ptr` is guaranteed to be non-null given that it is derived from `self.buf`.
- let ptr = unsafe { NonNull::new_unchecked(ptr) };
+ let ptr = unsafe { self.buf.add(offset) };
// SAFETY:
// - `ptr` is a valid pointer to a `Vmalloc` allocation.
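The hunk above folds two unsafe steps into one: `NonNull::add()`, stable since Rust 1.80, offsets the pointer while preserving non-nullness in the type, so the separate `NonNull::new_unchecked()` call disappears. A minimal standalone sketch of the before/after pattern, with `buf` and `offset` as hypothetical stand-ins for the iterator's fields:

use core::ptr::NonNull;

fn page_ptr(buf: NonNull<u8>, offset: usize) -> NonNull<u8> {
    // Before (two unsafe steps via a raw pointer):
    //   let raw = unsafe { buf.as_ptr().add(offset) };
    //   unsafe { NonNull::new_unchecked(raw) }
    //
    // After (Rust >= 1.80): one step; the result is non-null by construction.
    // SAFETY: the caller guarantees `offset` stays within the same allocation.
    unsafe { buf.add(offset) }
}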
diff --git a/rust/kernel/alloc/kbox.rs b/rust/kernel/alloc/kbox.rs
index 622b3529edfc..bd6da02c7ab8 100644
--- a/rust/kernel/alloc/kbox.rs
+++ b/rust/kernel/alloc/kbox.rs
@@ -77,33 +77,8 @@ use pin_init::{InPlaceWrite, Init, PinInit, ZeroableOption};
/// `self.0` is always properly aligned and either points to memory allocated with `A` or, for
/// zero-sized types, is a dangling, well aligned pointer.
#[repr(transparent)]
-#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, derive(core::marker::CoercePointee))]
-pub struct Box<#[cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, pointee)] T: ?Sized, A: Allocator>(
- NonNull<T>,
- PhantomData<A>,
-);
-
-// This is to allow coercion from `Box<T, A>` to `Box<U, A>` if `T` can be converted to the
-// dynamically-sized type (DST) `U`.
-#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
-impl<T, U, A> core::ops::CoerceUnsized<Box<U, A>> for Box<T, A>
-where
- T: ?Sized + core::marker::Unsize<U>,
- U: ?Sized,
- A: Allocator,
-{
-}
-
-// This is to allow `Box<U, A>` to be dispatched on when `Box<T, A>` can be coerced into `Box<U,
-// A>`.
-#[cfg(not(CONFIG_RUSTC_HAS_COERCE_POINTEE))]
-impl<T, U, A> core::ops::DispatchFromDyn<Box<U, A>> for Box<T, A>
-where
- T: ?Sized + core::marker::Unsize<U>,
- U: ?Sized,
- A: Allocator,
-{
-}
+#[derive(core::marker::CoercePointee)]
+pub struct Box<#[pointee] T: ?Sized, A: Allocator>(NonNull<T>, PhantomData<A>);
/// Type alias for [`Box`] with a [`Kmalloc`] allocator.
///
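With the minimum supported compiler now providing `derive(CoercePointee)`, the cfg-gated hand-written `CoerceUnsized` and `DispatchFromDyn` impls collapse into a single derive. A minimal sketch of the unsized coercion the derive generates, using hypothetical `MyBox`/`File`/`Read` names (plain Rust, assuming a toolchain where the derive is available):

use core::marker::CoercePointee;
use core::ptr::NonNull;

trait Read {}
struct File;
impl Read for File {}

#[repr(transparent)]
#[derive(CoercePointee)]
struct MyBox<#[pointee] T: ?Sized>(NonNull<T>);

// The derive emits the `CoerceUnsized` and `DispatchFromDyn` impls that the
// removed cfg-gated code spelled out by hand, enabling this coercion:
fn upcast(b: MyBox<File>) -> MyBox<dyn Read> {
    b
}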
diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs
index ac8d6f763ae8..6438385e4322 100644
--- a/rust/kernel/alloc/kvec.rs
+++ b/rust/kernel/alloc/kvec.rs
@@ -9,7 +9,10 @@ use super::{
};
use crate::{
fmt,
- page::AsPageIter, //
+ page::{
+ AsPageIter,
+ PAGE_SIZE, //
+ },
};
use core::{
borrow::{Borrow, BorrowMut},
@@ -734,6 +737,115 @@ where
self.truncate(num_kept);
}
}
+// TODO: This is a temporary KVVec-specific implementation. It should be replaced with a generic
+// `shrink_to()` for `impl<T, A: Allocator> Vec<T, A>` that uses `A::realloc()` once the
+// underlying allocators properly support shrinking via realloc.
+impl<T> Vec<T, KVmalloc> {
+ /// Shrinks the capacity of the vector with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length and the supplied value.
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+    /// For `kmalloc` allocations, this delegates to `realloc()`, which decides whether
+    /// shrinking is worthwhile. For `vmalloc` allocations, shrinking only occurs if it
+    /// would free at least one page of memory; in that case it performs a deep copy,
+    /// since `vrealloc` does not yet support in-place shrinking.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate enough capacity to span multiple pages.
+ /// let elements_per_page = kernel::page::PAGE_SIZE / core::mem::size_of::<u32>();
+ /// let mut v = KVVec::with_capacity(elements_per_page * 4, GFP_KERNEL)?;
+ /// v.push(1, GFP_KERNEL)?;
+ /// v.push(2, GFP_KERNEL)?;
+ ///
+ /// v.shrink_to(0, GFP_KERNEL)?;
+ /// # Ok::<(), Error>(())
+ /// ```
+ pub fn shrink_to(&mut self, min_capacity: usize, flags: Flags) -> Result<(), AllocError> {
+ let target_cap = core::cmp::max(self.len(), min_capacity);
+
+ if self.capacity() <= target_cap {
+ return Ok(());
+ }
+
+ if Self::is_zst() {
+ return Ok(());
+ }
+
+ // For kmalloc allocations, delegate to realloc() and let the allocator decide
+ // whether shrinking is worthwhile.
+ //
+ // SAFETY: `self.ptr` points to a valid `KVmalloc` allocation.
+ if !unsafe { bindings::is_vmalloc_addr(self.ptr.as_ptr().cast()) } {
+ let new_layout = ArrayLayout::<T>::new(target_cap).map_err(|_| AllocError)?;
+
+ // SAFETY:
+ // - `self.ptr` is valid and was previously allocated with `KVmalloc`.
+ // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+ let ptr = unsafe {
+ KVmalloc::realloc(
+ Some(self.ptr.cast()),
+ new_layout.into(),
+ self.layout.into(),
+ flags,
+ NumaNode::NO_NODE,
+ )?
+ };
+
+ self.ptr = ptr.cast();
+ self.layout = new_layout;
+ return Ok(());
+ }
+
+ // Only shrink if we would free at least one page.
+ let current_size = self.capacity() * core::mem::size_of::<T>();
+ let target_size = target_cap * core::mem::size_of::<T>();
+ let current_pages = current_size.div_ceil(PAGE_SIZE);
+ let target_pages = target_size.div_ceil(PAGE_SIZE);
+
+ if current_pages <= target_pages {
+ return Ok(());
+ }
+
+ if target_cap == 0 {
+ if !self.layout.is_empty() {
+ // SAFETY:
+ // - `self.ptr` was previously allocated with `KVmalloc`.
+ // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+ unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) };
+ }
+ self.ptr = NonNull::dangling();
+ self.layout = ArrayLayout::empty();
+ return Ok(());
+ }
+
+ // SAFETY: `target_cap <= self.capacity()` and original capacity was valid.
+ let new_layout = unsafe { ArrayLayout::<T>::new_unchecked(target_cap) };
+
+ let new_ptr = KVmalloc::alloc(new_layout.into(), flags, NumaNode::NO_NODE)?;
+
+ // SAFETY:
+ // - `self.as_ptr()` is valid for reads of `self.len()` elements of `T`.
+ // - `new_ptr` is valid for writes of at least `target_cap >= self.len()` elements.
+ // - The two allocations do not overlap since `new_ptr` is freshly allocated.
+ // - Both pointers are properly aligned for `T`.
+ unsafe {
+ ptr::copy_nonoverlapping(self.as_ptr(), new_ptr.as_ptr().cast::<T>(), self.len())
+ };
+
+ // SAFETY:
+ // - `self.ptr` was previously allocated with `KVmalloc`.
+ // - `self.layout` matches the `ArrayLayout` of the preceding allocation.
+ unsafe { KVmalloc::free(self.ptr.cast(), self.layout.into()) };
+
+ self.ptr = new_ptr.cast::<T>();
+ self.layout = new_layout;
+
+ Ok(())
+ }
+}
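The vmalloc path only bothers shrinking when whole pages are recovered. A standalone sketch of that threshold check, assuming a hypothetical 4 KiB page size for the arithmetic:

fn would_free_a_page(current_size: usize, target_size: usize) -> bool {
    const PAGE_SIZE: usize = 4096; // assumption for this example
    current_size.div_ceil(PAGE_SIZE) > target_size.div_ceil(PAGE_SIZE)
}

fn main() {
    // 3 pages down to just over 2 pages still occupies 3 pages: no-op.
    assert!(!would_free_a_page(3 * 4096, 2 * 4096 + 1));
    // 3 pages down to exactly 2 pages frees one page: the shrink proceeds.
    assert!(would_free_a_page(3 * 4096, 2 * 4096));
}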
impl<T: Clone, A: Allocator> Vec<T, A> {
/// Extend the vector by `n` clones of `value`.
@@ -1398,4 +1510,106 @@ mod tests {
func.push_within_capacity(false).unwrap();
}
}
+
+ #[test]
+ fn test_kvvec_shrink_to() {
+ use crate::page::PAGE_SIZE;
+
+ // Create a vector with capacity spanning multiple pages.
+ let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap();
+
+ // Add a few elements.
+ v.push(1, GFP_KERNEL).unwrap();
+ v.push(2, GFP_KERNEL).unwrap();
+ v.push(3, GFP_KERNEL).unwrap();
+
+ let initial_capacity = v.capacity();
+ assert!(initial_capacity >= PAGE_SIZE * 4);
+
+ // Shrink to a capacity that would free at least one page.
+ v.shrink_to(PAGE_SIZE, GFP_KERNEL).unwrap();
+
+ // Capacity should have been reduced.
+ assert!(v.capacity() < initial_capacity);
+ assert!(v.capacity() >= PAGE_SIZE);
+
+ // Elements should be preserved.
+ assert_eq!(v.len(), 3);
+ assert_eq!(v[0], 1);
+ assert_eq!(v[1], 2);
+ assert_eq!(v[2], 3);
+
+ // Shrink to zero (should shrink to len).
+ v.shrink_to(0, GFP_KERNEL).unwrap();
+
+ // Capacity should be at least the length.
+ assert!(v.capacity() >= v.len());
+
+ // Elements should still be preserved.
+ assert_eq!(v.len(), 3);
+ assert_eq!(v[0], 1);
+ assert_eq!(v[1], 2);
+ assert_eq!(v[2], 3);
+ }
+
+ #[test]
+ fn test_kvvec_shrink_to_empty() {
+ use crate::page::PAGE_SIZE;
+
+ // Create a vector with large capacity but no elements.
+ let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap();
+
+ assert!(v.is_empty());
+
+ // Shrink empty vector to zero.
+ v.shrink_to(0, GFP_KERNEL).unwrap();
+
+ // Should have freed the allocation.
+ assert_eq!(v.capacity(), 0);
+ assert!(v.is_empty());
+ }
+
+ #[test]
+ fn test_kvvec_shrink_to_no_op() {
+ use crate::page::PAGE_SIZE;
+
+ // Create a small vector.
+ let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE, GFP_KERNEL).unwrap();
+ v.push(1, GFP_KERNEL).unwrap();
+
+ let capacity_before = v.capacity();
+
+ // Try to shrink to a capacity larger than current - should be no-op.
+ v.shrink_to(capacity_before + 100, GFP_KERNEL).unwrap();
+
+ assert_eq!(v.capacity(), capacity_before);
+ assert_eq!(v.len(), 1);
+ assert_eq!(v[0], 1);
+ }
+
+ #[test]
+ fn test_kvvec_shrink_to_respects_min_capacity() {
+ use crate::page::PAGE_SIZE;
+
+ // Create a vector with large capacity.
+ let mut v = KVVec::<u8>::with_capacity(PAGE_SIZE * 4, GFP_KERNEL).unwrap();
+
+ // Add some elements.
+ for i in 0..10u8 {
+ v.push(i, GFP_KERNEL).unwrap();
+ }
+
+ // Shrink to a min_capacity larger than length.
+ let min_cap = PAGE_SIZE * 2;
+ v.shrink_to(min_cap, GFP_KERNEL).unwrap();
+
+ // Capacity should be at least min_capacity.
+ assert!(v.capacity() >= min_cap);
+
+ // All elements preserved.
+ assert_eq!(v.len(), 10);
+ for i in 0..10u8 {
+ assert_eq!(v[i as usize], i);
+ }
+ }
}
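Because `vrealloc` cannot shrink in place, the patch relocates the live elements by hand: allocate the smaller buffer, `copy_nonoverlapping` the initialized prefix, free the old buffer. A standalone sketch of that pattern in plain Rust, with `Vec::with_capacity` standing in for `KVmalloc::alloc` purely for illustration:

use std::ptr;

fn relocate<T>(old: &mut Vec<T>, new_cap: usize) {
    assert!(new_cap >= old.len());
    let mut new: Vec<T> = Vec::with_capacity(new_cap);
    unsafe {
        // Copy the initialized prefix; the two buffers cannot overlap
        // because `new` was freshly allocated.
        ptr::copy_nonoverlapping(old.as_ptr(), new.as_mut_ptr(), old.len());
        new.set_len(old.len());
        // Forget the moved-out elements so dropping the old buffer does not
        // double-drop them.
        old.set_len(0);
    }
    *old = new; // the old allocation is released here, mirroring `KVmalloc::free`
}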
diff --git a/rust/kernel/alloc/kvec/errors.rs b/rust/kernel/alloc/kvec/errors.rs
index e7de5049ee47..985c5f2c3962 100644
--- a/rust/kernel/alloc/kvec/errors.rs
+++ b/rust/kernel/alloc/kvec/errors.rs
@@ -15,6 +15,7 @@ impl<T> fmt::Debug for PushError<T> {
}
impl<T> From<PushError<T>> for Error {
+ #[inline]
fn from(_: PushError<T>) -> Error {
// Returning ENOMEM isn't appropriate because the system is not out of memory. The vector
// is just full and we are refusing to resize it.
@@ -32,6 +33,7 @@ impl fmt::Debug for RemoveError {
}
impl From<RemoveError> for Error {
+ #[inline]
fn from(_: RemoveError) -> Error {
EINVAL
}
@@ -55,6 +57,7 @@ impl<T> fmt::Debug for InsertError<T> {
}
impl<T> From<InsertError<T>> for Error {
+ #[inline]
fn from(_: InsertError<T>) -> Error {
EINVAL
}
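These `From` impls are what let the vector's error types flow through `?`. A minimal sketch, assuming a kernel build context where `KVec` and the kernel `Result` are in scope; `fill_one` is a hypothetical helper:

fn fill_one(fixed: &mut KVec<u32>) -> Result {
    // On a full vector, `push_within_capacity` returns `Err(PushError(42))`,
    // which `?` converts into a kernel `Error` via the impl above (which, as
    // its comment notes, deliberately avoids ENOMEM).
    fixed.push_within_capacity(42)?;
    Ok(())
}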