at 24.11-pre 792 lines 32 kB view raw
1From d69265b7d756931b2e763a3262f22ba4100895a0 Mon Sep 17 00:00:00 2001 2From: Miguel Ojeda <ojeda@kernel.org> 3Date: Sat, 17 Feb 2024 01:27:17 +0100 4Subject: [PATCH] rust: upgrade to Rust 1.77.0 5 6This is the next upgrade to the Rust toolchain, from 1.76.0 to 1.77.0 7(i.e. the latest) [1]. 8 9See the upgrade policy [2] and the comments on the first upgrade in 10commit 3ed03f4da06e ("rust: upgrade to Rust 1.68.2"). 11 12The `offset_of` feature (single-field `offset_of!`) that we were using 13got stabilized in Rust 1.77.0 [3]. 14 15Therefore, now the only unstable feature allowed to be used outside the 16`kernel` crate is `new_uninit`, though other code to be upstreamed may 17increase the list. 18 19Please see [4] for details. 20 21Rust 1.77.0 merged the `unused_tuple_struct_fields` lint into `dead_code`, 22thus upgrading it from `allow` to `warn` [5]. In turn, this makes `rustc` 23complain about the `ThisModule`'s pointer field being never read. Thus 24locally `allow` it for the moment, since we will have users later on 25(e.g. Binder needs an `as_ptr` method [6]). 26 27Rust 1.77.0 introduces the `--check-cfg` feature [7], for which there 28is a Call for Testing going on [8]. We were requested to test it and 29we found it useful [9] -- we will likely enable it in the future. 30 31The vast majority of changes are due to our `alloc` fork being upgraded 32at once. 33 34There are two kinds of changes to be aware of: the ones coming from 35upstream, which we should follow as closely as possible, and the updates 36needed in our added fallible APIs to keep them matching the newer 37infallible APIs coming from upstream. 38 39Instead of taking a look at the diff of this patch, an alternative 40approach is reviewing a diff of the changes between upstream `alloc` and 41the kernel's. This allows one to easily inspect the kernel additions only, 42especially to check if the fallible methods we already have still match 43the infallible ones in the new version coming from upstream. 
44 45Another approach is reviewing the changes introduced in the additions in 46the kernel fork between the two versions. This is useful to spot 47potentially unintended changes to our additions. 48 49To apply these approaches, one may follow steps similar to the following 50to generate a pair of patches that show the differences between upstream 51Rust and the kernel (for the subset of `alloc` we use) before and after 52applying this patch: 53 54 # Get the difference with respect to the old version. 55 git -C rust checkout $(linux/scripts/min-tool-version.sh rustc) 56 git -C linux ls-tree -r --name-only HEAD -- rust/alloc | 57 cut -d/ -f3- | 58 grep -Fv README.md | 59 xargs -IPATH cp rust/library/alloc/src/PATH linux/rust/alloc/PATH 60 git -C linux diff --patch-with-stat --summary -R > old.patch 61 git -C linux restore rust/alloc 62 63 # Apply this patch. 64 git -C linux am rust-upgrade.patch 65 66 # Get the difference with respect to the new version. 67 git -C rust checkout $(linux/scripts/min-tool-version.sh rustc) 68 git -C linux ls-tree -r --name-only HEAD -- rust/alloc | 69 cut -d/ -f3- | 70 grep -Fv README.md | 71 xargs -IPATH cp rust/library/alloc/src/PATH linux/rust/alloc/PATH 72 git -C linux diff --patch-with-stat --summary -R > new.patch 73 git -C linux restore rust/alloc 74 75Now one may check the `new.patch` to take a look at the additions (first 76approach) or at the difference between those two patches (second 77approach). For the latter, a side-by-side tool is recommended. 
78 79Link: https://github.com/rust-lang/rust/blob/stable/RELEASES.md#version-1770-2024-03-21 [1] 80Link: https://rust-for-linux.com/rust-version-policy [2] 81Link: https://github.com/rust-lang/rust/pull/118799 [3] 82Link: https://github.com/Rust-for-Linux/linux/issues/2 [4] 83Link: https://github.com/rust-lang/rust/pull/118297 [5] 84Link: https://lore.kernel.org/rust-for-linux/20231101-rust-binder-v1-2-08ba9197f637@google.com/#Z31rust:kernel:lib.rs [6] 85Link: https://doc.rust-lang.org/nightly/unstable-book/compiler-flags/check-cfg.html [7] 86Link: https://github.com/rust-lang/rfcs/pull/3013#issuecomment-1936648479 [8] 87Link: https://github.com/rust-lang/rust/issues/82450#issuecomment-1947462977 [9] 88Signed-off-by: Miguel Ojeda <ojeda@kernel.org> 89Link: https://lore.kernel.org/r/20240217002717.57507-1-ojeda@kernel.org 90Link: https://github.com/Rust-for-Linux/linux/commit/d69265b7d756931b2e763a3262f22ba4100895a0 91Signed-off-by: Alyssa Ross <hi@alyssa.is> 92--- 93 Documentation/process/changes.rst | 2 +- 94 rust/alloc/alloc.rs | 6 +- 95 rust/alloc/boxed.rs | 4 +- 96 rust/alloc/lib.rs | 7 +- 97 rust/alloc/raw_vec.rs | 13 ++-- 98 rust/alloc/slice.rs | 4 +- 99 rust/alloc/vec/into_iter.rs | 108 +++++++++++++++++++----------- 100 rust/alloc/vec/mod.rs | 101 +++++++++++++++++++--------- 101 rust/kernel/lib.rs | 3 +- 102 scripts/Makefile.build | 2 +- 103 scripts/min-tool-version.sh | 2 +- 104 11 files changed, 161 insertions(+), 91 deletions(-) 105 106diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst 107index 7ef8de58f7f892..879ee628893ae1 100644 108--- a/Documentation/process/changes.rst 109+++ b/Documentation/process/changes.rst 110@@ -31,7 +31,7 @@ you probably needn't concern yourself with pcmciautils. 
111 ====================== =============== ======================================== 112 GNU C 5.1 gcc --version 113 Clang/LLVM (optional) 13.0.1 clang --version 114-Rust (optional) 1.76.0 rustc --version 115+Rust (optional) 1.77.0 rustc --version 116 bindgen (optional) 0.65.1 bindgen --version 117 GNU make 3.82 make --version 118 bash 4.2 bash --version 119diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs 120index abb791cc23715a..b1204f87227b23 100644 121--- a/rust/alloc/alloc.rs 122+++ b/rust/alloc/alloc.rs 123@@ -5,7 +5,7 @@ 124 #![stable(feature = "alloc_module", since = "1.28.0")] 125 126 #[cfg(not(test))] 127-use core::intrinsics; 128+use core::hint; 129 130 #[cfg(not(test))] 131 use core::ptr::{self, NonNull}; 132@@ -210,7 +210,7 @@ impl Global { 133 let new_size = new_layout.size(); 134 135 // `realloc` probably checks for `new_size >= old_layout.size()` or something similar. 136- intrinsics::assume(new_size >= old_layout.size()); 137+ hint::assert_unchecked(new_size >= old_layout.size()); 138 139 let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size); 140 let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?; 141@@ -301,7 +301,7 @@ unsafe impl Allocator for Global { 142 // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller 143 new_size if old_layout.align() == new_layout.align() => unsafe { 144 // `realloc` probably checks for `new_size <= old_layout.size()` or something similar. 145- intrinsics::assume(new_size <= old_layout.size()); 146+ hint::assert_unchecked(new_size <= old_layout.size()); 147 148 let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size); 149 let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?; 150diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs 151index c93a22a5c97f14..5fc39dfeb8e7bf 100644 152--- a/rust/alloc/boxed.rs 153+++ b/rust/alloc/boxed.rs 154@@ -26,6 +26,7 @@ 155 //! Creating a recursive data structure: 156 //! 157 //! ``` 158+//! ##[allow(dead_code)] 159 //! #[derive(Debug)] 160 //! 
enum List<T> { 161 //! Cons(T, Box<List<T>>), 162@@ -194,8 +195,7 @@ mod thin; 163 #[fundamental] 164 #[stable(feature = "rust1", since = "1.0.0")] 165 // The declaration of the `Box` struct must be kept in sync with the 166-// `alloc::alloc::box_free` function or ICEs will happen. See the comment 167-// on `box_free` for more details. 168+// compiler or ICEs will happen. 169 pub struct Box< 170 T: ?Sized, 171 #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, 172diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs 173index 36f79c07559338..39afd55ec0749e 100644 174--- a/rust/alloc/lib.rs 175+++ b/rust/alloc/lib.rs 176@@ -105,7 +105,6 @@ 177 #![feature(allocator_api)] 178 #![feature(array_chunks)] 179 #![feature(array_into_iter_constructors)] 180-#![feature(array_methods)] 181 #![feature(array_windows)] 182 #![feature(ascii_char)] 183 #![feature(assert_matches)] 184@@ -122,7 +121,6 @@ 185 #![feature(const_size_of_val)] 186 #![feature(const_waker)] 187 #![feature(core_intrinsics)] 188-#![feature(core_panic)] 189 #![feature(deprecated_suggestion)] 190 #![feature(dispatch_from_dyn)] 191 #![feature(error_generic_member_access)] 192@@ -132,6 +130,7 @@ 193 #![feature(fmt_internals)] 194 #![feature(fn_traits)] 195 #![feature(hasher_prefixfree_extras)] 196+#![feature(hint_assert_unchecked)] 197 #![feature(inline_const)] 198 #![feature(inplace_iteration)] 199 #![feature(iter_advance_by)] 200@@ -141,6 +140,8 @@ 201 #![feature(maybe_uninit_slice)] 202 #![feature(maybe_uninit_uninit_array)] 203 #![feature(maybe_uninit_uninit_array_transpose)] 204+#![feature(non_null_convenience)] 205+#![feature(panic_internals)] 206 #![feature(pattern)] 207 #![feature(ptr_internals)] 208 #![feature(ptr_metadata)] 209@@ -149,7 +150,6 @@ 210 #![feature(set_ptr_value)] 211 #![feature(sized_type_properties)] 212 #![feature(slice_from_ptr_range)] 213-#![feature(slice_group_by)] 214 #![feature(slice_ptr_get)] 215 #![feature(slice_ptr_len)] 216 #![feature(slice_range)] 217@@ 
-182,6 +182,7 @@ 218 #![feature(const_ptr_write)] 219 #![feature(const_trait_impl)] 220 #![feature(const_try)] 221+#![feature(decl_macro)] 222 #![feature(dropck_eyepatch)] 223 #![feature(exclusive_range_pattern)] 224 #![feature(fundamental)] 225diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs 226index 98b6abf30af6e4..1839d1c8ee7a04 100644 227--- a/rust/alloc/raw_vec.rs 228+++ b/rust/alloc/raw_vec.rs 229@@ -4,7 +4,7 @@ 230 231 use core::alloc::LayoutError; 232 use core::cmp; 233-use core::intrinsics; 234+use core::hint; 235 use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; 236 use core::ptr::{self, NonNull, Unique}; 237 use core::slice; 238@@ -317,7 +317,7 @@ impl<T, A: Allocator> RawVec<T, A> { 239 /// 240 /// # Panics 241 /// 242- /// Panics if the new capacity exceeds `isize::MAX` bytes. 243+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 244 /// 245 /// # Aborts 246 /// 247@@ -358,7 +358,7 @@ impl<T, A: Allocator> RawVec<T, A> { 248 } 249 unsafe { 250 // Inform the optimizer that the reservation has succeeded or wasn't needed 251- core::intrinsics::assume(!self.needs_to_grow(len, additional)); 252+ hint::assert_unchecked(!self.needs_to_grow(len, additional)); 253 } 254 Ok(()) 255 } 256@@ -381,7 +381,7 @@ impl<T, A: Allocator> RawVec<T, A> { 257 /// 258 /// # Panics 259 /// 260- /// Panics if the new capacity exceeds `isize::MAX` bytes. 261+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 
262 /// 263 /// # Aborts 264 /// 265@@ -402,7 +402,7 @@ impl<T, A: Allocator> RawVec<T, A> { 266 } 267 unsafe { 268 // Inform the optimizer that the reservation has succeeded or wasn't needed 269- core::intrinsics::assume(!self.needs_to_grow(len, additional)); 270+ hint::assert_unchecked(!self.needs_to_grow(len, additional)); 271 } 272 Ok(()) 273 } 274@@ -553,7 +553,7 @@ where 275 debug_assert_eq!(old_layout.align(), new_layout.align()); 276 unsafe { 277 // The allocator checks for alignment equality 278- intrinsics::assume(old_layout.align() == new_layout.align()); 279+ hint::assert_unchecked(old_layout.align() == new_layout.align()); 280 alloc.grow(ptr, old_layout, new_layout) 281 } 282 } else { 283@@ -591,7 +591,6 @@ fn handle_reserve(result: Result<(), TryReserveError>) { 284 // `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add 285 // an extra guard for this in case we're running on a platform which can use 286 // all 4GB in user-space, e.g., PAE or x32. 287- 288 #[inline] 289 fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { 290 if usize::BITS < 64 && alloc_size > isize::MAX as usize { 291diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs 292index 1181836da5f462..a36b072c95195f 100644 293--- a/rust/alloc/slice.rs 294+++ b/rust/alloc/slice.rs 295@@ -53,14 +53,14 @@ pub use core::slice::{from_mut, from_ref}; 296 pub use core::slice::{from_mut_ptr_range, from_ptr_range}; 297 #[stable(feature = "rust1", since = "1.0.0")] 298 pub use core::slice::{from_raw_parts, from_raw_parts_mut}; 299+#[stable(feature = "slice_group_by", since = "1.77.0")] 300+pub use core::slice::{ChunkBy, ChunkByMut}; 301 #[stable(feature = "rust1", since = "1.0.0")] 302 pub use core::slice::{Chunks, Windows}; 303 #[stable(feature = "chunks_exact", since = "1.31.0")] 304 pub use core::slice::{ChunksExact, ChunksExactMut}; 305 #[stable(feature = "rust1", since = "1.0.0")] 306 pub use core::slice::{ChunksMut, Split, SplitMut}; 307-#[unstable(feature 
= "slice_group_by", issue = "80552")] 308-pub use core::slice::{GroupBy, GroupByMut}; 309 #[stable(feature = "rust1", since = "1.0.0")] 310 pub use core::slice::{Iter, IterMut}; 311 #[stable(feature = "rchunks", since = "1.31.0")] 312diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs 313index 136bfe94af6c83..0f11744c44b34c 100644 314--- a/rust/alloc/vec/into_iter.rs 315+++ b/rust/alloc/vec/into_iter.rs 316@@ -20,6 +20,17 @@ use core::ops::Deref; 317 use core::ptr::{self, NonNull}; 318 use core::slice::{self}; 319 320+macro non_null { 321+ (mut $place:expr, $t:ident) => {{ 322+ #![allow(unused_unsafe)] // we're sometimes used within an unsafe block 323+ unsafe { &mut *(ptr::addr_of_mut!($place) as *mut NonNull<$t>) } 324+ }}, 325+ ($place:expr, $t:ident) => {{ 326+ #![allow(unused_unsafe)] // we're sometimes used within an unsafe block 327+ unsafe { *(ptr::addr_of!($place) as *const NonNull<$t>) } 328+ }}, 329+} 330+ 331 /// An iterator that moves out of a vector. 332 /// 333 /// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec) 334@@ -43,10 +54,12 @@ pub struct IntoIter< 335 // the drop impl reconstructs a RawVec from buf, cap and alloc 336 // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop 337 pub(super) alloc: ManuallyDrop<A>, 338- pub(super) ptr: *const T, 339- pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that 340- // ptr == end is a quick test for the Iterator being empty, that works 341- // for both ZST and non-ZST. 342+ pub(super) ptr: NonNull<T>, 343+ /// If T is a ZST, this is actually ptr+len. This encoding is picked so that 344+ /// ptr == end is a quick test for the Iterator being empty, that works 345+ /// for both ZST and non-ZST. 
346+ /// For non-ZSTs the pointer is treated as `NonNull<T>` 347+ pub(super) end: *const T, 348 } 349 350 #[stable(feature = "vec_intoiter_debug", since = "1.13.0")] 351@@ -70,7 +83,7 @@ impl<T, A: Allocator> IntoIter<T, A> { 352 /// ``` 353 #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")] 354 pub fn as_slice(&self) -> &[T] { 355- unsafe { slice::from_raw_parts(self.ptr, self.len()) } 356+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len()) } 357 } 358 359 /// Returns the remaining items of this iterator as a mutable slice. 360@@ -99,7 +112,7 @@ impl<T, A: Allocator> IntoIter<T, A> { 361 } 362 363 fn as_raw_mut_slice(&mut self) -> *mut [T] { 364- ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len()) 365+ ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), self.len()) 366 } 367 368 /// Drops remaining elements and relinquishes the backing allocation. 369@@ -126,7 +139,7 @@ impl<T, A: Allocator> IntoIter<T, A> { 370 // this creates less assembly 371 self.cap = 0; 372 self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) }; 373- self.ptr = self.buf.as_ptr(); 374+ self.ptr = self.buf; 375 self.end = self.buf.as_ptr(); 376 377 // Dropping the remaining elements can panic, so this needs to be 378@@ -138,9 +151,9 @@ impl<T, A: Allocator> IntoIter<T, A> { 379 380 /// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed. 381 pub(crate) fn forget_remaining_elements(&mut self) { 382- // For th ZST case, it is crucial that we mutate `end` here, not `ptr`. 383+ // For the ZST case, it is crucial that we mutate `end` here, not `ptr`. 384 // `ptr` must stay aligned, while `end` may be unaligned. 385- self.end = self.ptr; 386+ self.end = self.ptr.as_ptr(); 387 } 388 389 #[cfg(not(no_global_oom_handling))] 390@@ -162,7 +175,7 @@ impl<T, A: Allocator> IntoIter<T, A> { 391 // say that they're all at the beginning of the "allocation". 
392 0..this.len() 393 } else { 394- this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf) 395+ this.ptr.sub_ptr(this.buf)..this.end.sub_ptr(buf) 396 }; 397 let cap = this.cap; 398 let alloc = ManuallyDrop::take(&mut this.alloc); 399@@ -189,29 +202,35 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { 400 401 #[inline] 402 fn next(&mut self) -> Option<T> { 403- if self.ptr == self.end { 404- None 405- } else if T::IS_ZST { 406- // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by 407- // reducing the `end`. 408- self.end = self.end.wrapping_byte_sub(1); 409- 410- // Make up a value of this ZST. 411- Some(unsafe { mem::zeroed() }) 412+ if T::IS_ZST { 413+ if self.ptr.as_ptr() == self.end as *mut _ { 414+ None 415+ } else { 416+ // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by 417+ // reducing the `end`. 418+ self.end = self.end.wrapping_byte_sub(1); 419+ 420+ // Make up a value of this ZST. 421+ Some(unsafe { mem::zeroed() }) 422+ } 423 } else { 424- let old = self.ptr; 425- self.ptr = unsafe { self.ptr.add(1) }; 426+ if self.ptr == non_null!(self.end, T) { 427+ None 428+ } else { 429+ let old = self.ptr; 430+ self.ptr = unsafe { old.add(1) }; 431 432- Some(unsafe { ptr::read(old) }) 433+ Some(unsafe { ptr::read(old.as_ptr()) }) 434+ } 435 } 436 } 437 438 #[inline] 439 fn size_hint(&self) -> (usize, Option<usize>) { 440 let exact = if T::IS_ZST { 441- self.end.addr().wrapping_sub(self.ptr.addr()) 442+ self.end.addr().wrapping_sub(self.ptr.as_ptr().addr()) 443 } else { 444- unsafe { self.end.sub_ptr(self.ptr) } 445+ unsafe { non_null!(self.end, T).sub_ptr(self.ptr) } 446 }; 447 (exact, Some(exact)) 448 } 449@@ -219,7 +238,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { 450 #[inline] 451 fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> { 452 let step_size = self.len().min(n); 453- let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size); 454+ let to_drop = 
ptr::slice_from_raw_parts_mut(self.ptr.as_ptr(), step_size); 455 if T::IS_ZST { 456 // See `next` for why we sub `end` here. 457 self.end = self.end.wrapping_byte_sub(step_size); 458@@ -261,7 +280,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { 459 // Safety: `len` indicates that this many elements are available and we just checked that 460 // it fits into the array. 461 unsafe { 462- ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, len); 463+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), raw_ary.as_mut_ptr() as *mut T, len); 464 self.forget_remaining_elements(); 465 return Err(array::IntoIter::new_unchecked(raw_ary, 0..len)); 466 } 467@@ -270,7 +289,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { 468 // Safety: `len` is larger than the array size. Copy a fixed amount here to fully initialize 469 // the array. 470 return unsafe { 471- ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N); 472+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), raw_ary.as_mut_ptr() as *mut T, N); 473 self.ptr = self.ptr.add(N); 474 Ok(raw_ary.transpose().assume_init()) 475 }; 476@@ -288,7 +307,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { 477 // Also note the implementation of `Self: TrustedRandomAccess` requires 478 // that `T: Copy` so reading elements from the buffer doesn't invalidate 479 // them for `Drop`. 480- unsafe { if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } } 481+ unsafe { if T::IS_ZST { mem::zeroed() } else { self.ptr.add(i).read() } } 482 } 483 } 484 485@@ -296,18 +315,25 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { 486 impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { 487 #[inline] 488 fn next_back(&mut self) -> Option<T> { 489- if self.end == self.ptr { 490- None 491- } else if T::IS_ZST { 492- // See above for why 'ptr.offset' isn't used 493- self.end = self.end.wrapping_byte_sub(1); 494- 495- // Make up a value of this ZST. 
496- Some(unsafe { mem::zeroed() }) 497+ if T::IS_ZST { 498+ if self.end as *mut _ == self.ptr.as_ptr() { 499+ None 500+ } else { 501+ // See above for why 'ptr.offset' isn't used 502+ self.end = self.end.wrapping_byte_sub(1); 503+ 504+ // Make up a value of this ZST. 505+ Some(unsafe { mem::zeroed() }) 506+ } 507 } else { 508- self.end = unsafe { self.end.sub(1) }; 509+ if non_null!(self.end, T) == self.ptr { 510+ None 511+ } else { 512+ let new_end = unsafe { non_null!(self.end, T).sub(1) }; 513+ *non_null!(mut self.end, T) = new_end; 514 515- Some(unsafe { ptr::read(self.end) }) 516+ Some(unsafe { ptr::read(new_end.as_ptr()) }) 517+ } 518 } 519 } 520 521@@ -333,7 +359,11 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { 522 #[stable(feature = "rust1", since = "1.0.0")] 523 impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { 524 fn is_empty(&self) -> bool { 525- self.ptr == self.end 526+ if T::IS_ZST { 527+ self.ptr.as_ptr() == self.end as *mut _ 528+ } else { 529+ self.ptr == non_null!(self.end, T) 530+ } 531 } 532 } 533 534diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs 535index 220fb9d6f45b3f..0be27fff4554a1 100644 536--- a/rust/alloc/vec/mod.rs 537+++ b/rust/alloc/vec/mod.rs 538@@ -360,7 +360,7 @@ mod spec_extend; 539 /// 540 /// `vec![x; n]`, `vec![a, b, c, d]`, and 541 /// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec` 542-/// with exactly the requested capacity. If <code>[len] == [capacity]</code>, 543+/// with at least the requested capacity. If <code>[len] == [capacity]</code>, 544 /// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted to 545 /// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements. 546 /// 547@@ -447,7 +447,7 @@ impl<T> Vec<T> { 548 /// 549 /// # Panics 550 /// 551- /// Panics if the new capacity exceeds `isize::MAX` bytes. 552+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 
553 /// 554 /// # Examples 555 /// 556@@ -690,7 +690,7 @@ impl<T, A: Allocator> Vec<T, A> { 557 /// 558 /// # Panics 559 /// 560- /// Panics if the new capacity exceeds `isize::MAX` bytes. 561+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 562 /// 563 /// # Examples 564 /// 565@@ -1013,7 +1013,7 @@ impl<T, A: Allocator> Vec<T, A> { 566 /// 567 /// # Panics 568 /// 569- /// Panics if the new capacity exceeds `isize::MAX` bytes. 570+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 571 /// 572 /// # Examples 573 /// 574@@ -1043,7 +1043,7 @@ impl<T, A: Allocator> Vec<T, A> { 575 /// 576 /// # Panics 577 /// 578- /// Panics if the new capacity exceeds `isize::MAX` bytes. 579+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 580 /// 581 /// # Examples 582 /// 583@@ -1140,8 +1140,11 @@ impl<T, A: Allocator> Vec<T, A> { 584 585 /// Shrinks the capacity of the vector as much as possible. 586 /// 587- /// It will drop down as close as possible to the length but the allocator 588- /// may still inform the vector that there is space for a few more elements. 589+ /// The behavior of this method depends on the allocator, which may either shrink the vector 590+ /// in-place or reallocate. The resulting vector might still have some excess capacity, just as 591+ /// is the case for [`with_capacity`]. See [`Allocator::shrink`] for more details. 592+ /// 593+ /// [`with_capacity`]: Vec::with_capacity 594 /// 595 /// # Examples 596 /// 597@@ -1191,10 +1194,10 @@ impl<T, A: Allocator> Vec<T, A> { 598 599 /// Converts the vector into [`Box<[T]>`][owned slice]. 600 /// 601- /// If the vector has excess capacity, its items will be moved into a 602- /// newly-allocated buffer with exactly the right capacity. 603+ /// Before doing the conversion, this method discards excess capacity like [`shrink_to_fit`]. 
604 /// 605 /// [owned slice]: Box 606+ /// [`shrink_to_fit`]: Vec::shrink_to_fit 607 /// 608 /// # Examples 609 /// 610@@ -2017,7 +2020,7 @@ impl<T, A: Allocator> Vec<T, A> { 611 /// 612 /// # Panics 613 /// 614- /// Panics if the new capacity exceeds `isize::MAX` bytes. 615+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 616 /// 617 /// # Examples 618 /// 619@@ -2133,7 +2136,7 @@ impl<T, A: Allocator> Vec<T, A> { 620 } else { 621 unsafe { 622 self.len -= 1; 623- core::intrinsics::assume(self.len < self.capacity()); 624+ core::hint::assert_unchecked(self.len < self.capacity()); 625 Some(ptr::read(self.as_ptr().add(self.len()))) 626 } 627 } 628@@ -2143,7 +2146,7 @@ impl<T, A: Allocator> Vec<T, A> { 629 /// 630 /// # Panics 631 /// 632- /// Panics if the new capacity exceeds `isize::MAX` bytes. 633+ /// Panics if the new capacity exceeds `isize::MAX` _bytes_. 634 /// 635 /// # Examples 636 /// 637@@ -2315,6 +2318,12 @@ impl<T, A: Allocator> Vec<T, A> { 638 /// `[at, len)`. After the call, the original vector will be left containing 639 /// the elements `[0, at)` with its previous capacity unchanged. 640 /// 641+ /// - If you want to take ownership of the entire contents and capacity of 642+ /// the vector, see [`mem::take`] or [`mem::replace`]. 643+ /// - If you don't need the returned vector at all, see [`Vec::truncate`]. 644+ /// - If you want to take ownership of an arbitrary subslice, or you don't 645+ /// necessarily want to store the removed items in a vector, see [`Vec::drain`]. 646+ /// 647 /// # Panics 648 /// 649 /// Panics if `at > len`. 
650@@ -2346,14 +2355,6 @@ impl<T, A: Allocator> Vec<T, A> { 651 assert_failed(at, self.len()); 652 } 653 654- if at == 0 { 655- // the new vector can take over the original buffer and avoid the copy 656- return mem::replace( 657- self, 658- Vec::with_capacity_in(self.capacity(), self.allocator().clone()), 659- ); 660- } 661- 662 let other_len = self.len - at; 663 let mut other = Vec::with_capacity_in(other_len, self.allocator().clone()); 664 665@@ -3027,6 +3028,50 @@ impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> { 666 } 667 } 668 669+/// Collects an iterator into a Vec, commonly called via [`Iterator::collect()`] 670+/// 671+/// # Allocation behavior 672+/// 673+/// In general `Vec` does not guarantee any particular growth or allocation strategy. 674+/// That also applies to this trait impl. 675+/// 676+/// **Note:** This section covers implementation details and is therefore exempt from 677+/// stability guarantees. 678+/// 679+/// Vec may use any or none of the following strategies, 680+/// depending on the supplied iterator: 681+/// 682+/// * preallocate based on [`Iterator::size_hint()`] 683+/// * and panic if the number of items is outside the provided lower/upper bounds 684+/// * use an amortized growth strategy similar to `pushing` one item at a time 685+/// * perform the iteration in-place on the original allocation backing the iterator 686+/// 687+/// The last case warrants some attention. It is an optimization that in many cases reduces peak memory 688+/// consumption and improves cache locality. But when big, short-lived allocations are created, 689+/// only a small fraction of their items get collected, no further use is made of the spare capacity 690+/// and the resulting `Vec` is moved into a longer-lived structure, then this can lead to the large 691+/// allocations having their lifetimes unnecessarily extended which can result in increased memory 692+/// footprint. 
693+/// 694+/// In cases where this is an issue, the excess capacity can be discarded with [`Vec::shrink_to()`], 695+/// [`Vec::shrink_to_fit()`] or by collecting into [`Box<[T]>`][owned slice] instead, which additionally reduces 696+/// the size of the long-lived struct. 697+/// 698+/// [owned slice]: Box 699+/// 700+/// ```rust 701+/// # use std::sync::Mutex; 702+/// static LONG_LIVED: Mutex<Vec<Vec<u16>>> = Mutex::new(Vec::new()); 703+/// 704+/// for i in 0..10 { 705+/// let big_temporary: Vec<u16> = (0..1024).collect(); 706+/// // discard most items 707+/// let mut result: Vec<_> = big_temporary.into_iter().filter(|i| i % 100 == 0).collect(); 708+/// // without this a lot of unused capacity might be moved into the global 709+/// result.shrink_to_fit(); 710+/// LONG_LIVED.lock().unwrap().push(result); 711+/// } 712+/// ``` 713 #[cfg(not(no_global_oom_handling))] 714 #[stable(feature = "rust1", since = "1.0.0")] 715 impl<T> FromIterator<T> for Vec<T> { 716@@ -3069,14 +3114,8 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> { 717 begin.add(me.len()) as *const T 718 }; 719 let cap = me.buf.capacity(); 720- IntoIter { 721- buf: NonNull::new_unchecked(begin), 722- phantom: PhantomData, 723- cap, 724- alloc, 725- ptr: begin, 726- end, 727- } 728+ let buf = NonNull::new_unchecked(begin); 729+ IntoIter { buf, phantom: PhantomData, cap, alloc, ptr: buf, end } 730 } 731 } 732 } 733@@ -3598,8 +3637,10 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> { 734 impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> { 735 /// Convert a vector into a boxed slice. 736 /// 737- /// If `v` has excess capacity, its items will be moved into a 738- /// newly-allocated buffer with exactly the right capacity. 739+ /// Before doing the conversion, this method discards excess capacity like [`Vec::shrink_to_fit`]. 
740+ /// 741+ /// [owned slice]: Box 742+ /// [`Vec::shrink_to_fit`]: Vec::shrink_to_fit 743 /// 744 /// # Examples 745 /// 746diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs 747index be68d5e567b1a1..71f95e5aa09abd 100644 748--- a/rust/kernel/lib.rs 749+++ b/rust/kernel/lib.rs 750@@ -16,7 +16,6 @@ 751 #![feature(coerce_unsized)] 752 #![feature(dispatch_from_dyn)] 753 #![feature(new_uninit)] 754-#![feature(offset_of)] 755 #![feature(receiver_trait)] 756 #![feature(unsize)] 757 758@@ -78,7 +77,7 @@ pub trait Module: Sized + Sync { 759 /// Equivalent to `THIS_MODULE` in the C API. 760 /// 761 /// C header: [`include/linux/export.h`](srctree/include/linux/export.h) 762-pub struct ThisModule(*mut bindings::module); 763+pub struct ThisModule(#[allow(dead_code)] *mut bindings::module); 764 765 // SAFETY: `THIS_MODULE` may be used from all threads within a module. 766 unsafe impl Sync for ThisModule {} 767diff --git a/scripts/Makefile.build b/scripts/Makefile.build 768index baf86c0880b6d7..367cfeea74c5f5 100644 769--- a/scripts/Makefile.build 770+++ b/scripts/Makefile.build 771@@ -263,7 +263,7 @@ $(obj)/%.lst: $(src)/%.c FORCE 772 # Compile Rust sources (.rs) 773 # --------------------------------------------------------------------------- 774 775-rust_allowed_features := new_uninit,offset_of 776+rust_allowed_features := new_uninit 777 778 # `--out-dir` is required to avoid temporaries being created by `rustc` in the 779 # current working directory, which may be not accessible in the out-of-tree 780diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh 781index 5927cc6b7de338..cc5141b67b4a71 100755 782--- a/scripts/min-tool-version.sh 783+++ b/scripts/min-tool-version.sh 784@@ -33,7 +33,7 @@ llvm) 785 fi 786 ;; 787 rustc) 788- echo 1.76.0 789+ echo 1.77.0 790 ;; 791 bindgen) 792 echo 0.65.1