1//! Node layout and allocation pools.
2//!
3//! Each node can hold `B` pivots and `B` values where `B` depends on the
4//! [`RangeTreeInteger`] that the tree is based on.
5//!
6//! Common invariants:
7//! - All pivots must be initialized.
8//! - `I::MAX` pivots must be at the end, after all other pivots.
9//! - Node must have at least `B / 2` elements, except the root node.
10//! - The root node must have at least 2 elements if it is an internal node.
11//! - Nodes can have at most `B - 1` elements.
12//! - This means that the last pivot is always `I::MAX` and it doesn't hold a
13//! valid value.
14//!
15//! Notably, it is not a safety violation for non-max pivots to be out of order,
16//! although it may cause tree operations to behave incorrectly (but safely).
17//!
18//! ## Leaf nodes
19//!
20//! Leaf nodes use a pivot of `I` and a value of `V`.
21//!
22//! Invariants:
23//! - Each pivot is tied to the corresponding value.
24//! - A pivot of `I::MAX` indicates that a value is absent.
25//!
//! Leaf nodes also hold a `NodeRef` pointing to the next leaf node in the tree.
//! This `NodeRef` overlaps with the last value, which is always unused except
//! temporarily during insertion.
29//!
30//! ## Internal nodes
31//!
32//! Internal nodes use a pivot of `I` and a value of `NodeRef`.
33//!
34//! Invariants:
35//! - The pivot of a sub-tree indicates the right-most leaf pivot in that sub-tree.
36//! - Safety relies on this being the case even if leaf pivots are out of order.
37//! - The last element has a pivot of `I::MAX`.
38//! - All later elements are considered absent.
39//! - Since nodes can only have `B - 1` elements, the last 2 pivots must have a
40//! value of `I::MAX`.
41
42use core::alloc::{AllocError, Allocator, Layout};
43use core::marker::PhantomData;
44use core::mem::MaybeUninit;
45use core::ptr::NonNull;
46use core::slice;
47
48use crate::RangeTreeInteger;
49
// Maximum allocation size of a `NodePool`. This is used to derive the maximum
// tree height.
//
// On 32-bit targets this is capped at `i32::MAX` rather than `u32::MAX` since
// a single `Layout` (and therefore a single allocation) cannot exceed
// `isize::MAX` bytes.
#[cfg(target_pointer_width = "64")]
pub(crate) const MAX_POOL_SIZE: usize = u32::MAX as usize;
#[cfg(target_pointer_width = "32")]
pub(crate) const MAX_POOL_SIZE: usize = i32::MAX as usize;
56
/// A position within a node.
///
/// This has the invariant of always being less than B, which allows safe
/// unchecked indexing within a node.
#[derive(Clone, Copy, Debug)]
pub(crate) struct NodePos<I: RangeTreeInteger> {
    // Using a u32 internally so each stack entry in a cursor is 8 bytes.
    pos: u32,
    // `fn() -> I` keeps `I` as a pure type parameter: the struct does not own
    // an `I` and auto traits like `Send`/`Sync` are unaffected.
    marker: PhantomData<fn() -> I>,
}
67
/// Helper macro to create a `NodePos` at a constant index.
///
/// This is safe since a static assert is used to ensure that `POS` is in
/// bounds.
///
/// Note: this expands to uses of `I::Int::B`, so it relies on a type `I` with
/// an `Int` associated type being in scope at the expansion site.
macro_rules! pos {
    ($expr:expr) => {{
        // Compile-time bounds check: the build fails if `$expr >= B`, which
        // is what makes the `unsafe` call below sound.
        const { assert!($expr < I::Int::B) };
        #[allow(unused_unsafe, reason = "inside macro")]
        const {
            // Safety: we checked the position to be `< B` above
            unsafe { $crate::node::NodePos::<I::Int>::new_unchecked($expr) }
        }
    }};
}
82
83impl<I: RangeTreeInteger> NodePos<I> {
84 /// A `NodePos` pointing to the first element of a node.
85 pub(crate) const ZERO: Self = Self {
86 pos: 0,
87 marker: PhantomData,
88 };
89
90 /// Returns a `NodePos` at the given index.
91 ///
92 /// # Safety
93 ///
94 /// The index must be less than `I::B`.
95 #[inline]
96 #[allow(
97 clippy::cast_possible_truncation,
98 reason = "B is *much less* than u32::MAX, this can not truncate"
99 )]
100 pub(crate) const unsafe fn new_unchecked(pos: usize) -> Self {
101 debug_assert!(pos < I::B);
102 Self {
103 pos: pos as u32,
104 marker: PhantomData,
105 }
106 }
107
108 /// Returns the position as a `usize`.
109 #[inline]
110 pub(crate) fn index(self) -> usize {
111 self.pos as usize
112 }
113
114 /// Returns the position after `self`.
115 ///
116 /// # Safety
117 ///
118 /// The resulting index must be less than `B`.
119 #[inline]
120 pub(crate) unsafe fn next(self) -> Self {
121 debug_assert!(self.index() + 1 < I::B);
122 Self {
123 pos: self.pos + 1,
124 marker: PhantomData,
125 }
126 }
127
128 /// Returns the position before `self`.
129 ///
130 /// # Safety
131 ///
132 /// The current index must not be 0.
133 #[inline]
134 pub(crate) unsafe fn prev(self) -> Self {
135 debug_assert_ne!(self.pos, 0);
136 Self {
137 pos: self.pos - 1,
138 marker: PhantomData,
139 }
140 }
141
142 /// If this position is in the right half of a node, returns the equivalent
143 /// position in the destination node after the split.
144 #[inline]
145 #[allow(
146 clippy::cast_possible_truncation,
147 reason = "B is *much less* than u32::MAX, this can not truncate"
148 )]
149 pub(crate) fn split_right_half(self) -> Option<Self> {
150 if self.index() >= I::B / 2 {
151 Some(Self {
152 pos: self.pos - I::B as u32 / 2,
153 marker: PhantomData,
154 })
155 } else {
156 None
157 }
158 }
159}
160
/// A reference to a node inside a `NodePool`.
///
/// This is encoded as a `u32` byte offset within the pool to save space
/// compared to a full pointer.
///
/// This doesn't have a lifetime, but is logically bound to the `NodePool` that
/// it was allocated from and is only valid for the lifetime of that pool.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) struct NodeRef(u32);
169
impl NodeRef {
    /// Creates a `NodeRef` with a value of 0.
    ///
    /// This is only meant for use when initializing a stack, it is not a valid
    /// `NodeRef`.
    pub(crate) const ZERO: Self = Self(0);

    /// Returns a pointer to the pivot array in the node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    unsafe fn pivots_ptr<I: RangeTreeInteger, V>(self, pool: &NodePool<I, V>) -> NonNull<I::Raw> {
        #[cfg(debug_assertions)]
        self.assert_valid(pool);

        // The pivot array sits at offset 0 within the node, so the node's
        // byte offset within the pool is also the pivot array's offset.
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { pool.ptr.byte_add(self.0 as usize).cast() }
    }

    /// Returns a pointer to the value array in the node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn values_ptr<I: RangeTreeInteger, V>(
        self,
        pool: &NodePool<I, V>,
    ) -> NonNull<MaybeUninit<V>> {
        #[cfg(debug_assertions)]
        self.assert_valid(pool);

        // `values_offset` is computed once at compile time from the node
        // layout.
        let (_, values_offset, _) = const { node_layout::<I, V>() };
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe {
            let ptr = pool.ptr.byte_add(self.0 as usize);
            ptr.byte_add(values_offset).cast::<MaybeUninit<V>>()
        }
    }

    /// Returns the end of the elements of a leaf node.
    ///
    /// This is the position found by `I::search` for `I::MAX` (presumably the
    /// first `I::MAX` pivot; the exact search semantics are defined by
    /// `RangeTreeInteger`).
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn leaf_end<I: RangeTreeInteger, V>(
        self,
        pool: &NodePool<I, V>,
    ) -> NodePos<I> {
        #[cfg(debug_assertions)]
        self.assert_valid(pool);

        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { I::search(self.pivots(pool), I::MAX) }
    }

    /// Returns the end of the elements of an internal node.
    ///
    /// Unlike `leaf_end` this is one past the search result: an internal
    /// node's last element itself has a pivot of `I::MAX` (see module docs).
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool` and must have at least 1 element.
    #[inline]
    pub(crate) unsafe fn internal_end<I: RangeTreeInteger, V>(
        self,
        pool: &NodePool<I, V>,
    ) -> NodePos<I> {
        #[cfg(debug_assertions)]
        self.assert_valid(pool);

        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { I::search(self.pivots(pool), I::MAX).next() }
    }

    /// Returns a reference to all the pivots in this node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn pivots<I: RangeTreeInteger, V>(self, pool: &NodePool<I, V>) -> &I::Pivots {
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { self.pivots_ptr(pool).cast::<I::Pivots>().as_ref() }
    }

    /// Returns the pivot at `pos` in this node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn pivot<I: RangeTreeInteger, V>(
        self,
        pos: NodePos<I>,
        pool: &NodePool<I, V>,
    ) -> I::Raw {
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { self.pivots_ptr(pool).add(pos.index()).read() }
    }

    /// Sets the pivot at `pos` in this node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn set_pivot<I: RangeTreeInteger, V>(
        self,
        pivot: I::Raw,
        pos: NodePos<I>,
        pool: &mut NodePool<I, V>,
    ) {
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { self.pivots_ptr(pool).add(pos.index()).write(pivot) }
    }

    /// Returns a reference to the value at `pos` in this node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn value<I: RangeTreeInteger, V>(
        self,
        pos: NodePos<I>,
        pool: &NodePool<I, V>,
    ) -> &MaybeUninit<V> {
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { self.values_ptr(pool).add(pos.index()).as_ref() }
    }

    /// Returns a mutable reference to the value at `pos` in this node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn value_mut<I: RangeTreeInteger, V>(
        self,
        pos: NodePos<I>,
        pool: &mut NodePool<I, V>,
    ) -> &mut MaybeUninit<V> {
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe { self.values_ptr(pool).add(pos.index()).as_mut() }
    }

    /// Returns a `NodeRef` pointing to the next leaf node in the tree.
    ///
    /// This overlaps with the last value in the node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn next_leaf<I: RangeTreeInteger, V>(
        self,
        pool: &NodePool<I, V>,
    ) -> Option<NodeRef> {
        #[cfg(debug_assertions)]
        self.assert_valid(pool);

        let (_, _, next_leaf_offset) = const { node_layout::<I, V>() };
        // Safety: ensured by caller implementation of the node pool and node layout
        let next_leaf = unsafe {
            let ptr = pool.ptr.byte_add(self.0 as usize);
            ptr.byte_add(next_leaf_offset).cast::<NodeRef>().read()
        };
        // A raw value of `!0` is the sentinel for "no next leaf" (see
        // `set_next_leaf`).
        (next_leaf.0 != !0).then_some(next_leaf)
    }

    /// Sets the `NodeRef` pointing to the next leaf node in the tree.
    ///
    /// This overlaps with the last value in the node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn set_next_leaf<I: RangeTreeInteger, V>(
        self,
        next_leaf: Option<NodeRef>,
        pool: &mut NodePool<I, V>,
    ) {
        #[cfg(debug_assertions)]
        self.assert_valid(pool);

        let (_, _, next_leaf_offset) = const { node_layout::<I, V>() };

        // `None` is encoded as the `!0` sentinel so the slot is always
        // initialized with *some* `NodeRef` bit pattern.
        // Safety: ensured by caller implementation of the node pool and node layout
        unsafe {
            let ptr = pool.ptr.byte_add(self.0 as usize);
            ptr.byte_add(next_leaf_offset)
                .cast::<NodeRef>()
                .write(next_leaf.unwrap_or(NodeRef(!0)));
        }
    }

    /// Inserts `pivot` at `pos`, shifting other pivots up to `node_size` to make
    /// room.
    ///
    /// The pivot previously in slot `node_size - 1` is discarded.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    ///
    /// `node_size` must be greater than `pos.index()` and less than or equal to
    /// `I::B`.
    #[inline]
    pub(crate) unsafe fn insert_pivot<I: RangeTreeInteger, V>(
        self,
        pivot: I::Raw,
        pos: NodePos<I>,
        node_size: usize,
        pool: &mut NodePool<I, V>,
    ) {
        debug_assert!(node_size <= I::B);
        debug_assert!(node_size > pos.index());
        // Safety: ensured by caller implementation of the node pool
        unsafe {
            let ptr = self.pivots_ptr(pool).add(pos.index());
            // Shift `[pos, node_size - 1)` one slot to the right, overwriting
            // the pivot at `node_size - 1`, then write the new pivot at `pos`.
            let count = node_size - pos.index() - 1;
            ptr.copy_to(ptr.add(1), count);
            ptr.write(pivot);
        }
    }

    /// Inserts `value` at `pos`, shifting other values up to `node_size` to
    /// make room.
    ///
    /// The value previously in slot `node_size - 1` is discarded without being
    /// dropped.
    ///
    /// If `node_size == I::B` then this will clobber the next leaf pointer in
    /// the node.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    ///
    /// `node_size` must be greater than `pos.index()` and less than or equal to
    /// `I::B`.
    #[inline]
    pub(crate) unsafe fn insert_value<I: RangeTreeInteger, V>(
        self,
        value: V,
        pos: NodePos<I>,
        node_size: usize,
        pool: &mut NodePool<I, V>,
    ) {
        debug_assert!(node_size <= I::B);
        debug_assert!(node_size > pos.index());
        // Safety: ensured by caller implementation of the node pool
        unsafe {
            let ptr = self.values_ptr(pool).add(pos.index());
            // Shift `[pos, node_size - 1)` one slot to the right, then write
            // the new value at `pos`.
            let count = node_size - pos.index() - 1;
            ptr.copy_to(ptr.add(1), count);
            ptr.write(MaybeUninit::new(value));
        }
    }

    /// Removes the pivot at `pos`, shifting other pivots.
    ///
    /// The pivot in the last slot is initialized to `I::MAX`.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn remove_pivot<I: RangeTreeInteger, V>(
        self,
        pos: NodePos<I>,
        pool: &mut NodePool<I, V>,
    ) {
        // Safety: ensured by caller implementation of the node pool
        unsafe {
            let ptr = self.pivots_ptr(pool).add(pos.index());
            // Shift everything after `pos` one slot to the left, then refill
            // the vacated last slot with the `I::MAX` sentinel so all pivots
            // remain initialized (module invariant).
            let count = I::B - pos.index() - 1;
            ptr.copy_from(ptr.add(1), count);
            self.pivots_ptr(pool).add(I::B - 1).write(I::MAX);
        }
    }

    /// Removes the value at `pos`, shifting other values.
    ///
    /// The value in the last slot is not modified.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn remove_value<I: RangeTreeInteger, V>(
        self,
        pos: NodePos<I>,
        pool: &mut NodePool<I, V>,
    ) {
        // Safety: ensured by caller implementation of the node pool
        unsafe {
            let ptr = self.values_ptr(pool).add(pos.index());
            let count = I::B - pos.index() - 1;
            ptr.copy_from(ptr.add(1), count);
        }
    }

    /// Moves the second half of the pivots and values of `self` to `dest`,
    /// replacing the pivots with `I::MAX`.
    ///
    /// The second half of `dest` is initialized with `I::MAX`.
    ///
    /// # Safety
    ///
    /// `self` and `dest` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn split_into<I: RangeTreeInteger, V>(
        self,
        dest: UninitNodeRef,
        pool: &mut NodePool<I, V>,
    ) -> NodeRef {
        // Safety: ensured by caller and const params
        unsafe {
            // Copy the upper half of pivots and values into the first half of
            // `dest`.
            self.pivots_ptr(pool)
                .add(I::B / 2)
                .copy_to_nonoverlapping(dest.0.pivots_ptr(pool), I::B / 2);
            self.values_ptr(pool)
                .add(I::B / 2)
                .copy_to_nonoverlapping(dest.0.values_ptr(pool), I::B / 2);
            // `self`'s pivots are all initialized, so a `&mut [I::Raw]` slice
            // is fine here.
            slice::from_raw_parts_mut(self.pivots_ptr(pool).add(I::B / 2).as_ptr(), I::B / 2)
                .fill(I::MAX);
            // Make sure not to create a reference to uninitialized memory.
            slice::from_raw_parts_mut(
                dest.0
                    .pivots_ptr(pool)
                    .add(I::B / 2)
                    .cast::<MaybeUninit<I::Raw>>()
                    .as_ptr(),
                I::B / 2,
            )
            .fill(MaybeUninit::new(I::MAX));
        }
        dest.0
    }

    /// Copies the first `count` elements of `src` and appends them to `self` at
    /// `offset`.
    ///
    /// # Safety
    ///
    /// `self` and `src` must be allocated from `pool`.
    ///
    /// `offset.index() + count <= I::B`
    #[inline]
    pub(crate) unsafe fn merge_from<I: RangeTreeInteger, V>(
        self,
        src: NodeRef,
        offset: NodePos<I>,
        count: usize,
        pool: &mut NodePool<I, V>,
    ) {
        // Safety: ensured by caller
        unsafe {
            self.pivots_ptr(pool)
                .add(offset.index())
                .copy_from_nonoverlapping(src.pivots_ptr(pool), count);
            self.values_ptr(pool)
                .add(offset.index())
                .copy_from_nonoverlapping(src.values_ptr(pool), count);
        }
    }

    /// Asserts that this reference is node-aligned and points inside the
    /// in-use portion of `pool`.
    pub(crate) fn assert_valid<I: RangeTreeInteger, V>(self, pool: &NodePool<I, V>) {
        let node_layout = const { node_layout::<I, V>().0 };
        // Valid node offsets are multiples of the padded node size, strictly
        // below the pool's in-use length.
        assert_eq!(self.0 as usize % node_layout.size(), 0);
        assert!(self.0 < pool.len);
    }
}
547
/// A `NodeRef` pointing to a node whose pivots have not been initialized yet.
///
/// The node must be initialized with `split_into` or `init_pivots`.
// Not `Clone`/`Copy`: consuming `self` in the init methods ensures each
// uninitialized node is initialized at most once.
#[derive(Debug)]
pub(crate) struct UninitNodeRef(NodeRef);
553
impl UninitNodeRef {
    /// Initializes all pivots of the node with `I::MAX`.
    ///
    /// Per the module invariants, an all-`I::MAX` pivot array is an empty
    /// node, so the now-valid `NodeRef` is returned.
    ///
    /// # Safety
    ///
    /// `self` must be allocated from `pool`.
    #[inline]
    pub(crate) unsafe fn init_pivots<I: RangeTreeInteger, V>(
        self,
        pool: &mut NodePool<I, V>,
    ) -> NodeRef {
        // Safety: ensured by caller
        unsafe {
            let ptr = self.0.pivots_ptr(pool).cast::<MaybeUninit<I::Raw>>();
            // Make sure not to create a reference to uninitialized memory.
            let slice = slice::from_raw_parts_mut(ptr.as_ptr(), I::B);
            slice.fill(MaybeUninit::new(I::MAX));
        }
        self.0
    }
}
575
/// Returns:
/// - the layout of a node with pivot `I` and value `V`.
/// - the offset of the value array within the node.
/// - the offset of the next leaf pointer within the node.
#[inline]
pub(crate) const fn node_layout<I: RangeTreeInteger, V>() -> (Layout, usize, usize) {
    // We require nodes to have at least 4 elements, and the number of elements
    // must be a multiple of 2.
    const { assert!(I::B >= 4) };
    const { assert!(I::B.is_multiple_of(2)) };

    // `Ord::max` is not const-callable, so roll our own.
    const fn max(a: usize, b: usize) -> usize {
        if a > b { a } else { b }
    }

    // The node layout is effectively:
    // struct Node<I, V> {
    //     pivots: I::pivots, // [I::Raw; I::B]
    //     values: [MaybeUninit<V>; I::B - 1],
    //     last_value: union {
    //         MaybeUninit<V>,
    //         NodeRef,
    //     },
    //   }
    //
    // However this can't be expressed directly due to limitations on const
    // generics.
    let pivots = Layout::new::<I::Pivots>();

    let Ok(values) = Layout::array::<V>(I::B - 1) else {
        panic!("Could not calculate node layout");
    };
    let Ok((node, values_offset)) = pivots.extend(values) else {
        panic!("Could not calculate node layout");
    };

    // The last slot is a manual union of `MaybeUninit<V>` and `NodeRef`: it
    // must be big and aligned enough for whichever is larger.
    let Ok(last_value) = Layout::from_size_align(
        max(size_of::<V>(), size_of::<NodeRef>()),
        max(align_of::<V>(), align_of::<NodeRef>()),
    ) else {
        panic!("Could not calculate node layout");
    };
    let Ok((node, next_leaf_offset)) = node.extend(last_value) else {
        panic!("Could not calculate node layout");
    };

    // Freed nodes are kept as a linked list of NodeRef, so ensure we can fit a
    // NodeRef in the node. `NodeRef` is a `u32`, so 4-byte alignment suffices
    // (the node is always at least `I::B >= 4` pivot bytes big).
    let Ok(layout) = node.align_to(4) else {
        panic!("Could not calculate node layout");
    };
    (layout.pad_to_align(), values_offset, next_leaf_offset)
}
629
/// Memory pool for allocating nodes with pivot `I` and value `V`.
///
/// All nodes are sourced from a single allocation and can therefore be
/// referred to using just a `u32` offset instead of a full pointer.
pub(crate) struct NodePool<I: RangeTreeInteger, V> {
    /// Base of the allocation. Dangling while `capacity == 0` (see `new`).
    ptr: NonNull<u8>,

    /// Size of the allocation.
    capacity: u32,

    /// Amount of the allocation currently in use. This is always a multiple of
    /// the node size.
    len: u32,

    /// Linked list of freed nodes, terminated by `!0`.
    free_list: u32,

    marker: PhantomData<(I, V)>,
}
650
// These impls are needed because `NonNull` suppresses the automatic impls.
// Safety: the pool owns its allocation exclusively (the raw pointer is never
// shared outside the pool), so sending/sharing it is sound whenever the
// stored `I` and `V` types themselves are `Send`/`Sync`.
unsafe impl<I: RangeTreeInteger + Send, V: Send> Send for NodePool<I, V> {}
unsafe impl<I: RangeTreeInteger + Sync, V: Sync> Sync for NodePool<I, V> {}
654
655impl<I: RangeTreeInteger, V> NodePool<I, V> {
656 #[inline]
657 pub(crate) fn new() -> Self {
658 Self {
659 ptr: NonNull::dangling(),
660 len: 0,
661 capacity: 0,
662 free_list: !0,
663 marker: PhantomData,
664 }
665 }
666
667 /// Grows the allocation when it is full.
668 ///
669 /// This also handles the initial allocation.
670 ///
671 /// This function is marked `extern "C"` so that any panics in the allocator
672 /// cause an abort. This is necessary since the current insert
673 /// implementation cannot recover from a failed allocation.
674 ///
675 /// # Safety
676 ///
677 /// This pool must always be used with the same allocator.
678 fn grow(&mut self, alloc: &impl Allocator) -> Result<(), AllocError> {
679 let node_layout = const { node_layout::<I, V>().0 };
680
681 if self.capacity == 0 {
682 // Allocate space for 2 nodes for the initial allocation.
683 let new_layout = Layout::from_size_align(node_layout.size() * 2, node_layout.align())
684 .expect("exceeded RangeTree maximum allocation size");
685 assert!(
686 new_layout.size() <= MAX_POOL_SIZE,
687 "exceeded RangeTree maximum allocation size"
688 );
689 self.ptr = alloc.allocate(new_layout)?.cast();
690
691 debug_assert!(u32::try_from(new_layout.size()).is_ok());
692 // Safety: layouts `> u32::MAX` are very unlikely AND we check this with debug-assertions enabled.
693 self.capacity = unsafe { u32::try_from(new_layout.size()).unwrap_unchecked() };
694 } else {
695 let old_layout = unsafe {
696 Layout::from_size_align_unchecked(self.capacity as usize, node_layout.align())
697 };
698
699 // This multiplication cannot overflow because the capacity in a
700 // layout cannot exceed `isize::MAX`.
701 let new_layout =
702 Layout::from_size_align(self.capacity as usize * 2, node_layout.align())
703 .expect("exceeded RangeTree maximum allocation size");
704 assert!(
705 new_layout.size() <= MAX_POOL_SIZE,
706 "exceeded RangeTree maximum allocation size"
707 );
708 // Safety: we constructed the new layout using the same const params
709 self.ptr = unsafe { alloc.grow(self.ptr, old_layout, new_layout)?.cast() };
710
711 debug_assert!(u32::try_from(new_layout.size()).is_ok());
712 // Safety: layouts `> u32::MAX` are very unlikely AND we check this with debug-assertions enabled.
713 self.capacity = unsafe { u32::try_from(new_layout.size()).unwrap_unchecked() };
714 }
715
716 Ok(())
717 }
718
719 /// Allocates a new uninitialized node from the pool.
720 ///
721 /// # Safety
722 ///
723 /// This pool must always be used with the same allocator.
724 #[inline]
725 pub(crate) unsafe fn alloc_node(
726 &mut self,
727 alloc: &impl Allocator,
728 ) -> Result<UninitNodeRef, AllocError> {
729 let node_layout = const { node_layout::<I, V>().0 };
730
731 // First try re-using a node from the free list.
732 if self.free_list != !0 {
733 // Freed nodes hold a single `NodeRef` with the next element in the
734 // free list.
735 let node = UninitNodeRef(NodeRef(self.free_list));
736 // Safety: `free_list` is a node-pointer that is within this allocation by construction
737 self.free_list = unsafe { self.ptr.byte_add(self.free_list as usize).cast().read() };
738 return Ok(node);
739 }
740
741 // Grow the allocation if we've reached capacity.
742 if self.len == self.capacity {
743 self.grow(alloc)?;
744 }
745
746 // grow() will have doubled the capacity or initialized it, which
747 // guarantees at least enough space to allocate a single node.
748 let node = UninitNodeRef(NodeRef(self.len));
749
750 debug_assert!(u32::try_from(node_layout.size()).is_ok());
751 // Safety: layouts `> u32::MAX` are very unlikely AND we check this with debug-assertions enabled.
752 self.len += unsafe { u32::try_from(node_layout.size()).unwrap_unchecked() };
753
754 debug_assert!(self.len <= self.capacity);
755 Ok(node)
756 }
757
758 /// Releases an unused node back to the pool.
759 ///
760 /// # Safety
761 ///
762 /// `node` must be allocated from this pool.
763 #[inline]
764 pub(crate) unsafe fn free_node(&mut self, node: NodeRef) {
765 // Just add the node to the linked list of freed nodes.
766 // Safety: ensured by caller
767 unsafe {
768 self.ptr
769 .byte_add(node.0 as usize)
770 .cast()
771 .write(self.free_list);
772 }
773 self.free_list = node.0;
774 }
775
776 /// Frees all `NodeRef`s allocated from this pool.
777 pub(crate) fn clear(&mut self) {
778 self.len = 0;
779 self.free_list = !0;
780 }
781
782 /// Same as `clear` but then allocates the first node.
783 pub(crate) fn clear_and_alloc_node(&mut self) -> UninitNodeRef {
784 let node_layout = const { node_layout::<I, V>().0 };
785
786 debug_assert!(u32::try_from(node_layout.size()).is_ok());
787 // Safety: layouts `> u32::MAX` are very unlikely
788 self.len = unsafe { u32::try_from(node_layout.size()).unwrap_unchecked() };
789 self.free_list = !0;
790 UninitNodeRef(NodeRef::ZERO)
791 }
792
793 /// Frees the pool and its allocation. This invalidates all `NodeRef`s
794 /// allocated from this pool.
795 ///
796 /// # Safety
797 ///
798 /// This pool must always be used with the same allocator.
799 #[inline]
800 pub(crate) unsafe fn clear_and_free(&mut self, alloc: &impl Allocator) {
801 self.clear();
802 let node_layout = const { node_layout::<I, V>().0 };
803 // Safety: ensured by caller
804 let layout = unsafe {
805 Layout::from_size_align_unchecked(self.capacity as usize, node_layout.align())
806 };
807 // Safety: ensured by caller
808 unsafe {
809 alloc.deallocate(self.ptr, layout);
810 }
811 self.capacity = 0;
812 }
813}
814
#[cfg(test)]
mod tests {
    use alloc::alloc::Global;
    use core::num::NonZeroU32;

    use super::NodePool;

    /// Basic allocate / free / reuse round-trip through the pool.
    #[test]
    fn smoke() {
        let mut pool = NodePool::<NonZeroU32, u32>::new();
        let node = unsafe { pool.alloc_node(&Global).unwrap().0 };
        let node2 = unsafe { pool.alloc_node(&Global).unwrap().0 };
        unsafe {
            pool.free_node(node);
        }
        // Freeing `node` puts it at the head of the free list, so the next
        // allocation must reuse it.
        let node3 = unsafe { pool.alloc_node(&Global).unwrap().0 };
        // Use hard assertions: `debug_assert_*` compiles away when debug
        // assertions are off, so the test would check nothing in release mode.
        assert_ne!(node, node2);
        assert_eq!(node, node3);
        unsafe {
            pool.clear_and_free(&Global);
        }
    }
}
837}