old school music tracker audio backend
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

Better code structure, especially with unsafe code.

luca3s 7c0f3055 9879fe62

+121 -87
+1
Cargo.lock
··· 359 359 "basedrop", 360 360 "cpal", 361 361 "crossbeam-utils", 362 + "futures-io", 362 363 "futures-lite", 363 364 "hound", 364 365 "rtrb",
+29 -28
simple-left-right/src/inner.rs
··· 1 1 use core::{cell::UnsafeCell, hint::unreachable_unchecked, sync::atomic::AtomicU8}; 2 2 3 3 #[derive(PartialEq, Eq, Clone, Copy, Debug)] 4 - pub enum Ptr { 4 + pub(crate) enum Ptr { 5 5 Value1, 6 6 Value2, 7 7 } 8 8 9 9 impl Ptr { 10 - #[inline] 11 - pub fn switch(&mut self) { 10 + pub(crate) fn switch(&mut self) { 12 11 *self = match self { 13 12 Ptr::Value1 => Self::Value2, 14 13 Ptr::Value2 => Self::Value1, 15 14 }; 16 15 } 17 - } 16 + 17 + /// SAFETY: Assumes no read bits (lower two) are set 18 + pub(crate) unsafe fn from_u8_no_read(value: u8) -> Self { 19 + match value { 20 + 0b000 => Self::Value1, 21 + 0b100 => Self::Value2, 22 + // SAFETY: unsafe fn. communicated in docs 23 + _ => unsafe { unreachable_unchecked() }, 24 + } 25 + } 18 26 19 - impl From<u8> for Ptr { 20 - #[inline] 21 - fn from(value: u8) -> Self { 27 + /// ignores potentially set read bits. 28 + /// SAFETY: no bits except the for bottom three can be set 29 + pub(crate) unsafe fn from_u8_ignore_read(value: u8) -> Self { 22 30 match value & 0b100 { 23 31 0b000 => Self::Value1, 24 32 0b100 => Self::Value2, 25 - // SAFETY: Internal Library Value only. 33 + // SAFETY: unsafe fn. communicated in docs 26 34 _ => unsafe { unreachable_unchecked() }, 27 35 } 28 36 } 29 37 } 30 38 31 39 #[derive(Debug)] 32 - pub enum ReadState { 40 + pub(crate) enum ReadState { 33 41 None, 34 42 Value(Ptr), 35 - /// Is written before loading the ptr value and then instantly overwritten with the specific ptr value 36 - /// This makes sure the Writer doesn't swap and load between loading the ptr and setting the read 37 - Both, 38 43 } 39 44 40 45 impl ReadState { 41 46 /// is writing on the passed ptr parameter valid with the current read state? 
42 47 #[inline] 43 - pub fn can_write(&self, ptr: Ptr) -> bool { 48 + pub(crate) fn can_write(&self, ptr: Ptr) -> bool { 44 49 match self { 45 50 ReadState::None => true, 46 51 ReadState::Value(p) => *p != ptr, 47 - ReadState::Both => false, 48 52 } 49 53 } 50 - } 51 54 52 - impl From<Ptr> for ReadState { 53 - #[inline] 54 - fn from(value: Ptr) -> Self { 55 - Self::Value(value) 56 - } 57 - } 58 - 59 - impl From<u8> for ReadState { 60 - #[inline] 61 - fn from(value: u8) -> Self { 55 + /// SAFETY: only the read state bits are set 56 + pub(crate) unsafe fn from_u8_ignore_ptr(value: u8) -> Self { 62 57 match value & 0b011 { 63 58 0b00 => Self::None, 64 59 0b01 => Self::Value(Ptr::Value1), 65 60 0b10 => Self::Value(Ptr::Value2), 66 - 0b11 => Self::Both, 67 61 // SAFETY: Internal Library Value only. 68 62 _ => unsafe { unreachable_unchecked() }, 69 63 } 70 64 } 71 65 } 72 66 67 + impl From<Ptr> for ReadState { 68 + #[inline] 69 + fn from(value: Ptr) -> Self { 70 + Self::Value(value) 71 + } 72 + } 73 + 73 74 #[derive(Debug)] 74 - pub struct Shared<T> { 75 + pub(crate) struct Shared<T> { 75 76 pub value_1: UnsafeCell<T>, 76 77 pub value_2: UnsafeCell<T>, 77 78 /// bit 0: is value 1 being read ··· 87 88 unsafe impl<T: Send + Sync> Sync for Shared<T> {} 88 89 89 90 impl<T> Shared<T> { 90 - pub fn get_value(&self, ptr: Ptr) -> &UnsafeCell<T> { 91 + pub(crate) fn get_value(&self, ptr: Ptr) -> &UnsafeCell<T> { 91 92 match ptr { 92 93 Ptr::Value1 => &self.value_1, 93 94 Ptr::Value2 => &self.value_2,
+51 -31
simple-left-right/src/lib.rs
··· 21 21 clippy::missing_safety_doc, 22 22 clippy::undocumented_unsafe_blocks 23 23 )] 24 - 25 24 #![cfg_attr(not(feature = "std"), no_std)] 26 25 27 26 extern crate alloc; 28 27 29 28 #[cfg(feature = "std")] 30 - use std::thread; 31 - #[cfg(feature = "std")] 32 29 use core::time::Duration; 30 + #[cfg(feature = "std")] 31 + use std::thread; 33 32 34 33 use core::{ 35 - hint::assert_unchecked, 34 + cell::UnsafeCell, 36 35 marker::PhantomData, 37 36 mem::MaybeUninit, 38 37 ops::Deref, 39 - sync::atomic::{fence, AtomicU8, Ordering} 38 + sync::atomic::{fence, AtomicU8, Ordering}, 40 39 }; 41 40 42 41 use alloc::{collections::vec_deque::VecDeque, sync::Arc}; ··· 105 104 // sets the corresponding read bit to the write ptr bit 106 105 // happens as a single atomic operation so the 'double read' state isn't needed 107 106 // ptr bit doesnt get changed 107 + // always Ok, as the passed closure never returns None 108 108 let update_result = 109 109 self.inner 110 110 .state 111 111 .fetch_update(Ordering::Relaxed, Ordering::Acquire, |value| { 112 112 // SAFETY: At this point no Read bit is set, as creating a ReadGuard requires a &mut Reader and the Guard holds the &mut Reader 113 113 unsafe { 114 - assert_unchecked(value & 0b011 == 0); 115 - } 116 - match value.into() { 117 - Ptr::Value1 => Some(0b001), 118 - Ptr::Value2 => Some(0b110), 114 + match Ptr::from_u8_no_read(value) { 115 + Ptr::Value1 => Some(0b001), 116 + Ptr::Value2 => Some(0b110), 117 + } 119 118 } 120 119 }); 121 120 122 - // SAFETY: the passed clorusure always returns Some, so fetch_update never returns Err 123 - let ptr = unsafe { update_result.unwrap_unchecked().into() }; 121 + // here the read ptr and read state of update_result match. maybe the atomic was already changed, but that doesn't matter. 122 + // we continue working with the state that we set. 
123 + 124 + // SAFETY: the passed closure always returns Some, so fetch_update never returns Err 125 + let ptr = unsafe { 126 + // here it doesn't matter if we create the Ptr from the read bits or from the ptr bit, as they match 127 + Ptr::from_u8_ignore_read(update_result.unwrap_unchecked()) 128 + }; 124 129 125 - // SAFETY: the Writer always sets the Read bit to the opposite of its write_ptr 130 + // SAFETY: the Writer allowed the read on this value because the ptr bit was set. The read bit has been set 126 131 let data = unsafe { self.inner.get_value(ptr).get().as_ref().unwrap_unchecked() }; 127 132 128 - // SAFETY: the read_state is set to the value that is being 129 133 ReadGuard { 130 134 data, 131 135 state: &self.inner.state, ··· 150 154 pub fn swap(self) {} 151 155 152 156 /// Gets the value currently being written to. 153 - #[must_use] 154 157 pub fn read(&self) -> &T { 155 158 self.writer.read() 156 159 } ··· 160 163 fn get_data_mut(&mut self) -> &mut T { 161 164 // SAFETY: When creating the writeguad it is checked that the reader doesnt have access to the same data 162 165 // This function requires &mut self so there also isn't any ref created by writeguard. 163 - unsafe { self.get_data_ptr().as_mut().unwrap() } 164 - } 165 - 166 - fn get_data_ptr(&self) -> *mut T { 167 - self.writer.shared.get_value(self.writer.write_ptr).get() 166 + // SAFETY: the ptr is never null, therefore unwrap_unchecked 167 + unsafe { 168 + self.writer 169 + .shared 170 + .get_value(self.writer.write_ptr) 171 + .get() 172 + .as_mut() 173 + .unwrap_unchecked() 174 + } 168 175 } 169 176 } 170 177 ··· 264 271 /// Blocks if the Reader has a `ReadGuard` pointing to the old value. 265 272 /// 266 273 /// Uses a Spinlock because for anything else the OS needs to be involved and `Reader` can't talk to the OS. 
267 - #[cfg(not(feature = "std"))] 268 274 pub fn lock(&mut self) -> WriteGuard<'_, T, O> { 269 275 let backoff = crossbeam_utils::Backoff::new(); 270 276 ··· 272 278 // operation has to be aquire, but only the time it breaks the loop 273 279 let state = self.shared.state.load(Ordering::Relaxed); 274 280 275 - if ReadState::from(state).can_write(self.write_ptr) { 281 + // SAFETY: is in state internal only value which is only set by library code 282 + let state = unsafe { ReadState::from_u8_ignore_ptr(state) }; 283 + 284 + if state.can_write(self.write_ptr) { 276 285 // make the load operation aquire only when it actually breaks the loop 277 286 // the important (last) load is aquire, while all loads before are relaxed 278 287 fence(Ordering::Acquire); ··· 288 297 } 289 298 290 299 /// Blocks if the Reader has a `ReadGuard` pointing to the old value. 291 - /// 300 + /// 292 301 /// Uses a spin-lock, because the `Reader` can't talk to the OS. Sleeping and Yielding is done to avoid wasting cycles. 293 302 /// Equivalent to ´lock´, except that it starts sleeping the given duration after a certaint point until the lock could be aquired. 
294 303 #[cfg(feature = "std")] 295 - pub fn lock(&mut self, sleep: Duration) -> WriteGuard<'_, T, O> { 304 + pub fn sleep_lock(&mut self, sleep: Duration) -> WriteGuard<'_, T, O> { 296 305 let backoff = crossbeam_utils::Backoff::new(); 297 306 298 307 loop { 299 308 // operation has to be aquire, but only the time it breaks the loop 300 309 let state = self.shared.state.load(Ordering::Relaxed); 301 310 302 - if ReadState::from(state).can_write(self.write_ptr) { 311 + // SAFETY: is in state internal only value which is only set by library code 312 + let state = unsafe { ReadState::from_u8_ignore_ptr(state) }; 313 + 314 + if state.can_write(self.write_ptr) { 303 315 // make the load operation aquire, only when it actually breaks the loop 304 316 // the important (last) load is aquire, while all loads before are relaxed 305 317 fence(Ordering::Acquire); ··· 328 340 // operation has to be aquire, but only the time it breaks the loop 329 341 let state = self.shared.state.load(Ordering::Relaxed); 330 342 331 - if ReadState::from(state).can_write(self.write_ptr) { 343 + // SAFETY: is in state internal only value which is only set by library code 344 + let state = unsafe { ReadState::from_u8_ignore_ptr(state) }; 345 + 346 + if state.can_write(self.write_ptr) { 332 347 // make the load operation aquire, only when it actually breaks the loop 333 348 // the important (last) load is aquire, while all loads before are relaxed 334 349 fence(Ordering::Acquire); ··· 352 367 pub fn try_lock(&mut self) -> Option<WriteGuard<'_, T, O>> { 353 368 let state = self.shared.state.load(Ordering::Acquire); 354 369 355 - if ReadState::from(state).can_write(self.write_ptr) { 370 + // SAFETY: is in state internal only value which is only set by library code 371 + let state = unsafe { ReadState::from_u8_ignore_ptr(state) }; 372 + 373 + if state.can_write(self.write_ptr) { 356 374 // SAFETY: ReadState allows this 357 375 unsafe { Some(WriteGuard::new(self)) } 358 376 } else { ··· 371 389 let 
shared = unsafe { 372 390 let shared_ptr = Arc::get_mut(&mut shared).unwrap_unchecked().as_mut_ptr(); 373 391 (&raw mut (*shared_ptr).state).write(AtomicU8::new(0b000)); 374 - (&raw mut (*shared_ptr).value_1).cast::<T>().write(value.clone()); 375 - (&raw mut (*shared_ptr).value_2).cast::<T>().write(value); 392 + UnsafeCell::raw_get(&raw const (*shared_ptr).value_1).write(value.clone()); 393 + UnsafeCell::raw_get(&raw const (*shared_ptr).value_2).write(value); 376 394 shared.assume_init() 377 395 }; 378 396 ··· 388 406 /// Creates a new Writer by calling `T::default()` twice to create the two values 389 407 /// 390 408 /// Default impl of T needs to give the same result every time. Not upholding this doesn't lead to UB, but turns the library basically useless 409 + /// 410 + /// Could leak a T object if T::default() panics. 391 411 fn default() -> Self { 392 412 let mut shared: Arc<MaybeUninit<Shared<T>>> = Arc::new_uninit(); 393 413 ··· 396 416 let shared = unsafe { 397 417 let shared_ptr = Arc::get_mut(&mut shared).unwrap_unchecked().as_mut_ptr(); 398 418 (&raw mut (*shared_ptr).state).write(AtomicU8::new(0b000)); 399 - (&raw mut (*shared_ptr).value_1).cast::<T>().write(T::default()); 400 - (&raw mut (*shared_ptr).value_2).cast::<T>().write(T::default()); 419 + UnsafeCell::raw_get(&raw const (*shared_ptr).value_1).write(T::default()); 420 + UnsafeCell::raw_get(&raw const (*shared_ptr).value_2).write(T::default()); 401 421 shared.assume_init() 402 423
+22 -9
simple-left-right/tests/tests.rs
··· 1 1 #[cfg(test)] 2 2 mod tests { 3 - use simple_left_right::Writer; 4 - use std::time::Duration; 3 + use simple_left_right::{Absorb, Writer}; 4 + use std::{cell::Cell, time::Duration}; 5 + 6 + #[derive(Clone)] 7 + pub struct CounterAddOp(i32); 8 + 9 + impl Absorb<CounterAddOp> for i32 { 10 + fn absorb(&mut self, operation: CounterAddOp) { 11 + *self += operation.0; 12 + } 13 + } 5 14 6 - include!("utilities.rs"); 15 + impl Absorb<CounterAddOp> for Cell<i32> { 16 + fn absorb(&mut self, operation: CounterAddOp) { 17 + self.set(self.get() + operation.0); 18 + } 19 + } 7 20 8 21 #[test] 9 22 fn send_writer() { ··· 23 36 let mut reader = writer.build_reader().unwrap(); 24 37 let sleep = Duration::from_millis(5); 25 38 assert_eq!(*reader.lock(), 0); 26 - let mut write_lock = writer.lock(sleep); 39 + let mut write_lock = writer.sleep_lock(sleep); 27 40 assert_eq!(*write_lock.read(), 0); 28 41 write_lock.apply_op(CounterAddOp(2)); 29 42 assert_eq!(*write_lock.read(), 2); 30 43 drop(write_lock); 31 - let write_lock = writer.lock(sleep); 44 + let write_lock = writer.sleep_lock(sleep); 32 45 assert_eq!(*write_lock.read(), 2); 33 46 write_lock.swap(); 34 47 assert_eq!(*reader.lock(), 2); 35 - assert_eq!(*writer.lock(sleep).read(), 2); 48 + assert_eq!(*writer.sleep_lock(sleep).read(), 2); 36 49 } 37 50 38 51 #[test] ··· 95 108 let mut reader = writer.build_reader().unwrap(); 96 109 let sleep = Duration::from_millis(5); 97 110 98 - writer.lock(sleep).apply_op(CounterAddOp(2)); 111 + writer.sleep_lock(sleep).apply_op(CounterAddOp(2)); 99 112 std::thread::spawn(move || { 100 113 let lock = reader.lock(); 101 114 assert_eq!(*lock, 0); ··· 104 117 assert_eq!(*reader.lock(), 2); 105 118 }); 106 119 std::thread::sleep(Duration::from_secs(1)); 107 - writer.lock(sleep).swap(); 120 + writer.sleep_lock(sleep).swap(); 108 121 // blocks until the spawned thread drops the read_lock 109 - let write_lock = writer.lock(sleep); 122 + let write_lock = writer.sleep_lock(sleep); 110 123 
assert_eq!(*write_lock.read(), 2); 111 124 } 112 125
-12
simple-left-right/tests/utilities.rs
··· 1 - use simple_left_right::Absorb; 2 - 3 - #[cfg(test)] 4 - #[derive(Clone)] 5 - pub struct CounterAddOp(i32); 6 - 7 - #[cfg(test)] 8 - impl Absorb<CounterAddOp> for i32 { 9 - fn absorb(&mut self, operation: CounterAddOp) { 10 - *self += operation.0; 11 - } 12 - }
+2 -1
tracker-engine/Cargo.toml
··· 12 12 categories = ["audio"] 13 13 14 14 [features] 15 - async = ["dep:async-io", "dep:futures-lite", "dep:async-channel", "simple-left-right/async"] 15 + async = ["dep:async-io", "dep:futures-lite", "dep:async-channel", "dep:futures-io", "simple-left-right/async"] 16 16 17 17 [dependencies] 18 18 async-io = { version = "2.3.4", optional = true } 19 19 futures-lite = { version = "2.3.0", optional = true } 20 20 async-channel = { version = "2.3.1", optional = true } 21 + futures-io = { version = "0.3.31", optional = true } 21 22 # added a couple features to it. pull requests don' really progress 22 23 basedrop = "0.1.2" # miri reports Race Condition. Wait for fix or pull inside 23 24 cpal = "0.15.3"
+7 -1
tracker-engine/src/live_audio.rs
··· 180 180 } 181 181 } 182 182 183 + // unsure wether i want to use this or untyped_callback 184 + // also relevant when cpal gets made into a generic that maybe this gets useful 185 + #[expect(dead_code)] 183 186 pub fn get_typed_callback<S: cpal::SizedSample + cpal::FromSample<f32>>( 184 187 mut self, 185 188 ) -> impl FnMut(&mut [S], &cpal::OutputCallbackInfo) { 186 - move |data, info| { 189 + move |data, _info| { 187 190 assert_eq!( 188 191 data.len(), 189 192 usize::try_from(self.config.buffer_size).unwrap() ··· 197 200 } 198 201 } 199 202 203 + // only used for testing 204 + // if not testing is unused 205 + #[allow(dead_code)] 200 206 fn sine(output: &mut [[f32; 2]], sample_rate: f32) { 201 207 let mut sample_clock = 0f32; 202 208 for frame in output {
+1 -1
tracker-engine/src/manager.rs
··· 167 167 /// Spinloops until no more ReadGuard to the old value exists 168 168 pub fn edit_song(&mut self) -> SongEdit<'_> { 169 169 SongEdit { 170 - song: self.song.lock(Self::SPIN_SLEEP), 170 + song: self.song.sleep_lock(Self::SPIN_SLEEP), 171 171 gc: &mut self.gc 172 172 } 173 173 }
+3 -1
tracker-engine/src/project/song.rs
··· 101 101 } 102 102 103 103 impl Song<false> { 104 + // to avoid cloning patterns 105 + #[expect(clippy::wrong_self_convention)] 104 106 pub(crate) fn to_gc(self, handle: &basedrop::Handle) -> Song<true> { 105 107 Song { 106 108 global_volume: self.global_volume, ··· 135 137 initial_tempo: self.initial_tempo, 136 138 pan_separation: self.pan_separation, 137 139 pitch_wheel_depth: self.pitch_wheel_depth, 138 - patterns: self.patterns.clone(), 140 + patterns: self.patterns, 139 141 pattern_order: self.pattern_order, 140 142 volume: self.volume, 141 143 pan: self.pan,
+5 -3
tracker-engine/src/sample.rs
··· 91 91 } 92 92 } 93 93 94 - pub fn get_ref(&self) -> SampleRef<'static, true> { 94 + pub(crate) fn get_ref(&self) -> SampleRef<'static, true> { 95 95 let data = unsafe { self.gc.deref().clone() }; 96 96 SampleRef::<'static, true>::new(data) 97 97 } ··· 123 123 unsafe { &self.owned } 124 124 } 125 125 126 - pub fn get_ref<'a>(&'a self) -> SampleRef<'a, false> { 126 + pub(crate) fn get_ref<'a>(&'a self) -> SampleRef<'a, false> { 127 127 SampleRef::<'a, false>::new(unsafe { &self.owned }) 128 128 } 129 129 ··· 133 133 out 134 134 } 135 135 136 - pub fn to_gc(self, handle: &basedrop::Handle) -> Sample<true> { 136 + // avoids copying the underlaying data. As soon as SampleData gets ?Sized this should change 137 + #[expect(clippy::wrong_self_convention)] 138 + pub(crate) fn to_gc(self, handle: &basedrop::Handle) -> Sample<true> { 137 139 let data = self.take(); 138 140 let shared = basedrop::Shared::new(handle, data); 139 141 Sample::<true>::new(shared)