//! Old-school music tracker audio backend.
1use core::f32;
2use std::ops::{AddAssign, IndexMut};
3
4use crate::audio_processing::playback::{PlaybackState, PlaybackStatus};
5use crate::audio_processing::sample::SamplePlayer;
6use crate::audio_processing::{Frame, Interpolation};
7use crate::project::song::Song;
8use crate::sample::Sample;
9use crate::{OutputConfig, ToWorkerMsg};
10use dasp::sample::ToSample;
11use simple_left_right::Reader;
12
/// Realtime audio worker state: renders the shared [`Song`] and/or a live
/// preview note into a preallocated mix buffer inside the audio callback.
///
/// `StreamData` is extra per-callback data that is published back to the
/// reader side together with the playback status (see `send_state`).
pub(crate) struct LiveAudio<StreamData> {
    // Lock-free read handle to the song shared with a writer elsewhere.
    song: Reader<Song>,
    // `Some` while song playback is active; cleared on stop or when done.
    playback_state: Option<PlaybackState>,
    // Currently sounding preview note, triggered by `ToWorkerMsg::PlayEvent`.
    live_note: Option<SamplePlayer>,
    // Incoming control messages from the manager thread (ring-buffer consumer).
    manager: rtrb::Consumer<ToWorkerMsg>,
    // Publishes `(playback status, stream data)` snapshots for readers.
    state_sender: rt_write_lock::Writer<(Option<PlaybackStatus>, StreamData)>,
    // Output stream configuration (sample rate, channels, interpolation, ...).
    config: OutputConfig,

    // Scratch mix buffer, allocated once in `new` (2x `config.buffer_size`
    // frames) so the realtime callback never allocates.
    buffer: Box<[Frame]>,
}
23
impl<S> LiveAudio<S> {
    /// Not realtime safe.
    ///
    /// Allocates the internal mix buffer up front (2x `config.buffer_size`
    /// frames) so that the audio callback itself never has to allocate.
    pub fn new(
        song: Reader<Song>,
        manager: rtrb::Consumer<ToWorkerMsg>,
        state_sender: rt_write_lock::Writer<(Option<PlaybackStatus>, S)>,
        config: OutputConfig,
    ) -> Self {
        Self {
            song,
            playback_state: None,
            live_note: None,
            manager,
            state_sender,
            config,
            buffer: vec![Frame::default(); usize::try_from(config.buffer_size).unwrap() * 2].into(),
        }
    }

    /// Publishes the current playback status together with `stream_data`
    /// for the reader side of the `state_sender` lock to pick up.
    #[rtsan_standalone::nonblocking]
    fn send_state(&mut self, stream_data: S) {
        let playback_state = self.playback_state.as_ref().map(|s| s.get_status());
        let mut write_guard = self.state_sender.write();
        // Overwrites the whole tuple at once.
        // make this more granular once the state includes AudioData or other allocated data
        *write_guard = (playback_state, stream_data);
    }

    #[rtsan_standalone::nonblocking]
    /// Renders the next `len` frames into `self.buffer[..len]`: drains all
    /// pending manager messages, then mixes the live preview note and the
    /// song playback into the (freshly cleared) buffer.
    ///
    /// Returns true if work was done; when false, the buffer was not cleared
    /// and its contents are stale.
    fn fill_internal_buffer(&mut self, len: usize) -> bool {
        // The caller guarantees `len <= self.buffer.len()`
        // (the output buffer should be smaller than the internal buffer).
        let buffer = &mut self.buffer[..len];

        let song = self.song.lock();

        // Process all pending manager events before rendering, so control
        // changes take effect within this callback.
        while let Ok(event) = self.manager.pop() {
            match event {
                ToWorkerMsg::StopPlayback => self.playback_state = None,
                ToWorkerMsg::Playback(settings) => {
                    self.playback_state =
                        PlaybackState::new(&song, self.config.sample_rate, settings);
                }
                ToWorkerMsg::PlayEvent(note) => {
                    // Only trigger a preview note if the referenced sample
                    // slot is occupied; replaces any currently sounding note.
                    if let Some(sample) = &song.samples[usize::from(note.sample_instr)] {
                        let sample_player = SamplePlayer::new(
                            Sample::clone(&sample.1),
                            sample.0,
                            // NOTE(history): this at some point was divided by
                            // two; if i ever figure out why, maybe put it back.
                            self.config.sample_rate,
                            note.note,
                        );
                        self.live_note = Some(sample_player);
                    }
                }
                ToWorkerMsg::StopLiveNote => self.live_note = None,
                ToWorkerMsg::SetInterpolation(i) => self.config.interpolation = i,
            }
        }
        if self.live_note.is_none() && self.playback_state.is_none() {
            // No processing to do; the buffer is left stale (and uncleaned),
            // which is why the caller must check the return value.
            return false;
        }

        // Clear the buffer from the past run.
        // Only happens if there is work to do.
        buffer.fill(Frame::default());

        // Mix the live preview note, if any.
        if let Some(live_note) = &mut self.live_note {
            // Monomorphised per interpolation mode via the const generic, so
            // the interpolation branch is hoisted out of the sample loop.
            fn process_note<const INTERPOLATION: u8>(
                buffer: &mut [Frame],
                note: &mut SamplePlayer,
            ) {
                // Constant power panning leads to a lower volume. As the live_note isn't channel panned
                // increase the volume. If i change the pan law i use i need to change this here.
                //
                // This is sin(PI/4) or cos(PI/4).
                // TODO: sin or cos const replace this
                #[expect(clippy::excessive_precision)]
                const CENTER_VOL: f32 = 0.707106769084930419921875;
                buffer
                    .iter_mut()
                    .zip(note.iter::<{ INTERPOLATION }>())
                    .for_each(|(buf, note)| buf.add_assign(note * CENTER_VOL));
            }
            // Dispatch the runtime interpolation setting to the matching
            // monomorphised instantiation.
            match self.config.interpolation {
                Interpolation::Nearest => {
                    process_note::<{ Interpolation::Nearest as u8 }>(buffer, live_note)
                }
                Interpolation::Linear => {
                    process_note::<{ Interpolation::Linear as u8 }>(buffer, live_note)
                }
                Interpolation::Quadratic => {
                    process_note::<{ Interpolation::Quadratic as u8 }>(buffer, live_note)
                }
            }

            // Drop the note once it has played to its end.
            if live_note.check_position().is_break() {
                self.live_note = None;
            }
        }

        // Mix the song playback, if any, on top of the live note.
        if let Some(playback) = &mut self.playback_state {
            // Same const-generic dispatch pattern as `process_note` above.
            fn process_playback<const INTERPOLATION: u8>(
                buffer: &mut [Frame],
                playback: &mut PlaybackState,
                song: &Song,
            ) {
                buffer
                    .iter_mut()
                    .zip(playback.iter::<{ INTERPOLATION }>(song))
                    .for_each(|(buf, note)| buf.add_assign(note));
            }
            match self.config.interpolation {
                Interpolation::Nearest => {
                    process_playback::<{ Interpolation::Nearest as u8 }>(buffer, playback, &song)
                }
                Interpolation::Linear => {
                    process_playback::<{ Interpolation::Linear as u8 }>(buffer, playback, &song)
                }
                Interpolation::Quadratic => {
                    process_playback::<{ Interpolation::Quadratic as u8 }>(buffer, playback, &song)
                }
            }

            // Clear the playback state once the song has finished.
            if playback.is_done() {
                self.playback_state = None;
            }
        }

        true
    }

    /// Converts the internal buffer to the output sample format and channel
    /// count: with one output channel the stereo frames are summed to mono;
    /// with two or more, the first two channels of each interleaved output
    /// frame receive the stereo pair.
    ///
    /// NOTE(review): channels 3 and up are left untouched by this function
    /// (an earlier doc claimed they are silenced) — presumably the host
    /// pre-zeroes the output buffer; confirm against the stream backend.
    #[rtsan_standalone::nonblocking]
    #[inline]
    fn fill_from_internal<Sample: dasp::sample::Sample + dasp::sample::FromSample<f32>>(
        &mut self,
        data: &mut [Sample],
    ) {
        // Convert the internal buffer and move it into the out buffer.
        // `zip` stops at the shorter side, so `data`'s frame count bounds
        // how much of `self.buffer` is consumed.
        if self.config.channel_count.get() == 1 {
            data.iter_mut()
                .zip(self.buffer.iter())
                .for_each(|(out, buf)| *out = buf.sum_to_mono().to_sample_());
        } else {
            // `split_first_chunk_mut::<2>` cannot fail here: each chunk is
            // exactly `channel_count >= 2` samples long.
            data.chunks_exact_mut(usize::from(self.config.channel_count.get()))
                .map(|frame| frame.split_first_chunk_mut::<2>().unwrap().0)
                .zip(self.buffer.iter())
                .for_each(|(out, buf)| *out = buf.to_sample());
        }
    }

    /// Builds the audio-stream callback, consuming `self`.
    ///
    /// NOTE(review): when `fill_internal_buffer` reports no work, the output
    /// slice is left untouched — presumably the host hands out a zeroed
    /// buffer each callback; confirm, otherwise stale audio may be emitted.
    //
    // unsure whether i want to use this or untyped_callback
    // also relevant when cpal gets made into a generic that maybe this gets useful
    pub fn get_typed_callback<Sample: dasp::sample::Sample + dasp::sample::FromSample<f32>>(
        mut self,
    ) -> impl FnMut(&mut [Sample], S) {
        move |audio_data, stream_data| {
            let channel_count = usize::from(self.config.channel_count.get());
            // The host must hand out whole interleaved frames.
            assert!(audio_data.len().is_multiple_of(channel_count));
            let out_frames = audio_data.len() / channel_count;
            // Guards the `self.buffer[..len]` slice in `fill_internal_buffer`.
            // NOTE(review): `>=` would suffice; `>` keeps one frame of headroom.
            assert!(self.buffer.len() > out_frames);
            // assert_eq!(
            //     data.len(),
            //     usize::try_from(self.config.buffer_size).unwrap()
            //         * usize::from(self.config.channel_count.get())
            // );

            if self.fill_internal_buffer(out_frames) {
                self.fill_from_internal(audio_data);
            }
            // Publish status every callback, even when idle.
            self.send_state(stream_data);
        }
        // move |data, info| {
        //     assert_eq!(
        //         data.len(),
        //         usize::try_from(self.config.buffer_size).unwrap()
        //             * usize::from(self.config.channel_count.get())
        //     );
        //     self.send_state(Some(info));
        // }
    }

    // pub fn get_callback(mut self) -> impl FnMut(&mut [Frame], S::BufferInformation) {
    // move |data, info| {
    //     assert_eq!(data.len(), self.config.buffer_size as usize * self.config.channel_count.get() as usize)

    //     if self.fill_internal_buffer() {
    //         self.fill_from_internal(data);
    //     }
    // }
    // }
}
221
// Reference 440 Hz sine generator, only used for testing.
// If not testing it is unused, hence the allow.
#[allow(dead_code)]
fn sine(output: &mut [[f32; 2]], sample_rate: f32) {
    // Phase restarts at zero on every call, so consecutive calls are not
    // phase-continuous; acceptable for a test signal.
    let mut sample_clock = 0f32;
    for frame in output {
        // Advance one sample, wrapping once per second's worth of samples.
        sample_clock = (sample_clock + 1.) % sample_rate;
        let value = (sample_clock * 440. * 2. * std::f32::consts::PI / sample_rate).sin();
        // Same value on both stereo channels (idiomatic indexing instead of
        // explicit `IndexMut::index_mut` calls).
        frame[0] = value;
        frame[1] = value;
    }
}
233}