tangled
alpha
login
or
join now
inkreas.ing
/
torque-tracker-engine
0
fork
atom
old school music tracker audio backend
0
fork
atom
overview
issues
pulls
pipelines
small cleanups & bugfix
inkreas.ing
8 months ago
bbd60cda
22b80416
+65
-36
8 changed files
expand all
collapse all
unified
split
Cargo.lock
examples
live_note.rs
pattern_playback.rs
src
audio_processing
playback.rs
sample.rs
live_audio.rs
manager.rs
sample.rs
+1
-1
Cargo.lock
···
707
707
]
708
708
709
709
[[package]]
710
710
-
name = "torque_tracker_engine"
710
710
+
name = "torque-tracker-engine"
711
711
version = "0.1.0"
712
712
dependencies = [
713
713
"cpal",
+15
-5
examples/live_note.rs
···
1
1
-
use std::{num::NonZeroU16, time::Duration};
1
1
+
use std::{
2
2
+
num::{NonZero, NonZeroU16},
3
3
+
time::Duration,
4
4
+
};
2
5
3
6
use cpal::traits::{DeviceTrait, HostTrait};
4
7
use torque_tracker_engine::{
8
8
+
file::impulse_format::sample::VibratoWave,
5
9
manager::{AudioManager, OutputConfig, ToWorkerMsg},
6
10
project::{
7
11
event_command::NoteCommand,
···
22
26
.map(|result| <f32 as dasp::Sample>::from_sample(result.unwrap()));
23
27
let sample = Sample::new_mono(sample_data);
24
28
let meta = SampleMetaData {
25
25
-
sample_rate: spec.sample_rate,
29
29
+
sample_rate: NonZero::new(spec.sample_rate).unwrap(),
26
30
default_volume: 150,
27
27
-
..Default::default()
31
31
+
global_volume: 20,
32
32
+
default_pan: None,
33
33
+
vibrato_speed: 0,
34
34
+
vibrato_depth: 0,
35
35
+
vibrato_rate: 0,
36
36
+
vibrato_waveform: VibratoWave::default(),
37
37
+
base_note: Note::new(64).unwrap(),
28
38
};
29
39
30
40
manager
···
41
51
let config = OutputConfig {
42
52
buffer_size: 2048,
43
53
channel_count: NonZeroU16::new(2).unwrap(),
44
44
-
sample_rate: default_config.sample_rate().0,
54
54
+
sample_rate: NonZero::new(default_config.sample_rate().0).unwrap(),
45
55
};
46
56
47
57
let mut audio_callback = manager.get_callback::<f32>(config);
···
55
65
.unwrap();
56
66
57
67
let note_event = NoteEvent {
58
58
-
note: Note::new(90).unwrap(),
68
68
+
note: Note::new(70).unwrap(),
59
69
sample_instr: 1,
60
70
vol: VolumeEffect::None,
61
71
command: NoteCommand::None,
+17
-10
examples/pattern_playback.rs
···
1
1
-
use std::{num::NonZeroU16, time::Duration};
1
1
+
use std::{
2
2
+
num::{NonZero, NonZeroU16},
3
3
+
time::Duration,
4
4
+
};
2
5
3
6
use cpal::traits::{DeviceTrait, HostTrait};
4
4
-
use tracker_engine::{
7
7
+
use torque_tracker_engine::{
8
8
+
file::impulse_format::{header::PatternOrder, sample::VibratoWave},
5
9
manager::{AudioManager, OutputConfig, PlaybackSettings, ToWorkerMsg},
6
10
project::{
7
11
event_command::NoteCommand,
···
23
27
.map(|result| <f32 as dasp::Sample>::from_sample(result.unwrap()));
24
28
let sample = Sample::new_mono(sample_data);
25
29
let meta = SampleMetaData {
26
26
-
sample_rate: spec.sample_rate,
30
30
+
sample_rate: NonZero::new(spec.sample_rate).unwrap(),
27
31
base_note: Note::new(64).unwrap(),
28
28
-
..Default::default()
32
32
+
default_volume: 20,
33
33
+
global_volume: 20,
34
34
+
default_pan: None,
35
35
+
vibrato_speed: 0,
36
36
+
vibrato_depth: 0,
37
37
+
vibrato_rate: 0,
38
38
+
vibrato_waveform: VibratoWave::default(),
29
39
};
30
40
31
41
let mut song = manager.try_edit_song().unwrap();
···
47
57
song.apply_operation(SongOperation::PatternOperation(0, command))
48
58
.unwrap();
49
59
}
50
50
-
song.apply_operation(SongOperation::SetOrder(
51
51
-
0,
52
52
-
tracker_engine::file::impulse_format::header::PatternOrder::Number(0),
53
53
-
))
54
54
-
.unwrap();
60
60
+
song.apply_operation(SongOperation::SetOrder(0, PatternOrder::Number(0)))
61
61
+
.unwrap();
55
62
56
63
song.finish();
57
64
···
63
70
let config = OutputConfig {
64
71
buffer_size: 1024,
65
72
channel_count: NonZeroU16::new(2).unwrap(),
66
66
-
sample_rate: default_config.sample_rate().0,
73
73
+
sample_rate: NonZero::new(default_config.sample_rate().0).unwrap(),
67
74
};
68
75
69
76
let mut callback = manager.get_callback::<f32>(config);
+6
-6
src/audio_processing/playback.rs
···
1
1
-
use std::ops::ControlFlow;
1
1
+
use std::{num::NonZero, ops::ControlFlow};
2
2
3
3
use crate::{
4
4
audio_processing::{sample::SamplePlayer, Frame},
···
108
108
frame: u32,
109
109
110
110
// add current state to support Effects
111
111
-
samplerate: u32,
111
111
+
samplerate: NonZero<u32>,
112
112
113
113
voices: [Option<SamplePlayer>; PlaybackState::VOICES],
114
114
}
···
123
123
PlaybackIter { state: self, song }
124
124
}
125
125
126
126
-
fn frames_per_tick(samplerate: u32, tempo: u8) -> u32 {
127
127
-
(samplerate * 10) / u32::from(tempo)
126
126
+
fn frames_per_tick(samplerate: NonZero<u32>, tempo: u8) -> u32 {
127
127
+
(samplerate.get() * 10) / u32::from(tempo)
128
128
}
129
129
130
130
pub fn get_status(&self) -> PlaybackStatus {
···
134
134
}
135
135
}
136
136
137
137
-
pub fn set_samplerate(&mut self, samplerate: u32) {
137
137
+
pub fn set_samplerate(&mut self, samplerate: NonZero<u32>) {
138
138
self.samplerate = samplerate;
139
139
self.voices
140
140
.iter_mut()
···
149
149
150
150
impl PlaybackState {
151
151
/// None if the settings in the order variant don't have any pattern to play
152
152
-
pub fn new(song: &Song, samplerate: u32, settings: PlaybackSettings) -> Option<Self> {
152
152
+
pub fn new(song: &Song, samplerate: NonZero<u32>, settings: PlaybackSettings) -> Option<Self> {
153
153
let mut out = Self {
154
154
position: PlaybackPosition::new(settings, song)?,
155
155
is_done: false,
+10
-8
src/audio_processing/sample.rs
···
1
1
-
use std::ops::ControlFlow;
1
1
+
use std::{num::NonZero, ops::ControlFlow};
2
2
3
3
use crate::{
4
4
project::note_event::Note,
···
51
51
// f32 ranges 0..1
52
52
position: (usize, f32),
53
53
// is_done: bool,
54
54
-
out_rate: u32,
54
54
+
out_rate: NonZero<u32>,
55
55
// how much the position is advanced for each output sample.
56
56
// computed from in and out rate
57
57
step_size: f32,
58
58
}
59
59
60
60
impl SamplePlayer {
61
61
-
pub fn new(sample: Sample, meta: SampleMetaData, out_rate: u32, note: Note) -> Self {
61
61
+
pub fn new(sample: Sample, meta: SampleMetaData, out_rate: NonZero<u32>, note: Note) -> Self {
62
62
let step_size = Self::compute_step_size(meta.sample_rate, out_rate, meta.base_note, note);
63
63
Self {
64
64
sample,
···
80
80
81
81
#[inline]
82
82
fn compute_step_size(
83
83
-
in_rate: u32,
84
84
-
out_rate: u32,
83
83
+
in_rate: NonZero<u32>,
84
84
+
out_rate: NonZero<u32>,
85
85
sample_base_note: Note,
86
86
playing_note: Note,
87
87
) -> f32 {
···
90
90
// manually reduced formula: 2^((play_note - sample_base_note)/12) * (outrate / inrate)
91
91
// herbie (https://herbie.uwplse.org/demo/index.html) can't optimize further: https://herbie.uwplse.org/demo/e096ef89ee257ad611dd56378bd139a065a6bea0.02e7ec5a3709ad3e06968daa97db50d636f1e44b/graph.html
92
92
(f32::from(i16::from(playing_note.get()) - i16::from(sample_base_note.get())) / 12.).exp2()
93
93
-
* (out_rate as f32 / in_rate as f32)
93
93
+
* (out_rate.get() as f32 / in_rate.get() as f32)
94
94
}
95
95
96
96
fn set_step_size(&mut self) {
···
102
102
);
103
103
}
104
104
105
105
-
pub fn set_out_samplerate(&mut self, samplerate: u32) {
105
105
+
pub fn set_out_samplerate(&mut self, samplerate: NonZero<u32>) {
106
106
self.out_rate = samplerate;
107
107
self.set_step_size();
108
108
}
···
137
137
}
138
138
139
139
fn compute_linear(&mut self) -> Frame {
140
140
+
// There are two types that implement ProcessingFrame: f32 and Frame, so stereo and mono audio data.
141
141
+
// the compiler will monomorphize this function to both versions and depending on whether that sample is mono
142
142
+
// or stereo the correct version will be called.
140
143
struct Linear(f32);
141
141
-
142
144
impl<S: ProcessingFrame> ProcessingFunction<2, S> for Linear {
143
145
fn process(self, data: &[S; 2]) -> S {
144
146
let diff = data[1] - data[0];
+3
-1
src/live_audio.rs
···
69
69
let sample_player = SamplePlayer::new(
70
70
sample.1.clone(),
71
71
sample.0,
72
72
-
self.config.sample_rate / 2,
72
72
+
// why is this div by 2 here?
73
73
+
// self.config.sample_rate / 2,
74
74
+
self.config.sample_rate,
73
75
note.note,
74
76
);
75
77
self.live_note = Some(sample_player);
+7
-3
src/manager.rs
···
1
1
-
use std::{fmt::Debug, num::NonZeroU16, time::Duration};
1
1
+
use std::{
2
2
+
fmt::Debug,
3
3
+
num::{NonZero, NonZeroU16},
4
4
+
time::Duration,
5
5
+
};
2
6
3
7
use simple_left_right::{WriteGuard, Writer};
4
8
···
166
170
167
171
let audio_worker = LiveAudio::new(reader, to_worker.1, from_worker.0, config);
168
172
let buffer_time =
169
169
-
Duration::from_millis((config.buffer_size * 1000 / config.buffer_size).into());
173
173
+
Duration::from_millis((config.buffer_size * 1000 / config.sample_rate).into());
170
174
171
175
self.stream_comms = Some(ActiveStreamComms {
172
176
buffer_time,
···
231
235
pub struct OutputConfig {
232
236
pub buffer_size: u32,
233
237
pub channel_count: NonZeroU16,
234
234
-
pub sample_rate: u32,
238
238
+
pub sample_rate: NonZero<u32>,
235
239
}
236
240
237
241
#[derive(Debug, Clone, Copy)]
+6
-2
src/sample.rs
···
1
1
use std::{
2
2
fmt::Debug,
3
3
iter::repeat_n,
4
4
+
num::NonZero,
4
5
ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign},
5
6
sync::Arc,
6
7
};
···
55
56
56
57
pub(crate) fn compute<
57
58
const N: usize,
59
59
+
// all implementations are generic over the ProcessingFrame type. here both possible ProcessingFrame types
60
60
+
// are required, so that it can be decided at runtime which one to call. Both are generated by the compiler
61
61
+
// from the generic implementation
58
62
Proc: ProcessingFunction<N, f32> + ProcessingFunction<N, Frame>,
59
63
>(
60
64
&self,
···
124
128
}
125
129
}
126
130
127
127
-
#[derive(Clone, Copy, Debug, Default)]
131
131
+
#[derive(Clone, Copy, Debug)]
128
132
pub struct SampleMetaData {
129
133
pub default_volume: u8,
130
134
pub global_volume: u8,
···
133
137
pub vibrato_depth: u8,
134
138
pub vibrato_rate: u8,
135
139
pub vibrato_waveform: VibratoWave,
136
136
-
pub sample_rate: u32,
140
140
+
pub sample_rate: NonZero<u32>,
137
141
pub base_note: Note,
138
142
}