// Serenity Operating System
1/*
2 * Copyright (c) 2021, kleines Filmröllchen <filmroellchen@serenityos.org>
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <AK/FixedArray.h>
8#include <AK/NoAllocationGuard.h>
9#include <AK/NonnullRefPtr.h>
10#include <AK/Optional.h>
11#include <AK/StdLibExtras.h>
12#include <AK/TypedTransfer.h>
13#include <AK/Types.h>
14#include <LibDSP/Music.h>
15#include <LibDSP/Processor.h>
16#include <LibDSP/Track.h>
17
18namespace DSP {
19
20bool Track::add_processor(NonnullRefPtr<Processor> new_processor)
21{
22 m_processor_chain.append(move(new_processor));
23 if (!check_processor_chain_valid()) {
24 (void)m_processor_chain.take_last();
25 return false;
26 }
27 return true;
28}
29
30bool Track::check_processor_chain_valid_with_initial_type(SignalType initial_type) const
31{
32 Processor const* previous_processor = nullptr;
33 for (auto& processor : m_processor_chain) {
34 // The first processor must have the given initial signal type as input.
35 if (previous_processor == nullptr) {
36 if (processor->input_type() != initial_type)
37 return false;
38 } else if (previous_processor->output_type() != processor->input_type())
39 return false;
40 previous_processor = processor.ptr();
41 }
42 return true;
43}
44
45NonnullRefPtr<Synthesizers::Classic> Track::synth()
46{
47 return static_ptr_cast<Synthesizers::Classic>(m_processor_chain[0]);
48}
49NonnullRefPtr<Effects::Delay> Track::delay()
50{
51 return static_ptr_cast<Effects::Delay>(m_processor_chain[1]);
52}
53
54bool AudioTrack::check_processor_chain_valid() const
55{
56 return check_processor_chain_valid_with_initial_type(SignalType::Sample);
57}
58
59bool NoteTrack::check_processor_chain_valid() const
60{
61 return check_processor_chain_valid_with_initial_type(SignalType::Note);
62}
63
64ErrorOr<void> Track::resize_internal_buffers_to(size_t buffer_size)
65{
66 m_secondary_sample_buffer = TRY(FixedArray<Sample>::create(buffer_size));
67 return {};
68}
69
// Renders this track's next chunk of audio into `output_signal` by computing
// the current clips' signal and running it through the processor chain, then
// the mastering processor. `output_signal` must match the size set via
// resize_internal_buffers_to().
void Track::current_signal(FixedArray<Sample>& output_signal)
{
    // This is real-time code. We must NEVER EVER EVER allocate.
    NoAllocationGuard guard;
    VERIFY(m_secondary_sample_buffer.type() == SignalType::Sample);
    VERIFY(output_signal.size() == m_secondary_sample_buffer.get<FixedArray<Sample>>().size());

    // Fills m_current_signal with whatever the track's clips produce right now.
    compute_current_clips_signal();
    Signal* source_signal = &m_current_signal;
    // This provides an audio buffer of the right size. It is not allocated here, but whenever we are informed about a buffer size change.
    Signal* target_signal = &m_secondary_sample_buffer;

    for (auto& processor : m_processor_chain) {
        // Depending on what the processor needs to have as output, we need to place either a pre-allocated note hash map or a pre-allocated sample buffer in the target signal.
        if (processor->output_type() == SignalType::Note)
            target_signal = &m_secondary_note_buffer;
        else
            target_signal = &m_secondary_sample_buffer;
        processor->process(*source_signal, *target_signal);
        // Ping-pong: this processor's output becomes the next processor's input.
        swap(source_signal, target_signal);
    }
    // By the end of the chain we must have ended up with samples; this is
    // enforced here at runtime rather than by the chain validity check.
    VERIFY(source_signal->type() == SignalType::Sample);
    VERIFY(output_signal.size() == source_signal->get<FixedArray<Sample>>().size());
    // The last processor is the fixed mastering processor. This can write directly to the output data. We also just trust this processor that it does the right thing :^)
    m_track_mastering->process_to_fixed_array(*source_signal, output_signal);
}
96
97void NoteTrack::compute_current_clips_signal()
98{
99 // FIXME: Handle looping properly
100 u32 start_time = m_transport->time();
101 VERIFY(m_secondary_sample_buffer.type() == SignalType::Sample);
102 size_t sample_count = m_secondary_sample_buffer.get<FixedArray<Sample>>().size();
103 u32 end_time = start_time + static_cast<u32>(sample_count);
104
105 // Find the currently playing clips.
106 // We can't handle more than 32 playing clips at a time, but that is a ridiculous number.
107 Array<RefPtr<NoteClip>, 32> playing_clips;
108 size_t playing_clips_index = 0;
109 for (auto& clip : m_clips) {
110 // A clip is playing if its start time or end time fall in the current time range.
111 // Or, if they both enclose the current time range.
112 if ((clip->start() <= start_time && clip->end() >= end_time)
113 || (clip->start() >= start_time && clip->start() < end_time)
114 || (clip->end() > start_time && clip->end() <= end_time)) {
115 VERIFY(playing_clips_index < playing_clips.size());
116 playing_clips[playing_clips_index++] = clip;
117 }
118 }
119
120 auto& current_notes = m_current_signal.get<RollNotes>();
121 m_current_signal.get<RollNotes>().fill({});
122
123 if (playing_clips_index == 0)
124 return;
125
126 for (auto const& playing_clip : playing_clips) {
127 if (playing_clip.is_null())
128 break;
129 for (auto const& note : playing_clip->notes()) {
130 if (note.is_playing_during(start_time, end_time))
131 current_notes[note.pitch] = note;
132 }
133 }
134
135 for (auto const& keyboard_note : m_keyboard->notes()) {
136 if (!keyboard_note.has_value() || !keyboard_note->is_playing_during(start_time, end_time))
137 continue;
138 // Always overwrite roll notes with keyboard notes.
139 current_notes[keyboard_note->pitch] = keyboard_note;
140 }
141}
142
// Audio (sample) clip playback is not implemented yet; reaching this crashes
// with a TODO assertion.
void AudioTrack::compute_current_clips_signal()
{
    // This is quite involved as we need to look at multiple clips and take looping into account.
    TODO();
}
148
149Optional<RollNote> NoteTrack::note_at(u32 time, u8 pitch) const
150{
151 for (auto& clip : m_clips) {
152 if (time >= clip->start() && time <= clip->end())
153 return clip->note_at(time, pitch);
154 }
155
156 return {};
157}
158
159void NoteTrack::set_note(RollNote note)
160{
161 for (auto& clip : m_clips) {
162 if (clip->start() <= note.on_sample && clip->end() >= note.on_sample)
163 clip->set_note(note);
164 }
165}
166
167void NoteTrack::remove_note(RollNote note)
168{
169 for (auto& clip : m_clips)
170 clip->remove_note(note);
171}
172
173void NoteTrack::add_clip(u32 start_time, u32 end_time)
174{
175 m_clips.append(AK::make_ref_counted<NoteClip>(start_time, end_time));
176}
177
178}