/* jcs's openbsd hax (openbsd) */
1/* $OpenBSD: dev.c,v 1.127 2026/01/22 09:24:26 ratchov Exp $ */
2/*
3 * Copyright (c) 2008-2012 Alexandre Ratchov <alex@caoua.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17#include <stdio.h>
18#include <string.h>
19
20#include "abuf.h"
21#include "defs.h"
22#include "dev.h"
23#include "dsp.h"
24#include "siofile.h"
25#include "midi.h"
26#include "opt.h"
27#include "sysex.h"
28#include "utils.h"
29
/*
 * callbacks installed on a slot whose client disappeared
 * (see zomb_slotops below)
 */
void zomb_onmove(void *);
void zomb_onxrun(void *);
void zomb_onvol(void *);
void zomb_fill(void *);
void zomb_flush(void *);
void zomb_eof(void *);
void zomb_exit(void *);

/* per-cycle mixing/demuxing helpers */
void dev_mix_badd(struct dev *, struct slot *);
void dev_mix_adjvol(struct dev *);
void dev_sub_bcopy(struct dev *, struct slot *);

/* device clock, life-cycle and parameter handling */
void dev_onmove(struct dev *, int);
void dev_master(struct dev *, unsigned int);
void dev_cycle(struct dev *);
void dev_adjpar(struct dev *, int, int, int);
int dev_allocbufs(struct dev *);
void dev_freebufs(struct dev *);
int dev_ref(struct dev *);
void dev_unref(struct dev *);
int dev_init(struct dev *);
void dev_done(struct dev *);
struct dev *dev_bynum(int);
void dev_del(struct dev *);
unsigned int dev_roundof(struct dev *, unsigned int);
void dev_wakeup(struct dev *);

/* slot management */
void slot_del(struct slot *);
void slot_ready(struct slot *);
void slot_allocbufs(struct slot *);
void slot_freebufs(struct slot *);
void slot_skip_update(struct slot *);
void slot_write(struct slot *);
void slot_read(struct slot *);
int slot_skip(struct slot *);
/*
 * call-backs used for slots whose client vanished: they do nothing,
 * except zomb_eof() which detaches the slot for good
 */
struct slotops zomb_slotops = {
	zomb_onmove,
	zomb_onxrun,
	zomb_onvol,
	zomb_fill,
	zomb_flush,
	zomb_eof,
	zomb_exit
};

struct ctl *ctl_list = NULL;		/* global list of controls */
struct dev *dev_list = NULL;		/* list of all devices */
unsigned int dev_sndnum = 0;		/* number of devices created so far */

struct ctlslot ctlslot_array[DEV_NCTLSLOT];	/* control (MIDI) clients */
struct slot slot_array[DEV_NSLOT];		/* audio clients */

/*
 * we support/need a single MTC clock source only
 */
struct mtc mtc_array[1] = {
	{.dev = NULL, .tstate = MTC_STOP}
};
89
/*
 * clock tick for a zombie slot: client is gone, nothing to do
 */
void
zomb_onmove(void *arg)
{
}
94
/*
 * xrun notification for a zombie slot: client is gone, nothing to do
 */
void
zomb_onxrun(void *arg)
{
}
99
/*
 * volume change for a zombie slot: client is gone, nothing to do
 */
void
zomb_onvol(void *arg)
{
}
104
/*
 * fill request for a zombie slot: client is gone, nothing to do
 */
void
zomb_fill(void *arg)
{
}
109
/*
 * flush request for a zombie slot: client is gone, nothing to do
 */
void
zomb_flush(void *arg)
{
}
114
/*
 * end-of-file for a zombie slot: clear the ops pointer, which
 * marks the slot as free for reuse
 */
void
zomb_eof(void *arg)
{
	struct slot *s = arg;

#ifdef DEBUG
	logx(3, "slot%zu: %s", s - slot_array, __func__);
#endif
	s->ops = NULL;
}
125
/*
 * exit request for a zombie slot: client is gone already, only log
 */
void
zomb_exit(void *arg)
{
#ifdef DEBUG
	struct slot *s = arg;

	logx(3, "slot%zu: %s", s - slot_array, __func__);
#endif
}
135
136size_t
137chans_fmt(char *buf, size_t size, int mode, int pmin, int pmax, int rmin, int rmax)
138{
139 const char *sep = "";
140 char *end = buf + size;
141 char *p = buf;
142
143 if (mode & MODE_PLAY) {
144 p += snprintf(p, p < end ? end - p : 0, "play %d:%d", pmin, pmax);
145 sep = ", ";
146 }
147 if (mode & MODE_RECMASK) {
148 p += snprintf(p, p < end ? end - p : 0, "%s%s %d:%d", sep,
149 (mode & MODE_MON) ? "mon" : "rec", rmin, rmax);
150 }
151
152 return p - buf;
153}
154
155/*
156 * Broadcast MIDI data to all opts using this device
157 */
158void
159dev_midi_send(struct dev *d, void *msg, int msglen)
160{
161 struct opt *o;
162
163 for (o = opt_list; o != NULL; o = o->next) {
164 if (o->dev != d)
165 continue;
166 midi_send(o->midi, msg, msglen);
167 }
168}
169
/*
 * send a quarter frame MTC message; called with the number of device
 * ticks elapsed since the last call
 */
void
mtc_midi_qfr(struct mtc *mtc, int delta)
{
	unsigned char buf[2];
	unsigned int data;
	int qfrlen;

	mtc->delta += delta * MTC_SEC;
	/* duration of one quarter frame, 4 quarter frames per frame */
	qfrlen = mtc->dev->rate * (MTC_SEC / (4 * mtc->fps));
	while (mtc->delta >= qfrlen) {
		/*
		 * the 8-message quarter frame cycle sends the time-code
		 * one nibble at a time (low nibble first)
		 */
		switch (mtc->qfr) {
		case 0:
			data = mtc->fr & 0xf;
			break;
		case 1:
			data = mtc->fr >> 4;
			break;
		case 2:
			data = mtc->sec & 0xf;
			break;
		case 3:
			data = mtc->sec >> 4;
			break;
		case 4:
			data = mtc->min & 0xf;
			break;
		case 5:
			data = mtc->min >> 4;
			break;
		case 6:
			data = mtc->hr & 0xf;
			break;
		case 7:
			/* high nibble of hours also carries the frame rate id */
			data = (mtc->hr >> 4) | (mtc->fps_id << 1);
			/*
			 * tick messages are sent 2 frames ahead
			 */
			mtc->fr += 2;
			if (mtc->fr < mtc->fps)
				break;
			mtc->fr -= mtc->fps;
			mtc->sec++;
			if (mtc->sec < 60)
				break;
			mtc->sec = 0;
			mtc->min++;
			if (mtc->min < 60)
				break;
			mtc->min = 0;
			mtc->hr++;
			if (mtc->hr < 24)
				break;
			mtc->hr = 0;
			break;
		default:
			/* NOTREACHED */
			data = 0;
		}
		/* 0xf1 = MIDI quarter frame status byte */
		buf[0] = 0xf1;
		buf[1] = (mtc->qfr << 4) | data;
		mtc->qfr++;
		mtc->qfr &= 7;
		dev_midi_send(mtc->dev, buf, 2);
		mtc->delta -= qfrlen;
	}
}
239
/*
 * send a full frame MTC message and reset the quarter frame sequence
 */
void
mtc_midi_full(struct mtc *mtc)
{
	struct sysex x;
	unsigned int fps;

	/* start counting one device buffer ahead of the origin */
	mtc->delta = -MTC_SEC * (int)mtc->dev->bufsz;
	/*
	 * pick the largest frame rate for which a quarter frame
	 * corresponds to a whole number of device ticks
	 */
	if (mtc->dev->rate % (30 * 4 * mtc->dev->round) == 0) {
		mtc->fps_id = MTC_FPS_30;
		mtc->fps = 30;
	} else if (mtc->dev->rate % (25 * 4 * mtc->dev->round) == 0) {
		mtc->fps_id = MTC_FPS_25;
		mtc->fps = 25;
	} else {
		mtc->fps_id = MTC_FPS_24;
		mtc->fps = 24;
	}
#ifdef DEBUG
	logx(3, "%s: mtc full frame at %d, %d fps", mtc->dev->path, mtc->delta, mtc->fps);
#endif
	fps = mtc->fps;
	/* split the origin (expressed in MTC_SEC units) into hr:min:sec:fr */
	mtc->hr = (mtc->origin / (MTC_SEC * 3600)) % 24;
	mtc->min = (mtc->origin / (MTC_SEC * 60)) % 60;
	mtc->sec = (mtc->origin / (MTC_SEC)) % 60;
	mtc->fr = (mtc->origin / (MTC_SEC / fps)) % fps;

	x.start = SYSEX_START;
	x.type = SYSEX_TYPE_RT;
	x.dev = SYSEX_DEV_ANY;
	x.id0 = SYSEX_MTC;
	x.id1 = SYSEX_MTC_FULL;
	/* top bits of the hour byte carry the frame rate id */
	x.u.full.hr = mtc->hr | (mtc->fps_id << 5);
	x.u.full.min = mtc->min;
	x.u.full.sec = mtc->sec;
	x.u.full.fr = mtc->fr;
	x.u.full.end = SYSEX_END;
	mtc->qfr = 0;
	dev_midi_send(mtc->dev, (unsigned char *)&x, SYSEX_SIZE(full));
}
282
/*
 * send a master volume MIDI message; if the software master control is
 * disabled, report the maximum of the hardware output level controls
 * scaled to the MIDI 0..127 range
 */
void
dev_midi_master(struct dev *d)
{
	struct ctl *c;
	unsigned int master, v;
	struct sysex x;

	if (d->master_enabled)
		master = d->master;
	else {
		master = 0;
		for (c = ctl_list; c != NULL; c = c->next) {
			/* only numeric "output.level" controls of this device */
			if (c->type != CTL_NUM ||
			    strcmp(c->group, d->name) != 0 ||
			    strcmp(c->node0.name, "output") != 0 ||
			    strcmp(c->func, "level") != 0)
				continue;
			if (c->u.any.arg0 != d)
				continue;
			/* scale to 0..127 with rounding */
			v = (c->curval * 127 + c->maxval / 2) / c->maxval;
			if (master < v)
				master = v;
		}
	}

	memset(&x, 0, sizeof(struct sysex));
	x.start = SYSEX_START;
	x.type = SYSEX_TYPE_RT;
	x.dev = SYSEX_DEV_ANY;
	x.id0 = SYSEX_CONTROL;
	x.id1 = SYSEX_MASTER;
	x.u.master.fine = 0;
	x.u.master.coarse = master;
	x.u.master.end = SYSEX_END;
	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(master));
}
322
/*
 * Skip up to s->skip pending cycles of the given slot: produce a block
 * of silence on the record side and drop one block on the play side.
 * Return the number of cycles actually skipped.
 */
int
slot_skip(struct slot *s)
{
	unsigned char *data = (unsigned char *)0xdeadbeef; /* please gcc */
	int max, count;

	max = s->skip;
	while (s->skip > 0) {
		/* need room for a full silence block on the record side */
		if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
			data = abuf_wgetblk(&s->sub.buf, &count);
			if (count < s->round * s->sub.bpf)
				break;
		}
		/* need a full block to drop on the play side */
		if (s->mode & MODE_PLAY) {
			if (s->mix.buf.used < s->round * s->mix.bpf)
				break;
		}
#ifdef DEBUG
		logx(4, "slot%zu: skipped a cycle", s - slot_array);
#endif
		if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
			if (s->sub.encbuf)
				enc_sil_do(&s->sub.enc, data, s->round);
			else
				memset(data, 0, s->round * s->sub.bpf);
			abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
		}
		if (s->mode & MODE_PLAY) {
			abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
		}
		s->skip--;
	}
	return max - s->skip;
}
357
/*
 * Mix the slot input block over the output block
 */
void
dev_mix_badd(struct dev *d, struct slot *s)
{
	adata_t *idata, *odata, *in;
	int icount, i, offs, vol, nch;

	odata = DEV_PBUF(d);
	idata = (adata_t *)abuf_rgetblk(&s->mix.buf, &icount);
#ifdef DEBUG
	if (icount < s->round * s->mix.bpf) {
		logx(0, "slot%zu: not enough data to mix (%u bytes)",
		    s - slot_array, icount);
		panic();
	}
#endif
	if (!(s->opt->mode & MODE_PLAY)) {
		/*
		 * playback not allowed in opt structure, produce silence
		 */
		abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
		return;
	}


	/*
	 * Apply the following processing chain:
	 *
	 *	dec -> resamp-> cmap
	 *
	 * where the first two are optional.
	 */

	in = idata;

	if (s->mix.decbuf) {
		dec_do(&s->mix.dec, (void *)in, s->mix.decbuf, s->round);
		in = s->mix.decbuf;
	}

	if (s->mix.resampbuf) {
		resamp_do(&s->mix.resamp,
		    in, s->mix.resampbuf, s->round, d->round);
		in = s->mix.resampbuf;
	}

	/* volume is scaled down by join to compensate channel folding */
	nch = s->mix.cmap.nch;
	vol = ADATA_MUL(s->mix.weight, s->mix.vol) / s->mix.join;
	cmap_add(&s->mix.cmap, in, odata, vol, d->round);

	/* fold extra input channel groups onto the same output range */
	offs = 0;
	for (i = s->mix.join - 1; i > 0; i--) {
		offs += nch;
		cmap_add(&s->mix.cmap, in + offs, odata, vol, d->round);
	}

	/* duplicate the input over additional output channel groups */
	offs = 0;
	for (i = s->mix.expand - 1; i > 0; i--) {
		offs += nch;
		cmap_add(&s->mix.cmap, in, odata + offs, vol, d->round);
	}

	abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
}
424
/*
 * Normalize input levels.
 */
void
dev_mix_adjvol(struct dev *d)
{
	unsigned int n;
	struct slot *i, *j;
	int jcmax, icmax, weight;

	for (i = d->slot_list; i != NULL; i = i->next) {
		if (!(i->mode & MODE_PLAY))
			continue;
		icmax = i->opt->pmin + i->mix.nch - 1;
		weight = ADATA_UNIT;
		if (d->autovol) {
			/*
			 * count the number of inputs that have
			 * overlapping channel sets
			 */
			n = 0;
			for (j = d->slot_list; j != NULL; j = j->next) {
				if (!(j->mode & MODE_PLAY))
					continue;
				jcmax = j->opt->pmin + j->mix.nch - 1;
				if (i->opt->pmin <= jcmax &&
				    icmax >= j->opt->pmin)
					n++;
			}
			/* n >= 1 because the slot overlaps itself */
			weight /= n;
		}
		if (weight > i->opt->maxweight)
			weight = i->opt->maxweight;
		/* apply the software master volume, if enabled */
		i->mix.weight = d->master_enabled ?
		    ADATA_MUL(weight, MIDI_TO_ADATA(d->master)) : weight;
#ifdef DEBUG
		logx(3, "slot%zu: set weight: %d / %d", i - slot_array, i->mix.weight,
		    i->opt->maxweight);
#endif
	}
}
466
/*
 * Copy data from slot to device
 */
void
dev_sub_bcopy(struct dev *d, struct slot *s)
{
	adata_t *idata, *enc_out, *resamp_out, *cmap_out;
	void *odata;
	int ocount, moffs;
	int i, vol, offs, nch;

	odata = (adata_t *)abuf_wgetblk(&s->sub.buf, &ocount);
#ifdef DEBUG
	if (ocount < s->round * s->sub.bpf) {
		logx(0, "dev_sub_bcopy: not enough space");
		panic();
	}
#endif
	if (s->opt->mode & MODE_MON) {
		/* monitor: read back the freshly mixed play block */
		moffs = d->poffs + d->round;
		if (moffs == d->psize)
			moffs = 0;
		idata = d->pbuf + moffs * d->pchan;
	} else if (s->opt->mode & MODE_REC) {
		idata = d->rbuf;
	} else {
		/*
		 * recording not allowed in opt structure, produce silence
		 */
		enc_sil_do(&s->sub.enc, odata, s->round);
		abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
		return;
	}

	/*
	 * Apply the following processing chain:
	 *
	 *	cmap -> resamp -> enc
	 *
	 * where the last two are optional.
	 */

	enc_out = odata;
	resamp_out = s->sub.encbuf ? s->sub.encbuf : enc_out;
	cmap_out = s->sub.resampbuf ? s->sub.resampbuf : resamp_out;

	/* volume is scaled down by join to compensate channel folding */
	nch = s->sub.cmap.nch;
	vol = ADATA_UNIT / s->sub.join;
	cmap_copy(&s->sub.cmap, idata, cmap_out, vol, d->round);

	/* fold extra device channel groups onto the same output range */
	offs = 0;
	for (i = s->sub.join - 1; i > 0; i--) {
		offs += nch;
		cmap_add(&s->sub.cmap, idata + offs, cmap_out, vol, d->round);
	}

	/* duplicate the device channels over additional output groups */
	offs = 0;
	for (i = s->sub.expand - 1; i > 0; i--) {
		offs += nch;
		cmap_copy(&s->sub.cmap, idata, cmap_out + offs, vol, d->round);
	}

	if (s->sub.resampbuf) {
		resamp_do(&s->sub.resamp,
		    s->sub.resampbuf, resamp_out, d->round, s->round);
	}

	if (s->sub.encbuf)
		enc_do(&s->sub.enc, s->sub.encbuf, (void *)enc_out, s->round);

	abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
}
539
540/*
541 * run a one block cycle: consume one recorded block from
542 * rbuf and produce one play block in pbuf
543 */
544void
545dev_cycle(struct dev *d)
546{
547 struct slot *s, **ps;
548 unsigned char *base;
549 int nsamp;
550
551 /*
552 * check if the device is actually used. If it isn't,
553 * then close it
554 */
555 if (d->slot_list == NULL && d->idle >= d->bufsz &&
556 (mtc_array[0].dev != d || mtc_array[0].tstate != MTC_RUN)) {
557 logx(2, "%s: device stopped", d->path);
558 dev_sio_stop(d);
559 d->pstate = DEV_INIT;
560 if (d->refcnt == 0)
561 dev_close(d);
562 return;
563 }
564
565 if (d->prime > 0) {
566#ifdef DEBUG
567 logx(4, "%s: empty cycle, prime = %u", d->path, d->prime);
568#endif
569 base = (unsigned char *)DEV_PBUF(d);
570 nsamp = d->round * d->pchan;
571 memset(base, 0, nsamp * sizeof(adata_t));
572 if (d->encbuf) {
573 enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
574 d->encbuf, d->round);
575 }
576 d->prime -= d->round;
577 return;
578 }
579
580 d->delta -= d->round;
581#ifdef DEBUG
582 logx(4, "%s: full cycle: delta = %d", d->path, d->delta);
583#endif
584 if (d->mode & MODE_PLAY) {
585 base = (unsigned char *)DEV_PBUF(d);
586 nsamp = d->round * d->pchan;
587 memset(base, 0, nsamp * sizeof(adata_t));
588 }
589 if ((d->mode & MODE_REC) && d->decbuf)
590 dec_do(&d->dec, d->decbuf, (unsigned char *)d->rbuf, d->round);
591 ps = &d->slot_list;
592 while ((s = *ps) != NULL) {
593#ifdef DEBUG
594 logx(4, "slot%zu: running, skip = %d", s - slot_array, s->skip);
595#endif
596 d->idle = 0;
597
598 /*
599 * skip cycles for XRUN_SYNC correction
600 */
601 slot_skip(s);
602 if (s->skip < 0) {
603 s->skip++;
604 ps = &s->next;
605 continue;
606 }
607
608#ifdef DEBUG
609 if (s->pstate == SLOT_STOP && !(s->mode & MODE_PLAY)) {
610 logx(0, "slot%zu: rec-only slots can't be drained",
611 s - slot_array);
612 panic();
613 }
614#endif
615 /*
616 * check if stopped stream finished draining
617 */
618 if (s->pstate == SLOT_STOP &&
619 s->mix.buf.used < s->round * s->mix.bpf) {
620 /*
621 * partial blocks are zero-filled by socket
622 * layer, so s->mix.buf.used == 0 and we can
623 * destroy the buffer
624 */
625 *ps = s->next;
626 s->pstate = SLOT_INIT;
627 s->ops->eof(s->arg);
628 slot_freebufs(s);
629 dev_mix_adjvol(d);
630#ifdef DEBUG
631 logx(3, "slot%zu: drained", s - slot_array);
632#endif
633 continue;
634 }
635
636 /*
637 * check for xruns
638 */
639 if (((s->mode & MODE_PLAY) &&
640 s->mix.buf.used < s->round * s->mix.bpf) ||
641 ((s->mode & MODE_RECMASK) &&
642 s->sub.buf.len - s->sub.buf.used <
643 s->round * s->sub.bpf)) {
644
645 if (!s->paused) {
646#ifdef DEBUG
647 logx(3, "slot%zu: xrun, paused", s - slot_array);
648#endif
649 s->paused = 1;
650 s->ops->onxrun(s->arg);
651 }
652 if (s->xrun == XRUN_IGNORE) {
653 s->delta -= s->round;
654 ps = &s->next;
655 } else if (s->xrun == XRUN_SYNC) {
656 s->skip++;
657 ps = &s->next;
658 } else if (s->xrun == XRUN_ERROR) {
659 s->ops->exit(s->arg);
660 *ps = s->next;
661 } else {
662#ifdef DEBUG
663 logx(0, "slot%zu: bad xrun mode", s - slot_array);
664 panic();
665#endif
666 }
667 continue;
668 } else {
669 if (s->paused) {
670#ifdef DEBUG
671 logx(3, "slot%zu: resumed\n", s - slot_array);
672#endif
673 s->paused = 0;
674 }
675 }
676
677 if ((s->mode & MODE_RECMASK) && !(s->pstate == SLOT_STOP)) {
678 if (s->sub.prime == 0) {
679 dev_sub_bcopy(d, s);
680 s->ops->flush(s->arg);
681 } else {
682#ifdef DEBUG
683 logx(3, "slot%zu: prime = %d", s - slot_array,
684 s->sub.prime);
685#endif
686 s->sub.prime--;
687 }
688 }
689 if (s->mode & MODE_PLAY) {
690 dev_mix_badd(d, s);
691 if (s->pstate != SLOT_STOP)
692 s->ops->fill(s->arg);
693 }
694 ps = &s->next;
695 }
696 if ((d->mode & MODE_PLAY) && d->encbuf) {
697 enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
698 d->encbuf, d->round);
699 }
700}
701
/*
 * called at every clock tick by the device
 */
void
dev_onmove(struct dev *d, int delta)
{
	long long pos;
	struct slot *s, *snext;

	d->delta += delta;

	if (d->slot_list == NULL)
		d->idle += delta;

	for (s = d->slot_list; s != NULL; s = snext) {
		/*
		 * s->ops->onmove() may remove the slot
		 */
		snext = s->next;
		/*
		 * convert the device tick to the slot sample rate,
		 * carrying the remainder between calls
		 */
		pos = s->delta_rem +
		    (long long)s->delta * d->round +
		    (long long)delta * s->round;
		s->delta = pos / (int)d->round;
		s->delta_rem = pos % d->round;
		if (s->delta_rem < 0) {
			s->delta_rem += d->round;
			s->delta--;
		}
		if (s->delta >= 0)
			s->ops->onmove(s->arg);
	}

	if (mtc_array[0].dev == d && mtc_array[0].tstate == MTC_RUN)
		mtc_midi_qfr(&mtc_array[0], delta);
}
737
/*
 * Set the master volume: use the software master control if enabled,
 * otherwise forward the value to the hardware output level controls.
 */
void
dev_master(struct dev *d, unsigned int master)
{
	struct ctl *c;
	unsigned int v;

	logx(2, "%s: master volume set to %u", d->path, master);

	if (d->master_enabled) {
		d->master = master;
		if (d->mode & MODE_PLAY)
			dev_mix_adjvol(d);
	} else {
		for (c = ctl_list; c != NULL; c = c->next) {
			/* only hardware controls of this device */
			if (c->scope != CTL_HW || c->u.hw.dev != d)
				continue;
			if (c->type != CTL_NUM ||
			    strcmp(c->group, d->name) != 0 ||
			    strcmp(c->node0.name, "output") != 0 ||
			    strcmp(c->func, "level") != 0)
				continue;
			/* scale 0..127 to the control's range, rounded */
			v = (master * c->maxval + 64) / 127;
			ctl_setval(c, v);
		}
	}
}
764
765/*
766 * Create a sndio device
767 */
768struct dev *
769dev_new(char *path, struct aparams *par,
770 unsigned int mode, unsigned int bufsz, unsigned int round,
771 unsigned int rate, unsigned int hold, unsigned int autovol)
772{
773 struct dev *d, **pd;
774
775 if (dev_sndnum == DEV_NMAX) {
776 logx(1, "too many devices");
777 return NULL;
778 }
779 d = xmalloc(sizeof(struct dev));
780 d->path = path;
781 d->num = dev_sndnum++;
782
783 d->reqpar = *par;
784 d->reqmode = mode;
785 d->reqpchan = d->reqrchan = 0;
786 d->reqbufsz = bufsz;
787 d->reqround = round;
788 d->reqrate = rate;
789 d->hold = hold;
790 d->autovol = autovol;
791 d->refcnt = 0;
792 d->pstate = DEV_CFG;
793 d->slot_list = NULL;
794 d->master = MIDI_MAXCTL;
795 d->master_enabled = 0;
796 snprintf(d->name, CTL_NAMEMAX, "%u", d->num);
797 for (pd = &dev_list; *pd != NULL; pd = &(*pd)->next)
798 ;
799 d->next = *pd;
800 *pd = d;
801 return d;
802}
803
804/*
805 * adjust device parameters and mode
806 */
807void
808dev_adjpar(struct dev *d, int mode,
809 int pmax, int rmax)
810{
811 d->reqmode |= mode & MODE_AUDIOMASK;
812 if (mode & MODE_PLAY) {
813 if (d->reqpchan < pmax + 1)
814 d->reqpchan = pmax + 1;
815 }
816 if (mode & MODE_REC) {
817 if (d->reqrchan < rmax + 1)
818 d->reqrchan = rmax + 1;
819 }
820}
821
/*
 * Open the device with the dev_reqxxx capabilities. Setup a mixer, demuxer,
 * monitor, midi control, and any necessary conversions.
 *
 * Note that record and play buffers are always allocated, even if the
 * underlying device doesn't support both modes.
 */
int
dev_allocbufs(struct dev *d)
{
	char enc_str[ENCMAX], chans_str[64];

	/*
	 * Create record buffer.
	 */

	/* Create device <-> demuxer buffer */
	d->rbuf = xmalloc(d->round * d->rchan * sizeof(adata_t));

	/* Insert a converter, if needed. */
	if (!aparams_native(&d->par)) {
		dec_init(&d->dec, &d->par, d->rchan);
		d->decbuf = xmalloc(d->round * d->rchan * d->par.bps);
	} else
		d->decbuf = NULL;

	/*
	 * Create play buffer
	 */

	/* Create device <-> mixer buffer */
	d->poffs = 0;
	d->psize = d->bufsz + d->round;
	d->pbuf = xmalloc(d->psize * d->pchan * sizeof(adata_t));
	d->mode |= MODE_MON;

	/* Append a converter, if needed. */
	if (!aparams_native(&d->par)) {
		enc_init(&d->enc, &d->par, d->pchan);
		d->encbuf = xmalloc(d->round * d->pchan * d->par.bps);
	} else
		d->encbuf = NULL;

	/*
	 * Initially fill the record buffer with zeroed samples. This ensures
	 * that when a client records from a play-only device the client just
	 * gets silence.
	 */
	memset(d->rbuf, 0, d->round * d->rchan * sizeof(adata_t));

	/*
	 * comma expressions format into the on-stack buffers first,
	 * then pass them as the logx() arguments
	 */
	logx(2, "%s: %dHz, %s, %s, %d blocks of %d frames",
	    d->path, d->rate,
	    (aparams_enctostr(&d->par, enc_str), enc_str),
	    (chans_fmt(chans_str, sizeof(chans_str),
		d->mode & (MODE_PLAY | MODE_REC),
		0, d->pchan - 1, 0, d->rchan - 1), chans_str),
	    d->bufsz / d->round, d->round);

	return 1;
}
882
883/*
884 * Reset parameters and open the device.
885 */
886int
887dev_open(struct dev *d)
888{
889 d->mode = d->reqmode;
890 d->round = d->reqround;
891 d->bufsz = d->reqbufsz;
892 d->rate = d->reqrate;
893 d->pchan = d->reqpchan;
894 d->rchan = d->reqrchan;
895 d->par = d->reqpar;
896 if (d->pchan == 0)
897 d->pchan = 2;
898 if (d->rchan == 0)
899 d->rchan = 2;
900 if (!dev_sio_open(d)) {
901 logx(1, "%s: failed to open audio device", d->path);
902 return 0;
903 }
904 if (!dev_allocbufs(d))
905 return 0;
906
907 d->pstate = DEV_INIT;
908 return 1;
909}
910
/*
 * Force all slots to exit and close device, called after an error
 */
void
dev_abort(struct dev *d)
{
	int i;
	struct slot *s;
	struct ctlslot *c;
	struct opt *o;

	/* tell every audio client using this device to exit */
	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
		if (s->opt == NULL || s->opt->dev != d)
			continue;
		if (s->ops) {
			s->ops->exit(s->arg);
			s->ops = NULL;
		}
	}
	d->slot_list = NULL;

	/* same for control clients and MIDI, per opt */
	for (o = opt_list; o != NULL; o = o->next) {
		if (o->dev != d)
			continue;
		for (c = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, c++) {
			if (c->ops == NULL)
				continue;
			if (c->opt == o) {
				c->ops->exit(c->arg);
				c->ops = NULL;
			}
		}

		midi_abort(o->midi);
	}

	if (d->pstate != DEV_CFG)
		dev_close(d);
}
950
951/*
952 * force the device to go in DEV_CFG state, the caller is supposed to
953 * ensure buffers are drained
954 */
955void
956dev_freebufs(struct dev *d)
957{
958#ifdef DEBUG
959 logx(3, "%s: closing", d->path);
960#endif
961 if (d->mode & MODE_PLAY) {
962 if (d->encbuf != NULL)
963 xfree(d->encbuf);
964 xfree(d->pbuf);
965 }
966 if (d->mode & MODE_REC) {
967 if (d->decbuf != NULL)
968 xfree(d->decbuf);
969 xfree(d->rbuf);
970 }
971}
972
/*
 * Close the device and exit all slots
 */
void
dev_close(struct dev *d)
{
	d->pstate = DEV_CFG;
	dev_sio_close(d);
	dev_freebufs(d);

	/* remove the software master control, if it was exposed */
	if (d->master_enabled) {
		d->master_enabled = 0;
		ctl_del(CTL_DEV_MASTER, d, NULL);
	}
}
988
989int
990dev_ref(struct dev *d)
991{
992#ifdef DEBUG
993 logx(3, "%s: device requested", d->path);
994#endif
995 if (d->pstate == DEV_CFG && !dev_open(d))
996 return 0;
997 d->refcnt++;
998 return 1;
999}
1000
1001void
1002dev_unref(struct dev *d)
1003{
1004#ifdef DEBUG
1005 logx(3, "%s: device released", d->path);
1006#endif
1007 d->refcnt--;
1008 if (d->refcnt == 0 && d->pstate == DEV_INIT)
1009 dev_close(d);
1010}
1011
1012/*
1013 * initialize the device with the current parameters
1014 */
1015int
1016dev_init(struct dev *d)
1017{
1018 if ((d->reqmode & MODE_AUDIOMASK) == 0) {
1019#ifdef DEBUG
1020 logx(1, "%s: has no streams", d->path);
1021#endif
1022 return 0;
1023 }
1024 if (d->hold && !dev_ref(d))
1025 return 0;
1026 return 1;
1027}
1028
1029/*
1030 * Unless the device is already in process of closing, request it to close
1031 */
1032void
1033dev_done(struct dev *d)
1034{
1035#ifdef DEBUG
1036 logx(3, "%s: draining", d->path);
1037#endif
1038 if (mtc_array[0].dev == d && mtc_array[0].tstate != MTC_STOP)
1039 mtc_stop(&mtc_array[0]);
1040 if (d->hold)
1041 dev_unref(d);
1042}
1043
1044struct dev *
1045dev_bynum(int num)
1046{
1047 struct dev *d;
1048
1049 for (d = dev_list; d != NULL; d = d->next) {
1050 if (d->num == num)
1051 return d;
1052 }
1053 return NULL;
1054}
1055
/*
 * Free the device
 */
void
dev_del(struct dev *d)
{
	struct dev **p;

#ifdef DEBUG
	logx(3, "%s: deleting", d->path);
#endif
	if (d->pstate != DEV_CFG)
		dev_close(d);
	/* walk the list to find the link pointing to d, then unlink it */
	for (p = &dev_list; *p != d; p = &(*p)->next) {
#ifdef DEBUG
		if (*p == NULL) {
			logx(0, "%s: not on the list", d->path);
			panic();
		}
#endif
	}
	*p = d->next;
	xfree(d);
}
1080
1081unsigned int
1082dev_roundof(struct dev *d, unsigned int newrate)
1083{
1084 return (d->round * newrate + d->rate / 2) / d->rate;
1085}
1086
1087/*
1088 * If the device is paused, then resume it.
1089 */
1090void
1091dev_wakeup(struct dev *d)
1092{
1093 if (d->pstate == DEV_INIT) {
1094 logx(2, "%s: started", d->path);
1095
1096 if (d->mode & MODE_PLAY) {
1097 d->prime = d->bufsz;
1098 } else {
1099 d->prime = 0;
1100 }
1101 d->idle = 0;
1102 d->poffs = 0;
1103
1104 /*
1105 * empty cycles don't increment delta, so it's ok to
1106 * start at 0
1107 **/
1108 d->delta = 0;
1109
1110 d->pstate = DEV_RUN;
1111 dev_sio_start(d);
1112 }
1113}
1114
1115/*
1116 * Return true if both of the given devices can run the same
1117 * clients
1118 */
1119int
1120dev_iscompat(struct dev *o, struct dev *n)
1121{
1122 if (((long long)o->round * n->rate != (long long)n->round * o->rate) ||
1123 ((long long)o->bufsz * n->rate != (long long)n->bufsz * o->rate)) {
1124 logx(1, "%s: not compatible with %s", n->name, o->name);
1125 return 0;
1126 }
1127 return 1;
1128}
1129
1130/*
1131 * Close the device, but attempt to migrate everything to a new sndio
1132 * device.
1133 */
1134void
1135dev_migrate(struct dev *odev)
1136{
1137 struct opt *o;
1138
1139 /* not opened */
1140 if (odev->pstate == DEV_CFG)
1141 return;
1142
1143 /* move opts to new device (also moves clients using the opts) */
1144 for (o = opt_list; o != NULL; o = o->next) {
1145 if (o->dev != odev)
1146 continue;
1147 opt_migrate(o, odev);
1148 }
1149}
1150
/*
 * check that all clients controlled by MMC are ready to start, if so,
 * attach them all at the same position
 */
void
mtc_trigger(struct mtc *mtc)
{
	int i;
	struct slot *s;

	if (mtc->tstate != MTC_START) {
		logx(2, "%s: not started by mmc yet, waiting.", mtc->dev->path);
		return;
	}

	/* bail out (and retry later) if any MMC slot isn't ready yet */
	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
		if (s->opt == NULL || s->opt->mtc != mtc)
			continue;
		if (s->pstate != SLOT_READY) {
#ifdef DEBUG
			logx(3, "slot%zu: not ready, start delayed", s - slot_array);
#endif
			return;
		}
	}
	if (!dev_ref(mtc->dev))
		return;

	/* all ready: attach every MMC-controlled slot at once */
	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
		if (s->opt == NULL || s->opt->mtc != mtc)
			continue;
		slot_attach(s);
		s->pstate = SLOT_RUN;
	}
	mtc->tstate = MTC_RUN;
	mtc_midi_full(mtc);
	dev_wakeup(mtc->dev);
}
1189
/*
 * start all slots simultaneously
 */
void
mtc_start(struct mtc *mtc)
{
	if (mtc->tstate == MTC_STOP) {
		mtc->tstate = MTC_START;
		mtc_trigger(mtc);
#ifdef DEBUG
	/* note: without DEBUG the else-branch disappears entirely */
	} else {
		logx(3, "%s: ignoring mmc start", mtc->dev->path);
#endif
	}
}
1205
/*
 * stop all slots simultaneously
 */
void
mtc_stop(struct mtc *mtc)
{
	switch (mtc->tstate) {
	case MTC_START:
		/* start was still pending: cancel it, no dev reference held */
		mtc->tstate = MTC_STOP;
		return;
	case MTC_RUN:
		/* running: release the reference taken by mtc_trigger() */
		mtc->tstate = MTC_STOP;
		dev_unref(mtc->dev);
		break;
	default:
#ifdef DEBUG
		logx(3, "%s: ignored mmc stop", mtc->dev->path);
#endif
		return;
	}
}
1227
1228/*
1229 * relocate all slots simultaneously
1230 */
1231void
1232mtc_loc(struct mtc *mtc, unsigned int origin)
1233{
1234 logx(2, "%s: relocated to %u", mtc->dev->path, origin);
1235
1236 if (mtc->tstate == MTC_RUN)
1237 mtc_stop(mtc);
1238 mtc->origin = origin;
1239 if (mtc->tstate == MTC_RUN)
1240 mtc_start(mtc);
1241}
1242
/*
 * set MMC device
 */
void
mtc_setdev(struct mtc *mtc, struct dev *d)
{
	struct opt *o;

	if (mtc->dev == d)
		return;

	logx(2, "%s: set to be MIDI clock source", d->path);

	/* adjust clock and ref counter, if needed */
	if (mtc->tstate == MTC_RUN) {
		mtc->delta -= mtc->dev->delta;
		dev_unref(mtc->dev);
	}

	mtc->dev = d;

	if (mtc->tstate == MTC_RUN) {
		mtc->delta += mtc->dev->delta;
		dev_ref(mtc->dev);
		dev_wakeup(mtc->dev);
	}

	/* move everything using MMC to the new device at once */
	for (o = opt_list; o != NULL; o = o->next) {
		if (o->mtc == mtc)
			opt_setdev(o, mtc->dev);
	}
}
1276
/*
 * allocate buffers & conversion chain
 */
void
slot_initconv(struct slot *s)
{
	unsigned int dev_nch;
	struct dev *d = s->opt->dev;

	if (s->mode & MODE_PLAY) {
		/* map the client channel range into the device play range */
		cmap_init(&s->mix.cmap,
		    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
		    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
		    0, d->pchan - 1,
		    s->opt->pmin, s->opt->pmax);
		s->mix.decbuf = NULL;
		s->mix.resampbuf = NULL;
		/* decode to native samples, if the client encoding differs */
		if (!aparams_native(&s->par)) {
			dec_init(&s->mix.dec, &s->par, s->mix.nch);
			s->mix.decbuf =
			    xmalloc(s->round * s->mix.nch * sizeof(adata_t));
		}
		/* resample to the device rate, if needed */
		if (s->rate != d->rate) {
			resamp_init(&s->mix.resamp, s->round, d->round,
			    s->mix.nch);
			s->mix.resampbuf =
			    xmalloc(d->round * s->mix.nch * sizeof(adata_t));
		}
		s->mix.join = 1;
		s->mix.expand = 1;
		/* with "dup", fold or duplicate channels to fill the range */
		if (s->opt->dup && s->mix.cmap.nch > 0) {
			dev_nch = d->pchan < (s->opt->pmax + 1) ?
			    d->pchan - s->opt->pmin :
			    s->opt->pmax - s->opt->pmin + 1;
			if (dev_nch > s->mix.nch)
				s->mix.expand = dev_nch / s->mix.nch;
			else if (s->mix.nch > dev_nch)
				s->mix.join = s->mix.nch / dev_nch;
		}
	}

	if (s->mode & MODE_RECMASK) {
		/* monitor reads from the play buffer, record from rbuf */
		unsigned int outchan = (s->opt->mode & MODE_MON) ?
		    d->pchan : d->rchan;

		s->sub.encbuf = NULL;
		s->sub.resampbuf = NULL;
		cmap_init(&s->sub.cmap,
		    0, outchan - 1,
		    s->opt->rmin, s->opt->rmax,
		    s->opt->rmin, s->opt->rmin + s->sub.nch - 1,
		    s->opt->rmin, s->opt->rmin + s->sub.nch - 1);
		/* resample from the device rate, if needed */
		if (s->rate != d->rate) {
			resamp_init(&s->sub.resamp, d->round, s->round,
			    s->sub.nch);
			s->sub.resampbuf =
			    xmalloc(d->round * s->sub.nch * sizeof(adata_t));
		}
		/* encode to the client encoding, if it's not native */
		if (!aparams_native(&s->par)) {
			enc_init(&s->sub.enc, &s->par, s->sub.nch);
			s->sub.encbuf =
			    xmalloc(s->round * s->sub.nch * sizeof(adata_t));
		}
		s->sub.join = 1;
		s->sub.expand = 1;
		if (s->opt->dup && s->sub.cmap.nch > 0) {
			dev_nch = outchan < (s->opt->rmax + 1) ?
			    outchan - s->opt->rmin :
			    s->opt->rmax - s->opt->rmin + 1;
			if (dev_nch > s->sub.nch)
				s->sub.join = dev_nch / s->sub.nch;
			else if (s->sub.nch > dev_nch)
				s->sub.expand = s->sub.nch / dev_nch;
		}

		/*
		 * cmap_copy() doesn't write samples in all channels,
		 * for instance when mono->stereo conversion is
		 * disabled. So we have to prefill cmap_copy() output
		 * with silence.
		 */
		if (s->sub.resampbuf) {
			memset(s->sub.resampbuf, 0,
			    d->round * s->sub.nch * sizeof(adata_t));
		} else if (s->sub.encbuf) {
			memset(s->sub.encbuf, 0,
			    s->round * s->sub.nch * sizeof(adata_t));
		} else {
			memset(s->sub.buf.data, 0,
			    s->appbufsz * s->sub.nch * sizeof(adata_t));
		}
	}
}
1370
1371/*
1372 * allocate buffers & conversion chain
1373 */
1374void
1375slot_allocbufs(struct slot *s)
1376{
1377 if (s->mode & MODE_PLAY) {
1378 s->mix.bpf = s->par.bps * s->mix.nch;
1379 abuf_init(&s->mix.buf, s->appbufsz * s->mix.bpf);
1380 }
1381
1382 if (s->mode & MODE_RECMASK) {
1383 s->sub.bpf = s->par.bps * s->sub.nch;
1384 abuf_init(&s->sub.buf, s->appbufsz * s->sub.bpf);
1385 }
1386
1387#ifdef DEBUG
1388 logx(3, "slot%zu: allocated %u/%u fr buffers",
1389 s - slot_array, s->appbufsz, SLOT_BUFSZ(s));
1390#endif
1391}
1392
1393/*
1394 * free buffers & conversion chain
1395 */
1396void
1397slot_freebufs(struct slot *s)
1398{
1399 if (s->mode & MODE_RECMASK) {
1400 abuf_done(&s->sub.buf);
1401 }
1402
1403 if (s->mode & MODE_PLAY) {
1404 abuf_done(&s->mix.buf);
1405 }
1406}
1407
1408/*
1409 * allocate a new slot and register the given call-backs
1410 */
struct slot *
slot_new(struct opt *opt, unsigned int id, char *who,
    struct slotops *ops, void *arg, int mode)
{
	struct app *a;
	struct slot *s;
	int i;

	/* find (or create) the per-application entry for this client name */
	a = opt_mkapp(opt, who);
	if (a == NULL)
		return NULL;

	/*
	 * find a free slot and assign it the smallest possible unit number
	 */
	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
		if (s->ops == NULL)
			break;
	}
	if (i == DEV_NSLOT) {
		logx(1, "%s: too many connections", a->name);
		return NULL;
	}

	/* hold a reference on the opt; released in slot_del() */
	if (!opt_ref(opt))
		return NULL;

	s->app = a;
	s->opt = opt;
	s->ops = ops;
	s->arg = arg;
	s->pstate = SLOT_INIT;
	s->mode = mode;
	aparams_init(&s->par);
	/* default channel counts cover the opt's full play/record ranges */
	if (s->mode & MODE_PLAY)
		s->mix.nch = s->opt->pmax - s->opt->pmin + 1;
	if (s->mode & MODE_RECMASK)
		s->sub.nch = s->opt->rmax - s->opt->rmin + 1;
	/* under MTC the slot must keep in sync, so xruns can't be ignored */
	s->xrun = s->opt->mtc != NULL ? XRUN_SYNC : XRUN_IGNORE;
	/* default buffer geometry is the device's */
	s->appbufsz = s->opt->dev->bufsz;
	s->round = s->opt->dev->round;
	s->rate = s->opt->dev->rate;
#ifdef DEBUG
	logx(3, "slot%zu: %s/%s", s - slot_array, s->opt->name, s->app->name);
#endif
	return s;
}
1458
1459/*
1460 * release the given slot
1461 */
void
slot_del(struct slot *s)
{
	/*
	 * Switch call-backs to the zombie ops: the slot may stay
	 * attached (draining) after the client is gone, and the device
	 * code still invokes ops on it.
	 */
	s->arg = s;
	s->ops = &zomb_slotops;
	switch (s->pstate) {
	case SLOT_INIT:
		/* never started: ops == NULL marks the slot as free */
		s->ops = NULL;
		break;
	case SLOT_START:
	case SLOT_READY:
	case SLOT_RUN:
	case SLOT_STOP:
		/* stop without draining; slot_stop() frees the buffers */
		slot_stop(s, 0);
		break;
	}
	opt_unref(s->opt);
}
1480
1481/*
1482 * change the slot play volume; called by the client
1483 */
1484void
1485slot_setvol(struct slot *s, unsigned int vol)
1486{
1487 struct opt *o = s->opt;
1488 struct app *a = s->app;
1489
1490#ifdef DEBUG
1491 logx(3, "slot%zu: setting volume %u", s - slot_array, vol);
1492#endif
1493 if (a->vol != vol) {
1494 opt_appvol(o, a, vol);
1495 opt_midi_vol(o, a);
1496 ctl_onval(CTL_APP_LEVEL, o, a, vol);
1497 }
1498}
1499
1500/*
1501 * attach the slot to the device (ie start playing & recording
1502 */
1503void
1504slot_attach(struct slot *s)
1505{
1506 struct dev *d = s->opt->dev;
1507 long long pos;
1508
1509 if (((s->mode & MODE_PLAY) && !(s->opt->mode & MODE_PLAY)) ||
1510 ((s->mode & MODE_RECMASK) && !(s->opt->mode & MODE_RECMASK))) {
1511 logx(1, "slot%zu at %s: mode not allowed", s - slot_array, s->opt->name);
1512 return;
1513 }
1514
1515 /*
1516 * setup conversions layer
1517 */
1518 slot_initconv(s);
1519
1520 /*
1521 * start the device if not started
1522 */
1523 dev_wakeup(d);
1524
1525 /*
1526 * adjust initial clock
1527 */
1528 pos = s->delta_rem +
1529 (long long)s->delta * d->round +
1530 (long long)d->delta * s->round;
1531 s->delta = pos / (int)d->round;
1532 s->delta_rem = pos % d->round;
1533 if (s->delta_rem < 0) {
1534 s->delta_rem += d->round;
1535 s->delta--;
1536 }
1537
1538#ifdef DEBUG
1539 logx(2, "slot%zu: attached at %d + %d / %d",
1540 s - slot_array, s->delta, s->delta_rem, s->round);
1541#endif
1542
1543 /*
1544 * We dont check whether the device is dying,
1545 * because dev_xxx() functions are supposed to
1546 * work (i.e., not to crash)
1547 */
1548
1549 s->next = d->slot_list;
1550 d->slot_list = s;
1551 if (s->mode & MODE_PLAY) {
1552 s->mix.vol = MIDI_TO_ADATA(s->app->vol);
1553 dev_mix_adjvol(d);
1554 }
1555}
1556
1557/*
1558 * if MMC is enabled, and try to attach all slots synchronously, else
1559 * simply attach the slot
1560 */
1561void
1562slot_ready(struct slot *s)
1563{
1564 /*
1565 * device may be disconnected, and if so we're called from
1566 * slot->ops->exit() on a closed device
1567 */
1568 if (s->opt->dev->pstate == DEV_CFG)
1569 return;
1570 if (s->opt->mtc == NULL) {
1571 slot_attach(s);
1572 s->pstate = SLOT_RUN;
1573 } else
1574 mtc_trigger(s->opt->mtc);
1575}
1576
1577/*
1578 * setup buffers & conversion layers, prepare the slot to receive data
1579 * (for playback) or start (recording).
1580 */
void
slot_start(struct slot *s)
{
	struct dev *d = s->opt->dev;
#ifdef DEBUG
	char enc_str[ENCMAX], chans_str[64];

	/* starting twice without stopping is a programming error */
	if (s->pstate != SLOT_INIT) {
		logx(0, "slot%zu: slot_start: wrong state", s - slot_array);
		panic();
	}

	logx(2, "slot%zu: %dHz, %s, %s, %d blocks of %d frames",
	    s - slot_array, s->rate,
	    (aparams_enctostr(&s->par, enc_str), enc_str),
	    (chans_fmt(chans_str, sizeof(chans_str), s->mode,
	    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
	    s->opt->rmin, s->opt->rmin + s->sub.nch - 1), chans_str),
	    s->appbufsz / s->round, s->round);
#endif
	slot_allocbufs(s);

	if (s->mode & MODE_RECMASK) {
		/*
		 * N-th recorded block is the N-th played block
		 */
		s->sub.prime = d->bufsz / d->round;
	}
	s->skip = 0;
	s->paused = 0;

	/*
	 * get the current position, the origin is when the first sample
	 * played and/or recorded
	 */
	s->delta = -(long long)d->bufsz * s->round / d->round;
	s->delta_rem = 0;

	if (s->mode & MODE_PLAY) {
		/* wait for the client to fill the play buffer first */
		s->pstate = SLOT_START;
	} else {
		/* record-only: no client data needed, attach right away */
		s->pstate = SLOT_READY;
		slot_ready(s);
	}
}
1626
1627/*
1628 * stop playback and recording, and free conversion layers
1629 */
void
slot_detach(struct slot *s)
{
	struct slot **ps;
	struct dev *d = s->opt->dev;
	long long pos;

	/* unlink the slot from the device's attached-slot list */
	for (ps = &d->slot_list; *ps != s; ps = &(*ps)->next) {
#ifdef DEBUG
		if (*ps == NULL) {
			logx(0, "slot%zu: can't detach, not on list", s - slot_array);
			panic();
		}
#endif
	}
	*ps = s->next;

	/*
	 * adjust clock, go back d->delta ticks so that slot_attach()
	 * could be called with the resulting state
	 */
	pos = s->delta_rem +
	    (long long)s->delta * d->round -
	    (long long)d->delta * s->round;
	s->delta = pos / (int)d->round;
	s->delta_rem = pos % d->round;
	/* keep the remainder normalized into [0, d->round) */
	if (s->delta_rem < 0) {
		s->delta_rem += d->round;
		s->delta--;
	}

#ifdef DEBUG
	logx(2, "slot%zu: detached at %d + %d / %d",
	    s - slot_array, s->delta, s->delta_rem, d->round);
#endif
	/* play volumes depend on the set of attached slots */
	if (s->mode & MODE_PLAY)
		dev_mix_adjvol(d);

	/* free the record conversion scratch buffers, if any */
	if (s->mode & MODE_RECMASK) {
		if (s->sub.encbuf) {
			xfree(s->sub.encbuf);
			s->sub.encbuf = NULL;
		}
		if (s->sub.resampbuf) {
			xfree(s->sub.resampbuf);
			s->sub.resampbuf = NULL;
		}
	}

	/* free the play conversion scratch buffers, if any */
	if (s->mode & MODE_PLAY) {
		if (s->mix.decbuf) {
			xfree(s->mix.decbuf);
			s->mix.decbuf = NULL;
		}
		if (s->mix.resampbuf) {
			xfree(s->mix.resampbuf);
			s->mix.resampbuf = NULL;
		}
	}
}
1690
1691/*
1692 * put the slot in stopping state (draining play buffers) or
1693 * stop & detach if no data to drain.
1694 */
void
slot_stop(struct slot *s, int drain)
{
#ifdef DEBUG
	logx(3, "slot%zu: stopping (drain = %d)", s - slot_array, drain);
#endif
	if (s->pstate == SLOT_START) {
		/*
		 * If in rec-only mode, we're already in the READY or
		 * RUN states. We're here because the play buffer was
		 * not full enough, try to start so it's drained.
		 */
		s->pstate = SLOT_READY;
		slot_ready(s);
	}

	if (s->pstate == SLOT_RUN) {
		if ((s->mode & MODE_PLAY) && drain) {
			/*
			 * Don't detach, dev_cycle() will do it for us
			 * when the buffer is drained.
			 */
			s->pstate = SLOT_STOP;
			return;
		}
		slot_detach(s);
	} else if (s->pstate == SLOT_STOP) {
		/* already stopping: force the detach now */
		slot_detach(s);
	} else {
#ifdef DEBUG
		logx(3, "slot%zu: not drained (blocked by mmc)", s - slot_array);
#endif
	}

	/* back to the initial state; notify the client and free buffers */
	s->pstate = SLOT_INIT;
	s->ops->eof(s->arg);
	slot_freebufs(s);
}
1733
1734void
1735slot_skip_update(struct slot *s)
1736{
1737 int skip;
1738
1739 skip = slot_skip(s);
1740 while (skip > 0) {
1741#ifdef DEBUG
1742 logx(4, "slot%zu: catching skipped block", s - slot_array);
1743#endif
1744 if (s->mode & MODE_RECMASK)
1745 s->ops->flush(s->arg);
1746 if (s->mode & MODE_PLAY)
1747 s->ops->fill(s->arg);
1748 skip--;
1749 }
1750}
1751
1752/*
1753 * notify the slot that we just wrote in the play buffer, must be called
1754 * after each write
1755 */
1756void
1757slot_write(struct slot *s)
1758{
1759 if (s->pstate == SLOT_START && s->mix.buf.used == s->mix.buf.len) {
1760#ifdef DEBUG
1761 logx(4, "slot%zu: switching to READY state", s - slot_array);
1762#endif
1763 s->pstate = SLOT_READY;
1764 slot_ready(s);
1765 }
1766 slot_skip_update(s);
1767}
1768
1769/*
1770 * notify the slot that we freed some space in the rec buffer
1771 */
/*
 * notify the slot that space was freed in the record buffer; catch up
 * any blocks skipped while the buffer was full
 */
void
slot_read(struct slot *s)
{
	slot_skip_update(s);
}
1777
1778/*
1779 * allocate at control slot
1780 */
1781struct ctlslot *
1782ctlslot_new(struct opt *o, struct ctlops *ops, void *arg)
1783{
1784 struct ctlslot *s;
1785 struct ctl *c;
1786 int i;
1787
1788 i = 0;
1789 for (;;) {
1790 if (i == DEV_NCTLSLOT)
1791 return NULL;
1792 s = ctlslot_array + i;
1793 if (s->ops == NULL)
1794 break;
1795 i++;
1796 }
1797 s->opt = o;
1798 s->self = 1 << i;
1799 if (!opt_ref(o))
1800 return NULL;
1801 s->ops = ops;
1802 s->arg = arg;
1803 for (c = ctl_list; c != NULL; c = c->next) {
1804 if (!ctlslot_visible(s, c))
1805 continue;
1806 c->refs_mask |= s->self;
1807 }
1808 return s;
1809}
1810
1811/*
1812 * free control slot
1813 */
void
ctlslot_del(struct ctlslot *s)
{
	struct ctl *c, **pc;

	/*
	 * Drop this slot's reference on every control; controls kept
	 * alive only by this slot are unlinked and freed.
	 */
	pc = &ctl_list;
	while ((c = *pc) != NULL) {
		c->refs_mask &= ~s->self;
		if (c->refs_mask == 0) {
			*pc = c->next;
			xfree(c);
		} else
			pc = &c->next;
	}
	/* ops == NULL marks the slot as free */
	s->ops = NULL;
	opt_unref(s->opt);
}
1831
1832int
1833ctlslot_visible(struct ctlslot *s, struct ctl *c)
1834{
1835 if (s->opt == NULL)
1836 return 1;
1837 switch (c->scope) {
1838 case CTL_HW:
1839 /*
1840 * Disable hardware's server.device control as its
1841 * replaced by sndiod's one
1842 */
1843 if (strcmp(c->node0.name, "server") == 0 &&
1844 strcmp(c->func, "device") == 0)
1845 return 0;
1846 /* FALLTHROUGH */
1847 case CTL_DEV_MASTER:
1848 return (s->opt->dev == c->u.any.arg0);
1849 case CTL_OPT_DEV:
1850 return (s->opt == c->u.any.arg0);
1851 case CTL_APP_LEVEL:
1852 return (s->opt == c->u.app_level.opt);
1853 default:
1854 return 0;
1855 }
1856}
1857
1858struct ctl *
1859ctlslot_lookup(struct ctlslot *s, int addr)
1860{
1861 struct ctl *c;
1862
1863 c = ctl_list;
1864 while (1) {
1865 if (c == NULL)
1866 return NULL;
1867 if (c->type != CTL_NONE && c->addr == addr)
1868 break;
1869 c = c->next;
1870 }
1871 if (!ctlslot_visible(s, c))
1872 return NULL;
1873 return c;
1874}
1875
void
ctlslot_update(struct ctlslot *s)
{
	struct ctl *c;
	unsigned int refs_mask;

	/* recompute this slot's visibility of every live control */
	for (c = ctl_list; c != NULL; c = c->next) {
		if (c->type == CTL_NONE)
			continue;
		refs_mask = ctlslot_visible(s, c) ? s->self : 0;

		/* nothing to do if no visibility change */
		if (((c->refs_mask & s->self) ^ refs_mask) == 0)
			continue;
		/* if control becomes visible */
		if (refs_mask)
			c->refs_mask |= s->self;
		/*
		 * if control is hidden; NOTE(review): the slot's bit is
		 * not cleared from refs_mask here — presumably that
		 * happens when the client consumes the description
		 * update; confirm against the protocol code
		 */
		c->desc_mask |= s->self;
	}
	/* ask the client to re-read the changed descriptions */
	if (s->ops)
		s->ops->sync(s->arg);
}
1899
1900size_t
1901ctl_node_fmt(char *buf, size_t size, struct ctl_node *c)
1902{
1903 char *end = buf + size;
1904 char *p = buf;
1905
1906 p += snprintf(buf, size, "%s", c->name);
1907
1908 if (c->unit >= 0)
1909 p += snprintf(p, p < end ? end - p : 0, "%d", c->unit);
1910
1911 return p - buf;
1912}
1913
1914size_t
1915ctl_scope_fmt(char *buf, size_t size, struct ctl *c)
1916{
1917 switch (c->scope) {
1918 case CTL_HW:
1919 return snprintf(buf, size, "hw:%s/%u",
1920 c->u.hw.dev->name, c->u.hw.addr);
1921 case CTL_DEV_MASTER:
1922 return snprintf(buf, size, "dev_master:%s",
1923 c->u.dev_master.dev->name);
1924 case CTL_APP_LEVEL:
1925 return snprintf(buf, size, "app_level:%s/%s",
1926 c->u.app_level.opt->name, c->u.app_level.app->name);
1927 case CTL_OPT_DEV:
1928 return snprintf(buf, size, "opt_dev:%s/%s",
1929 c->u.opt_dev.opt->name, c->u.opt_dev.dev->name);
1930 default:
1931 return snprintf(buf, size, "unknown");
1932 }
1933}
1934
1935size_t
1936ctl_fmt(char *buf, size_t size, struct ctl *c)
1937{
1938 char *end = buf + size;
1939 char *p = buf;
1940
1941 p += snprintf(p, size, "%s/", c->group);
1942 p += ctl_node_fmt(p, p < end ? end - p : 0, &c->node0);
1943 p += snprintf(p, p < end ? end - p : 0, ".%s", c->func);
1944
1945 switch (c->type) {
1946 case CTL_VEC:
1947 case CTL_LIST:
1948 case CTL_SEL:
1949 p += snprintf(p, p < end ? end - p : 0, "[");
1950 p += ctl_node_fmt(p, p < end ? end - p : 0, &c->node1);
1951 p += snprintf(p, p < end ? end - p : 0, "]");
1952 }
1953
1954 if (c->display[0] != 0)
1955 p += snprintf(p, size, " (%s)", c->display);
1956
1957 return p - buf;
1958}
1959
int
ctl_setval(struct ctl *c, int val)
{
	if (c->curval == val) {
		logx(3, "ctl%u: already set", c->addr);
		return 1;
	}
	/* reject values outside [0, maxval] */
	if (val < 0 || val > c->maxval) {
		logx(3, "ctl%u: %d: out of range", c->addr, val);
		return 0;
	}

	switch (c->scope) {
	case CTL_HW:
		/* defer the write: mark dirty and reference the device */
		logx(3, "ctl%u: marked as dirty", c->addr);
		c->curval = val;
		c->dirty = 1;
		return dev_ref(c->u.hw.dev);
	case CTL_DEV_MASTER:
		/* no-op when the hardware provides its own master */
		if (!c->u.dev_master.dev->master_enabled)
			return 1;
		dev_master(c->u.dev_master.dev, val);
		dev_midi_master(c->u.dev_master.dev);
		c->val_mask = ~0U;
		c->curval = val;
		return 1;
	case CTL_APP_LEVEL:
		opt_appvol(c->u.app_level.opt, c->u.app_level.app, val);
		opt_midi_vol(c->u.app_level.opt, c->u.app_level.app);
		c->val_mask = ~0U;
		c->curval = val;
		return 1;
	case CTL_OPT_DEV:
		if (opt_setdev(c->u.opt_dev.opt, c->u.opt_dev.dev)) {
			/* make this the preferred device */
			opt_setalt(c->u.opt_dev.opt, c->u.opt_dev.dev);
		}
		return 1;
	default:
		logx(2, "ctl%u: not writable", c->addr);
		return 1;
	}
}
2003
2004/*
2005 * add a ctl
2006 */
struct ctl *
ctl_new(int scope, void *arg0, void *arg1,
    int type, char *display, char *gstr,
    char *str0, int unit0, char *func,
    char *str1, int unit1, int maxval, int val)
{
#ifdef DEBUG
	char ctl_str[64], scope_str[32];
#endif
	struct ctl *c, **pc;
	struct ctlslot *s;
	int addr;
	int i;

	/*
	 * allocate an address one past the largest one in use, and
	 * find the tail of the list where the control is appended
	 */
	addr = 0;
	for (pc = &ctl_list; (c = *pc) != NULL; pc = &c->next) {
		if (c->addr > addr)
			addr = c->addr;
	}
	addr++;

	c = xmalloc(sizeof(struct ctl));
	c->type = type;
	strlcpy(c->func, func, CTL_NAMEMAX);
	strlcpy(c->group, gstr, CTL_NAMEMAX);
	strlcpy(c->display, display, CTL_DISPLAYMAX);
	strlcpy(c->node0.name, str0, CTL_NAMEMAX);
	c->node0.unit = unit0;
	/* only vector-like controls carry a second node */
	if (c->type == CTL_VEC || c->type == CTL_LIST || c->type == CTL_SEL) {
		strlcpy(c->node1.name, str1, CTL_NAMEMAX);
		c->node1.unit = unit1;
	} else
		memset(&c->node1, 0, sizeof(struct ctl_node));
	c->scope = scope;
	c->u.any.arg0 = arg0;
	switch (scope) {
	case CTL_HW:
		/* for hardware controls, arg1 is the control address */
		c->u.hw.addr = *(unsigned int *)arg1;
		break;
	case CTL_OPT_DEV:
	case CTL_APP_LEVEL:
		c->u.any.arg1 = arg1;
		break;
	default:
		c->u.any.arg1 = NULL;
	}
	c->addr = addr;
	c->maxval = maxval;
	/* mark value and description as changed for all clients */
	c->val_mask = ~0;
	c->desc_mask = ~0;
	c->curval = val;
	c->dirty = 0;
	/* the device itself always holds a reference on the control */
	c->refs_mask = CTL_DEVMASK;
	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
		if (s->ops == NULL)
			continue;
		if (ctlslot_visible(s, c))
			c->refs_mask |= 1 << i;
	}
	c->next = *pc;
	*pc = c;
#ifdef DEBUG
	logx(2, "ctl%u: %s = %d at %s: added", c->addr,
	    (ctl_fmt(ctl_str, sizeof(ctl_str), c), ctl_str), c->curval,
	    (ctl_scope_fmt(scope_str, sizeof(scope_str), c), scope_str));
#endif
	return c;
}
2079
void
ctl_update(struct ctl *c)
{
	struct ctlslot *s;
	unsigned int refs_mask;
	int i;

	/* recompute each connected client's visibility of the control */
	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
		if (s->ops == NULL)
			continue;
		refs_mask = ctlslot_visible(s, c) ? s->self : 0;

		/* nothing to do if no visibility change */
		if (((c->refs_mask & s->self) ^ refs_mask) == 0)
			continue;
		/* if control becomes visible */
		if (refs_mask)
			c->refs_mask |= s->self;
		/*
		 * if control is hidden; NOTE(review): the slot's bit is
		 * not cleared from refs_mask here — presumably that
		 * happens when the client consumes the description
		 * update; confirm against the protocol code
		 */
		c->desc_mask |= s->self;
		s->ops->sync(s->arg);
	}
}
2103
2104int
2105ctl_match(struct ctl *c, int scope, void *arg0, void *arg1)
2106{
2107 if (c->type == CTL_NONE || c->scope != scope || c->u.any.arg0 != arg0)
2108 return 0;
2109 if (arg0 != NULL && c->u.any.arg0 != arg0)
2110 return 0;
2111 switch (scope) {
2112 case CTL_HW:
2113 if (arg1 != NULL && c->u.hw.addr != *(unsigned int *)arg1)
2114 return 0;
2115 break;
2116 case CTL_OPT_DEV:
2117 case CTL_APP_LEVEL:
2118 if (arg1 != NULL && c->u.any.arg1 != arg1)
2119 return 0;
2120 break;
2121 }
2122 return 1;
2123}
2124
2125struct ctl *
2126ctl_find(int scope, void *arg0, void *arg1)
2127{
2128 struct ctl *c;
2129
2130 for (c = ctl_list; c != NULL; c = c->next) {
2131 if (ctl_match(c, scope, arg0, arg1))
2132 return c;
2133 }
2134 return NULL;
2135}
2136
2137int
2138ctl_onval(int scope, void *arg0, void *arg1, int val)
2139{
2140 struct ctl *c;
2141
2142 c = ctl_find(scope, arg0, arg1);
2143 if (c == NULL)
2144 return 0;
2145 c->curval = val;
2146 c->val_mask = ~0U;
2147 return 1;
2148}
2149
int
ctl_del(int scope, void *arg0, void *arg1)
{
#ifdef DEBUG
	char str[64];
#endif
	struct ctl *c, **pc;
	int found;

	found = 0;
	pc = &ctl_list;
	for (;;) {
		c = *pc;
		if (c == NULL)
			return found;	/* number of controls matched */
		if (ctl_match(c, scope, arg0, arg1)) {
#ifdef DEBUG
			logx(2, "ctl%u: %s: removed", c->addr,
			    (ctl_fmt(str, sizeof(str), c), str));
#endif
			found++;
			/* drop the device's reference */
			c->refs_mask &= ~CTL_DEVMASK;
			if (c->refs_mask == 0) {
				/* no client references it: free it now */
				*pc = c->next;
				xfree(c);
				continue;
			}
			/*
			 * still referenced by clients: keep it as a
			 * CTL_NONE place-holder and flag its description
			 * as changed for all clients
			 */
			c->type = CTL_NONE;
			c->desc_mask = ~0;
		}
		pc = &c->next;
	}
}
2183
2184char *
2185dev_getdisplay(struct dev *d)
2186{
2187 struct ctl *c;
2188 char *display;
2189
2190 display = "";
2191 for (c = ctl_list; c != NULL; c = c->next) {
2192 if (c->scope == CTL_HW &&
2193 c->u.hw.dev == d &&
2194 c->type == CTL_SEL &&
2195 strcmp(c->group, d->name) == 0 &&
2196 strcmp(c->node0.name, "server") == 0 &&
2197 strcmp(c->func, "device") == 0 &&
2198 c->curval == 1)
2199 display = c->display;
2200 }
2201 return display;
2202}
2203
void
dev_ctlsync(struct dev *d)
{
	struct ctl *c;
	struct ctlslot *s;
	const char *display;
	int found, i;

	/* check whether the hardware exposes an output.level control */
	found = 0;
	for (c = ctl_list; c != NULL; c = c->next) {
		if (c->scope == CTL_HW &&
		    c->u.hw.dev == d &&
		    c->type == CTL_NUM &&
		    strcmp(c->group, d->name) == 0 &&
		    strcmp(c->node0.name, "output") == 0 &&
		    strcmp(c->func, "level") == 0)
			found = 1;
	}

	/* prefer the hardware control; else provide the software one */
	if (d->master_enabled && found) {
		logx(2, "%s: software master level control disabled", d->path);
		d->master_enabled = 0;
		ctl_del(CTL_DEV_MASTER, d, NULL);
	} else if (!d->master_enabled && !found) {
		logx(2, "%s: software master level control enabled", d->path);
		d->master_enabled = 1;
		ctl_new(CTL_DEV_MASTER, d, NULL,
		    CTL_NUM, "", d->name, "output", -1, "level",
		    NULL, -1, 127, d->master);
	}

	/*
	 * if the hardware's server.device changed, update the display name
	 */
	display = dev_getdisplay(d);
	for (c = ctl_list; c != NULL; c = c->next) {
		if (c->scope != CTL_OPT_DEV ||
		    c->u.opt_dev.dev != d ||
		    strcmp(c->display, display) == 0)
			continue;
		strlcpy(c->display, display, CTL_DISPLAYMAX);
		c->desc_mask = ~0;
	}

	/* notify the clients using this device */
	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
		if (s->ops == NULL)
			continue;
		if (s->opt->dev == d)
			s->ops->sync(s->arg);
	}
}