1 /* $OpenBSD: dev.c,v 1.119 2024/12/20 07:35:56 ratchov Exp $ */
2 /*
3 * Copyright (c) 2008-2012 Alexandre Ratchov <alex@caoua.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17 #include <stdio.h>
18 #include <string.h>
19
20 #include "abuf.h"
21 #include "defs.h"
22 #include "dev.h"
23 #include "dsp.h"
24 #include "siofile.h"
25 #include "midi.h"
26 #include "opt.h"
27 #include "sysex.h"
28 #include "utils.h"
29
30 void zomb_onmove(void *);
31 void zomb_onvol(void *);
32 void zomb_fill(void *);
33 void zomb_flush(void *);
34 void zomb_eof(void *);
35 void zomb_exit(void *);
36
37 void dev_mix_badd(struct dev *, struct slot *);
38 void dev_mix_adjvol(struct dev *);
39 void dev_sub_bcopy(struct dev *, struct slot *);
40
41 void dev_onmove(struct dev *, int);
42 void dev_master(struct dev *, unsigned int);
43 void dev_cycle(struct dev *);
44 void dev_adjpar(struct dev *, int, int, int);
45 int dev_allocbufs(struct dev *);
46 void dev_freebufs(struct dev *);
47 int dev_ref(struct dev *);
48 void dev_unref(struct dev *);
49 int dev_init(struct dev *);
50 void dev_done(struct dev *);
51 struct dev *dev_bynum(int);
52 void dev_del(struct dev *);
53 unsigned int dev_roundof(struct dev *, unsigned int);
54 void dev_wakeup(struct dev *);
55
56 void slot_ctlname(struct slot *, char *, size_t);
57 void slot_del(struct slot *);
58 void slot_setvol(struct slot *, unsigned int);
59 void slot_ready(struct slot *);
60 void slot_allocbufs(struct slot *);
61 void slot_freebufs(struct slot *);
62 void slot_skip_update(struct slot *);
63 void slot_write(struct slot *);
64 void slot_read(struct slot *);
65 int slot_skip(struct slot *);
66
67 struct slotops zomb_slotops = {
68 zomb_onmove,
69 zomb_onvol,
70 zomb_fill,
71 zomb_flush,
72 zomb_eof,
73 zomb_exit
74 };
75
76 struct ctl *ctl_list = NULL;
77 struct dev *dev_list = NULL;
78 unsigned int dev_sndnum = 0;
79
80 struct ctlslot ctlslot_array[DEV_NCTLSLOT];
81 struct slot slot_array[DEV_NSLOT];
82 unsigned int slot_serial; /* for slot allocation */
83
84 /*
85 * we support/need a single MTC clock source only
86 */
87 struct mtc mtc_array[1] = {
88 {.dev = NULL, .tstate = MTC_STOP}
89 };
90
91 void
92 slot_array_init(void)
93 {
94 unsigned int i;
95
96 for (i = 0; i < DEV_NSLOT; i++) {
97 slot_array[i].unit = i;
98 slot_array[i].ops = NULL;
99 slot_array[i].vol = MIDI_MAXCTL;
100 slot_array[i].opt = NULL;
101 slot_array[i].serial = slot_serial++;
102 memset(slot_array[i].name, 0, SLOT_NAMEMAX);
103 }
104 }
105
106 void
107 slot_ctlname(struct slot *s, char *name, size_t size)
108 {
109 snprintf(name, size, "%s%u", s->name, s->unit);
110 }
111
112 void
113 zomb_onmove(void *arg)
114 {
115 }
116
117 void
118 zomb_onvol(void *arg)
119 {
120 }
121
122 void
123 zomb_fill(void *arg)
124 {
125 }
126
127 void
128 zomb_flush(void *arg)
129 {
130 }
131
132 void
133 zomb_eof(void *arg)
134 {
135 struct slot *s = arg;
136
137 #ifdef DEBUG
138 logx(3, "%s%u: %s", s->name, s->unit, __func__);
139 #endif
140 s->ops = NULL;
141 }
142
143 void
144 zomb_exit(void *arg)
145 {
146 #ifdef DEBUG
147 struct slot *s = arg;
148
149 logx(3, "%s%u: %s", s->name, s->unit, __func__);
150 #endif
151 }
152
153 size_t
154 chans_fmt(char *buf, size_t size, int mode, int pmin, int pmax, int rmin, int rmax)
155 {
156 const char *sep = "";
157 char *end = buf + size;
158 char *p = buf;
159
160 if (mode & MODE_PLAY) {
161 p += snprintf(p, p < end ? end - p : 0, "play %d:%d", pmin, pmax);
162 sep = ", ";
163 }
164 if (mode & MODE_RECMASK) {
165 p += snprintf(p, p < end ? end - p : 0, "%s%s %d:%d", sep,
166 (mode & MODE_MON) ? "mon" : "rec", rmin, rmax);
167 }
168
169 return p - buf;
170 }
171
172 /*
173 * Broadcast MIDI data to all opts using this device
174 */
175 void
176 dev_midi_send(struct dev *d, void *msg, int msglen)
177 {
178 struct opt *o;
179
180 for (o = opt_list; o != NULL; o = o->next) {
181 if (o->dev != d)
182 continue;
183 midi_send(o->midi, msg, msglen);
184 }
185 }
186
187 /*
188 * send a quarter frame MTC message
189 */
190 void
191 mtc_midi_qfr(struct mtc *mtc, int delta)
192 {
193 unsigned char buf[2];
194 unsigned int data;
195 int qfrlen;
196
197 mtc->delta += delta * MTC_SEC;
198 qfrlen = mtc->dev->rate * (MTC_SEC / (4 * mtc->fps));
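/*
 * A complete MTC time code is spread over 8 quarter-frame messages
 * (two per frame), so one message is due every 1/(4 * fps) second;
 * qfrlen is that period expressed in the same units as mtc->delta
 * (device frames times MTC_SEC).
 */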
199 while (mtc->delta >= qfrlen) {
200 switch (mtc->qfr) {
201 case 0:
202 data = mtc->fr & 0xf;
203 break;
204 case 1:
205 data = mtc->fr >> 4;
206 break;
207 case 2:
208 data = mtc->sec & 0xf;
209 break;
210 case 3:
211 data = mtc->sec >> 4;
212 break;
213 case 4:
214 data = mtc->min & 0xf;
215 break;
216 case 5:
217 data = mtc->min >> 4;
218 break;
219 case 6:
220 data = mtc->hr & 0xf;
221 break;
222 case 7:
223 data = (mtc->hr >> 4) | (mtc->fps_id << 1);
224 /*
225 * tick messages are sent 2 frames ahead
226 */
227 mtc->fr += 2;
228 if (mtc->fr < mtc->fps)
229 break;
230 mtc->fr -= mtc->fps;
231 mtc->sec++;
232 if (mtc->sec < 60)
233 break;
234 mtc->sec = 0;
235 mtc->min++;
236 if (mtc->min < 60)
237 break;
238 mtc->min = 0;
239 mtc->hr++;
240 if (mtc->hr < 24)
241 break;
242 mtc->hr = 0;
243 break;
244 default:
245 /* NOTREACHED */
246 data = 0;
247 }
248 buf[0] = 0xf1;
249 buf[1] = (mtc->qfr << 4) | data;
250 mtc->qfr++;
251 mtc->qfr &= 7;
252 dev_midi_send(mtc->dev, buf, 2);
253 mtc->delta -= qfrlen;
254 }
255 }
256
257 /*
258 * send a full frame MTC message
259 */
260 void
261 mtc_midi_full(struct mtc *mtc)
262 {
263 struct sysex x;
264 unsigned int fps;
265
266 mtc->delta = -MTC_SEC * (int)mtc->dev->bufsz;
267 if (mtc->dev->rate % (30 * 4 * mtc->dev->round) == 0) {
268 mtc->fps_id = MTC_FPS_30;
269 mtc->fps = 30;
270 } else if (mtc->dev->rate % (25 * 4 * mtc->dev->round) == 0) {
271 mtc->fps_id = MTC_FPS_25;
272 mtc->fps = 25;
273 } else {
274 mtc->fps_id = MTC_FPS_24;
275 mtc->fps = 24;
276 }
277 #ifdef DEBUG
278 logx(3, "%s: mtc full frame at %d, %d fps", mtc->dev->path, mtc->delta, mtc->fps);
279 #endif
280 fps = mtc->fps;
281 mtc->hr = (mtc->origin / (MTC_SEC * 3600)) % 24;
282 mtc->min = (mtc->origin / (MTC_SEC * 60)) % 60;
283 mtc->sec = (mtc->origin / (MTC_SEC)) % 60;
284 mtc->fr = (mtc->origin / (MTC_SEC / fps)) % fps;
285
286 x.start = SYSEX_START;
287 x.type = SYSEX_TYPE_RT;
288 x.dev = SYSEX_DEV_ANY;
289 x.id0 = SYSEX_MTC;
290 x.id1 = SYSEX_MTC_FULL;
291 x.u.full.hr = mtc->hr | (mtc->fps_id << 5);
292 x.u.full.min = mtc->min;
293 x.u.full.sec = mtc->sec;
294 x.u.full.fr = mtc->fr;
295 x.u.full.end = SYSEX_END;
296 mtc->qfr = 0;
297 dev_midi_send(mtc->dev, (unsigned char *)&x, SYSEX_SIZE(full));
298 }
299
300 /*
301 * send a volume change MIDI message
302 */
303 void
304 dev_midi_vol(struct dev *d, struct slot *s)
305 {
306 unsigned char msg[3];
307
308 msg[0] = MIDI_CTL | (s - slot_array);
309 msg[1] = MIDI_CTL_VOL;
310 msg[2] = s->vol;
311 dev_midi_send(d, msg, 3);
312 }
313
314 /*
315 * send a master volume MIDI message
316 */
317 void
318 dev_midi_master(struct dev *d)
319 {
320 struct ctl *c;
321 unsigned int master, v;
322 struct sysex x;
323
324 if (d->master_enabled)
325 master = d->master;
326 else {
327 master = 0;
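/*
 * no software master control: report the highest hardware
 * output.level of this device, scaled to the MIDI 0..127 range
 */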
328 for (c = ctl_list; c != NULL; c = c->next) {
329 if (c->type != CTL_NUM ||
330 strcmp(c->group, d->name) != 0 ||
331 strcmp(c->node0.name, "output") != 0 ||
332 strcmp(c->func, "level") != 0)
333 continue;
334 if (c->u.any.arg0 != d)
335 continue;
336 v = (c->curval * 127 + c->maxval / 2) / c->maxval;
337 if (master < v)
338 master = v;
339 }
340 }
341
342 memset(&x, 0, sizeof(struct sysex));
343 x.start = SYSEX_START;
344 x.type = SYSEX_TYPE_RT;
345 x.dev = SYSEX_DEV_ANY;
346 x.id0 = SYSEX_CONTROL;
347 x.id1 = SYSEX_MASTER;
348 x.u.master.fine = 0;
349 x.u.master.coarse = master;
350 x.u.master.end = SYSEX_END;
351 dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(master));
352 }
353
354 /*
355 * send a sndiod-specific slot description MIDI message
356 */
357 void
358 dev_midi_slotdesc(struct dev *d, struct slot *s)
359 {
360 struct sysex x;
361
362 memset(&x, 0, sizeof(struct sysex));
363 x.start = SYSEX_START;
364 x.type = SYSEX_TYPE_EDU;
365 x.dev = SYSEX_DEV_ANY;
366 x.id0 = SYSEX_AUCAT;
367 x.id1 = SYSEX_AUCAT_SLOTDESC;
368 if (s->opt != NULL && s->opt->dev == d)
369 slot_ctlname(s, (char *)x.u.slotdesc.name, SYSEX_NAMELEN);
370 x.u.slotdesc.chan = (s - slot_array);
371 x.u.slotdesc.end = SYSEX_END;
372 dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(slotdesc));
373 }
374
375 void
376 dev_midi_dump(struct dev *d)
377 {
378 struct sysex x;
379 struct slot *s;
380 int i;
381
382 dev_midi_master(d);
383 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
384 if (s->opt != NULL && s->opt->dev != d)
385 continue;
386 dev_midi_slotdesc(d, s);
387 dev_midi_vol(d, s);
388 }
389 x.start = SYSEX_START;
390 x.type = SYSEX_TYPE_EDU;
391 x.dev = SYSEX_DEV_ANY;
392 x.id0 = SYSEX_AUCAT;
393 x.id1 = SYSEX_AUCAT_DUMPEND;
394 x.u.dumpend.end = SYSEX_END;
395 dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(dumpend));
396 }
397
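/*
 * Consume as many skipped cycles as possible: write one block of
 * silence to the record buffer and/or drop one block from the play
 * buffer per skipped cycle, stopping early if buffer space or data
 * runs out. Return the number of cycles actually skipped.
 */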
398 int
399 slot_skip(struct slot *s)
400 {
401 unsigned char *data = (unsigned char *)0xdeadbeef; /* please gcc */
402 int max, count;
403
404 max = s->skip;
405 while (s->skip > 0) {
406 if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
407 data = abuf_wgetblk(&s->sub.buf, &count);
408 if (count < s->round * s->sub.bpf)
409 break;
410 }
411 if (s->mode & MODE_PLAY) {
412 if (s->mix.buf.used < s->round * s->mix.bpf)
413 break;
414 }
415 #ifdef DEBUG
416 logx(4, "%s%u: skipped a cycle", s->name, s->unit);
417 #endif
418 if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
419 if (s->sub.encbuf)
420 enc_sil_do(&s->sub.enc, data, s->round);
421 else
422 memset(data, 0, s->round * s->sub.bpf);
423 abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
424 }
425 if (s->mode & MODE_PLAY) {
426 abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
427 }
428 s->skip--;
429 }
430 return max - s->skip;
431 }
432
433 /*
434 * Mix the slot input block over the output block
435 */
436 void
437 dev_mix_badd(struct dev *d, struct slot *s)
438 {
439 adata_t *idata, *odata, *in;
440 int icount, i, offs, vol, nch;
441
442 odata = DEV_PBUF(d);
443 idata = (adata_t *)abuf_rgetblk(&s->mix.buf, &icount);
444 #ifdef DEBUG
445 if (icount < s->round * s->mix.bpf) {
446 logx(0, "%s%u: not enough data to mix (%u bytes)",
447 s->name, s->unit, icount);
448 panic();
449 }
450 #endif
451 if (!(s->opt->mode & MODE_PLAY)) {
452 /*
453 * playback not allowed in opt structure, produce silence
454 */
455 abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
456 return;
457 }
458
459
460 /*
461 * Apply the following processing chain:
462 *
463 * dec -> resamp-> cmap
464 *
465 * where the first two are optional.
466 */
467
468 in = idata;
469
470 if (s->mix.decbuf) {
471 dec_do(&s->mix.dec, (void *)in, s->mix.decbuf, s->round);
472 in = s->mix.decbuf;
473 }
474
475 if (s->mix.resampbuf) {
476 resamp_do(&s->mix.resamp,
477 in, s->mix.resampbuf, s->round, d->round);
478 in = s->mix.resampbuf;
479 }
480
481 nch = s->mix.cmap.nch;
482 vol = ADATA_MUL(s->mix.weight, s->mix.vol) / s->mix.join;
483 cmap_add(&s->mix.cmap, in, odata, vol, d->round);
484
485 offs = 0;
486 for (i = s->mix.join - 1; i > 0; i--) {
487 offs += nch;
488 cmap_add(&s->mix.cmap, in + offs, odata, vol, d->round);
489 }
490
491 offs = 0;
492 for (i = s->mix.expand - 1; i > 0; i--) {
493 offs += nch;
494 cmap_add(&s->mix.cmap, in, odata + offs, vol, d->round);
495 }
496
497 abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
498 }
499
500 /*
501 * Normalize input levels.
502 */
503 void
504 dev_mix_adjvol(struct dev *d)
505 {
506 unsigned int n;
507 struct slot *i, *j;
508 int jcmax, icmax, weight;
509
510 for (i = d->slot_list; i != NULL; i = i->next) {
511 if (!(i->mode & MODE_PLAY))
512 continue;
513 icmax = i->opt->pmin + i->mix.nch - 1;
514 weight = ADATA_UNIT;
515 if (d->autovol) {
516 /*
517 * count the number of inputs that have
518 * overlapping channel sets
519 */
520 n = 0;
521 for (j = d->slot_list; j != NULL; j = j->next) {
522 if (!(j->mode & MODE_PLAY))
523 continue;
524 jcmax = j->opt->pmin + j->mix.nch - 1;
525 if (i->opt->pmin <= jcmax &&
526 icmax >= j->opt->pmin)
527 n++;
528 }
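/*
 * spread the full scale among overlapping inputs so
 * their sum can't clip
 */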
529 weight /= n;
530 }
531 if (weight > i->opt->maxweight)
532 weight = i->opt->maxweight;
533 i->mix.weight = d->master_enabled ?
534 ADATA_MUL(weight, MIDI_TO_ADATA(d->master)) : weight;
535 #ifdef DEBUG
536 logx(3, "%s%u: set weight: %d / %d", i->name, i->unit, i->mix.weight,
537 i->opt->maxweight);
538 #endif
539 }
540 }
541
542 /*
543 * Copy data from slot to device
544 */
545 void
546 dev_sub_bcopy(struct dev *d, struct slot *s)
547 {
548 adata_t *idata, *enc_out, *resamp_out, *cmap_out;
549 void *odata;
550 int ocount, moffs;
551
552 int i, vol, offs, nch;
553
554
555 odata = (adata_t *)abuf_wgetblk(&s->sub.buf, &ocount);
556 #ifdef DEBUG
557 if (ocount < s->round * s->sub.bpf) {
558 logx(0, "dev_sub_bcopy: not enough space");
559 panic();
560 }
561 #endif
562 if (s->opt->mode & MODE_MON) {
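/*
 * monitoring: tap the circular play buffer one block past the
 * current mix offset, i.e. the oldest block already mixed
 */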
563 moffs = d->poffs + d->round;
564 if (moffs == d->psize)
565 moffs = 0;
566 idata = d->pbuf + moffs * d->pchan;
567 } else if (s->opt->mode & MODE_REC) {
568 idata = d->rbuf;
569 } else {
570 /*
571 * recording not allowed in opt structure, produce silence
572 */
573 enc_sil_do(&s->sub.enc, odata, s->round);
574 abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
575 return;
576 }
577
578 /*
579 * Apply the following processing chain:
580 *
581 * cmap -> resamp -> enc
582 *
583 * where the last two are optional.
584 */
585
586 enc_out = odata;
587 resamp_out = s->sub.encbuf ? s->sub.encbuf : enc_out;
588 cmap_out = s->sub.resampbuf ? s->sub.resampbuf : resamp_out;
589
590 nch = s->sub.cmap.nch;
591 vol = ADATA_UNIT / s->sub.join;
592 cmap_copy(&s->sub.cmap, idata, cmap_out, vol, d->round);
593
594 offs = 0;
595 for (i = s->sub.join - 1; i > 0; i--) {
596 offs += nch;
597 cmap_add(&s->sub.cmap, idata + offs, cmap_out, vol, d->round);
598 }
599
600 offs = 0;
601 for (i = s->sub.expand - 1; i > 0; i--) {
602 offs += nch;
603 cmap_copy(&s->sub.cmap, idata, cmap_out + offs, vol, d->round);
604 }
605
606 if (s->sub.resampbuf) {
607 resamp_do(&s->sub.resamp,
608 s->sub.resampbuf, resamp_out, d->round, s->round);
609 }
610
611 if (s->sub.encbuf)
612 enc_do(&s->sub.enc, s->sub.encbuf, (void *)enc_out, s->round);
613
614 abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
615 }
616
617 /*
618 * run a one block cycle: consume one recorded block from
619 * rbuf and produce one play block in pbuf
620 */
621 void
622 dev_cycle(struct dev *d)
623 {
624 struct slot *s, **ps;
625 unsigned char *base;
626 int nsamp;
627
628 /*
629 * check if the device is actually used. If it isn't,
630 * then close it
631 */
632 if (d->slot_list == NULL && d->idle >= d->bufsz &&
633 (mtc_array[0].dev != d || mtc_array[0].tstate != MTC_RUN)) {
634 logx(2, "%s: device stopped", d->path);
635 dev_sio_stop(d);
636 d->pstate = DEV_INIT;
637 if (d->refcnt == 0)
638 dev_close(d);
639 return;
640 }
641
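/*
 * While priming, output a block of silence without running the
 * slots or advancing the clock; this pre-fills the device play
 * buffer with d->bufsz frames at start-up.
 */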
642 if (d->prime > 0) {
643 #ifdef DEBUG
644 logx(4, "%s: empty cycle, prime = %u", d->path, d->prime);
645 #endif
646 base = (unsigned char *)DEV_PBUF(d);
647 nsamp = d->round * d->pchan;
648 memset(base, 0, nsamp * sizeof(adata_t));
649 if (d->encbuf) {
650 enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
651 d->encbuf, d->round);
652 }
653 d->prime -= d->round;
654 return;
655 }
656
657 d->delta -= d->round;
658 #ifdef DEBUG
659 logx(4, "%s: full cycle: delta = %d", d->path, d->delta);
660 #endif
661 if (d->mode & MODE_PLAY) {
662 base = (unsigned char *)DEV_PBUF(d);
663 nsamp = d->round * d->pchan;
664 memset(base, 0, nsamp * sizeof(adata_t));
665 }
666 if ((d->mode & MODE_REC) && d->decbuf)
667 dec_do(&d->dec, d->decbuf, (unsigned char *)d->rbuf, d->round);
668 ps = &d->slot_list;
669 while ((s = *ps) != NULL) {
670 #ifdef DEBUG
671 logx(4, "%s%u: running, skip = %d", s->name, s->unit, s->skip);
672 #endif
673 d->idle = 0;
674
675 /*
676 * skip cycles for XRUN_SYNC correction
677 */
678 slot_skip(s);
679 if (s->skip < 0) {
680 s->skip++;
681 ps = &s->next;
682 continue;
683 }
684
685 #ifdef DEBUG
686 if (s->pstate == SLOT_STOP && !(s->mode & MODE_PLAY)) {
687 logx(0, "%s%u: rec-only slots can't be drained",
688 s->name, s->unit);
689 panic();
690 }
691 #endif
692 /*
693 * check if stopped stream finished draining
694 */
695 if (s->pstate == SLOT_STOP &&
696 s->mix.buf.used < s->round * s->mix.bpf) {
697 /*
698 * partial blocks are zero-filled by socket
699 * layer, so s->mix.buf.used == 0 and we can
700 * destroy the buffer
701 */
702 *ps = s->next;
703 s->pstate = SLOT_INIT;
704 s->ops->eof(s->arg);
705 slot_freebufs(s);
706 dev_mix_adjvol(d);
707 #ifdef DEBUG
708 logx(3, "%s%u: drained", s->name, s->unit);
709 #endif
710 continue;
711 }
712
713 /*
714 * check for xruns
715 */
716 if (((s->mode & MODE_PLAY) &&
717 s->mix.buf.used < s->round * s->mix.bpf) ||
718 ((s->mode & MODE_RECMASK) &&
719 s->sub.buf.len - s->sub.buf.used <
720 s->round * s->sub.bpf)) {
721
722 #ifdef DEBUG
723 logx(3, "%s%u: xrun, pause cycle", s->name, s->unit);
724 #endif
725 if (s->xrun == XRUN_IGNORE) {
726 s->delta -= s->round;
727 ps = &s->next;
728 } else if (s->xrun == XRUN_SYNC) {
729 s->skip++;
730 ps = &s->next;
731 } else if (s->xrun == XRUN_ERROR) {
732 s->ops->exit(s->arg);
733 *ps = s->next;
734 } else {
735 #ifdef DEBUG
736 logx(0, "%s%u: bad xrun mode", s->name, s->unit);
737 panic();
738 #endif
739 }
740 continue;
741 }
742 if ((s->mode & MODE_RECMASK) && !(s->pstate == SLOT_STOP)) {
743 if (s->sub.prime == 0) {
744 dev_sub_bcopy(d, s);
745 s->ops->flush(s->arg);
746 } else {
747 #ifdef DEBUG
748 logx(3, "%s%u: prime = %d", s->name, s->unit,
749 s->sub.prime);
750 #endif
751 s->sub.prime--;
752 }
753 }
754 if (s->mode & MODE_PLAY) {
755 dev_mix_badd(d, s);
756 if (s->pstate != SLOT_STOP)
757 s->ops->fill(s->arg);
758 }
759 ps = &s->next;
760 }
761 if ((d->mode & MODE_PLAY) && d->encbuf) {
762 enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
763 d->encbuf, d->round);
764 }
765 }
766
767 /*
768 * called at every clock tick by the device
769 */
770 void
771 dev_onmove(struct dev *d, int delta)
772 {
773 long long pos;
774 struct slot *s, *snext;
775
776 d->delta += delta;
777
778 if (d->slot_list == NULL)
779 d->idle += delta;
780
781 for (s = d->slot_list; s != NULL; s = snext) {
782 /*
783 * s->ops->onmove() may remove the slot
784 */
785 snext = s->next;
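/*
 * advance the slot clock by "delta" device frames converted to
 * the slot sample rate; delta_rem keeps the sub-frame remainder
 * so rounding errors don't accumulate
 */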
786 pos = s->delta_rem +
787 (long long)s->delta * d->round +
788 (long long)delta * s->round;
789 s->delta = pos / (int)d->round;
790 s->delta_rem = pos % d->round;
791 if (s->delta_rem < 0) {
792 s->delta_rem += d->round;
793 s->delta--;
794 }
795 if (s->delta >= 0)
796 s->ops->onmove(s->arg);
797 }
798
799 if (mtc_array[0].dev == d && mtc_array[0].tstate == MTC_RUN)
800 mtc_midi_qfr(&mtc_array[0], delta);
801 }
802
803 void
804 dev_master(struct dev *d, unsigned int master)
805 {
806 struct ctl *c;
807 unsigned int v;
808
809 logx(2, "%s: master volume set to %u", d->path, master);
810
811 if (d->master_enabled) {
812 d->master = master;
813 if (d->mode & MODE_PLAY)
814 dev_mix_adjvol(d);
815 } else {
816 for (c = ctl_list; c != NULL; c = c->next) {
817 if (c->scope != CTL_HW || c->u.hw.dev != d)
818 continue;
819 if (c->type != CTL_NUM ||
820 strcmp(c->group, d->name) != 0 ||
821 strcmp(c->node0.name, "output") != 0 ||
822 strcmp(c->func, "level") != 0)
823 continue;
824 v = (master * c->maxval + 64) / 127;
825 ctl_setval(c, v);
826 }
827 }
828 }
829
830 /*
831 * Create a sndio device
832 */
833 struct dev *
834 dev_new(char *path, struct aparams *par,
835 unsigned int mode, unsigned int bufsz, unsigned int round,
836 unsigned int rate, unsigned int hold, unsigned int autovol)
837 {
838 struct dev *d, **pd;
839
840 if (dev_sndnum == DEV_NMAX) {
841 logx(1, "too many devices");
842 return NULL;
843 }
844 d = xmalloc(sizeof(struct dev));
845 d->path = path;
846 d->num = dev_sndnum++;
847
848 d->reqpar = *par;
849 d->reqmode = mode;
850 d->reqpchan = d->reqrchan = 0;
851 d->reqbufsz = bufsz;
852 d->reqround = round;
853 d->reqrate = rate;
854 d->hold = hold;
855 d->autovol = autovol;
856 d->refcnt = 0;
857 d->pstate = DEV_CFG;
858 d->slot_list = NULL;
859 d->master = MIDI_MAXCTL;
860 d->master_enabled = 0;
861 d->alt_next = d;
862 snprintf(d->name, CTL_NAMEMAX, "%u", d->num);
863 for (pd = &dev_list; *pd != NULL; pd = &(*pd)->next)
864 ;
865 d->next = *pd;
866 *pd = d;
867 return d;
868 }
869
870 /*
871 * adjust device parameters and mode
872 */
873 void
874 dev_adjpar(struct dev *d, int mode,
875 int pmax, int rmax)
876 {
877 d->reqmode |= mode & MODE_AUDIOMASK;
878 if (mode & MODE_PLAY) {
879 if (d->reqpchan < pmax + 1)
880 d->reqpchan = pmax + 1;
881 }
882 if (mode & MODE_REC) {
883 if (d->reqrchan < rmax + 1)
884 d->reqrchan = rmax + 1;
885 }
886 }
887
888 /*
889 * Open the device with the dev_reqxxx capabilities. Set up a mixer, demuxer,
890 * monitor, midi control, and any necessary conversions.
891 *
892 * Note that record and play buffers are always allocated, even if the
893 * underlying device doesn't support both modes.
894 */
895 int
896 dev_allocbufs(struct dev *d)
897 {
898 char enc_str[ENCMAX], chans_str[64];
899
900 /*
901 * Create record buffer.
902 */
903
904 /* Create device <-> demuxer buffer */
905 d->rbuf = xmalloc(d->round * d->rchan * sizeof(adata_t));
906
907 /* Insert a converter, if needed. */
908 if (!aparams_native(&d->par)) {
909 dec_init(&d->dec, &d->par, d->rchan);
910 d->decbuf = xmalloc(d->round * d->rchan * d->par.bps);
911 } else
912 d->decbuf = NULL;
913
914 /*
915 * Create play buffer
916 */
917
918 /* Create device <-> mixer buffer */
919 d->poffs = 0;
920 d->psize = d->bufsz + d->round;
921 d->pbuf = xmalloc(d->psize * d->pchan * sizeof(adata_t));
922 d->mode |= MODE_MON;
923
924 /* Append a converter, if needed. */
925 if (!aparams_native(&d->par)) {
926 enc_init(&d->enc, &d->par, d->pchan);
927 d->encbuf = xmalloc(d->round * d->pchan * d->par.bps);
928 } else
929 d->encbuf = NULL;
930
931 /*
932 * Initially fill the record buffer with zeroed samples. This ensures
933 * that when a client records from a play-only device the client just
934 * gets silence.
935 */
936 memset(d->rbuf, 0, d->round * d->rchan * sizeof(adata_t));
937
938 logx(2, "%s: %dHz, %s, %s, %d blocks of %d frames",
939 d->path, d->rate,
940 (aparams_enctostr(&d->par, enc_str), enc_str),
941 (chans_fmt(chans_str, sizeof(chans_str),
942 d->mode & (MODE_PLAY | MODE_REC),
943 0, d->pchan - 1, 0, d->rchan - 1), chans_str),
944 d->bufsz / d->round, d->round);
945
946 return 1;
947 }
948
949 /*
950 * Reset parameters and open the device.
951 */
952 int
953 dev_open(struct dev *d)
954 {
955 d->mode = d->reqmode;
956 d->round = d->reqround;
957 d->bufsz = d->reqbufsz;
958 d->rate = d->reqrate;
959 d->pchan = d->reqpchan;
960 d->rchan = d->reqrchan;
961 d->par = d->reqpar;
962 if (d->pchan == 0)
963 d->pchan = 2;
964 if (d->rchan == 0)
965 d->rchan = 2;
966 if (!dev_sio_open(d)) {
967 logx(1, "%s: failed to open audio device", d->path);
968 return 0;
969 }
970 if (!dev_allocbufs(d))
971 return 0;
972
973 d->pstate = DEV_INIT;
974 return 1;
975 }
976
977 /*
978 * Force all slots to exit and close device, called after an error
979 */
980 void
981 dev_abort(struct dev *d)
982 {
983 int i;
984 struct slot *s;
985 struct ctlslot *c;
986 struct opt *o;
987
988 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
989 if (s->opt == NULL || s->opt->dev != d)
990 continue;
991 if (s->ops) {
992 s->ops->exit(s->arg);
993 s->ops = NULL;
994 }
995 }
996 d->slot_list = NULL;
997
998 for (o = opt_list; o != NULL; o = o->next) {
999 if (o->dev != d)
1000 continue;
1001 for (c = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, c++) {
1002 if (c->ops == NULL)
1003 continue;
1004 if (c->opt == o) {
1005 c->ops->exit(c->arg);
1006 c->ops = NULL;
1007 }
1008 }
1009
1010 midi_abort(o->midi);
1011 }
1012
1013 if (d->pstate != DEV_CFG)
1014 dev_close(d);
1015 }
1016
1017 /*
1018 * force the device into the DEV_CFG state; the caller is supposed to
1019 * ensure buffers are drained
1020 */
1021 void
1022 dev_freebufs(struct dev *d)
1023 {
1024 #ifdef DEBUG
1025 logx(3, "%s: closing", d->path);
1026 #endif
1027 if (d->mode & MODE_PLAY) {
1028 if (d->encbuf != NULL)
1029 xfree(d->encbuf);
1030 xfree(d->pbuf);
1031 }
1032 if (d->mode & MODE_REC) {
1033 if (d->decbuf != NULL)
1034 xfree(d->decbuf);
1035 xfree(d->rbuf);
1036 }
1037 }
1038
1039 /*
1040 * Close the device and exit all slots
1041 */
1042 void
1043 dev_close(struct dev *d)
1044 {
1045 d->pstate = DEV_CFG;
1046 dev_sio_close(d);
1047 dev_freebufs(d);
1048
1049 if (d->master_enabled) {
1050 d->master_enabled = 0;
1051 ctl_del(CTL_DEV_MASTER, d, NULL);
1052 }
1053 }
1054
1055 int
1056 dev_ref(struct dev *d)
1057 {
1058 #ifdef DEBUG
1059 logx(3, "%s: device requested", d->path);
1060 #endif
1061 if (d->pstate == DEV_CFG && !dev_open(d))
1062 return 0;
1063 d->refcnt++;
1064 return 1;
1065 }
1066
1067 void
1068 dev_unref(struct dev *d)
1069 {
1070 #ifdef DEBUG
1071 logx(3, "%s: device released", d->path);
1072 #endif
1073 d->refcnt--;
1074 if (d->refcnt == 0 && d->pstate == DEV_INIT)
1075 dev_close(d);
1076 }
1077
1078 /*
1079 * initialize the device with the current parameters
1080 */
1081 int
1082 dev_init(struct dev *d)
1083 {
1084 if ((d->reqmode & MODE_AUDIOMASK) == 0) {
1085 #ifdef DEBUG
1086 logx(1, "%s: has no streams", d->path);
1087 #endif
1088 return 0;
1089 }
1090 if (d->hold && !dev_ref(d))
1091 return 0;
1092 return 1;
1093 }
1094
1095 /*
1096 * Unless the device is already in the process of closing, request it to close
1097 */
1098 void
1099 dev_done(struct dev *d)
1100 {
1101 #ifdef DEBUG
1102 logx(3, "%s: draining", d->path);
1103 #endif
1104 if (mtc_array[0].dev == d && mtc_array[0].tstate != MTC_STOP)
1105 mtc_stop(&mtc_array[0]);
1106 if (d->hold)
1107 dev_unref(d);
1108 }
1109
1110 struct dev *
1111 dev_bynum(int num)
1112 {
1113 struct dev *d;
1114
1115 for (d = dev_list; d != NULL; d = d->next) {
1116 if (d->num == num)
1117 return d;
1118 }
1119 return NULL;
1120 }
1121
1122 /*
1123 * Free the device
1124 */
1125 void
1126 dev_del(struct dev *d)
1127 {
1128 struct dev **p;
1129
1130 #ifdef DEBUG
1131 logx(3, "%s: deleting", d->path);
1132 #endif
1133 if (d->pstate != DEV_CFG)
1134 dev_close(d);
1135 for (p = &dev_list; *p != d; p = &(*p)->next) {
1136 #ifdef DEBUG
1137 if (*p == NULL) {
1138 logx(0, "%s: not on the list", d->path);
1139 panic();
1140 }
1141 #endif
1142 }
1143 *p = d->next;
1144 xfree(d);
1145 }
1146
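/*
 * return d->round converted to the given sample rate, rounded to
 * the nearest integer number of frames
 */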
1147 unsigned int
1148 dev_roundof(struct dev *d, unsigned int newrate)
1149 {
1150 return (d->round * newrate + d->rate / 2) / d->rate;
1151 }
1152
1153 /*
1154 * If the device is paused, then resume it.
1155 */
1156 void
1157 dev_wakeup(struct dev *d)
1158 {
1159 if (d->pstate == DEV_INIT) {
1160 logx(2, "%s: started", d->path);
1161
1162 if (d->mode & MODE_PLAY) {
1163 d->prime = d->bufsz;
1164 } else {
1165 d->prime = 0;
1166 }
1167 d->idle = 0;
1168 d->poffs = 0;
1169
1170 /*
1171 * empty cycles don't increment delta, so it's ok to
1172 * start at 0
1173 */
1174 d->delta = 0;
1175
1176 d->pstate = DEV_RUN;
1177 dev_sio_start(d);
1178 }
1179 }
1180
1181 /*
1182 * Return true if both of the given devices can run the same
1183 * clients
1184 */
1185 int
1186 dev_iscompat(struct dev *o, struct dev *n)
1187 {
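/*
 * compare the round/rate and bufsz/rate ratios by cross-multiplying,
 * avoiding divisions and rounding errors
 */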
1188 if (((long long)o->round * n->rate != (long long)n->round * o->rate) ||
1189 ((long long)o->bufsz * n->rate != (long long)n->bufsz * o->rate)) {
1190 logx(1, "%s: not compatible with %s", n->name, o->name);
1191 return 0;
1192 }
1193 return 1;
1194 }
1195
1196 /*
1197 * Close the device, but attempt to migrate everything to a new sndio
1198 * device.
1199 */
1200 struct dev *
1201 dev_migrate(struct dev *odev)
1202 {
1203 struct dev *ndev;
1204 struct opt *o;
1205 struct slot *s;
1206 int i;
1207
1208 /* not opened */
1209 if (odev->pstate == DEV_CFG)
1210 return odev;
1211
1212 ndev = odev;
1213 while (1) {
1214 /* try next one, circulating through the list */
1215 ndev = ndev->alt_next;
1216 if (ndev == odev) {
1217 logx(1, "%s: no fall-back device found", odev->path);
1218 return NULL;
1219 }
1220
1221
1222 if (!dev_ref(ndev))
1223 continue;
1224
1225 /* check if new parameters are compatible with old ones */
1226 if (!dev_iscompat(odev, ndev)) {
1227 dev_unref(ndev);
1228 continue;
1229 }
1230
1231 /* found it! */
1232 break;
1233 }
1234
1235 logx(1, "%s: switching to %s", odev->path, ndev->path);
1236
1237 if (mtc_array[0].dev == odev)
1238 mtc_setdev(&mtc_array[0], ndev);
1239
1240 /* move opts to new device (also moves clients using the opts) */
1241 for (o = opt_list; o != NULL; o = o->next) {
1242 if (o->dev != odev)
1243 continue;
1244 if (strcmp(o->name, o->dev->name) == 0)
1245 continue;
1246 opt_setdev(o, ndev);
1247 }
1248
1249 /* terminate remaining clients */
1250 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1251 if (s->opt == NULL || s->opt->dev != odev)
1252 continue;
1253 if (s->ops != NULL) {
1254 s->ops->exit(s->arg);
1255 s->ops = NULL;
1256 }
1257 }
1258
1259 /* slots and/or MMC hold refs, drop ours */
1260 dev_unref(ndev);
1261
1262 return ndev;
1263 }
1264
1265 /*
1266 * check that all clients controlled by MMC are ready to start, if so,
1267 * attach them all at the same position
1268 */
1269 void
1270 mtc_trigger(struct mtc *mtc)
1271 {
1272 int i;
1273 struct slot *s;
1274
1275 if (mtc->tstate != MTC_START) {
1276 logx(2, "%s: not started by mmc yet, waiting.", mtc->dev->path);
1277 return;
1278 }
1279
1280 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1281 if (s->opt == NULL || s->opt->mtc != mtc)
1282 continue;
1283 if (s->pstate != SLOT_READY) {
1284 #ifdef DEBUG
1285 logx(3, "%s%u: not ready, start delayed", s->name, s->unit);
1286 #endif
1287 return;
1288 }
1289 }
1290 if (!dev_ref(mtc->dev))
1291 return;
1292
1293 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1294 if (s->opt == NULL || s->opt->mtc != mtc)
1295 continue;
1296 slot_attach(s);
1297 s->pstate = SLOT_RUN;
1298 }
1299 mtc->tstate = MTC_RUN;
1300 mtc_midi_full(mtc);
1301 dev_wakeup(mtc->dev);
1302 }
1303
1304 /*
1305 * start all slots simultaneously
1306 */
1307 void
1308 mtc_start(struct mtc *mtc)
1309 {
1310 if (mtc->tstate == MTC_STOP) {
1311 mtc->tstate = MTC_START;
1312 mtc_trigger(mtc);
1313 #ifdef DEBUG
1314 } else {
1315 logx(3, "%s: ignoring mmc start", mtc->dev->path);
1316 #endif
1317 }
1318 }
1319
1320 /*
1321 * stop all slots simultaneously
1322 */
1323 void
1324 mtc_stop(struct mtc *mtc)
1325 {
1326 switch (mtc->tstate) {
1327 case MTC_START:
1328 mtc->tstate = MTC_STOP;
1329 return;
1330 case MTC_RUN:
1331 mtc->tstate = MTC_STOP;
1332 dev_unref(mtc->dev);
1333 break;
1334 default:
1335 #ifdef DEBUG
1336 logx(3, "%s: ignored mmc stop", mtc->dev->path);
1337 #endif
1338 return;
1339 }
1340 }
1341
1342 /*
1343 * relocate all slots simultaneously
1344 */
1345 void
1346 mtc_loc(struct mtc *mtc, unsigned int origin)
1347 {
1348 logx(2, "%s: relocated to %u", mtc->dev->path, origin);
1349
1350 if (mtc->tstate == MTC_RUN)
1351 mtc_stop(mtc);
1352 mtc->origin = origin;
1353 if (mtc->tstate == MTC_RUN)
1354 mtc_start(mtc);
1355 }
1356
1357 /*
1358 * set MMC device
1359 */
1360 void
1361 mtc_setdev(struct mtc *mtc, struct dev *d)
1362 {
1363 struct opt *o;
1364
1365 if (mtc->dev == d)
1366 return;
1367
1368 logx(2, "%s: set to be MIDI clock source", d->path);
1369
1370 /* adjust clock and ref counter, if needed */
1371 if (mtc->tstate == MTC_RUN) {
1372 mtc->delta -= mtc->dev->delta;
1373 dev_unref(mtc->dev);
1374 }
1375
1376 mtc->dev = d;
1377
1378 if (mtc->tstate == MTC_RUN) {
1379 mtc->delta += mtc->dev->delta;
1380 dev_ref(mtc->dev);
1381 dev_wakeup(mtc->dev);
1382 }
1383
1384 /* move anything using MMC to the new device */
1385 for (o = opt_list; o != NULL; o = o->next) {
1386 if (o->mtc == mtc)
1387 opt_setdev(o, mtc->dev);
1388 }
1389 }
1390
1391 /*
1392 * allocate buffers & conversion chain
1393 */
1394 void
1395 slot_initconv(struct slot *s)
1396 {
1397 unsigned int dev_nch;
1398 struct dev *d = s->opt->dev;
1399
1400 if (s->mode & MODE_PLAY) {
1401 cmap_init(&s->mix.cmap,
1402 s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1403 s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1404 0, d->pchan - 1,
1405 s->opt->pmin, s->opt->pmax);
1406 s->mix.decbuf = NULL;
1407 s->mix.resampbuf = NULL;
1408 if (!aparams_native(&s->par)) {
1409 dec_init(&s->mix.dec, &s->par, s->mix.nch);
1410 s->mix.decbuf =
1411 xmalloc(s->round * s->mix.nch * sizeof(adata_t));
1412 }
1413 if (s->rate != d->rate) {
1414 resamp_init(&s->mix.resamp, s->round, d->round,
1415 s->mix.nch);
1416 s->mix.resampbuf =
1417 xmalloc(d->round * s->mix.nch * sizeof(adata_t));
1418 }
1419 s->mix.join = 1;
1420 s->mix.expand = 1;
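/*
 * with channel duplication, either copy the client channels to
 * several device channel ranges (expand) or sum several client
 * channels into one device range (join), depending on which side
 * has more channels
 */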
1421 if (s->opt->dup && s->mix.cmap.nch > 0) {
1422 dev_nch = d->pchan < (s->opt->pmax + 1) ?
1423 d->pchan - s->opt->pmin :
1424 s->opt->pmax - s->opt->pmin + 1;
1425 if (dev_nch > s->mix.nch)
1426 s->mix.expand = dev_nch / s->mix.nch;
1427 else if (s->mix.nch > dev_nch)
1428 s->mix.join = s->mix.nch / dev_nch;
1429 }
1430 }
1431
1432 if (s->mode & MODE_RECMASK) {
1433 unsigned int outchan = (s->opt->mode & MODE_MON) ?
1434 d->pchan : d->rchan;
1435
1436 s->sub.encbuf = NULL;
1437 s->sub.resampbuf = NULL;
1438 cmap_init(&s->sub.cmap,
1439 0, outchan - 1,
1440 s->opt->rmin, s->opt->rmax,
1441 s->opt->rmin, s->opt->rmin + s->sub.nch - 1,
1442 s->opt->rmin, s->opt->rmin + s->sub.nch - 1);
1443 if (s->rate != d->rate) {
1444 resamp_init(&s->sub.resamp, d->round, s->round,
1445 s->sub.nch);
1446 s->sub.resampbuf =
1447 xmalloc(d->round * s->sub.nch * sizeof(adata_t));
1448 }
1449 if (!aparams_native(&s->par)) {
1450 enc_init(&s->sub.enc, &s->par, s->sub.nch);
1451 s->sub.encbuf =
1452 xmalloc(s->round * s->sub.nch * sizeof(adata_t));
1453 }
1454 s->sub.join = 1;
1455 s->sub.expand = 1;
1456 if (s->opt->dup && s->sub.cmap.nch > 0) {
1457 dev_nch = outchan < (s->opt->rmax + 1) ?
1458 outchan - s->opt->rmin :
1459 s->opt->rmax - s->opt->rmin + 1;
1460 if (dev_nch > s->sub.nch)
1461 s->sub.join = dev_nch / s->sub.nch;
1462 else if (s->sub.nch > dev_nch)
1463 s->sub.expand = s->sub.nch / dev_nch;
1464 }
1465
1466 /*
1467 * cmap_copy() doesn't write samples in all channels,
1468 * for instance when mono->stereo conversion is
1469 * disabled. So we have to prefill cmap_copy() output
1470 * with silence.
1471 */
1472 if (s->sub.resampbuf) {
1473 memset(s->sub.resampbuf, 0,
1474 d->round * s->sub.nch * sizeof(adata_t));
1475 } else if (s->sub.encbuf) {
1476 memset(s->sub.encbuf, 0,
1477 s->round * s->sub.nch * sizeof(adata_t));
1478 } else {
1479 memset(s->sub.buf.data, 0,
1480 s->appbufsz * s->sub.nch * sizeof(adata_t));
1481 }
1482 }
1483 }
1484
1485 /*
1486 * allocate buffers & conversion chain
1487 */
1488 void
1489 slot_allocbufs(struct slot *s)
1490 {
1491 if (s->mode & MODE_PLAY) {
1492 s->mix.bpf = s->par.bps * s->mix.nch;
1493 abuf_init(&s->mix.buf, s->appbufsz * s->mix.bpf);
1494 }
1495
1496 if (s->mode & MODE_RECMASK) {
1497 s->sub.bpf = s->par.bps * s->sub.nch;
1498 abuf_init(&s->sub.buf, s->appbufsz * s->sub.bpf);
1499 }
1500
1501 #ifdef DEBUG
1502 logx(3, "%s%u: allocated %u/%u fr buffers",
1503 s->name, s->unit, s->appbufsz, SLOT_BUFSZ(s));
1504 #endif
1505 }
1506
1507 /*
1508 * free buffers & conversion chain
1509 */
1510 void
1511 slot_freebufs(struct slot *s)
1512 {
1513 if (s->mode & MODE_RECMASK) {
1514 abuf_done(&s->sub.buf);
1515 }
1516
1517 if (s->mode & MODE_PLAY) {
1518 abuf_done(&s->mix.buf);
1519 }
1520 }
1521
1522 /*
1523 * allocate a new slot and register the given call-backs
1524 */
1525 struct slot *
1526 slot_new(struct opt *opt, unsigned int id, char *who,
1527 struct slotops *ops, void *arg, int mode)
1528 {
1529 char *p;
1530 char name[SLOT_NAMEMAX];
1531 char ctl_name[CTL_NAMEMAX];
1532 unsigned int i, ser, bestser, bestidx;
1533 struct slot *unit[DEV_NSLOT];
1534 struct slot *s;
1535
1536 /*
1537 * create a ``valid'' control name (lowercase, remove [^a-z], truncate)
1538 */
1539 for (i = 0, p = who; ; p++) {
1540 if (i == SLOT_NAMEMAX - 1 || *p == '\0') {
1541 name[i] = '\0';
1542 break;
1543 } else if (*p >= 'A' && *p <= 'Z') {
1544 name[i++] = *p + 'a' - 'A';
1545 } else if (*p >= 'a' && *p <= 'z')
1546 name[i++] = *p;
1547 }
1548 if (i == 0)
1549 strlcpy(name, "noname", SLOT_NAMEMAX);
1550
1551 /*
1552 * build a unit-to-slot map for this name
1553 */
1554 for (i = 0; i < DEV_NSLOT; i++)
1555 unit[i] = NULL;
1556 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1557 if (strcmp(s->name, name) == 0)
1558 unit[s->unit] = s;
1559 }
1560
1561 /*
1562 * find the free slot with the least unit number and same id
1563 */
1564 for (i = 0; i < DEV_NSLOT; i++) {
1565 s = unit[i];
1566 if (s != NULL && s->ops == NULL && s->id == id)
1567 goto found;
1568 }
1569
1570 /*
1571 * find the free slot with the least unit number
1572 */
1573 for (i = 0; i < DEV_NSLOT; i++) {
1574 s = unit[i];
1575 if (s != NULL && s->ops == NULL) {
1576 s->id = id;
1577 goto found;
1578 }
1579 }
1580
1581 /*
1582 * couldn't find a matching slot, pick oldest free slot
1583 * and set its name/unit
1584 */
1585 bestser = 0;
1586 bestidx = DEV_NSLOT;
1587 for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1588 if (s->ops != NULL)
1589 continue;
1590 ser = slot_serial - s->serial;
1591 if (ser > bestser) {
1592 bestser = ser;
1593 bestidx = i;
1594 }
1595 }
1596
1597 if (bestidx == DEV_NSLOT) {
1598 logx(1, "%s: out of sub-device slots", name);
1599 return NULL;
1600 }
1601
1602 s = slot_array + bestidx;
1603 ctl_del(CTL_SLOT_LEVEL, s, NULL);
1604 s->vol = MIDI_MAXCTL;
1605 strlcpy(s->name, name, SLOT_NAMEMAX);
1606 s->serial = slot_serial++;
1607 for (i = 0; unit[i] != NULL; i++)
1608 ; /* nothing */
1609 s->unit = i;
1610 s->id = id;
1611 s->opt = opt;
1612 slot_ctlname(s, ctl_name, CTL_NAMEMAX);
1613 ctl_new(CTL_SLOT_LEVEL, s, NULL,
1614 CTL_NUM, "", "app", ctl_name, -1, "level",
1615 NULL, -1, 127, s->vol);
1616
1617 found:
1618 /* open device, this may change opt's device */
1619 if (!opt_ref(opt))
1620 return NULL;
1621 s->opt = opt;
1622 s->ops = ops;
1623 s->arg = arg;
1624 s->pstate = SLOT_INIT;
1625 s->mode = mode;
1626 aparams_init(&s->par);
1627 if (s->mode & MODE_PLAY)
1628 s->mix.nch = s->opt->pmax - s->opt->pmin + 1;
1629 if (s->mode & MODE_RECMASK)
1630 s->sub.nch = s->opt->rmax - s->opt->rmin + 1;
1631 s->xrun = s->opt->mtc != NULL ? XRUN_SYNC : XRUN_IGNORE;
1632 s->appbufsz = s->opt->dev->bufsz;
1633 s->round = s->opt->dev->round;
1634 s->rate = s->opt->dev->rate;
1635 dev_midi_slotdesc(s->opt->dev, s);
1636 dev_midi_vol(s->opt->dev, s);
1637 #ifdef DEBUG
1638 logx(3, "%s%u: using %s, mode = %x", s->name, s->unit, s->opt->name, mode);
1639 #endif
1640 return s;
1641 }
1642
1643 /*
1644 * release the given slot
1645 */
1646 void
1647 slot_del(struct slot *s)
1648 {
1649 s->arg = s;
1650 s->ops = &zomb_slotops;
1651 switch (s->pstate) {
1652 case SLOT_INIT:
1653 s->ops = NULL;
1654 break;
1655 case SLOT_START:
1656 case SLOT_READY:
1657 case SLOT_RUN:
1658 case SLOT_STOP:
1659 slot_stop(s, 0);
1660 break;
1661 }
1662 opt_unref(s->opt);
1663 }
1664
1665 /*
1666 * change the slot play volume; called either by the slot or by MIDI
1667 */
1668 void
1669 slot_setvol(struct slot *s, unsigned int vol)
1670 {
1671 #ifdef DEBUG
1672 logx(3, "%s%u: setting volume %u", s->name, s->unit, vol);
1673 #endif
1674 s->vol = vol;
1675 s->mix.vol = MIDI_TO_ADATA(s->vol);
1676 }
1677
1678 /*
1679 * set device for this slot
1680 */
1681 void
1682 slot_setopt(struct slot *s, struct opt *o)
1683 {
1684 struct opt *t;
1685 struct dev *odev, *ndev;
1686 struct ctl *c;
1687
1688 if (s->opt == NULL || s->opt == o)
1689 return;
1690
1691 logx(2, "%s%u: moving to opt %s", s->name, s->unit, o->name);
1692
1693 odev = s->opt->dev;
1694 if (s->ops != NULL) {
1695 ndev = opt_ref(o);
1696 if (ndev == NULL)
1697 return;
1698
1699 if (!dev_iscompat(odev, ndev)) {
1700 opt_unref(o);
1701 return;
1702 }
1703 }
1704
1705 if (s->pstate == SLOT_RUN || s->pstate == SLOT_STOP)
1706 slot_detach(s);
1707
1708 t = s->opt;
1709 s->opt = o;
1710
1711 c = ctl_find(CTL_SLOT_LEVEL, s, NULL);
1712 ctl_update(c);
1713
1714 if (o->dev != t->dev) {
1715 dev_midi_slotdesc(odev, s);
1716 dev_midi_slotdesc(ndev, s);
1717 dev_midi_vol(ndev, s);
1718 }
1719
1720 if (s->pstate == SLOT_RUN || s->pstate == SLOT_STOP)
1721 slot_attach(s);
1722
1723 if (s->ops != NULL) {
1724 opt_unref(t);
1725 return;
1726 }
1727 }
1728
1729 /*
1730 * attach the slot to the device (i.e. start playing & recording)
1731 */
1732 void
1733 slot_attach(struct slot *s)
1734 {
1735 struct dev *d = s->opt->dev;
1736 long long pos;
1737
1738 if (((s->mode & MODE_PLAY) && !(s->opt->mode & MODE_PLAY)) ||
1739 ((s->mode & MODE_RECMASK) && !(s->opt->mode & MODE_RECMASK))) {
1740 logx(1, "%s%u at %s: mode not allowed", s->name, s->unit, s->opt->name);
1741 return;
1742 }
1743
1744 /*
1745 * set up the conversion layer
1746 */
1747 slot_initconv(s);
1748
1749 /*
1750 * start the device if not started
1751 */
1752 dev_wakeup(d);
1753
1754 /*
1755 * adjust initial clock
1756 */
1757 pos = s->delta_rem +
1758 (long long)s->delta * d->round +
1759 (long long)d->delta * s->round;
1760 s->delta = pos / (int)d->round;
1761 s->delta_rem = pos % d->round;
1762 if (s->delta_rem < 0) {
1763 s->delta_rem += d->round;
1764 s->delta--;
1765 }
1766
1767 #ifdef DEBUG
1768 logx(2, "%s%u: attached at %d + %d / %d",
1769 s->name, s->unit, s->delta, s->delta_rem, s->round);
1770 #endif
1771
1772 /*
1773 * We don't check whether the device is dying,
1774 * because dev_xxx() functions are supposed to
1775 * work (i.e., not to crash)
1776 */
1777
1778 s->next = d->slot_list;
1779 d->slot_list = s;
1780 if (s->mode & MODE_PLAY) {
1781 s->mix.vol = MIDI_TO_ADATA(s->vol);
1782 dev_mix_adjvol(d);
1783 }
1784 }
1785
1786 /*
1787 * if MMC is enabled, try to attach all slots synchronously, else
1788 * simply attach the slot
1789 */
1790 void
1791 slot_ready(struct slot *s)
1792 {
1793 /*
1794 * device may be disconnected, and if so we're called from
1795 * slot->ops->exit() on a closed device
1796 */
1797 if (s->opt->dev->pstate == DEV_CFG)
1798 return;
1799 if (s->opt->mtc == NULL) {
1800 slot_attach(s);
1801 s->pstate = SLOT_RUN;
1802 } else
1803 mtc_trigger(s->opt->mtc);
1804 }
1805
1806 /*
1807 * set up buffers & conversion layers, prepare the slot to receive data
1808 * (for playback) or start (recording).
1809 */
1810 void
1811 slot_start(struct slot *s)
1812 {
1813 struct dev *d = s->opt->dev;
1814 #ifdef DEBUG
1815 char enc_str[ENCMAX], chans_str[64];
1816
1817 if (s->pstate != SLOT_INIT) {
1818 logx(0, "%s%u: slot_start: wrong state", s->name, s->unit);
1819 panic();
1820 }
1821
1822 logx(2, "%s%u: %dHz, %s, %s, %d blocks of %d frames",
1823 s->name, s->unit, s->rate,
1824 (aparams_enctostr(&s->par, enc_str), enc_str),
1825 (chans_fmt(chans_str, sizeof(chans_str), s->mode,
1826 s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1827 s->opt->rmin, s->opt->rmin + s->sub.nch - 1), chans_str),
1828 s->appbufsz / s->round, s->round);
1829 #endif
1830 slot_allocbufs(s);
1831
1832 if (s->mode & MODE_RECMASK) {
1833 /*
1834 * N-th recorded block is the N-th played block
1835 */
1836 s->sub.prime = d->bufsz / d->round;
1837 }
1838 s->skip = 0;
1839
1840 /*
1841 * get the current position; the origin is when the first sample
1842 * is played and/or recorded
1843 */
1844 s->delta = -(long long)d->bufsz * s->round / d->round;
1845 s->delta_rem = 0;
1846
1847 if (s->mode & MODE_PLAY) {
1848 s->pstate = SLOT_START;
1849 } else {
1850 s->pstate = SLOT_READY;
1851 slot_ready(s);
1852 }
1853 }
1854
1855 /*
1856 * stop playback and recording, and free conversion layers
1857 */
1858 void
1859 slot_detach(struct slot *s)
1860 {
1861 struct slot **ps;
1862 struct dev *d = s->opt->dev;
1863 long long pos;
1864
1865 for (ps = &d->slot_list; *ps != s; ps = &(*ps)->next) {
1866 #ifdef DEBUG
1867 if (*ps == NULL) {
1868 logx(0, "%s%u: can't detach, not on list", s->name, s->unit);
1869 panic();
1870 }
1871 #endif
1872 }
1873 *ps = s->next;
1874
1875 /*
1876 * adjust clock, go back d->delta ticks so that slot_attach()
1877 * could be called with the resulting state
1878 */
1879 pos = s->delta_rem +
1880 (long long)s->delta * d->round -
1881 (long long)d->delta * s->round;
1882 s->delta = pos / (int)d->round;
1883 s->delta_rem = pos % d->round;
1884 if (s->delta_rem < 0) {
1885 s->delta_rem += d->round;
1886 s->delta--;
1887 }
1888
1889 #ifdef DEBUG
1890 logx(2, "%s%u: detached at %d + %d / %d",
1891 s->name, s->unit, s->delta, s->delta_rem, d->round);
1892 #endif
1893 if (s->mode & MODE_PLAY)
1894 dev_mix_adjvol(d);
1895
1896 if (s->mode & MODE_RECMASK) {
1897 if (s->sub.encbuf) {
1898 xfree(s->sub.encbuf);
1899 s->sub.encbuf = NULL;
1900 }
1901 if (s->sub.resampbuf) {
1902 xfree(s->sub.resampbuf);
1903 s->sub.resampbuf = NULL;
1904 }
1905 }
1906
1907 if (s->mode & MODE_PLAY) {
1908 if (s->mix.decbuf) {
1909 xfree(s->mix.decbuf);
1910 s->mix.decbuf = NULL;
1911 }
1912 if (s->mix.resampbuf) {
1913 xfree(s->mix.resampbuf);
1914 s->mix.resampbuf = NULL;
1915 }
1916 }
1917 }
1918
1919 /*
1920 * put the slot in stopping state (draining play buffers) or
1921 * stop & detach if no data to drain.
1922 */
1923 void
1924 slot_stop(struct slot *s, int drain)
1925 {
1926 #ifdef DEBUG
1927 logx(3, "%s%u: stopping (drain = %d)", s->name, s->unit, drain);
1928 #endif
1929 if (s->pstate == SLOT_START) {
1930 /*
1931 * If in rec-only mode, we're already in the READY or
1932 * RUN states. We're here because the play buffer was
1933 * not full enough; try to start so it's drained.
1934 */
1935 s->pstate = SLOT_READY;
1936 slot_ready(s);
1937 }
1938
1939 if (s->pstate == SLOT_RUN) {
1940 if ((s->mode & MODE_PLAY) && drain) {
1941 /*
1942 * Don't detach, dev_cycle() will do it for us
1943 * when the buffer is drained.
1944 */
1945 s->pstate = SLOT_STOP;
1946 return;
1947 }
1948 slot_detach(s);
1949 } else if (s->pstate == SLOT_STOP) {
1950 slot_detach(s);
1951 } else {
1952 #ifdef DEBUG
1953 logx(3, "%s%u: not drained (blocked by mmc)", s->name, s->unit);
1954 #endif
1955 }
1956
1957 s->pstate = SLOT_INIT;
1958 s->ops->eof(s->arg);
1959 slot_freebufs(s);
1960 }
1961
1962 void
1963 slot_skip_update(struct slot *s)
1964 {
1965 int skip;
1966
1967 skip = slot_skip(s);
1968 while (skip > 0) {
1969 #ifdef DEBUG
1970 logx(4, "%s%u: catching skipped block", s->name, s->unit);
1971 #endif
1972 if (s->mode & MODE_RECMASK)
1973 s->ops->flush(s->arg);
1974 if (s->mode & MODE_PLAY)
1975 s->ops->fill(s->arg);
1976 skip--;
1977 }
1978 }
1979
1980 /*
1981 * notify the slot that we just wrote in the play buffer, must be called
1982 * after each write
1983 */
1984 void
1985 slot_write(struct slot *s)
1986 {
1987 if (s->pstate == SLOT_START && s->mix.buf.used == s->mix.buf.len) {
1988 #ifdef DEBUG
1989 logx(4, "%s%u: switching to READY state", s->name, s->unit);
1990 #endif
1991 s->pstate = SLOT_READY;
1992 slot_ready(s);
1993 }
1994 slot_skip_update(s);
1995 }
1996
1997 /*
1998 * notify the slot that we freed some space in the rec buffer
1999 */
2000 void
2001 slot_read(struct slot *s)
2002 {
2003 slot_skip_update(s);
2004 }
2005
2006 /*
2007 * allocate a control slot
2008 */
2009 struct ctlslot *
2010 ctlslot_new(struct opt *o, struct ctlops *ops, void *arg)
2011 {
2012 struct ctlslot *s;
2013 struct ctl *c;
2014 int i;
2015
2016 i = 0;
2017 for (;;) {
2018 if (i == DEV_NCTLSLOT)
2019 return NULL;
2020 s = ctlslot_array + i;
2021 if (s->ops == NULL)
2022 break;
2023 i++;
2024 }
2025 s->opt = o;
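/* each control slot owns one bit in the per-control refs/desc masks */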
2026 s->self = 1 << i;
2027 if (!opt_ref(o))
2028 return NULL;
2029 s->ops = ops;
2030 s->arg = arg;
2031 for (c = ctl_list; c != NULL; c = c->next) {
2032 if (!ctlslot_visible(s, c))
2033 continue;
2034 c->refs_mask |= s->self;
2035 }
2036 return s;
2037 }
2038
2039 /*
2040 * free control slot
2041 */
2042 void
2043 ctlslot_del(struct ctlslot *s)
2044 {
2045 struct ctl *c, **pc;
2046
2047 pc = &ctl_list;
2048 while ((c = *pc) != NULL) {
2049 c->refs_mask &= ~s->self;
2050 if (c->refs_mask == 0) {
2051 *pc = c->next;
2052 xfree(c);
2053 } else
2054 pc = &c->next;
2055 }
2056 s->ops = NULL;
2057 opt_unref(s->opt);
2058 }
2059
2060 int
2061 ctlslot_visible(struct ctlslot *s, struct ctl *c)
2062 {
2063 if (s->opt == NULL)
2064 return 1;
2065 switch (c->scope) {
2066 case CTL_HW:
2067 /*
2068 * Disable the hardware's server.device control as it's
2069 * replaced by sndiod's own
2070 */
2071 if (strcmp(c->node0.name, "server") == 0 &&
2072 strcmp(c->func, "device") == 0)
2073 return 0;
2074 /* FALLTHROUGH */
2075 case CTL_DEV_MASTER:
2076 return (s->opt->dev == c->u.any.arg0);
2077 case CTL_OPT_DEV:
2078 return (s->opt == c->u.any.arg0);
2079 case CTL_SLOT_LEVEL:
2080 return (s->opt->dev == c->u.slot_level.slot->opt->dev);
2081 default:
2082 return 0;
2083 }
2084 }
2085
2086 struct ctl *
2087 ctlslot_lookup(struct ctlslot *s, int addr)
2088 {
2089 struct ctl *c;
2090
2091 c = ctl_list;
2092 while (1) {
2093 if (c == NULL)
2094 return NULL;
2095 if (c->type != CTL_NONE && c->addr == addr)
2096 break;
2097 c = c->next;
2098 }
2099 if (!ctlslot_visible(s, c))
2100 return NULL;
2101 return c;
2102 }
2103
2104 void
2105 ctlslot_update(struct ctlslot *s)
2106 {
2107 struct ctl *c;
2108 unsigned int refs_mask;
2109
2110 for (c = ctl_list; c != NULL; c = c->next) {
2111 if (c->type == CTL_NONE)
2112 continue;
2113 refs_mask = ctlslot_visible(s, c) ? s->self : 0;
2114
2115 /* nothing to do if no visibility change */
2116 if (((c->refs_mask & s->self) ^ refs_mask) == 0)
2117 continue;
2118 /* if control becomes visible */
2119 if (refs_mask)
2120 c->refs_mask |= s->self;
2121 /* if control is hidden */
2122 c->desc_mask |= s->self;
2123 }
2124 if (s->ops)
2125 s->ops->sync(s->arg);
2126 }
2127
2128 size_t
2129 ctl_node_fmt(char *buf, size_t size, struct ctl_node *c)
2130 {
2131 char *end = buf + size;
2132 char *p = buf;
2133
2134 p += snprintf(buf, size, "%s", c->name);
2135
2136 if (c->unit >= 0)
2137 p += snprintf(p, p < end ? end - p : 0, "%d", c->unit);
2138
2139 return p - buf;
2140 }
2141
2142 size_t
2143 ctl_scope_fmt(char *buf, size_t size, struct ctl *c)
2144 {
2145 switch (c->scope) {
2146 case CTL_HW:
2147 return snprintf(buf, size, "hw:%s/%u",
2148 c->u.hw.dev->name, c->u.hw.addr);
2149 case CTL_DEV_MASTER:
2150 return snprintf(buf, size, "dev_master:%s",
2151 c->u.dev_master.dev->name);
2152 case CTL_SLOT_LEVEL:
2153 return snprintf(buf, size, "slot_level:%s%u",
2154 c->u.slot_level.slot->name, c->u.slot_level.slot->unit);
2155 case CTL_OPT_DEV:
2156 return snprintf(buf, size, "opt_dev:%s/%s",
2157 c->u.opt_dev.opt->name, c->u.opt_dev.dev->name);
2158 default:
2159 return snprintf(buf, size, "unknown");
2160 }
2161 }
2162
2163 size_t
2164 ctl_fmt(char *buf, size_t size, struct ctl *c)
2165 {
2166 char *end = buf + size;
2167 char *p = buf;
2168
2169 p += snprintf(p, size, "%s/", c->group);
2170 p += ctl_node_fmt(p, p < end ? end - p : 0, &c->node0);
2171 p += snprintf(p, p < end ? end - p : 0, ".%s", c->func);
2172
2173 switch (c->type) {
2174 case CTL_VEC:
2175 case CTL_LIST:
2176 case CTL_SEL:
2177 p += snprintf(p, p < end ? end - p : 0, "[");
2178 p += ctl_node_fmt(p, p < end ? end - p : 0, &c->node1);
2179 p += snprintf(p, p < end ? end - p : 0, "]");
2180 }
2181
2182 if (c->display[0] != 0)
2183 p += snprintf(p, p < end ? end - p : 0, " (%s)", c->display);
2184
2185 return p - buf;
2186 }
2187
2188 int
2189 ctl_setval(struct ctl *c, int val)
2190 {
2191 if (c->curval == val) {
2192 logx(3, "ctl%u: already set", c->addr);
2193 return 1;
2194 }
2195 if (val < 0 || val > c->maxval) {
2196 logx(3, "ctl%u: %d: out of range", c->addr, val);
2197 return 0;
2198 }
2199
2200 switch (c->scope) {
2201 case CTL_HW:
2202 logx(3, "ctl%u: marked as dirty", c->addr);
2203 c->curval = val;
2204 c->dirty = 1;
2205 return dev_ref(c->u.hw.dev);
2206 case CTL_DEV_MASTER:
2207 if (!c->u.dev_master.dev->master_enabled)
2208 return 1;
2209 dev_master(c->u.dev_master.dev, val);
2210 dev_midi_master(c->u.dev_master.dev);
2211 c->val_mask = ~0U;
2212 c->curval = val;
2213 return 1;
2214 case CTL_SLOT_LEVEL:
2215 slot_setvol(c->u.slot_level.slot, val);
2216 /* XXX change dev_midi_vol() into slot_midi_vol() */
2217 dev_midi_vol(c->u.slot_level.slot->opt->dev, c->u.slot_level.slot);
2218 c->val_mask = ~0U;
2219 c->curval = val;
2220 return 1;
2221 case CTL_OPT_DEV:
2222 if (opt_setdev(c->u.opt_dev.opt, c->u.opt_dev.dev))
2223 c->u.opt_dev.opt->alt_first = c->u.opt_dev.dev;
2224 return 1;
2225 default:
2226 logx(2, "ctl%u: not writable", c->addr);
2227 return 1;
2228 }
2229 }
2230
2231 /*
2232 * add a ctl
2233 */
2234 struct ctl *
2235 ctl_new(int scope, void *arg0, void *arg1,
2236 int type, char *display, char *gstr,
2237 char *str0, int unit0, char *func,
2238 char *str1, int unit1, int maxval, int val)
2239 {
2240 #ifdef DEBUG
2241 char ctl_str[64], scope_str[32];
2242 #endif
2243 struct ctl *c, **pc;
2244 struct ctlslot *s;
2245 int addr;
2246 int i;
2247
2248 /*
2249 * find the smallest unused addr number and
2250 * the last position in the list
2251 */
2252 addr = 0;
2253 for (pc = &ctl_list; (c = *pc) != NULL; pc = &c->next) {
2254 if (c->addr > addr)
2255 addr = c->addr;
2256 }
2257 addr++;
2258
2259 c = xmalloc(sizeof(struct ctl));
2260 c->type = type;
2261 strlcpy(c->func, func, CTL_NAMEMAX);
2262 strlcpy(c->group, gstr, CTL_NAMEMAX);
2263 strlcpy(c->display, display, CTL_DISPLAYMAX);
2264 strlcpy(c->node0.name, str0, CTL_NAMEMAX);
2265 c->node0.unit = unit0;
2266 if (c->type == CTL_VEC || c->type == CTL_LIST || c->type == CTL_SEL) {
2267 strlcpy(c->node1.name, str1, CTL_NAMEMAX);
2268 c->node1.unit = unit1;
2269 } else
2270 memset(&c->node1, 0, sizeof(struct ctl_node));
2271 c->scope = scope;
2272 c->u.any.arg0 = arg0;
2273 switch (scope) {
2274 case CTL_HW:
2275 c->u.hw.addr = *(unsigned int *)arg1;
2276 break;
2277 case CTL_OPT_DEV:
2278 c->u.any.arg1 = arg1;
2279 break;
2280 default:
2281 c->u.any.arg1 = NULL;
2282 }
2283 c->addr = addr;
2284 c->maxval = maxval;
2285 c->val_mask = ~0;
2286 c->desc_mask = ~0;
2287 c->curval = val;
2288 c->dirty = 0;
2289 c->refs_mask = CTL_DEVMASK;
2290 for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2291 if (s->ops == NULL)
2292 continue;
2293 if (ctlslot_visible(s, c))
2294 c->refs_mask |= 1 << i;
2295 }
2296 c->next = *pc;
2297 *pc = c;
2298 #ifdef DEBUG
2299 logx(2, "ctl%u: %s = %d at %s: added", c->addr,
2300 (ctl_fmt(ctl_str, sizeof(ctl_str), c), ctl_str), c->curval,
2301 (ctl_scope_fmt(scope_str, sizeof(scope_str), c), scope_str));
2302 #endif
2303 return c;
2304 }
2305
2306 void
2307 ctl_update(struct ctl *c)
2308 {
2309 struct ctlslot *s;
2310 unsigned int refs_mask;
2311 int i;
2312
2313 for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2314 if (s->ops == NULL)
2315 continue;
2316 refs_mask = ctlslot_visible(s, c) ? s->self : 0;
2317
2318 /* nothing to do if no visibility change */
2319 if (((c->refs_mask & s->self) ^ refs_mask) == 0)
2320 continue;
2321 /* if control becomes visible */
2322 if (refs_mask)
2323 c->refs_mask |= s->self;
2324 /* if control is hidden */
2325 c->desc_mask |= s->self;
2326 s->ops->sync(s->arg);
2327 }
2328 }
2329
2330 int
2331 ctl_match(struct ctl *c, int scope, void *arg0, void *arg1)
2332 {
2333 if (c->type == CTL_NONE || c->scope != scope || c->u.any.arg0 != arg0)
2334 return 0;
2335 if (arg0 != NULL && c->u.any.arg0 != arg0)
2336 return 0;
2337 switch (scope) {
2338 case CTL_HW:
2339 if (arg1 != NULL && c->u.hw.addr != *(unsigned int *)arg1)
2340 return 0;
2341 break;
2342 case CTL_OPT_DEV:
2343 if (arg1 != NULL && c->u.any.arg1 != arg1)
2344 return 0;
2345 break;
2346 }
2347 return 1;
2348 }
2349
2350 struct ctl *
2351 ctl_find(int scope, void *arg0, void *arg1)
2352 {
2353 struct ctl *c;
2354
2355 for (c = ctl_list; c != NULL; c = c->next) {
2356 if (ctl_match(c, scope, arg0, arg1))
2357 return c;
2358 }
2359 return NULL;
2360 }
2361
2362 int
2363 ctl_onval(int scope, void *arg0, void *arg1, int val)
2364 {
2365 struct ctl *c;
2366
2367 c = ctl_find(scope, arg0, arg1);
2368 if (c == NULL)
2369 return 0;
2370 c->curval = val;
2371 c->val_mask = ~0U;
2372 return 1;
2373 }
2374
2375 int
2376 ctl_del(int scope, void *arg0, void *arg1)
2377 {
2378 #ifdef DEBUG
2379 char str[64];
2380 #endif
2381 struct ctl *c, **pc;
2382 int found;
2383
2384 found = 0;
2385 pc = &ctl_list;
2386 for (;;) {
2387 c = *pc;
2388 if (c == NULL)
2389 return found;
2390 if (ctl_match(c, scope, arg0, arg1)) {
2391 #ifdef DEBUG
2392 logx(2, "ctl%u: %s: removed", c->addr,
2393 (ctl_fmt(str, sizeof(str), c), str));
2394 #endif
2395 found++;
2396 c->refs_mask &= ~CTL_DEVMASK;
2397 if (c->refs_mask == 0) {
2398 *pc = c->next;
2399 xfree(c);
2400 continue;
2401 }
2402 c->type = CTL_NONE;
2403 c->desc_mask = ~0;
2404 }
2405 pc = &c->next;
2406 }
2407 }
2408
2409 char *
2410 dev_getdisplay(struct dev *d)
2411 {
2412 struct ctl *c;
2413 char *display;
2414
2415 display = "";
2416 for (c = ctl_list; c != NULL; c = c->next) {
2417 if (c->scope == CTL_HW &&
2418 c->u.hw.dev == d &&
2419 c->type == CTL_SEL &&
2420 strcmp(c->group, d->name) == 0 &&
2421 strcmp(c->node0.name, "server") == 0 &&
2422 strcmp(c->func, "device") == 0 &&
2423 c->curval == 1)
2424 display = c->display;
2425 }
2426 return display;
2427 }
2428
2429 void
2430 dev_ctlsync(struct dev *d)
2431 {
2432 struct ctl *c;
2433 struct ctlslot *s;
2434 const char *display;
2435 int found, i;
2436
2437 found = 0;
2438 for (c = ctl_list; c != NULL; c = c->next) {
2439 if (c->scope == CTL_HW &&
2440 c->u.hw.dev == d &&
2441 c->type == CTL_NUM &&
2442 strcmp(c->group, d->name) == 0 &&
2443 strcmp(c->node0.name, "output") == 0 &&
2444 strcmp(c->func, "level") == 0)
2445 found = 1;
2446 }
2447
2448 if (d->master_enabled && found) {
2449 logx(2, "%s: software master level control disabled", d->path);
2450 d->master_enabled = 0;
2451 ctl_del(CTL_DEV_MASTER, d, NULL);
2452 } else if (!d->master_enabled && !found) {
2453 logx(2, "%s: software master level control enabled", d->path);
2454 d->master_enabled = 1;
2455 ctl_new(CTL_DEV_MASTER, d, NULL,
2456 CTL_NUM, "", d->name, "output", -1, "level",
2457 NULL, -1, 127, d->master);
2458 }
2459
2460 /*
2461 * if the hardware's server.device changed, update the display name
2462 */
2463 display = dev_getdisplay(d);
2464 for (c = ctl_list; c != NULL; c = c->next) {
2465 if (c->scope != CTL_OPT_DEV ||
2466 c->u.opt_dev.dev != d ||
2467 strcmp(c->display, display) == 0)
2468 continue;
2469 strlcpy(c->display, display, CTL_DISPLAYMAX);
2470 c->desc_mask = ~0;
2471 }
2472
2473 for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2474 if (s->ops == NULL)
2475 continue;
2476 if (s->opt->dev == d)
2477 s->ops->sync(s->arg);
2478 }
2479 }
2480