xref: /openbsd/usr.bin/sndiod/dev.c (revision 5a38ef86)
1 /*	$OpenBSD: dev.c,v 1.103 2021/11/01 14:43:24 ratchov Exp $	*/
2 /*
3  * Copyright (c) 2008-2012 Alexandre Ratchov <alex@caoua.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include <stdio.h>
18 #include <string.h>
19 
20 #include "abuf.h"
21 #include "defs.h"
22 #include "dev.h"
23 #include "dsp.h"
24 #include "siofile.h"
25 #include "midi.h"
26 #include "opt.h"
27 #include "sysex.h"
28 #include "utils.h"
29 
30 void zomb_onmove(void *);
31 void zomb_onvol(void *);
32 void zomb_fill(void *);
33 void zomb_flush(void *);
34 void zomb_eof(void *);
35 void zomb_exit(void *);
36 
37 void dev_mix_badd(struct dev *, struct slot *);
38 void dev_mix_adjvol(struct dev *);
39 void dev_sub_bcopy(struct dev *, struct slot *);
40 
41 void dev_onmove(struct dev *, int);
42 void dev_master(struct dev *, unsigned int);
43 void dev_cycle(struct dev *);
44 struct dev *dev_new(char *, struct aparams *, unsigned int, unsigned int,
45     unsigned int, unsigned int, unsigned int, unsigned int);
46 void dev_adjpar(struct dev *, int, int, int);
47 int dev_allocbufs(struct dev *);
48 void dev_freebufs(struct dev *);
49 int dev_ref(struct dev *);
50 void dev_unref(struct dev *);
51 int dev_init(struct dev *);
52 void dev_done(struct dev *);
53 struct dev *dev_bynum(int);
54 void dev_del(struct dev *);
55 unsigned int dev_roundof(struct dev *, unsigned int);
56 void dev_wakeup(struct dev *);
57 
58 void slot_ctlname(struct slot *, char *, size_t);
59 void slot_log(struct slot *);
60 void slot_del(struct slot *);
61 void slot_setvol(struct slot *, unsigned int);
62 void slot_ready(struct slot *);
63 void slot_allocbufs(struct slot *);
64 void slot_freebufs(struct slot *);
65 void slot_skip_update(struct slot *);
66 void slot_write(struct slot *);
67 void slot_read(struct slot *);
68 int slot_skip(struct slot *);
69 
70 void ctl_node_log(struct ctl_node *);
71 void ctl_log(struct ctl *);
72 
73 struct slotops zomb_slotops = {
74 	zomb_onmove,
75 	zomb_onvol,
76 	zomb_fill,
77 	zomb_flush,
78 	zomb_eof,
79 	zomb_exit
80 };
81 
82 struct ctl *ctl_list = NULL;
83 struct dev *dev_list = NULL;
84 unsigned int dev_sndnum = 0;
85 
86 struct ctlslot ctlslot_array[DEV_NCTLSLOT];
87 struct slot slot_array[DEV_NSLOT];
88 unsigned int slot_serial;		/* for slot allocation */
89 
90 /*
91  * we support/need a single MTC clock source only
92  */
93 struct mtc mtc_array[1] = {
94 	{.dev = NULL, .tstate = MTC_STOP}
95 };
96 
97 void
98 slot_array_init(void)
99 {
100 	unsigned int i;
101 
102 	for (i = 0; i < DEV_NSLOT; i++) {
103 		slot_array[i].unit = i;
104 		slot_array[i].ops = NULL;
105 		slot_array[i].vol = MIDI_MAXCTL;
106 		slot_array[i].opt = NULL;
107 		slot_array[i].serial = slot_serial++;
108 		memset(slot_array[i].name, 0, SLOT_NAMEMAX);
109 	}
110 }
111 
112 void
113 dev_log(struct dev *d)
114 {
115 #ifdef DEBUG
116 	static char *pstates[] = {
117 		"cfg", "ini", "run"
118 	};
119 #endif
120 	log_puts("snd");
121 	log_putu(d->num);
122 #ifdef DEBUG
123 	if (log_level >= 3) {
124 		log_puts(" pst=");
125 		log_puts(pstates[d->pstate]);
126 	}
127 #endif
128 }
129 
130 void
131 slot_ctlname(struct slot *s, char *name, size_t size)
132 {
133 	snprintf(name, size, "%s%u", s->name, s->unit);
134 }
135 
136 void
137 slot_log(struct slot *s)
138 {
139 	char name[CTL_NAMEMAX];
140 #ifdef DEBUG
141 	static char *pstates[] = {
142 		"ini", "sta", "rdy", "run", "stp", "mid"
143 	};
144 #endif
145 	slot_ctlname(s, name, CTL_NAMEMAX);
146 	log_puts(name);
147 #ifdef DEBUG
148 	if (log_level >= 3) {
149 		log_puts(" vol=");
150 		log_putu(s->vol);
151 		if (s->ops) {
152 			log_puts(",pst=");
153 			log_puts(pstates[s->pstate]);
154 		}
155 	}
156 #endif
157 }
158 
159 void
160 zomb_onmove(void *arg)
161 {
162 }
163 
164 void
165 zomb_onvol(void *arg)
166 {
167 }
168 
169 void
170 zomb_fill(void *arg)
171 {
172 }
173 
174 void
175 zomb_flush(void *arg)
176 {
177 }
178 
179 void
180 zomb_eof(void *arg)
181 {
182 	struct slot *s = arg;
183 
184 #ifdef DEBUG
185 	if (log_level >= 3) {
186 		slot_log(s);
187 		log_puts(": zomb_eof\n");
188 	}
189 #endif
190 	s->ops = NULL;
191 }
192 
193 void
194 zomb_exit(void *arg)
195 {
196 #ifdef DEBUG
197 	struct slot *s = arg;
198 
199 	if (log_level >= 3) {
200 		slot_log(s);
201 		log_puts(": zomb_exit\n");
202 	}
203 #endif
204 }
205 
206 /*
207  * Broadcast MIDI data to all opts using this device
208  */
209 void
210 dev_midi_send(struct dev *d, void *msg, int msglen)
211 {
212 	struct opt *o;
213 
214 	for (o = opt_list; o != NULL; o = o->next) {
215 		if (o->dev != d)
216 			continue;
217 		midi_send(o->midi, msg, msglen);
218 	}
219 }
220 
221 /*
222  * send a quarter frame MTC message
223  */
224 void
225 mtc_midi_qfr(struct mtc *mtc, int delta)
226 {
227 	unsigned char buf[2];
228 	unsigned int data;
229 	int qfrlen;
230 
231 	mtc->delta += delta * MTC_SEC;
232 	qfrlen = mtc->dev->rate * (MTC_SEC / (4 * mtc->fps));
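	/*
	 * mtc->delta counts frames scaled by MTC_SEC; qfrlen is the
	 * quarter-frame period (1/(4 * fps) second) in the same units,
	 * so a quarter-frame message is due each time delta reaches
	 * qfrlen (e.g. every 480 frames at 48000Hz and 25 fps).
	 */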
233 	while (mtc->delta >= qfrlen) {
234 		switch (mtc->qfr) {
235 		case 0:
236 			data = mtc->fr & 0xf;
237 			break;
238 		case 1:
239 			data = mtc->fr >> 4;
240 			break;
241 		case 2:
242 			data = mtc->sec & 0xf;
243 			break;
244 		case 3:
245 			data = mtc->sec >> 4;
246 			break;
247 		case 4:
248 			data = mtc->min & 0xf;
249 			break;
250 		case 5:
251 			data = mtc->min >> 4;
252 			break;
253 		case 6:
254 			data = mtc->hr & 0xf;
255 			break;
256 		case 7:
257 			data = (mtc->hr >> 4) | (mtc->fps_id << 1);
258 			/*
259 			 * tick messages are sent 2 frames ahead
260 			 */
261 			mtc->fr += 2;
262 			if (mtc->fr < mtc->fps)
263 				break;
264 			mtc->fr -= mtc->fps;
265 			mtc->sec++;
266 			if (mtc->sec < 60)
267 				break;
268 			mtc->sec = 0;
269 			mtc->min++;
270 			if (mtc->min < 60)
271 				break;
272 			mtc->min = 0;
273 			mtc->hr++;
274 			if (mtc->hr < 24)
275 				break;
276 			mtc->hr = 0;
277 			break;
278 		default:
279 			/* NOTREACHED */
280 			data = 0;
281 		}
282 		buf[0] = 0xf1;
283 		buf[1] = (mtc->qfr << 4) | data;
284 		mtc->qfr++;
285 		mtc->qfr &= 7;
286 		dev_midi_send(mtc->dev, buf, 2);
287 		mtc->delta -= qfrlen;
288 	}
289 }
290 
291 /*
292  * send a full frame MTC message
293  */
294 void
295 mtc_midi_full(struct mtc *mtc)
296 {
297 	struct sysex x;
298 	unsigned int fps;
299 
300 	mtc->delta = -MTC_SEC * (int)mtc->dev->bufsz;
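	/*
	 * Pick the highest standard MTC frame rate (30, 25 or 24 fps)
	 * for which the quarter-frame period is a whole number of
	 * audio blocks.
	 */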
301 	if (mtc->dev->rate % (30 * 4 * mtc->dev->round) == 0) {
302 		mtc->fps_id = MTC_FPS_30;
303 		mtc->fps = 30;
304 	} else if (mtc->dev->rate % (25 * 4 * mtc->dev->round) == 0) {
305 		mtc->fps_id = MTC_FPS_25;
306 		mtc->fps = 25;
307 	} else {
308 		mtc->fps_id = MTC_FPS_24;
309 		mtc->fps = 24;
310 	}
311 #ifdef DEBUG
312 	if (log_level >= 3) {
313 		dev_log(mtc->dev);
314 		log_puts(": mtc full frame at ");
315 		log_puti(mtc->delta);
316 		log_puts(", ");
317 		log_puti(mtc->fps);
318 		log_puts(" fps\n");
319 	}
320 #endif
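	/*
	 * mtc->origin is expressed in 1/MTC_SEC second units; decompose
	 * it into SMPTE hours, minutes, seconds and frames.
	 */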
321 	fps = mtc->fps;
322 	mtc->hr =  (mtc->origin / (MTC_SEC * 3600)) % 24;
323 	mtc->min = (mtc->origin / (MTC_SEC * 60))   % 60;
324 	mtc->sec = (mtc->origin / (MTC_SEC))        % 60;
325 	mtc->fr =  (mtc->origin / (MTC_SEC / fps))  % fps;
326 
327 	x.start = SYSEX_START;
328 	x.type = SYSEX_TYPE_RT;
329 	x.dev = SYSEX_DEV_ANY;
330 	x.id0 = SYSEX_MTC;
331 	x.id1 = SYSEX_MTC_FULL;
332 	x.u.full.hr = mtc->hr | (mtc->fps_id << 5);
333 	x.u.full.min = mtc->min;
334 	x.u.full.sec = mtc->sec;
335 	x.u.full.fr = mtc->fr;
336 	x.u.full.end = SYSEX_END;
337 	mtc->qfr = 0;
338 	dev_midi_send(mtc->dev, (unsigned char *)&x, SYSEX_SIZE(full));
339 }
340 
341 /*
342  * send a volume change MIDI message
343  */
344 void
345 dev_midi_vol(struct dev *d, struct slot *s)
346 {
347 	unsigned char msg[3];
348 
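	/* the slot index doubles as the MIDI channel number */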
349 	msg[0] = MIDI_CTL | (s - slot_array);
350 	msg[1] = MIDI_CTL_VOL;
351 	msg[2] = s->vol;
352 	dev_midi_send(d, msg, 3);
353 }
354 
355 /*
356  * send a master volume MIDI message
357  */
358 void
359 dev_midi_master(struct dev *d)
360 {
361 	struct ctl *c;
362 	unsigned int master, v;
363 	struct sysex x;
364 
365 	if (d->master_enabled)
366 		master = d->master;
367 	else {
368 		master = 0;
369 		for (c = ctl_list; c != NULL; c = c->next) {
370 			if (c->type != CTL_NUM ||
371 			    strcmp(c->group, d->name) != 0 ||
372 			    strcmp(c->node0.name, "output") != 0 ||
373 			    strcmp(c->func, "level") != 0)
374 				continue;
375 			if (c->u.any.arg0 != d)
376 				continue;
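			/*
			 * rescale the control value from 0..maxval to
			 * the 0..127 MIDI range, rounding to nearest
			 */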
377 			v = (c->curval * 127 + c->maxval / 2) / c->maxval;
378 			if (master < v)
379 				master = v;
380 		}
381 	}
382 
383 	memset(&x, 0, sizeof(struct sysex));
384 	x.start = SYSEX_START;
385 	x.type = SYSEX_TYPE_RT;
386 	x.dev = SYSEX_DEV_ANY;
387 	x.id0 = SYSEX_CONTROL;
388 	x.id1 = SYSEX_MASTER;
389 	x.u.master.fine = 0;
390 	x.u.master.coarse = master;
391 	x.u.master.end = SYSEX_END;
392 	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(master));
393 }
394 
395 /*
396  * send a sndiod-specific slot description MIDI message
397  */
398 void
399 dev_midi_slotdesc(struct dev *d, struct slot *s)
400 {
401 	struct sysex x;
402 
403 	memset(&x, 0, sizeof(struct sysex));
404 	x.start = SYSEX_START;
405 	x.type = SYSEX_TYPE_EDU;
406 	x.dev = SYSEX_DEV_ANY;
407 	x.id0 = SYSEX_AUCAT;
408 	x.id1 = SYSEX_AUCAT_SLOTDESC;
409 	if (s->opt != NULL && s->opt->dev == d)
410 		slot_ctlname(s, (char *)x.u.slotdesc.name, SYSEX_NAMELEN);
411 	x.u.slotdesc.chan = (s - slot_array);
412 	x.u.slotdesc.end = SYSEX_END;
413 	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(slotdesc));
414 }
415 
416 void
417 dev_midi_dump(struct dev *d)
418 {
419 	struct sysex x;
420 	struct slot *s;
421 	int i;
422 
423 	dev_midi_master(d);
424 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
425 		if (s->opt != NULL && s->opt->dev != d)
426 			continue;
427 		dev_midi_slotdesc(d, s);
428 		dev_midi_vol(d, s);
429 	}
430 	x.start = SYSEX_START;
431 	x.type = SYSEX_TYPE_EDU;
432 	x.dev = SYSEX_DEV_ANY;
433 	x.id0 = SYSEX_AUCAT;
434 	x.id1 = SYSEX_AUCAT_DUMPEND;
435 	x.u.dumpend.end = SYSEX_END;
436 	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(dumpend));
437 }
438 
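/*
 * Handle cycles skipped after an xrun (see XRUN_SYNC in dev_cycle()):
 * for every pending skip, drop one block from the play buffer and
 * append one block of silence to the record buffer. Return the number
 * of cycles actually skipped.
 */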
439 int
440 slot_skip(struct slot *s)
441 {
442 	unsigned char *data = (unsigned char *)0xdeadbeef; /* please gcc */
443 	int max, count;
444 
445 	max = s->skip;
446 	while (s->skip > 0) {
447 		if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
448 			data = abuf_wgetblk(&s->sub.buf, &count);
449 			if (count < s->round * s->sub.bpf)
450 				break;
451 		}
452 		if (s->mode & MODE_PLAY) {
453 			if (s->mix.buf.used < s->round * s->mix.bpf)
454 				break;
455 		}
456 #ifdef DEBUG
457 		if (log_level >= 4) {
458 			slot_log(s);
459 			log_puts(": skipped a cycle\n");
460 		}
461 #endif
462 		if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
463 			if (s->sub.encbuf)
464 				enc_sil_do(&s->sub.enc, data, s->round);
465 			else
466 				memset(data, 0, s->round * s->sub.bpf);
467 			abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
468 		}
469 		if (s->mode & MODE_PLAY) {
470 			abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
471 		}
472 		s->skip--;
473 	}
474 	return max - s->skip;
475 }
476 
477 /*
478  * Mix the slot input block over the output block
479  */
480 void
481 dev_mix_badd(struct dev *d, struct slot *s)
482 {
483 	adata_t *idata, *odata, *in;
484 	int icount, i, offs, vol, nch;
485 
486 	odata = DEV_PBUF(d);
487 	idata = (adata_t *)abuf_rgetblk(&s->mix.buf, &icount);
488 #ifdef DEBUG
489 	if (icount < s->round * s->mix.bpf) {
490 		slot_log(s);
491 		log_puts(": not enough data to mix (");
492 		log_putu(icount);
493 		log_puts("bytes)\n");
494 		panic();
495 	}
496 #endif
497 	if (!(s->opt->mode & MODE_PLAY)) {
498 		/*
499 		 * playback not allowed in opt structure, produce silence
500 		 */
501 		abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
502 		return;
503 	}
504 
505 
506 	/*
507 	 * Apply the following processing chain:
508 	 *
509 	 *	dec -> resamp-> cmap
510 	 *
511 	 * where the first two are optional.
512 	 */
513 
514 	in = idata;
515 
516 	if (s->mix.decbuf) {
517 		dec_do(&s->mix.dec, (void *)in, s->mix.decbuf, s->round);
518 		in = s->mix.decbuf;
519 	}
520 
521 	if (s->mix.resampbuf) {
522 		resamp_do(&s->mix.resamp, in, s->mix.resampbuf, s->round);
523 		in = s->mix.resampbuf;
524 	}
525 
526 	nch = s->mix.cmap.nch;
527 	vol = ADATA_MUL(s->mix.weight, s->mix.vol) / s->mix.join;
528 	cmap_add(&s->mix.cmap, in, odata, vol, d->round);
529 
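	/*
	 * The "join" loop folds extra input channel groups onto the same
	 * output channels, and the "expand" loop duplicates the input
	 * onto extra output channel groups (both used with the "dup"
	 * option); vol was divided by join above to compensate.
	 */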
530 	offs = 0;
531 	for (i = s->mix.join - 1; i > 0; i--) {
532 		offs += nch;
533 		cmap_add(&s->mix.cmap, in + offs, odata, vol, d->round);
534 	}
535 
536 	offs = 0;
537 	for (i = s->mix.expand - 1; i > 0; i--) {
538 		offs += nch;
539 		cmap_add(&s->mix.cmap, in, odata + offs, vol, d->round);
540 	}
541 
542 	abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
543 }
544 
545 /*
546  * Normalize input levels.
547  */
548 void
549 dev_mix_adjvol(struct dev *d)
550 {
551 	unsigned int n;
552 	struct slot *i, *j;
553 	int jcmax, icmax, weight;
554 
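	/*
	 * Each playing slot gets a weight. With autovol, the full scale
	 * (ADATA_UNIT) is divided among slots whose channel ranges
	 * overlap, so their sum cannot clip; the result is capped by the
	 * sub-device maxweight and scaled by the master volume.
	 */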
555 	for (i = d->slot_list; i != NULL; i = i->next) {
556 		if (!(i->mode & MODE_PLAY))
557 			continue;
558 		icmax = i->opt->pmin + i->mix.nch - 1;
559 		weight = ADATA_UNIT;
560 		if (d->autovol) {
561 			/*
562 			 * count the number of inputs that have
563 			 * overlapping channel sets
564 			 */
565 			n = 0;
566 			for (j = d->slot_list; j != NULL; j = j->next) {
567 				if (!(j->mode & MODE_PLAY))
568 					continue;
569 				jcmax = j->opt->pmin + j->mix.nch - 1;
570 				if (i->opt->pmin <= jcmax &&
571 				    icmax >= j->opt->pmin)
572 					n++;
573 			}
574 			weight /= n;
575 		}
576 		if (weight > i->opt->maxweight)
577 			weight = i->opt->maxweight;
578 		i->mix.weight = d->master_enabled ?
579 		    ADATA_MUL(weight, MIDI_TO_ADATA(d->master)) : weight;
580 #ifdef DEBUG
581 		if (log_level >= 3) {
582 			slot_log(i);
583 			log_puts(": set weight: ");
584 			log_puti(i->mix.weight);
585 			log_puts("/");
586 			log_puti(i->opt->maxweight);
587 			log_puts("\n");
588 		}
589 #endif
590 	}
591 }
592 
593 /*
594  * Copy data from slot to device
595  */
596 void
597 dev_sub_bcopy(struct dev *d, struct slot *s)
598 {
599 	adata_t *idata, *enc_out, *resamp_out, *cmap_out;
600 	void *odata;
601 	int ocount, moffs;
602 
603 	int i, vol, offs, nch;
604 
605 
606 	odata = (adata_t *)abuf_wgetblk(&s->sub.buf, &ocount);
607 #ifdef DEBUG
608 	if (ocount < s->round * s->sub.bpf) {
609 		log_puts("dev_sub_bcopy: not enough space\n");
610 		panic();
611 	}
612 #endif
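	/*
	 * For monitoring, read back the play buffer one block past the
	 * current mix offset (wrapping at psize); for recording, read
	 * the last block captured from the device (d->rbuf).
	 */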
613 	if (s->opt->mode & MODE_MON) {
614 		moffs = d->poffs + d->round;
615 		if (moffs == d->psize)
616 			moffs = 0;
617 		idata = d->pbuf + moffs * d->pchan;
618 	} else if (s->opt->mode & MODE_REC) {
619 		idata = d->rbuf;
620 	} else {
621 		/*
622 		 * recording not allowed in opt structure, produce silence
623 		 */
624 		enc_sil_do(&s->sub.enc, odata, s->round);
625 		abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
626 		return;
627 	}
628 
629 	/*
630 	 * Apply the following processing chain:
631 	 *
632 	 *	cmap -> resamp -> enc
633 	 *
634 	 * where the last two are optional.
635 	 */
636 
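	/*
	 * The stages are wired backwards: each one writes into the input
	 * buffer of the next, and a disabled stage simply aliases the
	 * output of the following stage.
	 */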
637 	enc_out = odata;
638 	resamp_out = s->sub.encbuf ? s->sub.encbuf : enc_out;
639 	cmap_out = s->sub.resampbuf ? s->sub.resampbuf : resamp_out;
640 
641 	nch = s->sub.cmap.nch;
642 	vol = ADATA_UNIT / s->sub.join;
643 	cmap_copy(&s->sub.cmap, idata, cmap_out, vol, d->round);
644 
645 	offs = 0;
646 	for (i = s->sub.join - 1; i > 0; i--) {
647 		offs += nch;
648 		cmap_add(&s->sub.cmap, idata + offs, cmap_out, vol, d->round);
649 	}
650 
651 	offs = 0;
652 	for (i = s->sub.expand - 1; i > 0; i--) {
653 		offs += nch;
654 		cmap_copy(&s->sub.cmap, idata, cmap_out + offs, vol, d->round);
655 	}
656 
657 	if (s->sub.resampbuf) {
658 		resamp_do(&s->sub.resamp,
659 		    s->sub.resampbuf, resamp_out, d->round);
660 	}
661 
662 	if (s->sub.encbuf)
663 		enc_do(&s->sub.enc, s->sub.encbuf, (void *)enc_out, s->round);
664 
665 	abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
666 }
667 
668 /*
669  * run a one block cycle: consume one recorded block from
670  * rbuf and produce one play block in pbuf
671  */
672 void
673 dev_cycle(struct dev *d)
674 {
675 	struct slot *s, **ps;
676 	unsigned char *base;
677 	int nsamp;
678 
679 	/*
680 	 * check if the device is actually used. If it isn't,
681 	 * then close it
682 	 */
683 	if (d->slot_list == NULL && (mtc_array[0].dev != d ||
684 	    mtc_array[0].tstate != MTC_RUN)) {
685 		if (log_level >= 2) {
686 			dev_log(d);
687 			log_puts(": device stopped\n");
688 		}
689 		dev_sio_stop(d);
690 		d->pstate = DEV_INIT;
691 		if (d->refcnt == 0)
692 			dev_close(d);
693 		return;
694 	}
695 
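	/*
	 * While priming (the first bufsz frames after start), output a
	 * block of silence and return without running the slots or
	 * decrementing d->delta.
	 */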
696 	if (d->prime > 0) {
697 #ifdef DEBUG
698 		if (log_level >= 4) {
699 			dev_log(d);
700 			log_puts(": empty cycle, prime = ");
701 			log_putu(d->prime);
702 			log_puts("\n");
703 		}
704 #endif
705 		base = (unsigned char *)DEV_PBUF(d);
706 		nsamp = d->round * d->pchan;
707 		memset(base, 0, nsamp * sizeof(adata_t));
708 		if (d->encbuf) {
709 			enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
710 			    d->encbuf, d->round);
711 		}
712 		d->prime -= d->round;
713 		return;
714 	}
715 
716 	d->delta -= d->round;
717 #ifdef DEBUG
718 	if (log_level >= 4) {
719 		dev_log(d);
720 		log_puts(": full cycle: delta = ");
721 		log_puti(d->delta);
722 		if (d->mode & MODE_PLAY) {
723 			log_puts(", poffs = ");
724 			log_puti(d->poffs);
725 		}
726 		log_puts("\n");
727 	}
728 #endif
729 	if (d->mode & MODE_PLAY) {
730 		base = (unsigned char *)DEV_PBUF(d);
731 		nsamp = d->round * d->pchan;
732 		memset(base, 0, nsamp * sizeof(adata_t));
733 	}
734 	if ((d->mode & MODE_REC) && d->decbuf)
735 		dec_do(&d->dec, d->decbuf, (unsigned char *)d->rbuf, d->round);
736 	ps = &d->slot_list;
737 	while ((s = *ps) != NULL) {
738 #ifdef DEBUG
739 		if (log_level >= 4) {
740 			slot_log(s);
741 			log_puts(": running");
742 			log_puts(", skip = ");
743 			log_puti(s->skip);
744 			log_puts("\n");
745 		}
746 #endif
747 		/*
748 		 * skip cycles for XRUN_SYNC correction
749 		 */
750 		slot_skip(s);
751 		if (s->skip < 0) {
752 			s->skip++;
753 			ps = &s->next;
754 			continue;
755 		}
756 
757 #ifdef DEBUG
758 		if (s->pstate == SLOT_STOP && !(s->mode & MODE_PLAY)) {
759 			slot_log(s);
760 			log_puts(": rec-only slots can't be drained\n");
761 			panic();
762 		}
763 #endif
764 		/*
765 		 * check if stopped stream finished draining
766 		 */
767 		if (s->pstate == SLOT_STOP &&
768 		    s->mix.buf.used < s->round * s->mix.bpf) {
769 			/*
770 			 * partial blocks are zero-filled by the socket
771 			 * layer, so s->mix.buf.used == 0 and we can
772 			 * destroy the buffer
773 			 */
774 			*ps = s->next;
775 			s->pstate = SLOT_INIT;
776 			s->ops->eof(s->arg);
777 			slot_freebufs(s);
778 			dev_mix_adjvol(d);
779 #ifdef DEBUG
780 			if (log_level >= 3) {
781 				slot_log(s);
782 				log_puts(": drained\n");
783 			}
784 #endif
785 			continue;
786 		}
787 
788 		/*
789 		 * check for xruns
790 		 */
791 		if (((s->mode & MODE_PLAY) &&
792 			s->mix.buf.used < s->round * s->mix.bpf) ||
793 		    ((s->mode & MODE_RECMASK) &&
794 			s->sub.buf.len - s->sub.buf.used <
795 			s->round * s->sub.bpf)) {
796 
797 #ifdef DEBUG
798 			if (log_level >= 3) {
799 				slot_log(s);
800 				log_puts(": xrun, pause cycle\n");
801 			}
802 #endif
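			/*
			 * XRUN_IGNORE: rewind the slot clock by one block
			 * and keep going; XRUN_SYNC: count the lost cycle
			 * in s->skip so slot_skip() resyncs later;
			 * XRUN_ERROR: kill the client.
			 */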
803 			if (s->xrun == XRUN_IGNORE) {
804 				s->delta -= s->round;
805 				ps = &s->next;
806 			} else if (s->xrun == XRUN_SYNC) {
807 				s->skip++;
808 				ps = &s->next;
809 			} else if (s->xrun == XRUN_ERROR) {
810 				s->ops->exit(s->arg);
811 				*ps = s->next;
812 			} else {
813 #ifdef DEBUG
814 				slot_log(s);
815 				log_puts(": bad xrun mode\n");
816 				panic();
817 #endif
818 			}
819 			continue;
820 		}
821 		if ((s->mode & MODE_RECMASK) && !(s->pstate == SLOT_STOP)) {
822 			if (s->sub.prime == 0) {
823 				dev_sub_bcopy(d, s);
824 				s->ops->flush(s->arg);
825 			} else {
826 #ifdef DEBUG
827 				if (log_level >= 3) {
828 					slot_log(s);
829 					log_puts(": prime = ");
830 					log_puti(s->sub.prime);
831 					log_puts("\n");
832 				}
833 #endif
834 				s->sub.prime--;
835 			}
836 		}
837 		if (s->mode & MODE_PLAY) {
838 			dev_mix_badd(d, s);
839 			if (s->pstate != SLOT_STOP)
840 				s->ops->fill(s->arg);
841 		}
842 		ps = &s->next;
843 	}
844 	if ((d->mode & MODE_PLAY) && d->encbuf) {
845 		enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
846 		    d->encbuf, d->round);
847 	}
848 }
849 
850 /*
851  * called at every clock tick by the device
852  */
853 void
854 dev_onmove(struct dev *d, int delta)
855 {
856 	long long pos;
857 	struct slot *s, *snext;
858 
859 	d->delta += delta;
860 
861 	for (s = d->slot_list; s != NULL; s = snext) {
862 		/*
863 		 * s->ops->onmove() may remove the slot
864 		 */
865 		snext = s->next;
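		/*
		 * Advance the slot clock: delta device frames correspond
		 * to delta * s->round / d->round slot frames; the
		 * remainder is kept in delta_rem to avoid cumulative
		 * rounding errors.
		 */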
866 		pos = s->delta_rem +
867 		    (long long)s->delta * d->round +
868 		    (long long)delta * s->round;
869 		s->delta = pos / (int)d->round;
870 		s->delta_rem = pos % d->round;
871 		if (s->delta_rem < 0) {
872 			s->delta_rem += d->round;
873 			s->delta--;
874 		}
875 		if (s->delta >= 0)
876 			s->ops->onmove(s->arg);
877 	}
878 
879 	if (mtc_array[0].dev == d && mtc_array[0].tstate == MTC_RUN)
880 		mtc_midi_qfr(&mtc_array[0], delta);
881 }
882 
883 void
884 dev_master(struct dev *d, unsigned int master)
885 {
886 	struct ctl *c;
887 	unsigned int v;
888 
889 	if (log_level >= 2) {
890 		dev_log(d);
891 		log_puts(": master volume set to ");
892 		log_putu(master);
893 		log_puts("\n");
894 	}
895 	if (d->master_enabled) {
896 		d->master = master;
897 		if (d->mode & MODE_PLAY)
898 			dev_mix_adjvol(d);
899 	} else {
900 		for (c = ctl_list; c != NULL; c = c->next) {
901 			if (c->scope != CTL_HW || c->u.hw.dev != d)
902 				continue;
903 			if (c->type != CTL_NUM ||
904 			    strcmp(c->group, d->name) != 0 ||
905 			    strcmp(c->node0.name, "output") != 0 ||
906 			    strcmp(c->func, "level") != 0)
907 				continue;
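			/*
			 * rescale the 0..127 MIDI master volume to the
			 * control's 0..maxval range, rounding to nearest
			 */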
908 			v = (master * c->maxval + 64) / 127;
909 			ctl_setval(c, v);
910 		}
911 	}
912 }
913 
914 /*
915  * Create a sndio device
916  */
917 struct dev *
918 dev_new(char *path, struct aparams *par,
919     unsigned int mode, unsigned int bufsz, unsigned int round,
920     unsigned int rate, unsigned int hold, unsigned int autovol)
921 {
922 	struct dev *d, **pd;
923 
924 	if (dev_sndnum == DEV_NMAX) {
925 		if (log_level >= 1)
926 			log_puts("too many devices\n");
927 		return NULL;
928 	}
929 	d = xmalloc(sizeof(struct dev));
930 	d->path = path;
931 	d->num = dev_sndnum++;
932 
933 	d->reqpar = *par;
934 	d->reqmode = mode;
935 	d->reqpchan = d->reqrchan = 0;
936 	d->reqbufsz = bufsz;
937 	d->reqround = round;
938 	d->reqrate = rate;
939 	d->hold = hold;
940 	d->autovol = autovol;
941 	d->refcnt = 0;
942 	d->pstate = DEV_CFG;
943 	d->slot_list = NULL;
944 	d->master = MIDI_MAXCTL;
945 	d->master_enabled = 0;
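	/* alt_next links devices in a ring, walked by dev_migrate() */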
946 	d->alt_next = d;
947 	snprintf(d->name, CTL_NAMEMAX, "%u", d->num);
948 	for (pd = &dev_list; *pd != NULL; pd = &(*pd)->next)
949 		;
950 	d->next = *pd;
951 	*pd = d;
952 	return d;
953 }
954 
955 /*
956  * adjust device parameters and mode
957  */
958 void
959 dev_adjpar(struct dev *d, int mode,
960     int pmax, int rmax)
961 {
962 	d->reqmode |= mode & MODE_AUDIOMASK;
963 	if (mode & MODE_PLAY) {
964 		if (d->reqpchan < pmax + 1)
965 			d->reqpchan = pmax + 1;
966 	}
967 	if (mode & MODE_REC) {
968 		if (d->reqrchan < rmax + 1)
969 			d->reqrchan = rmax + 1;
970 	}
971 }
972 
973 /*
974  * Open the device with the dev_reqxxx capabilities. Set up a mixer, demuxer,
975  * monitor, midi control, and any necessary conversions.
976  *
977  * Note that record and play buffers are always allocated, even if the
978  * underlying device doesn't support both modes.
979  */
980 int
981 dev_allocbufs(struct dev *d)
982 {
983 	/*
984 	 * Create record buffer.
985 	 */
986 
987 	/* Create device <-> demuxer buffer */
988 	d->rbuf = xmalloc(d->round * d->rchan * sizeof(adata_t));
989 
990 	/* Insert a converter, if needed. */
991 	if (!aparams_native(&d->par)) {
992 		dec_init(&d->dec, &d->par, d->rchan);
993 		d->decbuf = xmalloc(d->round * d->rchan * d->par.bps);
994 	} else
995 		d->decbuf = NULL;
996 
997 	/*
998 	 * Create play buffer
999 	 */
1000 
1001 	/* Create device <-> mixer buffer */
1002 	d->poffs = 0;
1003 	d->psize = d->bufsz + d->round;
1004 	d->pbuf = xmalloc(d->psize * d->pchan * sizeof(adata_t));
1005 	d->mode |= MODE_MON;
1006 
1007 	/* Append a converter, if needed. */
1008 	if (!aparams_native(&d->par)) {
1009 		enc_init(&d->enc, &d->par, d->pchan);
1010 		d->encbuf = xmalloc(d->round * d->pchan * d->par.bps);
1011 	} else
1012 		d->encbuf = NULL;
1013 
1014 	/*
1015 	 * Initially fill the record buffer with zeroed samples. This ensures
1016 	 * that when a client records from a play-only device the client just
1017 	 * gets silence.
1018 	 */
1019 	memset(d->rbuf, 0, d->round * d->rchan * sizeof(adata_t));
1020 
1021 	if (log_level >= 2) {
1022 		dev_log(d);
1023 		log_puts(": ");
1024 		log_putu(d->rate);
1025 		log_puts("Hz, ");
1026 		aparams_log(&d->par);
1027 		if (d->mode & MODE_PLAY) {
1028 			log_puts(", play 0:");
1029 			log_puti(d->pchan - 1);
1030 		}
1031 		if (d->mode & MODE_REC) {
1032 			log_puts(", rec 0:");
1033 			log_puti(d->rchan - 1);
1034 		}
1035 		log_puts(", ");
1036 		log_putu(d->bufsz / d->round);
1037 		log_puts(" blocks of ");
1038 		log_putu(d->round);
1039 		log_puts(" frames");
1040 		if (d == mtc_array[0].dev)
1041 			log_puts(", mtc");
1042 		log_puts("\n");
1043 	}
1044 	return 1;
1045 }
1046 
1047 /*
1048  * Reset parameters and open the device.
1049  */
1050 int
1051 dev_open(struct dev *d)
1052 {
1053 	d->mode = d->reqmode;
1054 	d->round = d->reqround;
1055 	d->bufsz = d->reqbufsz;
1056 	d->rate = d->reqrate;
1057 	d->pchan = d->reqpchan;
1058 	d->rchan = d->reqrchan;
1059 	d->par = d->reqpar;
1060 	if (d->pchan == 0)
1061 		d->pchan = 2;
1062 	if (d->rchan == 0)
1063 		d->rchan = 2;
1064 	if (!dev_sio_open(d)) {
1065 		if (log_level >= 1) {
1066 			dev_log(d);
1067 			log_puts(": failed to open audio device\n");
1068 		}
1069 		return 0;
1070 	}
1071 	if (!dev_allocbufs(d))
1072 		return 0;
1073 
1074 	d->pstate = DEV_INIT;
1075 	return 1;
1076 }
1077 
1078 /*
1079  * Force all slots to exit and close device, called after an error
1080  */
1081 void
1082 dev_abort(struct dev *d)
1083 {
1084 	int i;
1085 	struct slot *s;
1086 	struct ctlslot *c;
1087 	struct opt *o;
1088 
1089 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1090 		if (s->opt == NULL || s->opt->dev != d)
1091 			continue;
1092 		if (s->ops) {
1093 			s->ops->exit(s->arg);
1094 			s->ops = NULL;
1095 		}
1096 	}
1097 	d->slot_list = NULL;
1098 
1099 	for (o = opt_list; o != NULL; o = o->next) {
1100 		if (o->dev != d)
1101 			continue;
1102 		for (c = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, c++) {
1103 			if (c->ops == NULL)
1104 				continue;
1105 			if (c->opt == o) {
1106 				c->ops->exit(c->arg);
1107 				c->ops = NULL;
1108 			}
1109 		}
1110 
1111 		midi_abort(o->midi);
1112 	}
1113 
1114 	if (d->pstate != DEV_CFG)
1115 		dev_close(d);
1116 }
1117 
1118 /*
1119  * force the device into the DEV_CFG state; the caller is supposed to
1120  * ensure buffers are drained
1121  */
1122 void
1123 dev_freebufs(struct dev *d)
1124 {
1125 #ifdef DEBUG
1126 	if (log_level >= 3) {
1127 		dev_log(d);
1128 		log_puts(": closing\n");
1129 	}
1130 #endif
1131 	if (d->mode & MODE_PLAY) {
1132 		if (d->encbuf != NULL)
1133 			xfree(d->encbuf);
1134 		xfree(d->pbuf);
1135 	}
1136 	if (d->mode & MODE_REC) {
1137 		if (d->decbuf != NULL)
1138 			xfree(d->decbuf);
1139 		xfree(d->rbuf);
1140 	}
1141 }
1142 
1143 /*
1144  * Close the device and exit all slots
1145  */
1146 void
1147 dev_close(struct dev *d)
1148 {
1149 	d->pstate = DEV_CFG;
1150 	dev_sio_close(d);
1151 	dev_freebufs(d);
1152 
1153 	if (d->master_enabled) {
1154 		d->master_enabled = 0;
1155 		ctl_del(CTL_DEV_MASTER, d, NULL);
1156 	}
1157 }
1158 
1159 int
1160 dev_ref(struct dev *d)
1161 {
1162 #ifdef DEBUG
1163 	if (log_level >= 3) {
1164 		dev_log(d);
1165 		log_puts(": device requested\n");
1166 	}
1167 #endif
1168 	if (d->pstate == DEV_CFG && !dev_open(d))
1169 		return 0;
1170 	d->refcnt++;
1171 	return 1;
1172 }
1173 
1174 void
1175 dev_unref(struct dev *d)
1176 {
1177 #ifdef DEBUG
1178 	if (log_level >= 3) {
1179 		dev_log(d);
1180 		log_puts(": device released\n");
1181 	}
1182 #endif
1183 	d->refcnt--;
1184 	if (d->refcnt == 0 && d->pstate == DEV_INIT)
1185 		dev_close(d);
1186 }
1187 
1188 /*
1189  * initialize the device with the current parameters
1190  */
1191 int
1192 dev_init(struct dev *d)
1193 {
1194 	if ((d->reqmode & MODE_AUDIOMASK) == 0) {
1195 #ifdef DEBUG
1196 		dev_log(d);
1197 		log_puts(": has no streams\n");
1198 #endif
1199 		return 0;
1200 	}
1201 	if (d->hold && !dev_ref(d))
1202 		return 0;
1203 	return 1;
1204 }
1205 
1206 /*
1207  * Unless the device is already in the process of closing, request it to close
1208  */
1209 void
1210 dev_done(struct dev *d)
1211 {
1212 #ifdef DEBUG
1213 	if (log_level >= 3) {
1214 		dev_log(d);
1215 		log_puts(": draining\n");
1216 	}
1217 #endif
1218 	if (mtc_array[0].dev == d && mtc_array[0].tstate != MTC_STOP)
1219 		mtc_stop(&mtc_array[0]);
1220 	if (d->hold)
1221 		dev_unref(d);
1222 }
1223 
1224 struct dev *
1225 dev_bynum(int num)
1226 {
1227 	struct dev *d;
1228 
1229 	for (d = dev_list; d != NULL; d = d->next) {
1230 		if (d->num == num)
1231 			return d;
1232 	}
1233 	return NULL;
1234 }
1235 
1236 /*
1237  * Free the device
1238  */
1239 void
1240 dev_del(struct dev *d)
1241 {
1242 	struct dev **p;
1243 
1244 #ifdef DEBUG
1245 	if (log_level >= 3) {
1246 		dev_log(d);
1247 		log_puts(": deleting\n");
1248 	}
1249 #endif
1250 	if (d->pstate != DEV_CFG)
1251 		dev_close(d);
1252 	for (p = &dev_list; *p != d; p = &(*p)->next) {
1253 #ifdef DEBUG
1254 		if (*p == NULL) {
1255 			dev_log(d);
1256 			log_puts(": device to delete not on the list\n");
1257 			panic();
1258 		}
1259 #endif
1260 	}
1261 	*p = d->next;
1262 	xfree(d);
1263 }
1264 
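/*
 * Return the number of frames that, at the given rate, covers the same
 * duration as one block (d->round frames) at the device rate, rounded
 * to nearest (e.g. 480 frames at 48kHz map to 441 frames at 44.1kHz).
 */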
1265 unsigned int
1266 dev_roundof(struct dev *d, unsigned int newrate)
1267 {
1268 	return (d->round * newrate + d->rate / 2) / d->rate;
1269 }
1270 
1271 /*
1272  * If the device is paused, then resume it.
1273  */
1274 void
1275 dev_wakeup(struct dev *d)
1276 {
1277 	if (d->pstate == DEV_INIT) {
1278 		if (log_level >= 2) {
1279 			dev_log(d);
1280 			log_puts(": device started\n");
1281 		}
1282 		if (d->mode & MODE_PLAY) {
1283 			d->prime = d->bufsz;
1284 		} else {
1285 			d->prime = 0;
1286 		}
1287 		d->poffs = 0;
1288 
1289 		/*
1290 		 * empty cycles don't increment delta, so it's ok to
1291 		 * start at 0
1292 		 */
1293 		d->delta = 0;
1294 
1295 		d->pstate = DEV_RUN;
1296 		dev_sio_start(d);
1297 	}
1298 }
1299 
1300 /*
1301  * Return true if both of the given devices can run the same
1302  * clients
1303  */
1304 int
1305 dev_iscompat(struct dev *o, struct dev *n)
1306 {
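	/*
	 * Compatible means the same block duration (round / rate) and
	 * the same buffer duration (bufsz / rate); cross-multiply to
	 * avoid divisions.
	 */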
1307 	if (((long long)o->round * n->rate != (long long)n->round * o->rate) ||
1308 	    ((long long)o->bufsz * n->rate != (long long)n->bufsz * o->rate)) {
1309 		if (log_level >= 1) {
1310 			log_puts(n->name);
1311 			log_puts(": not compatible with ");
1312 			log_puts(o->name);
1313 			log_puts("\n");
1314 		}
1315 		return 0;
1316 	}
1317 	return 1;
1318 }
1319 
1320 /*
1321  * Close the device, but attempt to migrate everything to a new sndio
1322  * device.
1323  */
1324 struct dev *
1325 dev_migrate(struct dev *odev)
1326 {
1327 	struct dev *ndev;
1328 	struct opt *o;
1329 	struct slot *s;
1330 	int i;
1331 
1332 	/* not opened */
1333 	if (odev->pstate == DEV_CFG)
1334 		return odev;
1335 
1336 	ndev = odev;
1337 	while (1) {
1338 		/* try next one, circulating through the list */
1339 		ndev = ndev->alt_next;
1340 		if (ndev == odev) {
1341 			if (log_level >= 1) {
1342 				dev_log(odev);
1343 				log_puts(": no fall-back device found\n");
1344 			}
1345 			return NULL;
1346 		}
1347 
1348 
1349 		if (!dev_ref(ndev))
1350 			continue;
1351 
1352 		/* check if new parameters are compatible with old ones */
1353 		if (!dev_iscompat(odev, ndev)) {
1354 			dev_unref(ndev);
1355 			continue;
1356 		}
1357 
1358 		/* found it! */
1359 		break;
1360 	}
1361 
1362 	if (log_level >= 1) {
1363 		dev_log(odev);
1364 		log_puts(": switching to ");
1365 		dev_log(ndev);
1366 		log_puts("\n");
1367 	}
1368 
1369 	if (mtc_array[0].dev == odev)
1370 		mtc_setdev(&mtc_array[0], ndev);
1371 
1372 	/* move opts to new device (also moves clients using the opts) */
1373 	for (o = opt_list; o != NULL; o = o->next) {
1374 		if (o->dev != odev)
1375 			continue;
1376 		if (strcmp(o->name, o->dev->name) == 0)
1377 			continue;
1378 		opt_setdev(o, ndev);
1379 	}
1380 
1381 	/* terminate remaining clients */
1382 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1383 		if (s->opt == NULL || s->opt->dev != odev)
1384 			continue;
1385 		if (s->ops != NULL) {
1386 			s->ops->exit(s);
1387 			s->ops = NULL;
1388 		}
1389 	}
1390 
1391 	/* slots and/or MMC hold refs, drop ours */
1392 	dev_unref(ndev);
1393 
1394 	return ndev;
1395 }
1396 
1397 /*
1398  * check that all clients controlled by MMC are ready to start, if so,
1399  * attach them all at the same position
1400  */
1401 void
1402 mtc_trigger(struct mtc *mtc)
1403 {
1404 	int i;
1405 	struct slot *s;
1406 
1407 	if (mtc->tstate != MTC_START) {
1408 		if (log_level >= 2) {
1409 			dev_log(mtc->dev);
1410 			log_puts(": not started by mmc yet, waiting...\n");
1411 		}
1412 		return;
1413 	}
1414 
1415 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1416 		if (s->opt == NULL || s->opt->mtc != mtc)
1417 			continue;
1418 		if (s->pstate != SLOT_READY) {
1419 #ifdef DEBUG
1420 			if (log_level >= 3) {
1421 				slot_log(s);
1422 				log_puts(": not ready, start delayed\n");
1423 			}
1424 #endif
1425 			return;
1426 		}
1427 	}
1428 	if (!dev_ref(mtc->dev))
1429 		return;
1430 
1431 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1432 		if (s->opt == NULL || s->opt->mtc != mtc)
1433 			continue;
1434 		slot_attach(s);
1435 		s->pstate = SLOT_RUN;
1436 	}
1437 	mtc->tstate = MTC_RUN;
1438 	mtc_midi_full(mtc);
1439 	dev_wakeup(mtc->dev);
1440 }
1441 
1442 /*
1443  * start all slots simultaneously
1444  */
1445 void
1446 mtc_start(struct mtc *mtc)
1447 {
1448 	if (mtc->tstate == MTC_STOP) {
1449 		mtc->tstate = MTC_START;
1450 		mtc_trigger(mtc);
1451 #ifdef DEBUG
1452 	} else {
1453 		if (log_level >= 3) {
1454 			dev_log(mtc->dev);
1455 			log_puts(": ignoring mmc start\n");
1456 		}
1457 #endif
1458 	}
1459 }
1460 
1461 /*
1462  * stop all slots simultaneously
1463  */
1464 void
1465 mtc_stop(struct mtc *mtc)
1466 {
1467 	switch (mtc->tstate) {
1468 	case MTC_START:
1469 		mtc->tstate = MTC_STOP;
1470 		return;
1471 	case MTC_RUN:
1472 		mtc->tstate = MTC_STOP;
1473 		dev_unref(mtc->dev);
1474 		break;
1475 	default:
1476 #ifdef DEBUG
1477 		if (log_level >= 3) {
1478 			dev_log(mtc->dev);
1479 			log_puts(": ignored mmc stop\n");
1480 		}
1481 #endif
1482 		return;
1483 	}
1484 }
1485 
1486 /*
1487  * relocate all slots simultaneously
1488  */
1489 void
1490 mtc_loc(struct mtc *mtc, unsigned int origin)
1491 {
1492 	if (log_level >= 2) {
1493 		dev_log(mtc->dev);
1494 		log_puts(": relocated to ");
1495 		log_putu(origin);
1496 		log_puts("\n");
1497 	}
1498 	if (mtc->tstate == MTC_RUN)
1499 		mtc_stop(mtc);
1500 	mtc->origin = origin;
1501 	if (mtc->tstate == MTC_RUN)
1502 		mtc_start(mtc);
1503 }
1504 
1505 /*
1506  * set MMC device
1507  */
1508 void
1509 mtc_setdev(struct mtc *mtc, struct dev *d)
1510 {
1511 	struct opt *o;
1512 
1513 	if (mtc->dev == d)
1514 		return;
1515 
1516 	if (log_level >= 2) {
1517 		dev_log(d);
1518 		log_puts(": set to be MIDI clock source\n");
1519 	}
1520 
1521 	/* adjust clock and ref counter, if needed */
1522 	if (mtc->tstate == MTC_RUN) {
1523 		mtc->delta -= mtc->dev->delta;
1524 		dev_unref(mtc->dev);
1525 	}
1526 
1527 	mtc->dev = d;
1528 
1529 	if (mtc->tstate == MTC_RUN) {
1530 		mtc->delta += mtc->dev->delta;
1531 		dev_ref(mtc->dev);
1532 		dev_wakeup(mtc->dev);
1533 	}
1534 
1535 	/* move anything using MMC to the new device */
1536 	for (o = opt_list; o != NULL; o = o->next) {
1537 		if (o->mtc == mtc)
1538 			opt_setdev(o, mtc->dev);
1539 	}
1540 }
1541 
1542 /*
1543  * allocate buffers & conversion chain
1544  */
1545 void
1546 slot_initconv(struct slot *s)
1547 {
1548 	unsigned int dev_nch;
1549 	struct dev *d = s->opt->dev;
1550 
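	/*
	 * With the "dup" option, channels are duplicated (expand) or
	 * folded (join) so that the narrower of the two channel ranges
	 * still covers the wider one; dev_nch is the number of device
	 * channels this sub-device may actually use.
	 */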
1551 	if (s->mode & MODE_PLAY) {
1552 		cmap_init(&s->mix.cmap,
1553 		    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1554 		    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1555 		    0, d->pchan - 1,
1556 		    s->opt->pmin, s->opt->pmax);
1557 		s->mix.decbuf = NULL;
1558 		s->mix.resampbuf = NULL;
1559 		if (!aparams_native(&s->par)) {
1560 			dec_init(&s->mix.dec, &s->par, s->mix.nch);
1561 			s->mix.decbuf =
1562 			    xmalloc(s->round * s->mix.nch * sizeof(adata_t));
1563 		}
1564 		if (s->rate != d->rate) {
1565 			resamp_init(&s->mix.resamp, s->round, d->round,
1566 			    s->mix.nch);
1567 			s->mix.resampbuf =
1568 			    xmalloc(d->round * s->mix.nch * sizeof(adata_t));
1569 		}
1570 		s->mix.join = 1;
1571 		s->mix.expand = 1;
1572 		if (s->opt->dup && s->mix.cmap.nch > 0) {
1573 			dev_nch = d->pchan < (s->opt->pmax + 1) ?
1574 			    d->pchan - s->opt->pmin :
1575 			    s->opt->pmax - s->opt->pmin + 1;
1576 			if (dev_nch > s->mix.nch)
1577 				s->mix.expand = dev_nch / s->mix.nch;
1578 			else if (s->mix.nch > dev_nch)
1579 				s->mix.join = s->mix.nch / dev_nch;
1580 		}
1581 	}
1582 
1583 	if (s->mode & MODE_RECMASK) {
1584 		unsigned int outchan = (s->opt->mode & MODE_MON) ?
1585 		    d->pchan : d->rchan;
1586 
1587 		s->sub.encbuf = NULL;
1588 		s->sub.resampbuf = NULL;
1589 		cmap_init(&s->sub.cmap,
1590 		    0, outchan - 1,
1591 		    s->opt->rmin, s->opt->rmax,
1592 		    s->opt->rmin, s->opt->rmin + s->sub.nch - 1,
1593 		    s->opt->rmin, s->opt->rmin + s->sub.nch - 1);
1594 		if (s->rate != d->rate) {
1595 			resamp_init(&s->sub.resamp, d->round, s->round,
1596 			    s->sub.nch);
1597 			s->sub.resampbuf =
1598 			    xmalloc(d->round * s->sub.nch * sizeof(adata_t));
1599 		}
1600 		if (!aparams_native(&s->par)) {
1601 			enc_init(&s->sub.enc, &s->par, s->sub.nch);
1602 			s->sub.encbuf =
1603 			    xmalloc(s->round * s->sub.nch * sizeof(adata_t));
1604 		}
1605 		s->sub.join = 1;
1606 		s->sub.expand = 1;
1607 		if (s->opt->dup && s->sub.cmap.nch > 0) {
1608 			dev_nch = outchan < (s->opt->rmax + 1) ?
1609 			    outchan - s->opt->rmin :
1610 			    s->opt->rmax - s->opt->rmin + 1;
1611 			if (dev_nch > s->sub.nch)
1612 				s->sub.join = dev_nch / s->sub.nch;
1613 			else if (s->sub.nch > dev_nch)
1614 				s->sub.expand = s->sub.nch / dev_nch;
1615 		}
1616 
1617 		/*
1618 		 * cmap_copy() doesn't write samples in all channels,
1619 		 * for instance when mono->stereo conversion is
1620 		 * disabled. So we have to prefill cmap_copy() output
1621 		 * with silence.
1622 		 */
1623 		if (s->sub.resampbuf) {
1624 			memset(s->sub.resampbuf, 0,
1625 			    d->round * s->sub.nch * sizeof(adata_t));
1626 		} else if (s->sub.encbuf) {
1627 			memset(s->sub.encbuf, 0,
1628 			    s->round * s->sub.nch * sizeof(adata_t));
1629 		} else {
1630 			memset(s->sub.buf.data, 0,
1631 			    s->appbufsz * s->sub.nch * sizeof(adata_t));
1632 		}
1633 	}
1634 }
1635 
1636 /*
1637  * allocate buffers & conversion chain
1638  */
1639 void
1640 slot_allocbufs(struct slot *s)
1641 {
1642 	if (s->mode & MODE_PLAY) {
1643 		s->mix.bpf = s->par.bps * s->mix.nch;
1644 		abuf_init(&s->mix.buf, s->appbufsz * s->mix.bpf);
1645 	}
1646 
1647 	if (s->mode & MODE_RECMASK) {
1648 		s->sub.bpf = s->par.bps * s->sub.nch;
1649 		abuf_init(&s->sub.buf, s->appbufsz * s->sub.bpf);
1650 	}
1651 
1652 #ifdef DEBUG
1653 	if (log_level >= 3) {
1654 		slot_log(s);
1655 		log_puts(": allocated ");
1656 		log_putu(s->appbufsz);
1657 		log_puts("/");
1658 		log_putu(SLOT_BUFSZ(s));
1659 		log_puts(" fr buffers\n");
1660 	}
1661 #endif
1662 }
1663 
1664 /*
1665  * free buffers & conversion chain
1666  */
1667 void
1668 slot_freebufs(struct slot *s)
1669 {
1670 	if (s->mode & MODE_RECMASK) {
1671 		abuf_done(&s->sub.buf);
1672 	}
1673 
1674 	if (s->mode & MODE_PLAY) {
1675 		abuf_done(&s->mix.buf);
1676 	}
1677 }
1678 
1679 /*
1680  * allocate a new slot and register the given call-backs
1681  */
1682 struct slot *
1683 slot_new(struct opt *opt, unsigned int id, char *who,
1684     struct slotops *ops, void *arg, int mode)
1685 {
1686 	char *p;
1687 	char name[SLOT_NAMEMAX];
1688 	char ctl_name[CTL_NAMEMAX];
1689 	unsigned int i, ser, bestser, bestidx;
1690 	struct slot *unit[DEV_NSLOT];
1691 	struct slot *s;
1692 
1693 	/*
1694 	 * create a ``valid'' control name (lowercase, remove [^a-z], truncate)
1695 	 */
1696 	for (i = 0, p = who; ; p++) {
1697 		if (i == SLOT_NAMEMAX - 1 || *p == '\0') {
1698 			name[i] = '\0';
1699 			break;
1700 		} else if (*p >= 'A' && *p <= 'Z') {
1701 			name[i++] = *p + 'a' - 'A';
1702 		} else if (*p >= 'a' && *p <= 'z')
1703 			name[i++] = *p;
1704 	}
1705 	if (i == 0)
1706 		strlcpy(name, "noname", SLOT_NAMEMAX);
1707 
1708 	/*
1709 	 * build a unit-to-slot map for this name
1710 	 */
1711 	for (i = 0; i < DEV_NSLOT; i++)
1712 		unit[i] = NULL;
1713 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1714 		if (strcmp(s->name, name) == 0)
1715 			unit[s->unit] = s;
1716 	}
1717 
1718 	/*
1719 	 * find the free slot with the least unit number and same id
1720 	 */
1721 	for (i = 0; i < DEV_NSLOT; i++) {
1722 		s = unit[i];
1723 		if (s != NULL && s->ops == NULL && s->id == id)
1724 			goto found;
1725 	}
1726 
1727 	/*
1728 	 * find the free slot with the least unit number
1729 	 */
1730 	for (i = 0; i < DEV_NSLOT; i++) {
1731 		s = unit[i];
1732 		if (s != NULL && s->ops == NULL) {
1733 			s->id = id;
1734 			goto found;
1735 		}
1736 	}
1737 
1738 	/*
1739 	 * couldn't find a matching slot, pick oldest free slot
1740 	 * and set its name/unit
1741 	 */
1742 	bestser = 0;
1743 	bestidx = DEV_NSLOT;
1744 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1745 		if (s->ops != NULL)
1746 			continue;
1747 		ser = slot_serial - s->serial;
1748 		if (ser > bestser) {
1749 			bestser = ser;
1750 			bestidx = i;
1751 		}
1752 	}
1753 
1754 	if (bestidx == DEV_NSLOT) {
1755 		if (log_level >= 1) {
1756 			log_puts(name);
1757 			log_puts(": out of sub-device slots\n");
1758 		}
1759 		return NULL;
1760 	}
1761 
1762 	s = slot_array + bestidx;
1763 	ctl_del(CTL_SLOT_LEVEL, s, NULL);
1764 	s->vol = MIDI_MAXCTL;
1765 	strlcpy(s->name, name, SLOT_NAMEMAX);
1766 	s->serial = slot_serial++;
1767 	for (i = 0; unit[i] != NULL; i++)
1768 		; /* nothing */
1769 	s->unit = i;
1770 	s->id = id;
1771 	s->opt = opt;
1772 	slot_ctlname(s, ctl_name, CTL_NAMEMAX);
1773 	ctl_new(CTL_SLOT_LEVEL, s, NULL,
1774 	    CTL_NUM, "app", ctl_name, -1, "level",
1775 	    NULL, -1, 127, s->vol);
1776 
1777 found:
1778 	/* open device, this may change opt's device */
1779 	if (!opt_ref(s->opt))
1780 		return NULL;
1781 	s->opt = opt;
1782 	s->ops = ops;
1783 	s->arg = arg;
1784 	s->pstate = SLOT_INIT;
1785 	s->mode = mode;
1786 	aparams_init(&s->par);
1787 	if (s->mode & MODE_PLAY)
1788 		s->mix.nch = s->opt->pmax - s->opt->pmin + 1;
1789 	if (s->mode & MODE_RECMASK)
1790 		s->sub.nch = s->opt->rmax - s->opt->rmin + 1;
1791 	s->xrun = s->opt->mtc != NULL ? XRUN_SYNC : XRUN_IGNORE;
1792 	s->appbufsz = s->opt->dev->bufsz;
1793 	s->round = s->opt->dev->round;
1794 	s->rate = s->opt->dev->rate;
1795 	dev_midi_slotdesc(s->opt->dev, s);
1796 	dev_midi_vol(s->opt->dev, s);
1797 #ifdef DEBUG
1798 	if (log_level >= 3) {
1799 		slot_log(s);
1800 		log_puts(": using ");
1801 		log_puts(s->opt->name);
1802 		log_puts(", mode = ");
1803 		log_putx(mode);
1804 		log_puts("\n");
1805 	}
1806 #endif
1807 	return s;
1808 }
1809 
1810 /*
1811  * release the given slot
1812  */
1813 void
1814 slot_del(struct slot *s)
1815 {
1816 	s->arg = s;
1817 	s->ops = &zomb_slotops;
1818 	switch (s->pstate) {
1819 	case SLOT_INIT:
1820 		s->ops = NULL;
1821 		break;
1822 	case SLOT_START:
1823 	case SLOT_READY:
1824 	case SLOT_RUN:
1825 	case SLOT_STOP:
1826 		slot_stop(s, 0);
1827 		break;
1828 	}
1829 	opt_unref(s->opt);
1830 }
1831 
1832 /*
1833  * change the slot play volume; called either by the slot or by MIDI
1834  */
1835 void
1836 slot_setvol(struct slot *s, unsigned int vol)
1837 {
1838 #ifdef DEBUG
1839 	if (log_level >= 3) {
1840 		slot_log(s);
1841 		log_puts(": setting volume ");
1842 		log_putu(vol);
1843 		log_puts("\n");
1844 	}
1845 #endif
1846 	s->vol = vol;
1847 	s->mix.vol = MIDI_TO_ADATA(s->vol);
1848 }
1849 
1850 /*
1851  * move the slot to the given opt (and its device)
1852  */
1853 void
1854 slot_setopt(struct slot *s, struct opt *o)
1855 {
1856 	struct opt *t;
1857 	struct dev *odev, *ndev;
1858 	struct ctl *c;
1859 
1860 	if (s->opt == NULL || s->opt == o)
1861 		return;
1862 
1863 	if (log_level >= 2) {
1864 		slot_log(s);
1865 		log_puts(": moving to opt ");
1866 		log_puts(o->name);
1867 		log_puts("\n");
1868 	}
1869 
1870 	odev = s->opt->dev;
1871 	if (s->ops != NULL) {
1872 		ndev = opt_ref(o);
1873 		if (ndev == NULL)
1874 			return;
1875 
1876 		if (!dev_iscompat(odev, ndev)) {
1877 			opt_unref(o);
1878 			return;
1879 		}
1880 	}
1881 
1882 	if (s->pstate == SLOT_RUN || s->pstate == SLOT_STOP)
1883 		slot_detach(s);
1884 
1885 	t = s->opt;
1886 	s->opt = o;
1887 
1888 	c = ctl_find(CTL_SLOT_LEVEL, s, NULL);
1889 	ctl_update(c);
1890 
1891 	if (o->dev != t->dev) {
1892 		dev_midi_slotdesc(odev, s);
1893 		dev_midi_slotdesc(ndev, s);
1894 		dev_midi_vol(ndev, s);
1895 	}
1896 
1897 	if (s->pstate == SLOT_RUN || s->pstate == SLOT_STOP)
1898 		slot_attach(s);
1899 
1900 	if (s->ops != NULL) {
1901 		opt_unref(t);
1902 		return;
1903 	}
1904 }
1905 
1906 /*
1907  * attach the slot to the device (i.e. start playing & recording)
1908  */
1909 void
1910 slot_attach(struct slot *s)
1911 {
1912 	struct dev *d = s->opt->dev;
1913 	long long pos;
1914 
1915 	if (((s->mode & MODE_PLAY) && !(s->opt->mode & MODE_PLAY)) ||
1916 	    ((s->mode & MODE_RECMASK) && !(s->opt->mode & MODE_RECMASK))) {
1917 		if (log_level >= 1) {
1918 			slot_log(s);
1919 			log_puts(" at ");
1920 			log_puts(s->opt->name);
1921 			log_puts(": mode not allowed on this sub-device\n");
1922 		}
1923 	}
1924 
1925 	/*
1926 	 * set up the conversion layer
1927 	 */
1928 	slot_initconv(s);
1929 
1930 	/*
1931 	 * start the device if not started
1932 	 */
1933 	dev_wakeup(d);
1934 
1935 	/*
1936 	 * adjust initial clock
1937 	 */
1938 	pos = s->delta_rem +
1939 	    (long long)s->delta * d->round +
1940 	    (long long)d->delta * s->round;
1941 	s->delta = pos / (int)d->round;
1942 	s->delta_rem = pos % d->round;
1943 	if (s->delta_rem < 0) {
1944 		s->delta_rem += d->round;
1945 		s->delta--;
1946 	}
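	/*
	 * Note: the device position (d->delta) added here is subtracted
	 * again by slot_detach(), so a detach/attach pair leaves the
	 * slot clock unchanged.
	 */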
1947 
1948 #ifdef DEBUG
1949 	if (log_level >= 2) {
1950 		slot_log(s);
1951 		log_puts(": attached at ");
1952 		log_puti(s->delta);
1953 		log_puts(" + ");
1954 		log_puti(s->delta_rem);
1955 		log_puts("/");
1956 		log_puti(s->round);
1957 		log_puts("\n");
1958 	}
1959 #endif
1960 
1961 	/*
1962 	 * We don't check whether the device is dying,
1963 	 * because dev_xxx() functions are supposed to
1964 	 * work (i.e., not to crash)
1965 	 */
1966 
1967 	s->next = d->slot_list;
1968 	d->slot_list = s;
1969 	if (s->mode & MODE_PLAY) {
1970 		s->mix.vol = MIDI_TO_ADATA(s->vol);
1971 		dev_mix_adjvol(d);
1972 	}
1973 }
1974 
1975 /*
1976  * if MMC is enabled, try to attach all slots synchronously, else
1977  * simply attach the slot
1978  */
1979 void
1980 slot_ready(struct slot *s)
1981 {
1982 	/*
1983 	 * device may be disconnected, and if so we're called from
1984 	 * slot->ops->exit() on a closed device
1985 	 */
1986 	if (s->opt->dev->pstate == DEV_CFG)
1987 		return;
1988 	if (s->opt->mtc == NULL) {
1989 		slot_attach(s);
1990 		s->pstate = SLOT_RUN;
1991 	} else
1992 		mtc_trigger(s->opt->mtc);
1993 }
1994 
1995 /*
1996  * setup buffers & conversion layers, prepare the slot to receive data
1997  * (for playback) or start (recording).
1998  */
1999 void
2000 slot_start(struct slot *s)
2001 {
2002 	struct dev *d = s->opt->dev;
2003 #ifdef DEBUG
2004 	if (s->pstate != SLOT_INIT) {
2005 		slot_log(s);
2006 		log_puts(": slot_start: wrong state\n");
2007 		panic();
2008 	}
2009 	if (s->mode & MODE_PLAY) {
2010 		if (log_level >= 3) {
2011 			slot_log(s);
2012 			log_puts(": playing ");
2013 			aparams_log(&s->par);
2014 			log_puts(" -> ");
2015 			aparams_log(&d->par);
2016 			log_puts("\n");
2017 		}
2018 	}
2019 	if (s->mode & MODE_RECMASK) {
2020 		if (log_level >= 3) {
2021 			slot_log(s);
2022 			log_puts(": recording ");
2023 			aparams_log(&s->par);
2024 			log_puts(" <- ");
2025 			aparams_log(&d->par);
2026 			log_puts("\n");
2027 		}
2028 	}
2029 #endif
2030 	slot_allocbufs(s);
2031 
2032 	if (s->mode & MODE_RECMASK) {
2033 		/*
2034 		 * N-th recorded block is the N-th played block
2035 		 */
2036 		s->sub.prime = d->bufsz / d->round;
2037 	}
2038 	s->skip = 0;
2039 
2040 	/*
2041 	 * get the current position; the origin is when the first sample
2042 	 * is played and/or recorded
2043 	 */
2044 	s->delta = -(long long)d->bufsz * s->round / d->round;
2045 	s->delta_rem = 0;
2046 
2047 	if (s->mode & MODE_PLAY) {
2048 		s->pstate = SLOT_START;
2049 	} else {
2050 		s->pstate = SLOT_READY;
2051 		slot_ready(s);
2052 	}
2053 }
2054 
2055 /*
2056  * stop playback and recording, and free conversion layers
2057  */
2058 void
2059 slot_detach(struct slot *s)
2060 {
2061 	struct slot **ps;
2062 	struct dev *d = s->opt->dev;
2063 	long long pos;
2064 
2065 	for (ps = &d->slot_list; *ps != s; ps = &(*ps)->next) {
2066 #ifdef DEBUG
2067 		if (*ps == NULL) {
2068 			slot_log(s);
2069 			log_puts(": can't detach, not on list\n");
2070 			panic();
2071 		}
2072 #endif
2073 	}
2074 	*ps = s->next;
2075 
2076 	/*
2077 	 * adjust clock, go back d->delta ticks so that slot_attach()
2078 	 * could be called with the resulting state
2079 	 */
2080 	pos = s->delta_rem +
2081 	    (long long)s->delta * d->round -
2082 	    (long long)d->delta * s->round;
2083 	s->delta = pos / (int)d->round;
2084 	s->delta_rem = pos % d->round;
2085 	if (s->delta_rem < 0) {
2086 		s->delta_rem += d->round;
2087 		s->delta--;
2088 	}
2089 
2090 #ifdef DEBUG
2091 	if (log_level >= 2) {
2092 		slot_log(s);
2093 		log_puts(": detached at ");
2094 		log_puti(s->delta);
2095 		log_puts(" + ");
2096 		log_puti(s->delta_rem);
2097 		log_puts("/");
2098 		log_puti(d->round);
2099 		log_puts("\n");
2100 	}
2101 #endif
2102 	if (s->mode & MODE_PLAY)
2103 		dev_mix_adjvol(d);
2104 
2105 	if (s->mode & MODE_RECMASK) {
2106 		if (s->sub.encbuf) {
2107 			xfree(s->sub.encbuf);
2108 			s->sub.encbuf = NULL;
2109 		}
2110 		if (s->sub.resampbuf) {
2111 			xfree(s->sub.resampbuf);
2112 			s->sub.resampbuf = NULL;
2113 		}
2114 	}
2115 
2116 	if (s->mode & MODE_PLAY) {
2117 		if (s->mix.decbuf) {
2118 			xfree(s->mix.decbuf);
2119 			s->mix.decbuf = NULL;
2120 		}
2121 		if (s->mix.resampbuf) {
2122 			xfree(s->mix.resampbuf);
2123 			s->mix.resampbuf = NULL;
2124 		}
2125 	}
2126 }
2127 
2128 /*
2129  * put the slot in stopping state (draining play buffers) or
2130  * stop & detach if no data to drain.
2131  */
2132 void
2133 slot_stop(struct slot *s, int drain)
2134 {
2135 #ifdef DEBUG
2136 	if (log_level >= 3) {
2137 		slot_log(s);
2138 		log_puts(": stopping\n");
2139 	}
2140 #endif
2141 	if (s->pstate == SLOT_START) {
2142 		/*
2143 		 * If in rec-only mode, we're already in the READY or
2144 		 * RUN states. We're here because the play buffer was
2145 		 * not full enough; try to start so it's drained.
2146 		 */
2147 		s->pstate = SLOT_READY;
2148 		slot_ready(s);
2149 	}
2150 
2151 	if (s->pstate == SLOT_RUN) {
2152 		if ((s->mode & MODE_PLAY) && drain) {
2153 			/*
2154 			 * Don't detach, dev_cycle() will do it for us
2155 			 * when the buffer is drained.
2156 			 */
2157 			s->pstate = SLOT_STOP;
2158 			return;
2159 		}
2160 		slot_detach(s);
2161 	} else if (s->pstate == SLOT_STOP) {
2162 		slot_detach(s);
2163 	} else {
2164 #ifdef DEBUG
2165 		if (log_level >= 3) {
2166 			slot_log(s);
2167 			log_puts(": not drained (blocked by mmc)\n");
2168 		}
2169 #endif
2170 	}
2171 
2172 	s->pstate = SLOT_INIT;
2173 	s->ops->eof(s->arg);
2174 	slot_freebufs(s);
2175 }
2176 
2177 void
2178 slot_skip_update(struct slot *s)
2179 {
2180 	int skip;
2181 
2182 	skip = slot_skip(s);
2183 	while (skip > 0) {
2184 #ifdef DEBUG
2185 		if (log_level >= 4) {
2186 			slot_log(s);
2187 			log_puts(": catching skipped block\n");
2188 		}
2189 #endif
2190 		if (s->mode & MODE_RECMASK)
2191 			s->ops->flush(s->arg);
2192 		if (s->mode & MODE_PLAY)
2193 			s->ops->fill(s->arg);
2194 		skip--;
2195 	}
2196 }
2197 
2198 /*
2199  * notify the slot that we just wrote in the play buffer, must be called
2200  * after each write
2201  */
2202 void
2203 slot_write(struct slot *s)
2204 {
2205 	if (s->pstate == SLOT_START && s->mix.buf.used == s->mix.buf.len) {
2206 #ifdef DEBUG
2207 		if (log_level >= 4) {
2208 			slot_log(s);
2209 			log_puts(": switching to READY state\n");
2210 		}
2211 #endif
2212 		s->pstate = SLOT_READY;
2213 		slot_ready(s);
2214 	}
2215 	slot_skip_update(s);
2216 }
2217 
2218 /*
2219  * notify the slot that we freed some space in the rec buffer
2220  */
2221 void
2222 slot_read(struct slot *s)
2223 {
2224 	slot_skip_update(s);
2225 }
2226 
2227 /*
2228  * allocate a control slot
2229  */
2230 struct ctlslot *
2231 ctlslot_new(struct opt *o, struct ctlops *ops, void *arg)
2232 {
2233 	struct ctlslot *s;
2234 	struct ctl *c;
2235 	int i;
2236 
2237 	i = 0;
2238 	for (;;) {
2239 		if (i == DEV_NCTLSLOT)
2240 			return NULL;
2241 		s = ctlslot_array + i;
2242 		if (s->ops == NULL)
2243 			break;
2244 		i++;
2245 	}
2246 	s->opt = o;
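	/* one bit per control slot, used in the refs_mask/desc_mask bitmaps */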
2247 	s->self = 1 << i;
2248 	if (!opt_ref(o))
2249 		return NULL;
2250 	s->ops = ops;
2251 	s->arg = arg;
2252 	for (c = ctl_list; c != NULL; c = c->next) {
2253 		if (!ctlslot_visible(s, c))
2254 			continue;
2255 		c->refs_mask |= s->self;
2256 	}
2257 	return s;
2258 }
2259 
2260 /*
2261  * free control slot
2262  */
2263 void
2264 ctlslot_del(struct ctlslot *s)
2265 {
2266 	struct ctl *c, **pc;
2267 
2268 	pc = &ctl_list;
2269 	while ((c = *pc) != NULL) {
2270 		c->refs_mask &= ~s->self;
2271 		if (c->refs_mask == 0) {
2272 			*pc = c->next;
2273 			xfree(c);
2274 		} else
2275 			pc = &c->next;
2276 	}
2277 	s->ops = NULL;
2278 	opt_unref(s->opt);
2279 }
2280 
2281 int
2282 ctlslot_visible(struct ctlslot *s, struct ctl *c)
2283 {
2284 	if (s->opt == NULL)
2285 		return 1;
2286 	switch (c->scope) {
2287 	case CTL_HW:
2288 	case CTL_DEV_MASTER:
2289 		return (s->opt->dev == c->u.any.arg0);
2290 	case CTL_OPT_DEV:
2291 		return (s->opt == c->u.any.arg0);
2292 	case CTL_SLOT_LEVEL:
2293 		return (s->opt->dev == c->u.slot_level.slot->opt->dev);
2294 	default:
2295 		return 0;
2296 	}
2297 }
2298 
2299 struct ctl *
2300 ctlslot_lookup(struct ctlslot *s, int addr)
2301 {
2302 	struct ctl *c;
2303 
2304 	c = ctl_list;
2305 	while (1) {
2306 		if (c == NULL)
2307 			return NULL;
2308 		if (c->type != CTL_NONE && c->addr == addr)
2309 			break;
2310 		c = c->next;
2311 	}
2312 	if (!ctlslot_visible(s, c))
2313 		return NULL;
2314 	return c;
2315 }
2316 
2317 void
2318 ctlslot_update(struct ctlslot *s)
2319 {
2320 	struct ctl *c;
2321 	unsigned int refs_mask;
2322 
2323 	for (c = ctl_list; c != NULL; c = c->next) {
2324 		if (c->type == CTL_NONE)
2325 			continue;
2326 		refs_mask = ctlslot_visible(s, c) ? s->self : 0;
2327 
2328 		/* nothing to do if no visibility change */
2329 		if (((c->refs_mask & s->self) ^ refs_mask) == 0)
2330 			continue;
2331 		/* if the control becomes visible */
2332 		if (refs_mask)
2333 			c->refs_mask |= s->self;
2334 		/* if control is hidden */
2335 		c->desc_mask |= s->self;
2336 	}
2337 }
2338 
2339 void
2340 ctl_node_log(struct ctl_node *c)
2341 {
2342 	log_puts(c->name);
2343 	if (c->unit >= 0)
2344 		log_putu(c->unit);
2345 }
2346 
2347 void
2348 ctl_log(struct ctl *c)
2349 {
2350 	if (c->group[0] != 0) {
2351 		log_puts(c->group);
2352 		log_puts("/");
2353 	}
2354 	ctl_node_log(&c->node0);
2355 	log_puts(".");
2356 	log_puts(c->func);
2357 	log_puts("=");
2358 	switch (c->type) {
2359 	case CTL_NONE:
2360 		log_puts("none");
2361 		break;
2362 	case CTL_NUM:
2363 	case CTL_SW:
2364 		log_putu(c->curval);
2365 		break;
2366 	case CTL_VEC:
2367 	case CTL_LIST:
2368 	case CTL_SEL:
2369 		ctl_node_log(&c->node1);
2370 		log_puts(":");
2371 		log_putu(c->curval);
2372 	}
2373 	log_puts(" at ");
2374 	log_putu(c->addr);
2375 	log_puts(" -> ");
2376 	switch (c->scope) {
2377 	case CTL_HW:
2378 		log_puts("hw:");
2379 		log_puts(c->u.hw.dev->name);
2380 		log_puts("/");
2381 		log_putu(c->u.hw.addr);
2382 		break;
2383 	case CTL_DEV_MASTER:
2384 		log_puts("dev_master:");
2385 		log_puts(c->u.dev_master.dev->name);
2386 		break;
2387 	case CTL_SLOT_LEVEL:
2388 		log_puts("slot_level:");
2389 		log_puts(c->u.slot_level.slot->name);
2390 		log_putu(c->u.slot_level.slot->unit);
2391 		break;
2392 	case CTL_OPT_DEV:
2393 		log_puts("opt_dev:");
2394 		log_puts(c->u.opt_dev.opt->name);
2395 		log_puts("/");
2396 		log_puts(c->u.opt_dev.dev->name);
2397 		break;
2398 	default:
2399 		log_puts("unknown");
2400 	}
2401 }
2402 
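/*
 * set the value of the given control; depending on the scope this
 * adjusts the software master level, a slot volume or the device of
 * an opt. Hardware controls are only marked as dirty and written to
 * the device later. Return 0 if the value is out of bounds or the
 * device can't be used.
 */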
2403 int
2404 ctl_setval(struct ctl *c, int val)
2405 {
2406 	if (c->curval == val) {
2407 		if (log_level >= 3) {
2408 			ctl_log(c);
2409 			log_puts(": already set\n");
2410 		}
2411 		return 1;
2412 	}
2413 	if (val < 0 || val > c->maxval) {
2414 		if (log_level >= 3) {
2415 			log_putu(val);
2416 			log_puts(": ctl val out of bounds\n");
2417 		}
2418 		return 0;
2419 	}
2420 
2421 	switch (c->scope) {
2422 	case CTL_HW:
2423 		if (log_level >= 3) {
2424 			ctl_log(c);
2425 			log_puts(": marked as dirty\n");
2426 		}
2427 		c->curval = val;
2428 		c->dirty = 1;
2429 		return dev_ref(c->u.hw.dev);
2430 	case CTL_DEV_MASTER:
2431 		if (!c->u.dev_master.dev->master_enabled)
2432 			return 1;
2433 		dev_master(c->u.dev_master.dev, val);
2434 		dev_midi_master(c->u.dev_master.dev);
2435 		c->val_mask = ~0U;
2436 		c->curval = val;
2437 		return 1;
2438 	case CTL_SLOT_LEVEL:
2439 		slot_setvol(c->u.slot_level.slot, val);
2440 		/* XXX: change dev_midi_vol() into slot_midi_vol() */
2441 		dev_midi_vol(c->u.slot_level.slot->opt->dev, c->u.slot_level.slot);
2442 		c->val_mask = ~0U;
2443 		c->curval = val;
2444 		return 1;
2445 	case CTL_OPT_DEV:
2446 		c->u.opt_dev.opt->alt_first = c->u.opt_dev.dev;
2447 		opt_setdev(c->u.opt_dev.opt, c->u.opt_dev.dev);
2448 		return 1;
2449 	default:
2450 		if (log_level >= 2) {
2451 			ctl_log(c);
2452 			log_puts(": not writable\n");
2453 		}
2454 		return 1;
2455 	}
2456 }
2457 
2458 /*
2459  * add a ctl
2460  */
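/*
 * For instance, dev_ctlsync() below registers the software master
 * volume as:
 *
 *	ctl_new(CTL_DEV_MASTER, d, NULL,
 *	    CTL_NUM, d->name, "output", -1, "level",
 *	    NULL, -1, 127, d->master);
 *
 * and a (hypothetical) hardware knob reported by the driver as
 * control number 3 could be registered as:
 *
 *	unsigned int hwaddr = 3;
 *	ctl_new(CTL_HW, d, &hwaddr, CTL_NUM, d->name,
 *	    "output", -1, "level", NULL, -1, 255, 200);
 */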
2461 struct ctl *
2462 ctl_new(int scope, void *arg0, void *arg1,
2463     int type, char *gstr,
2464     char *str0, int unit0, char *func,
2465     char *str1, int unit1, int maxval, int val)
2466 {
2467 	struct ctl *c, **pc;
2468 	struct ctlslot *s;
2469 	int addr;
2470 	int i;
2471 
2472 	/*
2473 	 * pick an unused addr number (one past the largest in use)
2474 	 * and find the last position in the list
2475 	 */
2476 	addr = 0;
2477 	for (pc = &ctl_list; (c = *pc) != NULL; pc = &c->next) {
2478 		if (c->addr > addr)
2479 			addr = c->addr;
2480 	}
2481 	addr++;
2482 
2483 	c = xmalloc(sizeof(struct ctl));
2484 	c->type = type;
2485 	strlcpy(c->func, func, CTL_NAMEMAX);
2486 	strlcpy(c->group, gstr, CTL_NAMEMAX);
2487 	strlcpy(c->node0.name, str0, CTL_NAMEMAX);
2488 	c->node0.unit = unit0;
2489 	if (c->type == CTL_VEC || c->type == CTL_LIST || c->type == CTL_SEL) {
2490 		strlcpy(c->node1.name, str1, CTL_NAMEMAX);
2491 		c->node1.unit = unit1;
2492 	} else
2493 		memset(&c->node1, 0, sizeof(struct ctl_node));
2494 	c->scope = scope;
2495 	c->u.any.arg0 = arg0;
2496 	switch (scope) {
2497 	case CTL_HW:
2498 		c->u.hw.addr = *(unsigned int *)arg1;
2499 		break;
2500 	case CTL_OPT_DEV:
2501 		c->u.any.arg1 = arg1;
2502 		break;
2503 	default:
2504 		c->u.any.arg1 = NULL;
2505 	}
2506 	c->addr = addr;
2507 	c->maxval = maxval;
2508 	c->val_mask = ~0;
2509 	c->desc_mask = ~0;
2510 	c->curval = val;
2511 	c->dirty = 0;
2512 	c->refs_mask = CTL_DEVMASK;
2513 	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2514 		if (s->ops == NULL)
2515 			continue;
2516 		if (ctlslot_visible(s, c))
2517 			c->refs_mask |= 1 << i;
2518 	}
2519 	c->next = *pc;
2520 	*pc = c;
2521 #ifdef DEBUG
2522 	if (log_level >= 2) {
2523 		ctl_log(c);
2524 		log_puts(": added\n");
2525 	}
2526 #endif
2527 	return c;
2528 }
2529 
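/*
 * update the per-slot visibility masks of the given control and
 * flag the description as changed for the affected slots
 */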
2530 void
2531 ctl_update(struct ctl *c)
2532 {
2533 	struct ctlslot *s;
2534 	unsigned int refs_mask;
2535 	int i;
2536 
2537 	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2538 		if (s->ops == NULL)
2539 			continue;
2540 		refs_mask = ctlslot_visible(s, c) ? s->self : 0;
2541 
2542 		/* nothing to do if no visibility change */
2543 		if (((c->refs_mask & s->self) ^ refs_mask) == 0)
2544 			continue;
2545 		/* if control becomes visible */
2546 		if (refs_mask)
2547 			c->refs_mask |= s->self;
2548 		/* visible or not, flag the description as changed */
2549 		c->desc_mask |= s->self;
2550 	}
2551 }
2552 
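/*
 * return 1 if the control matches the given scope and arguments
 * (a NULL arg1 matches any), 0 otherwise
 */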
2553 int
2554 ctl_match(struct ctl *c, int scope, void *arg0, void *arg1)
2555 {
2556 	if (c->type == CTL_NONE || c->scope != scope || c->u.any.arg0 != arg0)
2557 		return 0;
2558 	if (arg0 != NULL && c->u.any.arg0 != arg0)
2559 		return 0;
2560 	switch (scope) {
2561 	case CTL_HW:
2562 		if (arg1 != NULL && c->u.hw.addr != *(unsigned int *)arg1)
2563 			return 0;
2564 		break;
2565 	case CTL_OPT_DEV:
2566 		if (arg1 != NULL && c->u.any.arg1 != arg1)
2567 			return 0;
2568 		break;
2569 	}
2570 	return 1;
2571 }
2572 
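/*
 * return the first control matching the given scope and arguments,
 * or NULL if none exists
 */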
2573 struct ctl *
2574 ctl_find(int scope, void *arg0, void *arg1)
2575 {
2576 	struct ctl *c;
2577 
2578 	for (c = ctl_list; c != NULL; c = c->next) {
2579 		if (ctl_match(c, scope, arg0, arg1))
2580 			return c;
2581 	}
2582 	return NULL;
2583 }
2584 
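/*
 * store the new value of the matching control and flag it for
 * resend to clients; return 0 if no such control exists
 */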
2585 int
2586 ctl_onval(int scope, void *arg0, void *arg1, int val)
2587 {
2588 	struct ctl *c;
2589 
2590 	c = ctl_find(scope, arg0, arg1);
2591 	if (c == NULL)
2592 		return 0;
2593 	c->curval = val;
2594 	c->val_mask = ~0U;
2595 	return 1;
2596 }
2597 
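/*
 * delete all controls matching the given scope and arguments;
 * controls still referenced by a client are only marked as free
 * (CTL_NONE) and freed once unreferenced
 */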
2598 void
2599 ctl_del(int scope, void *arg0, void *arg1)
2600 {
2601 	struct ctl *c, **pc;
2602 
2603 	pc = &ctl_list;
2604 	for (;;) {
2605 		c = *pc;
2606 		if (c == NULL)
2607 			return;
2608 		if (ctl_match(c, scope, arg0, arg1)) {
2609 #ifdef DEBUG
2610 			if (log_level >= 2) {
2611 				ctl_log(c);
2612 				log_puts(": removed\n");
2613 			}
2614 #endif
2615 			c->refs_mask &= ~CTL_DEVMASK;
2616 			if (c->refs_mask == 0) {
2617 				*pc = c->next;
2618 				xfree(c);
2619 				continue;
2620 			}
2621 			c->type = CTL_NONE;
2622 			c->desc_mask = ~0;
2623 		}
2624 		pc = &c->next;
2625 	}
2626 }
2627 
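/*
 * if the hardware exposes its own output level control, disable the
 * software master control (and re-enable it when the hardware control
 * is absent), then notify the control slots attached to this device
 */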
2628 void
2629 dev_ctlsync(struct dev *d)
2630 {
2631 	struct ctl *c;
2632 	struct ctlslot *s;
2633 	int found, i;
2634 
2635 	found = 0;
2636 	for (c = ctl_list; c != NULL; c = c->next) {
2637 		if (c->scope == CTL_HW &&
2638 		    c->u.hw.dev == d &&
2639 		    c->type == CTL_NUM &&
2640 		    strcmp(c->group, d->name) == 0 &&
2641 		    strcmp(c->node0.name, "output") == 0 &&
2642 		    strcmp(c->func, "level") == 0)
2643 			found = 1;
2644 	}
2645 
2646 	if (d->master_enabled && found) {
2647 		if (log_level >= 2) {
2648 			dev_log(d);
2649 			log_puts(": software master level control disabled\n");
2650 		}
2651 		d->master_enabled = 0;
2652 		ctl_del(CTL_DEV_MASTER, d, NULL);
2653 	} else if (!d->master_enabled && !found) {
2654 		if (log_level >= 2) {
2655 			dev_log(d);
2656 			log_puts(": software master level control enabled\n");
2657 		}
2658 		d->master_enabled = 1;
2659 		ctl_new(CTL_DEV_MASTER, d, NULL,
2660 		    CTL_NUM, d->name, "output", -1, "level",
2661 		    NULL, -1, 127, d->master);
2662 	}
2663 
2664 	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2665 		if (s->ops == NULL)
2666 			continue;
2667 		if (s->opt->dev == d)
2668 			s->ops->sync(s->arg);
2669 	}
2670 }
2671