xref: /openbsd/usr.bin/sndiod/dev.c (revision 3bef86f7)
1 /*	$OpenBSD: dev.c,v 1.107 2023/12/09 22:12:03 ratchov Exp $	*/
2 /*
3  * Copyright (c) 2008-2012 Alexandre Ratchov <alex@caoua.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include <stdio.h>
18 #include <string.h>
19 
20 #include "abuf.h"
21 #include "defs.h"
22 #include "dev.h"
23 #include "dsp.h"
24 #include "siofile.h"
25 #include "midi.h"
26 #include "opt.h"
27 #include "sysex.h"
28 #include "utils.h"
29 
30 void zomb_onmove(void *);
31 void zomb_onvol(void *);
32 void zomb_fill(void *);
33 void zomb_flush(void *);
34 void zomb_eof(void *);
35 void zomb_exit(void *);
36 
37 void dev_mix_badd(struct dev *, struct slot *);
38 void dev_mix_adjvol(struct dev *);
39 void dev_sub_bcopy(struct dev *, struct slot *);
40 
41 void dev_onmove(struct dev *, int);
42 void dev_master(struct dev *, unsigned int);
43 void dev_cycle(struct dev *);
44 struct dev *dev_new(char *, struct aparams *, unsigned int, unsigned int,
45     unsigned int, unsigned int, unsigned int, unsigned int);
46 void dev_adjpar(struct dev *, int, int, int);
47 int dev_allocbufs(struct dev *);
48 void dev_freebufs(struct dev *);
49 int dev_ref(struct dev *);
50 void dev_unref(struct dev *);
51 int dev_init(struct dev *);
52 void dev_done(struct dev *);
53 struct dev *dev_bynum(int);
54 void dev_del(struct dev *);
55 unsigned int dev_roundof(struct dev *, unsigned int);
56 void dev_wakeup(struct dev *);
57 
58 void slot_ctlname(struct slot *, char *, size_t);
59 void slot_log(struct slot *);
60 void slot_del(struct slot *);
61 void slot_setvol(struct slot *, unsigned int);
62 void slot_ready(struct slot *);
63 void slot_allocbufs(struct slot *);
64 void slot_freebufs(struct slot *);
65 void slot_skip_update(struct slot *);
66 void slot_write(struct slot *);
67 void slot_read(struct slot *);
68 int slot_skip(struct slot *);
69 
70 void ctl_node_log(struct ctl_node *);
71 void ctl_log(struct ctl *);
72 
73 struct slotops zomb_slotops = {
74 	zomb_onmove,
75 	zomb_onvol,
76 	zomb_fill,
77 	zomb_flush,
78 	zomb_eof,
79 	zomb_exit
80 };
81 
82 struct ctl *ctl_list = NULL;
83 struct dev *dev_list = NULL;
84 unsigned int dev_sndnum = 0;
85 
86 struct ctlslot ctlslot_array[DEV_NCTLSLOT];
87 struct slot slot_array[DEV_NSLOT];
88 unsigned int slot_serial;		/* for slot allocation */
89 
90 /*
91  * we support/need a single MTC clock source only
92  */
93 struct mtc mtc_array[1] = {
94 	{.dev = NULL, .tstate = MTC_STOP}
95 };
96 
97 void
98 slot_array_init(void)
99 {
100 	unsigned int i;
101 
102 	for (i = 0; i < DEV_NSLOT; i++) {
103 		slot_array[i].unit = i;
104 		slot_array[i].ops = NULL;
105 		slot_array[i].vol = MIDI_MAXCTL;
106 		slot_array[i].opt = NULL;
107 		slot_array[i].serial = slot_serial++;
108 		memset(slot_array[i].name, 0, SLOT_NAMEMAX);
109 	}
110 }
111 
112 void
113 dev_log(struct dev *d)
114 {
115 #ifdef DEBUG
116 	static char *pstates[] = {
117 		"cfg", "ini", "run"
118 	};
119 #endif
120 	log_puts("snd");
121 	log_putu(d->num);
122 #ifdef DEBUG
123 	if (log_level >= 3) {
124 		log_puts(" pst=");
125 		log_puts(pstates[d->pstate]);
126 	}
127 #endif
128 }
129 
130 void
131 slot_ctlname(struct slot *s, char *name, size_t size)
132 {
133 	snprintf(name, size, "%s%u", s->name, s->unit);
134 }
135 
136 void
137 slot_log(struct slot *s)
138 {
139 	char name[CTL_NAMEMAX];
140 #ifdef DEBUG
141 	static char *pstates[] = {
142 		"ini", "sta", "rdy", "run", "stp", "mid"
143 	};
144 #endif
145 	slot_ctlname(s, name, CTL_NAMEMAX);
146 	log_puts(name);
147 #ifdef DEBUG
148 	if (log_level >= 3) {
149 		log_puts(" vol=");
150 		log_putu(s->vol);
151 		if (s->ops) {
152 			log_puts(",pst=");
153 			log_puts(pstates[s->pstate]);
154 		}
155 	}
156 #endif
157 }
158 
159 void
160 zomb_onmove(void *arg)
161 {
162 }
163 
164 void
165 zomb_onvol(void *arg)
166 {
167 }
168 
169 void
170 zomb_fill(void *arg)
171 {
172 }
173 
174 void
175 zomb_flush(void *arg)
176 {
177 }
178 
179 void
180 zomb_eof(void *arg)
181 {
182 	struct slot *s = arg;
183 
184 #ifdef DEBUG
185 	if (log_level >= 3) {
186 		slot_log(s);
187 		log_puts(": zomb_eof\n");
188 	}
189 #endif
190 	s->ops = NULL;
191 }
192 
193 void
194 zomb_exit(void *arg)
195 {
196 #ifdef DEBUG
197 	struct slot *s = arg;
198 
199 	if (log_level >= 3) {
200 		slot_log(s);
201 		log_puts(": zomb_exit\n");
202 	}
203 #endif
204 }
205 
206 /*
207  * Broadcast MIDI data to all opts using this device
208  */
209 void
210 dev_midi_send(struct dev *d, void *msg, int msglen)
211 {
212 	struct opt *o;
213 
214 	for (o = opt_list; o != NULL; o = o->next) {
215 		if (o->dev != d)
216 			continue;
217 		midi_send(o->midi, msg, msglen);
218 	}
219 }
220 
221 /*
222  * send a quarter frame MTC message
223  */
224 void
225 mtc_midi_qfr(struct mtc *mtc, int delta)
226 {
227 	unsigned char buf[2];
228 	unsigned int data;
229 	int qfrlen;
230 
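	/*
	 * mtc->delta counts time in units of 1/(rate * MTC_SEC) second:
	 * each elapsed device frame adds MTC_SEC units, and one quarter
	 * frame lasts rate * MTC_SEC / (4 * fps) units.  For example, at
	 * 48000Hz and 25 fps a quarter frame is 10ms, i.e. 480 frames.
	 */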
231 	mtc->delta += delta * MTC_SEC;
232 	qfrlen = mtc->dev->rate * (MTC_SEC / (4 * mtc->fps));
233 	while (mtc->delta >= qfrlen) {
234 		switch (mtc->qfr) {
235 		case 0:
236 			data = mtc->fr & 0xf;
237 			break;
238 		case 1:
239 			data = mtc->fr >> 4;
240 			break;
241 		case 2:
242 			data = mtc->sec & 0xf;
243 			break;
244 		case 3:
245 			data = mtc->sec >> 4;
246 			break;
247 		case 4:
248 			data = mtc->min & 0xf;
249 			break;
250 		case 5:
251 			data = mtc->min >> 4;
252 			break;
253 		case 6:
254 			data = mtc->hr & 0xf;
255 			break;
256 		case 7:
257 			data = (mtc->hr >> 4) | (mtc->fps_id << 1);
258 			/*
259 			 * tick messages are sent 2 frames ahead
260 			 */
261 			mtc->fr += 2;
262 			if (mtc->fr < mtc->fps)
263 				break;
264 			mtc->fr -= mtc->fps;
265 			mtc->sec++;
266 			if (mtc->sec < 60)
267 				break;
268 			mtc->sec = 0;
269 			mtc->min++;
270 			if (mtc->min < 60)
271 				break;
272 			mtc->min = 0;
273 			mtc->hr++;
274 			if (mtc->hr < 24)
275 				break;
276 			mtc->hr = 0;
277 			break;
278 		default:
279 			/* NOTREACHED */
280 			data = 0;
281 		}
282 		buf[0] = 0xf1;
283 		buf[1] = (mtc->qfr << 4) | data;
284 		mtc->qfr++;
285 		mtc->qfr &= 7;
286 		dev_midi_send(mtc->dev, buf, 2);
287 		mtc->delta -= qfrlen;
288 	}
289 }
290 
291 /*
292  * send a full frame MTC message
293  */
294 void
295 mtc_midi_full(struct mtc *mtc)
296 {
297 	struct sysex x;
298 	unsigned int fps;
299 
300 	mtc->delta = -MTC_SEC * (int)mtc->dev->bufsz;
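	/*
	 * pick the highest frame rate (30, 25 or 24 fps) such that a
	 * quarter frame corresponds to a whole number of audio blocks,
	 * i.e. such that rate / (4 * fps) is a multiple of the block size
	 */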
301 	if (mtc->dev->rate % (30 * 4 * mtc->dev->round) == 0) {
302 		mtc->fps_id = MTC_FPS_30;
303 		mtc->fps = 30;
304 	} else if (mtc->dev->rate % (25 * 4 * mtc->dev->round) == 0) {
305 		mtc->fps_id = MTC_FPS_25;
306 		mtc->fps = 25;
307 	} else {
308 		mtc->fps_id = MTC_FPS_24;
309 		mtc->fps = 24;
310 	}
311 #ifdef DEBUG
312 	if (log_level >= 3) {
313 		dev_log(mtc->dev);
314 		log_puts(": mtc full frame at ");
315 		log_puti(mtc->delta);
316 		log_puts(", ");
317 		log_puti(mtc->fps);
318 		log_puts(" fps\n");
319 	}
320 #endif
321 	fps = mtc->fps;
322 	mtc->hr =  (mtc->origin / (MTC_SEC * 3600)) % 24;
323 	mtc->min = (mtc->origin / (MTC_SEC * 60))   % 60;
324 	mtc->sec = (mtc->origin / (MTC_SEC))        % 60;
325 	mtc->fr =  (mtc->origin / (MTC_SEC / fps))  % fps;
326 
327 	x.start = SYSEX_START;
328 	x.type = SYSEX_TYPE_RT;
329 	x.dev = SYSEX_DEV_ANY;
330 	x.id0 = SYSEX_MTC;
331 	x.id1 = SYSEX_MTC_FULL;
332 	x.u.full.hr = mtc->hr | (mtc->fps_id << 5);
333 	x.u.full.min = mtc->min;
334 	x.u.full.sec = mtc->sec;
335 	x.u.full.fr = mtc->fr;
336 	x.u.full.end = SYSEX_END;
337 	mtc->qfr = 0;
338 	dev_midi_send(mtc->dev, (unsigned char *)&x, SYSEX_SIZE(full));
339 }
340 
341 /*
342  * send a volume change MIDI message
343  */
344 void
345 dev_midi_vol(struct dev *d, struct slot *s)
346 {
347 	unsigned char msg[3];
348 
349 	msg[0] = MIDI_CTL | (s - slot_array);
350 	msg[1] = MIDI_CTL_VOL;
351 	msg[2] = s->vol;
352 	dev_midi_send(d, msg, 3);
353 }
354 
355 /*
356  * send a master volume MIDI message
357  */
358 void
359 dev_midi_master(struct dev *d)
360 {
361 	struct ctl *c;
362 	unsigned int master, v;
363 	struct sysex x;
364 
365 	if (d->master_enabled)
366 		master = d->master;
367 	else {
368 		master = 0;
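		/*
		 * no software master control: report the loudest
		 * output.level control of this device, scaled to
		 * the 0..127 MIDI range
		 */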
369 		for (c = ctl_list; c != NULL; c = c->next) {
370 			if (c->type != CTL_NUM ||
371 			    strcmp(c->group, d->name) != 0 ||
372 			    strcmp(c->node0.name, "output") != 0 ||
373 			    strcmp(c->func, "level") != 0)
374 				continue;
375 			if (c->u.any.arg0 != d)
376 				continue;
377 			v = (c->curval * 127 + c->maxval / 2) / c->maxval;
378 			if (master < v)
379 				master = v;
380 		}
381 	}
382 
383 	memset(&x, 0, sizeof(struct sysex));
384 	x.start = SYSEX_START;
385 	x.type = SYSEX_TYPE_RT;
386 	x.dev = SYSEX_DEV_ANY;
387 	x.id0 = SYSEX_CONTROL;
388 	x.id1 = SYSEX_MASTER;
389 	x.u.master.fine = 0;
390 	x.u.master.coarse = master;
391 	x.u.master.end = SYSEX_END;
392 	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(master));
393 }
394 
395 /*
396  * send a sndiod-specific slot description MIDI message
397  */
398 void
399 dev_midi_slotdesc(struct dev *d, struct slot *s)
400 {
401 	struct sysex x;
402 
403 	memset(&x, 0, sizeof(struct sysex));
404 	x.start = SYSEX_START;
405 	x.type = SYSEX_TYPE_EDU;
406 	x.dev = SYSEX_DEV_ANY;
407 	x.id0 = SYSEX_AUCAT;
408 	x.id1 = SYSEX_AUCAT_SLOTDESC;
409 	if (s->opt != NULL && s->opt->dev == d)
410 		slot_ctlname(s, (char *)x.u.slotdesc.name, SYSEX_NAMELEN);
411 	x.u.slotdesc.chan = (s - slot_array);
412 	x.u.slotdesc.end = SYSEX_END;
413 	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(slotdesc));
414 }
415 
416 void
417 dev_midi_dump(struct dev *d)
418 {
419 	struct sysex x;
420 	struct slot *s;
421 	int i;
422 
423 	dev_midi_master(d);
424 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
425 		if (s->opt != NULL && s->opt->dev != d)
426 			continue;
427 		dev_midi_slotdesc(d, s);
428 		dev_midi_vol(d, s);
429 	}
430 	x.start = SYSEX_START;
431 	x.type = SYSEX_TYPE_EDU;
432 	x.dev = SYSEX_DEV_ANY;
433 	x.id0 = SYSEX_AUCAT;
434 	x.id1 = SYSEX_AUCAT_DUMPEND;
435 	x.u.dumpend.end = SYSEX_END;
436 	dev_midi_send(d, (unsigned char *)&x, SYSEX_SIZE(dumpend));
437 }
438 
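/*
 * Handle the XRUN_SYNC policy: for every cycle to skip, drop one
 * pending play block and/or append one silent record block, so the
 * slot's clock catches up with the device clock.  Return the number
 * of cycles that could be skipped (limited by the data and space
 * available in the buffers).
 */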
439 int
440 slot_skip(struct slot *s)
441 {
442 	unsigned char *data = (unsigned char *)0xdeadbeef; /* please gcc */
443 	int max, count;
444 
445 	max = s->skip;
446 	while (s->skip > 0) {
447 		if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
448 			data = abuf_wgetblk(&s->sub.buf, &count);
449 			if (count < s->round * s->sub.bpf)
450 				break;
451 		}
452 		if (s->mode & MODE_PLAY) {
453 			if (s->mix.buf.used < s->round * s->mix.bpf)
454 				break;
455 		}
456 #ifdef DEBUG
457 		if (log_level >= 4) {
458 			slot_log(s);
459 			log_puts(": skipped a cycle\n");
460 		}
461 #endif
462 		if (s->pstate != SLOT_STOP && (s->mode & MODE_RECMASK)) {
463 			if (s->sub.encbuf)
464 				enc_sil_do(&s->sub.enc, data, s->round);
465 			else
466 				memset(data, 0, s->round * s->sub.bpf);
467 			abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
468 		}
469 		if (s->mode & MODE_PLAY) {
470 			abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
471 		}
472 		s->skip--;
473 	}
474 	return max - s->skip;
475 }
476 
477 /*
478  * Mix the slot input block over the output block
479  */
480 void
481 dev_mix_badd(struct dev *d, struct slot *s)
482 {
483 	adata_t *idata, *odata, *in;
484 	int icount, i, offs, vol, nch;
485 
486 	odata = DEV_PBUF(d);
487 	idata = (adata_t *)abuf_rgetblk(&s->mix.buf, &icount);
488 #ifdef DEBUG
489 	if (icount < s->round * s->mix.bpf) {
490 		slot_log(s);
491 		log_puts(": not enough data to mix (");
492 		log_putu(icount);
493 		log_puts("bytes)\n");
494 		panic();
495 	}
496 #endif
497 	if (!(s->opt->mode & MODE_PLAY)) {
498 		/*
499 		 * playback not allowed in opt structure, produce silence
500 		 */
501 		abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
502 		return;
503 	}
504 
505 
506 	/*
507 	 * Apply the following processing chain:
508 	 *
509 	 *	dec -> resamp-> cmap
510 	 *
511 	 * where the first two are optional.
512 	 */
513 
514 	in = idata;
515 
516 	if (s->mix.decbuf) {
517 		dec_do(&s->mix.dec, (void *)in, s->mix.decbuf, s->round);
518 		in = s->mix.decbuf;
519 	}
520 
521 	if (s->mix.resampbuf) {
522 		resamp_do(&s->mix.resamp, in, s->mix.resampbuf, s->round);
523 		in = s->mix.resampbuf;
524 	}
525 
526 	nch = s->mix.cmap.nch;
527 	vol = ADATA_MUL(s->mix.weight, s->mix.vol) / s->mix.join;
528 	cmap_add(&s->mix.cmap, in, odata, vol, d->round);
529 
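	/*
	 * "join" mixes the extra input channels into the same output
	 * channels (the volume was divided by s->mix.join above so the
	 * sum can't clip), while "expand" duplicates the input into
	 * additional output channels
	 */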
530 	offs = 0;
531 	for (i = s->mix.join - 1; i > 0; i--) {
532 		offs += nch;
533 		cmap_add(&s->mix.cmap, in + offs, odata, vol, d->round);
534 	}
535 
536 	offs = 0;
537 	for (i = s->mix.expand - 1; i > 0; i--) {
538 		offs += nch;
539 		cmap_add(&s->mix.cmap, in, odata + offs, vol, d->round);
540 	}
541 
542 	abuf_rdiscard(&s->mix.buf, s->round * s->mix.bpf);
543 }
544 
545 /*
546  * Normalize input levels.
547  */
548 void
549 dev_mix_adjvol(struct dev *d)
550 {
551 	unsigned int n;
552 	struct slot *i, *j;
553 	int jcmax, icmax, weight;
554 
555 	for (i = d->slot_list; i != NULL; i = i->next) {
556 		if (!(i->mode & MODE_PLAY))
557 			continue;
558 		icmax = i->opt->pmin + i->mix.nch - 1;
559 		weight = ADATA_UNIT;
560 		if (d->autovol) {
561 			/*
562 			 * count the number of inputs that have
563 			 * overlapping channel sets
564 			 */
565 			n = 0;
566 			for (j = d->slot_list; j != NULL; j = j->next) {
567 				if (!(j->mode & MODE_PLAY))
568 					continue;
569 				jcmax = j->opt->pmin + j->mix.nch - 1;
570 				if (i->opt->pmin <= jcmax &&
571 				    icmax >= j->opt->pmin)
572 					n++;
573 			}
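			/*
			 * give each of the n overlapping inputs an equal
			 * share of the full scale, so their sum can't clip
			 */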
574 			weight /= n;
575 		}
576 		if (weight > i->opt->maxweight)
577 			weight = i->opt->maxweight;
578 		i->mix.weight = d->master_enabled ?
579 		    ADATA_MUL(weight, MIDI_TO_ADATA(d->master)) : weight;
580 #ifdef DEBUG
581 		if (log_level >= 3) {
582 			slot_log(i);
583 			log_puts(": set weight: ");
584 			log_puti(i->mix.weight);
585 			log_puts("/");
586 			log_puti(i->opt->maxweight);
587 			log_puts("\n");
588 		}
589 #endif
590 	}
591 }
592 
593 /*
594  * Copy data from slot to device
595  */
596 void
597 dev_sub_bcopy(struct dev *d, struct slot *s)
598 {
599 	adata_t *idata, *enc_out, *resamp_out, *cmap_out;
600 	void *odata;
601 	int ocount, moffs;
602 
603 	int i, vol, offs, nch;
604 
605 
606 	odata = (adata_t *)abuf_wgetblk(&s->sub.buf, &ocount);
607 #ifdef DEBUG
608 	if (ocount < s->round * s->sub.bpf) {
609 		log_puts("dev_sub_bcopy: not enough space\n");
610 		panic();
611 	}
612 #endif
613 	if (s->opt->mode & MODE_MON) {
614 		moffs = d->poffs + d->round;
615 		if (moffs == d->psize)
616 			moffs = 0;
617 		idata = d->pbuf + moffs * d->pchan;
618 	} else if (s->opt->mode & MODE_REC) {
619 		idata = d->rbuf;
620 	} else {
621 		/*
622 		 * recording not allowed in opt structure, produce silence
623 		 */
624 		enc_sil_do(&s->sub.enc, odata, s->round);
625 		abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
626 		return;
627 	}
628 
629 	/*
630 	 * Apply the following processing chain:
631 	 *
632 	 *	cmap -> resamp -> enc
633 	 *
634 	 * where the last two are optional.
635 	 */
636 
637 	enc_out = odata;
638 	resamp_out = s->sub.encbuf ? s->sub.encbuf : enc_out;
639 	cmap_out = s->sub.resampbuf ? s->sub.resampbuf : resamp_out;
640 
641 	nch = s->sub.cmap.nch;
642 	vol = ADATA_UNIT / s->sub.join;
643 	cmap_copy(&s->sub.cmap, idata, cmap_out, vol, d->round);
644 
645 	offs = 0;
646 	for (i = s->sub.join - 1; i > 0; i--) {
647 		offs += nch;
648 		cmap_add(&s->sub.cmap, idata + offs, cmap_out, vol, d->round);
649 	}
650 
651 	offs = 0;
652 	for (i = s->sub.expand - 1; i > 0; i--) {
653 		offs += nch;
654 		cmap_copy(&s->sub.cmap, idata, cmap_out + offs, vol, d->round);
655 	}
656 
657 	if (s->sub.resampbuf) {
658 		resamp_do(&s->sub.resamp,
659 		    s->sub.resampbuf, resamp_out, d->round);
660 	}
661 
662 	if (s->sub.encbuf)
663 		enc_do(&s->sub.enc, s->sub.encbuf, (void *)enc_out, s->round);
664 
665 	abuf_wcommit(&s->sub.buf, s->round * s->sub.bpf);
666 }
667 
668 /*
669  * run a one block cycle: consume one recorded block from
670  * rbuf and produce one play block in pbuf
671  */
672 void
673 dev_cycle(struct dev *d)
674 {
675 	struct slot *s, **ps;
676 	unsigned char *base;
677 	int nsamp;
678 
679 	/*
680 	 * check if the device is actually used. If it isn't,
681 	 * then close it
682 	 */
683 	if (d->slot_list == NULL && d->idle >= d->bufsz &&
684 	    (mtc_array[0].dev != d || mtc_array[0].tstate != MTC_RUN)) {
685 		if (log_level >= 2) {
686 			dev_log(d);
687 			log_puts(": device stopped\n");
688 		}
689 		dev_sio_stop(d);
690 		d->pstate = DEV_INIT;
691 		if (d->refcnt == 0)
692 			dev_close(d);
693 		return;
694 	}
695 
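	/*
	 * while the play buffer is being primed (the first bufsz frames
	 * after start), output silence only and don't run the slots
	 */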
696 	if (d->prime > 0) {
697 #ifdef DEBUG
698 		if (log_level >= 4) {
699 			dev_log(d);
700 			log_puts(": empty cycle, prime = ");
701 			log_putu(d->prime);
702 			log_puts("\n");
703 		}
704 #endif
705 		base = (unsigned char *)DEV_PBUF(d);
706 		nsamp = d->round * d->pchan;
707 		memset(base, 0, nsamp * sizeof(adata_t));
708 		if (d->encbuf) {
709 			enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
710 			    d->encbuf, d->round);
711 		}
712 		d->prime -= d->round;
713 		return;
714 	}
715 
716 	d->delta -= d->round;
717 #ifdef DEBUG
718 	if (log_level >= 4) {
719 		dev_log(d);
720 		log_puts(": full cycle: delta = ");
721 		log_puti(d->delta);
722 		if (d->mode & MODE_PLAY) {
723 			log_puts(", poffs = ");
724 			log_puti(d->poffs);
725 		}
726 		log_puts("\n");
727 	}
728 #endif
729 	if (d->mode & MODE_PLAY) {
730 		base = (unsigned char *)DEV_PBUF(d);
731 		nsamp = d->round * d->pchan;
732 		memset(base, 0, nsamp * sizeof(adata_t));
733 	}
734 	if ((d->mode & MODE_REC) && d->decbuf)
735 		dec_do(&d->dec, d->decbuf, (unsigned char *)d->rbuf, d->round);
736 	ps = &d->slot_list;
737 	while ((s = *ps) != NULL) {
738 #ifdef DEBUG
739 		if (log_level >= 4) {
740 			slot_log(s);
741 			log_puts(": running");
742 			log_puts(", skip = ");
743 			log_puti(s->skip);
744 			log_puts("\n");
745 		}
746 #endif
747 		d->idle = 0;
748 
749 		/*
750 		 * skip cycles for XRUN_SYNC correction
751 		 */
752 		slot_skip(s);
753 		if (s->skip < 0) {
754 			s->skip++;
755 			ps = &s->next;
756 			continue;
757 		}
758 
759 #ifdef DEBUG
760 		if (s->pstate == SLOT_STOP && !(s->mode & MODE_PLAY)) {
761 			slot_log(s);
762 			log_puts(": rec-only slots can't be drained\n");
763 			panic();
764 		}
765 #endif
766 		/*
767 		 * check if stopped stream finished draining
768 		 */
769 		if (s->pstate == SLOT_STOP &&
770 		    s->mix.buf.used < s->round * s->mix.bpf) {
771 			/*
772 			 * partial blocks are zero-filled by socket
773 			 * layer, so s->mix.buf.used == 0 and we can
774 			 * destroy the buffer
775 			 */
776 			*ps = s->next;
777 			s->pstate = SLOT_INIT;
778 			s->ops->eof(s->arg);
779 			slot_freebufs(s);
780 			dev_mix_adjvol(d);
781 #ifdef DEBUG
782 			if (log_level >= 3) {
783 				slot_log(s);
784 				log_puts(": drained\n");
785 			}
786 #endif
787 			continue;
788 		}
789 
790 		/*
791 		 * check for xruns
792 		 */
793 		if (((s->mode & MODE_PLAY) &&
794 			s->mix.buf.used < s->round * s->mix.bpf) ||
795 		    ((s->mode & MODE_RECMASK) &&
796 			s->sub.buf.len - s->sub.buf.used <
797 			s->round * s->sub.bpf)) {
798 
799 #ifdef DEBUG
800 			if (log_level >= 3) {
801 				slot_log(s);
802 				log_puts(": xrun, pause cycle\n");
803 			}
804 #endif
805 			if (s->xrun == XRUN_IGNORE) {
806 				s->delta -= s->round;
807 				ps = &s->next;
808 			} else if (s->xrun == XRUN_SYNC) {
809 				s->skip++;
810 				ps = &s->next;
811 			} else if (s->xrun == XRUN_ERROR) {
812 				s->ops->exit(s->arg);
813 				*ps = s->next;
814 			} else {
815 #ifdef DEBUG
816 				slot_log(s);
817 				log_puts(": bad xrun mode\n");
818 				panic();
819 #endif
820 			}
821 			continue;
822 		}
823 		if ((s->mode & MODE_RECMASK) && !(s->pstate == SLOT_STOP)) {
824 			if (s->sub.prime == 0) {
825 				dev_sub_bcopy(d, s);
826 				s->ops->flush(s->arg);
827 			} else {
828 #ifdef DEBUG
829 				if (log_level >= 3) {
830 					slot_log(s);
831 					log_puts(": prime = ");
832 					log_puti(s->sub.prime);
833 					log_puts("\n");
834 				}
835 #endif
836 				s->sub.prime--;
837 			}
838 		}
839 		if (s->mode & MODE_PLAY) {
840 			dev_mix_badd(d, s);
841 			if (s->pstate != SLOT_STOP)
842 				s->ops->fill(s->arg);
843 		}
844 		ps = &s->next;
845 	}
846 	if ((d->mode & MODE_PLAY) && d->encbuf) {
847 		enc_do(&d->enc, (unsigned char *)DEV_PBUF(d),
848 		    d->encbuf, d->round);
849 	}
850 }
851 
852 /*
853  * called at every clock tick by the device
854  */
855 void
856 dev_onmove(struct dev *d, int delta)
857 {
858 	long long pos;
859 	struct slot *s, *snext;
860 
861 	d->delta += delta;
862 
863 	if (d->slot_list == NULL)
864 		d->idle += delta;
865 
866 	for (s = d->slot_list; s != NULL; s = snext) {
867 		/*
868 		 * s->ops->onmove() may remove the slot
869 		 */
870 		snext = s->next;
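		/*
		 * advance the slot clock: the slot moves s->round frames
		 * for every d->round device frames; the sub-frame
		 * remainder is kept in delta_rem so rounding errors
		 * don't accumulate
		 */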
871 		pos = s->delta_rem +
872 		    (long long)s->delta * d->round +
873 		    (long long)delta * s->round;
874 		s->delta = pos / (int)d->round;
875 		s->delta_rem = pos % d->round;
876 		if (s->delta_rem < 0) {
877 			s->delta_rem += d->round;
878 			s->delta--;
879 		}
880 		if (s->delta >= 0)
881 			s->ops->onmove(s->arg);
882 	}
883 
884 	if (mtc_array[0].dev == d && mtc_array[0].tstate == MTC_RUN)
885 		mtc_midi_qfr(&mtc_array[0], delta);
886 }
887 
888 void
889 dev_master(struct dev *d, unsigned int master)
890 {
891 	struct ctl *c;
892 	unsigned int v;
893 
894 	if (log_level >= 2) {
895 		dev_log(d);
896 		log_puts(": master volume set to ");
897 		log_putu(master);
898 		log_puts("\n");
899 	}
900 	if (d->master_enabled) {
901 		d->master = master;
902 		if (d->mode & MODE_PLAY)
903 			dev_mix_adjvol(d);
904 	} else {
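		/*
		 * no software master control: set the hardware
		 * output.level controls of this device instead,
		 * scaling the 0..127 MIDI volume to each control's range
		 */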
905 		for (c = ctl_list; c != NULL; c = c->next) {
906 			if (c->scope != CTL_HW || c->u.hw.dev != d)
907 				continue;
908 			if (c->type != CTL_NUM ||
909 			    strcmp(c->group, d->name) != 0 ||
910 			    strcmp(c->node0.name, "output") != 0 ||
911 			    strcmp(c->func, "level") != 0)
912 				continue;
913 			v = (master * c->maxval + 64) / 127;
914 			ctl_setval(c, v);
915 		}
916 	}
917 }
918 
919 /*
920  * Create a sndio device
921  */
922 struct dev *
923 dev_new(char *path, struct aparams *par,
924     unsigned int mode, unsigned int bufsz, unsigned int round,
925     unsigned int rate, unsigned int hold, unsigned int autovol)
926 {
927 	struct dev *d, **pd;
928 
929 	if (dev_sndnum == DEV_NMAX) {
930 		if (log_level >= 1)
931 			log_puts("too many devices\n");
932 		return NULL;
933 	}
934 	d = xmalloc(sizeof(struct dev));
935 	d->path = path;
936 	d->num = dev_sndnum++;
937 
938 	d->reqpar = *par;
939 	d->reqmode = mode;
940 	d->reqpchan = d->reqrchan = 0;
941 	d->reqbufsz = bufsz;
942 	d->reqround = round;
943 	d->reqrate = rate;
944 	d->hold = hold;
945 	d->autovol = autovol;
946 	d->refcnt = 0;
947 	d->pstate = DEV_CFG;
948 	d->slot_list = NULL;
949 	d->master = MIDI_MAXCTL;
950 	d->master_enabled = 0;
951 	d->alt_next = d;
952 	snprintf(d->name, CTL_NAMEMAX, "%u", d->num);
953 	for (pd = &dev_list; *pd != NULL; pd = &(*pd)->next)
954 		;
955 	d->next = *pd;
956 	*pd = d;
957 	return d;
958 }
959 
960 /*
961  * adjust device parameters and mode
962  */
963 void
964 dev_adjpar(struct dev *d, int mode,
965     int pmax, int rmax)
966 {
967 	d->reqmode |= mode & MODE_AUDIOMASK;
968 	if (mode & MODE_PLAY) {
969 		if (d->reqpchan < pmax + 1)
970 			d->reqpchan = pmax + 1;
971 	}
972 	if (mode & MODE_REC) {
973 		if (d->reqrchan < rmax + 1)
974 			d->reqrchan = rmax + 1;
975 	}
976 }
977 
978 /*
979  * Open the device with the dev_reqxxx capabilities. Setup a mixer, demuxer,
980  * monitor, midi control, and any necessary conversions.
981  *
982  * Note that record and play buffers are always allocated, even if the
983  * underlying device doesn't support both modes.
984  */
985 int
986 dev_allocbufs(struct dev *d)
987 {
988 	/*
989 	 * Create record buffer.
990 	 */
991 
992 	 /* Create device <-> demuxer buffer */
993 	d->rbuf = xmalloc(d->round * d->rchan * sizeof(adata_t));
994 
995 	/* Insert a converter, if needed. */
996 	if (!aparams_native(&d->par)) {
997 		dec_init(&d->dec, &d->par, d->rchan);
998 		d->decbuf = xmalloc(d->round * d->rchan * d->par.bps);
999 	} else
1000 		d->decbuf = NULL;
1001 
1002 	/*
1003 	 * Create play buffer
1004 	 */
1005 
1006 	/* Create device <-> mixer buffer */
1007 	d->poffs = 0;
1008 	d->psize = d->bufsz + d->round;
1009 	d->pbuf = xmalloc(d->psize * d->pchan * sizeof(adata_t));
1010 	d->mode |= MODE_MON;
1011 
1012 	/* Append a converter, if needed. */
1013 	if (!aparams_native(&d->par)) {
1014 		enc_init(&d->enc, &d->par, d->pchan);
1015 		d->encbuf = xmalloc(d->round * d->pchan * d->par.bps);
1016 	} else
1017 		d->encbuf = NULL;
1018 
1019 	/*
1020 	 * Initially fill the record buffer with zeroed samples. This ensures
1021 	 * that when a client records from a play-only device the client just
1022 	 * gets silence.
1023 	 */
1024 	memset(d->rbuf, 0, d->round * d->rchan * sizeof(adata_t));
1025 
1026 	if (log_level >= 2) {
1027 		dev_log(d);
1028 		log_puts(": ");
1029 		log_putu(d->rate);
1030 		log_puts("Hz, ");
1031 		aparams_log(&d->par);
1032 		if (d->mode & MODE_PLAY) {
1033 			log_puts(", play 0:");
1034 			log_puti(d->pchan - 1);
1035 		}
1036 		if (d->mode & MODE_REC) {
1037 			log_puts(", rec 0:");
1038 			log_puti(d->rchan - 1);
1039 		}
1040 		log_puts(", ");
1041 		log_putu(d->bufsz / d->round);
1042 		log_puts(" blocks of ");
1043 		log_putu(d->round);
1044 		log_puts(" frames");
1045 		if (d == mtc_array[0].dev)
1046 			log_puts(", mtc");
1047 		log_puts("\n");
1048 	}
1049 	return 1;
1050 }
1051 
1052 /*
1053  * Reset parameters and open the device.
1054  */
1055 int
1056 dev_open(struct dev *d)
1057 {
1058 	d->mode = d->reqmode;
1059 	d->round = d->reqround;
1060 	d->bufsz = d->reqbufsz;
1061 	d->rate = d->reqrate;
1062 	d->pchan = d->reqpchan;
1063 	d->rchan = d->reqrchan;
1064 	d->par = d->reqpar;
1065 	if (d->pchan == 0)
1066 		d->pchan = 2;
1067 	if (d->rchan == 0)
1068 		d->rchan = 2;
1069 	if (!dev_sio_open(d)) {
1070 		if (log_level >= 1) {
1071 			dev_log(d);
1072 			log_puts(": failed to open audio device\n");
1073 		}
1074 		return 0;
1075 	}
1076 	if (!dev_allocbufs(d))
1077 		return 0;
1078 
1079 	d->pstate = DEV_INIT;
1080 	return 1;
1081 }
1082 
1083 /*
1084  * Force all slots to exit and close device, called after an error
1085  */
1086 void
1087 dev_abort(struct dev *d)
1088 {
1089 	int i;
1090 	struct slot *s;
1091 	struct ctlslot *c;
1092 	struct opt *o;
1093 
1094 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1095 		if (s->opt == NULL || s->opt->dev != d)
1096 			continue;
1097 		if (s->ops) {
1098 			s->ops->exit(s->arg);
1099 			s->ops = NULL;
1100 		}
1101 	}
1102 	d->slot_list = NULL;
1103 
1104 	for (o = opt_list; o != NULL; o = o->next) {
1105 		if (o->dev != d)
1106 			continue;
1107 		for (c = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, c++) {
1108 			if (c->ops == NULL)
1109 				continue;
1110 			if (c->opt == o) {
1111 				c->ops->exit(c->arg);
1112 				c->ops = NULL;
1113 			}
1114 		}
1115 
1116 		midi_abort(o->midi);
1117 	}
1118 
1119 	if (d->pstate != DEV_CFG)
1120 		dev_close(d);
1121 }
1122 
1123 /*
1124  * free the device play and record buffers; the caller is supposed to
1125  * ensure they are drained first
1126  */
1127 void
1128 dev_freebufs(struct dev *d)
1129 {
1130 #ifdef DEBUG
1131 	if (log_level >= 3) {
1132 		dev_log(d);
1133 		log_puts(": closing\n");
1134 	}
1135 #endif
1136 	if (d->mode & MODE_PLAY) {
1137 		if (d->encbuf != NULL)
1138 			xfree(d->encbuf);
1139 		xfree(d->pbuf);
1140 	}
1141 	if (d->mode & MODE_REC) {
1142 		if (d->decbuf != NULL)
1143 			xfree(d->decbuf);
1144 		xfree(d->rbuf);
1145 	}
1146 }
1147 
1148 /*
1149  * Close the device and exit all slots
1150  */
1151 void
1152 dev_close(struct dev *d)
1153 {
1154 	d->pstate = DEV_CFG;
1155 	dev_sio_close(d);
1156 	dev_freebufs(d);
1157 
1158 	if (d->master_enabled) {
1159 		d->master_enabled = 0;
1160 		ctl_del(CTL_DEV_MASTER, d, NULL);
1161 	}
1162 }
1163 
1164 int
1165 dev_ref(struct dev *d)
1166 {
1167 #ifdef DEBUG
1168 	if (log_level >= 3) {
1169 		dev_log(d);
1170 		log_puts(": device requested\n");
1171 	}
1172 #endif
1173 	if (d->pstate == DEV_CFG && !dev_open(d))
1174 		return 0;
1175 	d->refcnt++;
1176 	return 1;
1177 }
1178 
1179 void
1180 dev_unref(struct dev *d)
1181 {
1182 #ifdef DEBUG
1183 	if (log_level >= 3) {
1184 		dev_log(d);
1185 		log_puts(": device released\n");
1186 	}
1187 #endif
1188 	d->refcnt--;
1189 	if (d->refcnt == 0 && d->pstate == DEV_INIT)
1190 		dev_close(d);
1191 }
1192 
1193 /*
1194  * initialize the device with the current parameters
1195  */
1196 int
1197 dev_init(struct dev *d)
1198 {
1199 	if ((d->reqmode & MODE_AUDIOMASK) == 0) {
1200 #ifdef DEBUG
1201 		dev_log(d);
1202 		log_puts(": has no streams\n");
1203 #endif
1204 		return 0;
1205 	}
1206 	if (d->hold && !dev_ref(d))
1207 		return 0;
1208 	return 1;
1209 }
1210 
1211 /*
1212  * Unless the device is already in process of closing, request it to close
1213  */
1214 void
1215 dev_done(struct dev *d)
1216 {
1217 #ifdef DEBUG
1218 	if (log_level >= 3) {
1219 		dev_log(d);
1220 		log_puts(": draining\n");
1221 	}
1222 #endif
1223 	if (mtc_array[0].dev == d && mtc_array[0].tstate != MTC_STOP)
1224 		mtc_stop(&mtc_array[0]);
1225 	if (d->hold)
1226 		dev_unref(d);
1227 }
1228 
1229 struct dev *
1230 dev_bynum(int num)
1231 {
1232 	struct dev *d;
1233 
1234 	for (d = dev_list; d != NULL; d = d->next) {
1235 		if (d->num == num)
1236 			return d;
1237 	}
1238 	return NULL;
1239 }
1240 
1241 /*
1242  * Free the device
1243  */
1244 void
1245 dev_del(struct dev *d)
1246 {
1247 	struct dev **p;
1248 
1249 #ifdef DEBUG
1250 	if (log_level >= 3) {
1251 		dev_log(d);
1252 		log_puts(": deleting\n");
1253 	}
1254 #endif
1255 	if (d->pstate != DEV_CFG)
1256 		dev_close(d);
1257 	for (p = &dev_list; *p != d; p = &(*p)->next) {
1258 #ifdef DEBUG
1259 		if (*p == NULL) {
1260 			dev_log(d);
1261 			log_puts(": device to delete not on the list\n");
1262 			panic();
1263 		}
1264 #endif
1265 	}
1266 	*p = d->next;
1267 	xfree(d);
1268 }
1269 
1270 unsigned int
1271 dev_roundof(struct dev *d, unsigned int newrate)
1272 {
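	/*
	 * scale the block size to the new rate, rounding to the nearest
	 * integer number of frames; e.g. a 480-frame block at 48000Hz
	 * maps to 441 frames at 44100Hz
	 */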
1273 	return (d->round * newrate + d->rate / 2) / d->rate;
1274 }
1275 
1276 /*
1277  * If the device is paused, then resume it.
1278  */
1279 void
1280 dev_wakeup(struct dev *d)
1281 {
1282 	if (d->pstate == DEV_INIT) {
1283 		if (log_level >= 2) {
1284 			dev_log(d);
1285 			log_puts(": device started\n");
1286 		}
1287 		if (d->mode & MODE_PLAY) {
1288 			d->prime = d->bufsz;
1289 		} else {
1290 			d->prime = 0;
1291 		}
1292 		d->idle = 0;
1293 		d->poffs = 0;
1294 
1295 		/*
1296 		 * empty cycles don't increment delta, so it's ok to
1297 		 * start at 0
1298 		 **/
1299 		d->delta = 0;
1300 
1301 		d->pstate = DEV_RUN;
1302 		dev_sio_start(d);
1303 	}
1304 }
1305 
1306 /*
1307  * Return true if both of the given devices can run the same
1308  * clients
1309  */
1310 int
1311 dev_iscompat(struct dev *o, struct dev *n)
1312 {
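	/*
	 * the devices are compatible if their block and buffer sizes
	 * correspond to the same durations, i.e. if round / rate and
	 * bufsz / rate match on both devices
	 */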
1313 	if (((long long)o->round * n->rate != (long long)n->round * o->rate) ||
1314 	    ((long long)o->bufsz * n->rate != (long long)n->bufsz * o->rate)) {
1315 		if (log_level >= 1) {
1316 			log_puts(n->name);
1317 			log_puts(": not compatible with ");
1318 			log_puts(o->name);
1319 			log_puts("\n");
1320 		}
1321 		return 0;
1322 	}
1323 	return 1;
1324 }
1325 
1326 /*
1327  * Close the device, but attempt to migrate everything to a new sndio
1328  * device.
1329  */
1330 struct dev *
1331 dev_migrate(struct dev *odev)
1332 {
1333 	struct dev *ndev;
1334 	struct opt *o;
1335 	struct slot *s;
1336 	int i;
1337 
1338 	/* not opened */
1339 	if (odev->pstate == DEV_CFG)
1340 		return odev;
1341 
1342 	ndev = odev;
1343 	while (1) {
1344 		/* try next one, circulating through the list */
1345 		ndev = ndev->alt_next;
1346 		if (ndev == odev) {
1347 			if (log_level >= 1) {
1348 				dev_log(odev);
1349 				log_puts(": no fall-back device found\n");
1350 			}
1351 			return NULL;
1352 		}
1353 
1354 
1355 		if (!dev_ref(ndev))
1356 			continue;
1357 
1358 		/* check if new parameters are compatible with old ones */
1359 		if (!dev_iscompat(odev, ndev)) {
1360 			dev_unref(ndev);
1361 			continue;
1362 		}
1363 
1364 		/* found it!*/
1365 		break;
1366 	}
1367 
1368 	if (log_level >= 1) {
1369 		dev_log(odev);
1370 		log_puts(": switching to ");
1371 		dev_log(ndev);
1372 		log_puts("\n");
1373 	}
1374 
1375 	if (mtc_array[0].dev == odev)
1376 		mtc_setdev(&mtc_array[0], ndev);
1377 
1378 	/* move opts to new device (also moves clients using the opts) */
1379 	for (o = opt_list; o != NULL; o = o->next) {
1380 		if (o->dev != odev)
1381 			continue;
1382 		if (strcmp(o->name, o->dev->name) == 0)
1383 			continue;
1384 		opt_setdev(o, ndev);
1385 	}
1386 
1387 	/* terminate remaining clients */
1388 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1389 		if (s->opt == NULL || s->opt->dev != odev)
1390 			continue;
1391 		if (s->ops != NULL) {
1392 			s->ops->exit(s->arg);
1393 			s->ops = NULL;
1394 		}
1395 	}
1396 
1397 	/* slots and/or MMC hold refs, drop ours */
1398 	dev_unref(ndev);
1399 
1400 	return ndev;
1401 }
1402 
1403 /*
1404  * check that all clients controlled by MMC are ready to start, if so,
1405  * attach them all at the same position
1406  */
1407 void
1408 mtc_trigger(struct mtc *mtc)
1409 {
1410 	int i;
1411 	struct slot *s;
1412 
1413 	if (mtc->tstate != MTC_START) {
1414 		if (log_level >= 2) {
1415 			dev_log(mtc->dev);
1416 			log_puts(": not started by mmc yet, waiting...\n");
1417 		}
1418 		return;
1419 	}
1420 
1421 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1422 		if (s->opt == NULL || s->opt->mtc != mtc)
1423 			continue;
1424 		if (s->pstate != SLOT_READY) {
1425 #ifdef DEBUG
1426 			if (log_level >= 3) {
1427 				slot_log(s);
1428 				log_puts(": not ready, start delayed\n");
1429 			}
1430 #endif
1431 			return;
1432 		}
1433 	}
1434 	if (!dev_ref(mtc->dev))
1435 		return;
1436 
1437 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1438 		if (s->opt == NULL || s->opt->mtc != mtc)
1439 			continue;
1440 		slot_attach(s);
1441 		s->pstate = SLOT_RUN;
1442 	}
1443 	mtc->tstate = MTC_RUN;
1444 	mtc_midi_full(mtc);
1445 	dev_wakeup(mtc->dev);
1446 }
1447 
1448 /*
1449  * start all slots simultaneously
1450  */
1451 void
1452 mtc_start(struct mtc *mtc)
1453 {
1454 	if (mtc->tstate == MTC_STOP) {
1455 		mtc->tstate = MTC_START;
1456 		mtc_trigger(mtc);
1457 #ifdef DEBUG
1458 	} else {
1459 		if (log_level >= 3) {
1460 			dev_log(mtc->dev);
1461 			log_puts(": ignoring mmc start\n");
1462 		}
1463 #endif
1464 	}
1465 }
1466 
1467 /*
1468  * stop all slots simultaneously
1469  */
1470 void
1471 mtc_stop(struct mtc *mtc)
1472 {
1473 	switch (mtc->tstate) {
1474 	case MTC_START:
1475 		mtc->tstate = MTC_STOP;
1476 		return;
1477 	case MTC_RUN:
1478 		mtc->tstate = MTC_STOP;
1479 		dev_unref(mtc->dev);
1480 		break;
1481 	default:
1482 #ifdef DEBUG
1483 		if (log_level >= 3) {
1484 			dev_log(mtc->dev);
1485 			log_puts(": ignored mmc stop\n");
1486 		}
1487 #endif
1488 		return;
1489 	}
1490 }
1491 
1492 /*
1493  * relocate all slots simultaneously
1494  */
1495 void
1496 mtc_loc(struct mtc *mtc, unsigned int origin)
1497 {
1498 	if (log_level >= 2) {
1499 		dev_log(mtc->dev);
1500 		log_puts(": relocated to ");
1501 		log_putu(origin);
1502 		log_puts("\n");
1503 	}
1504 	if (mtc->tstate == MTC_RUN)
1505 		mtc_stop(mtc);
1506 	mtc->origin = origin;
1507 	if (mtc->tstate == MTC_RUN)
1508 		mtc_start(mtc);
1509 }
1510 
1511 /*
1512  * set MMC device
1513  */
1514 void
1515 mtc_setdev(struct mtc *mtc, struct dev *d)
1516 {
1517 	struct opt *o;
1518 
1519 	if (mtc->dev == d)
1520 		return;
1521 
1522 	if (log_level >= 2) {
1523 		dev_log(d);
1524 		log_puts(": set to be MIDI clock source\n");
1525 	}
1526 
1527 	/* adjust clock and ref counter, if needed */
1528 	if (mtc->tstate == MTC_RUN) {
1529 		mtc->delta -= mtc->dev->delta;
1530 		dev_unref(mtc->dev);
1531 	}
1532 
1533 	mtc->dev = d;
1534 
1535 	if (mtc->tstate == MTC_RUN) {
1536 		mtc->delta += mtc->dev->delta;
1537 		dev_ref(mtc->dev);
1538 		dev_wakeup(mtc->dev);
1539 	}
1540 
1541 	/* move to the new device anything using MMC */
1542 	for (o = opt_list; o != NULL; o = o->next) {
1543 		if (o->mtc == mtc)
1544 			opt_setdev(o, mtc->dev);
1545 	}
1546 }
1547 
1548 /*
1549  * allocate buffers & conversion chain
1550  */
1551 void
1552 slot_initconv(struct slot *s)
1553 {
1554 	unsigned int dev_nch;
1555 	struct dev *d = s->opt->dev;
1556 
1557 	if (s->mode & MODE_PLAY) {
1558 		cmap_init(&s->mix.cmap,
1559 		    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1560 		    s->opt->pmin, s->opt->pmin + s->mix.nch - 1,
1561 		    0, d->pchan - 1,
1562 		    s->opt->pmin, s->opt->pmax);
1563 		s->mix.decbuf = NULL;
1564 		s->mix.resampbuf = NULL;
1565 		if (!aparams_native(&s->par)) {
1566 			dec_init(&s->mix.dec, &s->par, s->mix.nch);
1567 			s->mix.decbuf =
1568 			    xmalloc(s->round * s->mix.nch * sizeof(adata_t));
1569 		}
1570 		if (s->rate != d->rate) {
1571 			resamp_init(&s->mix.resamp, s->round, d->round,
1572 			    s->mix.nch);
1573 			s->mix.resampbuf =
1574 			    xmalloc(d->round * s->mix.nch * sizeof(adata_t));
1575 		}
1576 		s->mix.join = 1;
1577 		s->mix.expand = 1;
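		/*
		 * with channel duplication, compare the number of device
		 * channels actually covered by the sub-device range with
		 * the client's channel count to determine how many times
		 * to duplicate (expand) or mix down (join)
		 */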
1578 		if (s->opt->dup && s->mix.cmap.nch > 0) {
1579 			dev_nch = d->pchan < (s->opt->pmax + 1) ?
1580 			    d->pchan - s->opt->pmin :
1581 			    s->opt->pmax - s->opt->pmin + 1;
1582 			if (dev_nch > s->mix.nch)
1583 				s->mix.expand = dev_nch / s->mix.nch;
1584 			else if (s->mix.nch > dev_nch)
1585 				s->mix.join = s->mix.nch / dev_nch;
1586 		}
1587 	}
1588 
1589 	if (s->mode & MODE_RECMASK) {
1590 		unsigned int outchan = (s->opt->mode & MODE_MON) ?
1591 		    d->pchan : d->rchan;
1592 
1593 		s->sub.encbuf = NULL;
1594 		s->sub.resampbuf = NULL;
1595 		cmap_init(&s->sub.cmap,
1596 		    0, outchan - 1,
1597 		    s->opt->rmin, s->opt->rmax,
1598 		    s->opt->rmin, s->opt->rmin + s->sub.nch - 1,
1599 		    s->opt->rmin, s->opt->rmin + s->sub.nch - 1);
1600 		if (s->rate != d->rate) {
1601 			resamp_init(&s->sub.resamp, d->round, s->round,
1602 			    s->sub.nch);
1603 			s->sub.resampbuf =
1604 			    xmalloc(d->round * s->sub.nch * sizeof(adata_t));
1605 		}
1606 		if (!aparams_native(&s->par)) {
1607 			enc_init(&s->sub.enc, &s->par, s->sub.nch);
1608 			s->sub.encbuf =
1609 			    xmalloc(s->round * s->sub.nch * sizeof(adata_t));
1610 		}
1611 		s->sub.join = 1;
1612 		s->sub.expand = 1;
1613 		if (s->opt->dup && s->sub.cmap.nch > 0) {
1614 			dev_nch = outchan < (s->opt->rmax + 1) ?
1615 			    outchan - s->opt->rmin :
1616 			    s->opt->rmax - s->opt->rmin + 1;
1617 			if (dev_nch > s->sub.nch)
1618 				s->sub.join = dev_nch / s->sub.nch;
1619 			else if (s->sub.nch > dev_nch)
1620 				s->sub.expand = s->sub.nch / dev_nch;
1621 		}
1622 
1623 		/*
1624 		 * cmap_copy() doesn't write samples in all channels,
1625 	         * for instance when mono->stereo conversion is
1626 	         * disabled. So we have to prefill cmap_copy() output
1627 	         * with silence.
1628 	         */
1629 		if (s->sub.resampbuf) {
1630 			memset(s->sub.resampbuf, 0,
1631 			    d->round * s->sub.nch * sizeof(adata_t));
1632 		} else if (s->sub.encbuf) {
1633 			memset(s->sub.encbuf, 0,
1634 			    s->round * s->sub.nch * sizeof(adata_t));
1635 		} else {
1636 			memset(s->sub.buf.data, 0,
1637 			    s->appbufsz * s->sub.nch * sizeof(adata_t));
1638 		}
1639 	}
1640 }
1641 
1642 /*
1643  * allocate buffers & conversion chain
1644  */
1645 void
1646 slot_allocbufs(struct slot *s)
1647 {
1648 	if (s->mode & MODE_PLAY) {
1649 		s->mix.bpf = s->par.bps * s->mix.nch;
1650 		abuf_init(&s->mix.buf, s->appbufsz * s->mix.bpf);
1651 	}
1652 
1653 	if (s->mode & MODE_RECMASK) {
1654 		s->sub.bpf = s->par.bps * s->sub.nch;
1655 		abuf_init(&s->sub.buf, s->appbufsz * s->sub.bpf);
1656 	}
1657 
1658 #ifdef DEBUG
1659 	if (log_level >= 3) {
1660 		slot_log(s);
1661 		log_puts(": allocated ");
1662 		log_putu(s->appbufsz);
1663 		log_puts("/");
1664 		log_putu(SLOT_BUFSZ(s));
1665 		log_puts(" fr buffers\n");
1666 	}
1667 #endif
1668 }
1669 
1670 /*
1671  * free buffers & conversion chain
1672  */
1673 void
1674 slot_freebufs(struct slot *s)
1675 {
1676 	if (s->mode & MODE_RECMASK) {
1677 		abuf_done(&s->sub.buf);
1678 	}
1679 
1680 	if (s->mode & MODE_PLAY) {
1681 		abuf_done(&s->mix.buf);
1682 	}
1683 }
1684 
1685 /*
1686  * allocate a new slot and register the given call-backs
1687  */
1688 struct slot *
1689 slot_new(struct opt *opt, unsigned int id, char *who,
1690     struct slotops *ops, void *arg, int mode)
1691 {
1692 	char *p;
1693 	char name[SLOT_NAMEMAX];
1694 	char ctl_name[CTL_NAMEMAX];
1695 	unsigned int i, ser, bestser, bestidx;
1696 	struct slot *unit[DEV_NSLOT];
1697 	struct slot *s;
1698 
1699 	/*
1700 	 * create a ``valid'' control name (lowercase, remove [^a-z], truncate)
1701 	 */
1702 	for (i = 0, p = who; ; p++) {
1703 		if (i == SLOT_NAMEMAX - 1 || *p == '\0') {
1704 			name[i] = '\0';
1705 			break;
1706 		} else if (*p >= 'A' && *p <= 'Z') {
1707 			name[i++] = *p + 'a' - 'A';
1708 		} else if (*p >= 'a' && *p <= 'z')
1709 			name[i++] = *p;
1710 	}
1711 	if (i == 0)
1712 		strlcpy(name, "noname", SLOT_NAMEMAX);
1713 
1714 	/*
1715 	 * build a unit-to-slot map for this name
1716 	 */
1717 	for (i = 0; i < DEV_NSLOT; i++)
1718 		unit[i] = NULL;
1719 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1720 		if (strcmp(s->name, name) == 0)
1721 			unit[s->unit] = s;
1722 	}
1723 
1724 	/*
1725 	 * find the free slot with the least unit number and same id
1726 	 */
1727 	for (i = 0; i < DEV_NSLOT; i++) {
1728 		s = unit[i];
1729 		if (s != NULL && s->ops == NULL && s->id == id)
1730 			goto found;
1731 	}
1732 
1733 	/*
1734 	 * find the free slot with the least unit number
1735 	 */
1736 	for (i = 0; i < DEV_NSLOT; i++) {
1737 		s = unit[i];
1738 		if (s != NULL && s->ops == NULL) {
1739 			s->id = id;
1740 			goto found;
1741 		}
1742 	}
1743 
1744 	/*
1745 	 * couldn't find a matching slot, pick oldest free slot
1746 	 * and set its name/unit
1747 	 */
1748 	bestser = 0;
1749 	bestidx = DEV_NSLOT;
1750 	for (i = 0, s = slot_array; i < DEV_NSLOT; i++, s++) {
1751 		if (s->ops != NULL)
1752 			continue;
1753 		ser = slot_serial - s->serial;
1754 		if (ser > bestser) {
1755 			bestser = ser;
1756 			bestidx = i;
1757 		}
1758 	}
1759 
1760 	if (bestidx == DEV_NSLOT) {
1761 		if (log_level >= 1) {
1762 			log_puts(name);
1763 			log_puts(": out of sub-device slots\n");
1764 		}
1765 		return NULL;
1766 	}
1767 
1768 	s = slot_array + bestidx;
1769 	ctl_del(CTL_SLOT_LEVEL, s, NULL);
1770 	s->vol = MIDI_MAXCTL;
1771 	strlcpy(s->name, name, SLOT_NAMEMAX);
1772 	s->serial = slot_serial++;
1773 	for (i = 0; unit[i] != NULL; i++)
1774 		; /* nothing */
1775 	s->unit = i;
1776 	s->id = id;
1777 	s->opt = opt;
1778 	slot_ctlname(s, ctl_name, CTL_NAMEMAX);
1779 	ctl_new(CTL_SLOT_LEVEL, s, NULL,
1780 	    CTL_NUM, "app", ctl_name, -1, "level",
1781 	    NULL, -1, 127, s->vol);
1782 
1783 found:
1784 	/* open device, this may change opt's device */
1785 	if (!opt_ref(opt))
1786 		return NULL;
1787 	s->opt = opt;
1788 	s->ops = ops;
1789 	s->arg = arg;
1790 	s->pstate = SLOT_INIT;
1791 	s->mode = mode;
1792 	aparams_init(&s->par);
1793 	if (s->mode & MODE_PLAY)
1794 		s->mix.nch = s->opt->pmax - s->opt->pmin + 1;
1795 	if (s->mode & MODE_RECMASK)
1796 		s->sub.nch = s->opt->rmax - s->opt->rmin + 1;
1797 	s->xrun = s->opt->mtc != NULL ? XRUN_SYNC : XRUN_IGNORE;
1798 	s->appbufsz = s->opt->dev->bufsz;
1799 	s->round = s->opt->dev->round;
1800 	s->rate = s->opt->dev->rate;
1801 	dev_midi_slotdesc(s->opt->dev, s);
1802 	dev_midi_vol(s->opt->dev, s);
1803 #ifdef DEBUG
1804 	if (log_level >= 3) {
1805 		slot_log(s);
1806 		log_puts(": using ");
1807 		log_puts(s->opt->name);
1808 		log_puts(", mode = ");
1809 		log_putx(mode);
1810 		log_puts("\n");
1811 	}
1812 #endif
1813 	return s;
1814 }
1815 
1816 /*
1817  * release the given slot
1818  */
1819 void
1820 slot_del(struct slot *s)
1821 {
1822 	s->arg = s;
1823 	s->ops = &zomb_slotops;
1824 	switch (s->pstate) {
1825 	case SLOT_INIT:
1826 		s->ops = NULL;
1827 		break;
1828 	case SLOT_START:
1829 	case SLOT_READY:
1830 	case SLOT_RUN:
1831 	case SLOT_STOP:
1832 		slot_stop(s, 0);
1833 		break;
1834 	}
1835 	opt_unref(s->opt);
1836 }
1837 
1838 /*
1839  * change the slot play volume; called either by the slot or by MIDI
1840  */
1841 void
1842 slot_setvol(struct slot *s, unsigned int vol)
1843 {
1844 #ifdef DEBUG
1845 	if (log_level >= 3) {
1846 		slot_log(s);
1847 		log_puts(": setting volume ");
1848 		log_putu(vol);
1849 		log_puts("\n");
1850 	}
1851 #endif
1852 	s->vol = vol;
1853 	s->mix.vol = MIDI_TO_ADATA(s->vol);
1854 }
1855 
1856 /*
1857  * set device for this slot
1858  */
1859 void
1860 slot_setopt(struct slot *s, struct opt *o)
1861 {
1862 	struct opt *t;
1863 	struct dev *odev, *ndev;
1864 	struct ctl *c;
1865 
1866 	if (s->opt == NULL || s->opt == o)
1867 		return;
1868 
1869 	if (log_level >= 2) {
1870 		slot_log(s);
1871 		log_puts(": moving to opt ");
1872 		log_puts(o->name);
1873 		log_puts("\n");
1874 	}
1875 
1876 	odev = s->opt->dev;
1877 	if (s->ops != NULL) {
1878 		ndev = opt_ref(o);
1879 		if (ndev == NULL)
1880 			return;
1881 
1882 		if (!dev_iscompat(odev, ndev)) {
1883 			opt_unref(o);
1884 			return;
1885 		}
1886 	}
1887 
1888 	if (s->pstate == SLOT_RUN || s->pstate == SLOT_STOP)
1889 		slot_detach(s);
1890 
1891 	t = s->opt;
1892 	s->opt = o;
1893 
1894 	c = ctl_find(CTL_SLOT_LEVEL, s, NULL);
1895 	ctl_update(c);
1896 
1897 	if (o->dev != t->dev) {
1898 		dev_midi_slotdesc(odev, s);
1899 		dev_midi_slotdesc(o->dev, s);
1900 		dev_midi_vol(o->dev, s);
1901 	}
1902 
1903 	if (s->pstate == SLOT_RUN || s->pstate == SLOT_STOP)
1904 		slot_attach(s);
1905 
1906 	if (s->ops != NULL) {
1907 		opt_unref(t);
1908 		return;
1909 	}
1910 }
1911 
1912 /*
1913  * attach the slot to the device (i.e. start playing & recording)
1914  */
1915 void
1916 slot_attach(struct slot *s)
1917 {
1918 	struct dev *d = s->opt->dev;
1919 	long long pos;
1920 
1921 	if (((s->mode & MODE_PLAY) && !(s->opt->mode & MODE_PLAY)) ||
1922 	    ((s->mode & MODE_RECMASK) && !(s->opt->mode & MODE_RECMASK))) {
1923 		if (log_level >= 1) {
1924 			slot_log(s);
1925 			log_puts(" at ");
1926 			log_puts(s->opt->name);
1927 			log_puts(": mode not allowed on this sub-device\n");
1928 		}
1929 	}
1930 
1931 	/*
1932 	 * setup conversions layer
1933 	 */
1934 	slot_initconv(s);
1935 
1936 	/*
1937 	 * start the device if not started
1938 	 */
1939 	dev_wakeup(d);
1940 
1941 	/*
1942 	 * adjust initial clock
1943 	 */
1944 	pos = s->delta_rem +
1945 	    (long long)s->delta * d->round +
1946 	    (long long)d->delta * s->round;
1947 	s->delta = pos / (int)d->round;
1948 	s->delta_rem = pos % d->round;
1949 	if (s->delta_rem < 0) {
1950 		s->delta_rem += d->round;
1951 		s->delta--;
1952 	}
1953 
1954 #ifdef DEBUG
1955 	if (log_level >= 2) {
1956 		slot_log(s);
1957 		log_puts(": attached at ");
1958 		log_puti(s->delta);
1959 		log_puts(" + ");
1960 		log_puti(s->delta_rem);
1961 		log_puts("/");
1962 		log_puti(s->round);
1963 		log_puts("\n");
1964 	}
1965 #endif
1966 
1967 	/*
1968 	 * We don't check whether the device is dying,
1969 	 * because dev_xxx() functions are supposed to
1970 	 * work (i.e., not to crash)
1971 	 */
1972 
1973 	s->next = d->slot_list;
1974 	d->slot_list = s;
1975 	if (s->mode & MODE_PLAY) {
1976 		s->mix.vol = MIDI_TO_ADATA(s->vol);
1977 		dev_mix_adjvol(d);
1978 	}
1979 }
1980 
1981 /*
1982  * if MMC is enabled, try to attach all slots synchronously, else
1983  * simply attach the slot
1984  */
1985 void
1986 slot_ready(struct slot *s)
1987 {
1988 	/*
1989 	 * device may be disconnected, and if so we're called from
1990 	 * slot->ops->exit() on a closed device
1991 	 */
1992 	if (s->opt->dev->pstate == DEV_CFG)
1993 		return;
1994 	if (s->opt->mtc == NULL) {
1995 		slot_attach(s);
1996 		s->pstate = SLOT_RUN;
1997 	} else
1998 		mtc_trigger(s->opt->mtc);
1999 }
2000 
2001 /*
2002  * setup buffers & conversion layers, prepare the slot to receive data
2003  * (for playback) or start (recording).
2004  */
2005 void
2006 slot_start(struct slot *s)
2007 {
2008 	struct dev *d = s->opt->dev;
2009 #ifdef DEBUG
2010 	if (s->pstate != SLOT_INIT) {
2011 		slot_log(s);
2012 		log_puts(": slot_start: wrong state\n");
2013 		panic();
2014 	}
2015 	if (s->mode & MODE_PLAY) {
2016 		if (log_level >= 3) {
2017 			slot_log(s);
2018 			log_puts(": playing ");
2019 			aparams_log(&s->par);
2020 			log_puts(" -> ");
2021 			aparams_log(&d->par);
2022 			log_puts("\n");
2023 		}
2024 	}
2025 	if (s->mode & MODE_RECMASK) {
2026 		if (log_level >= 3) {
2027 			slot_log(s);
2028 			log_puts(": recording ");
2029 			aparams_log(&s->par);
2030 			log_puts(" <- ");
2031 			aparams_log(&d->par);
2032 			log_puts("\n");
2033 		}
2034 	}
2035 #endif
2036 	slot_allocbufs(s);
2037 
2038 	if (s->mode & MODE_RECMASK) {
2039 		/*
2040 		 * N-th recorded block is the N-th played block
2041 		 */
2042 		s->sub.prime = d->bufsz / d->round;
2043 	}
2044 	s->skip = 0;
2045 
2046 	/*
2047 	 * get the current position, the origin is when the first sample
2048 	 * played and/or recorded
2049 	 */
2050 	s->delta = -(long long)d->bufsz * s->round / d->round;
2051 	s->delta_rem = 0;
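	/*
	 * the initial position is negative: the first sample reaches
	 * the device output only after the device buffer (bufsz device
	 * frames, here expressed at the slot's rate) has been played
	 */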
2052 
2053 	if (s->mode & MODE_PLAY) {
2054 		s->pstate = SLOT_START;
2055 	} else {
2056 		s->pstate = SLOT_READY;
2057 		slot_ready(s);
2058 	}
2059 }
2060 
2061 /*
2062  * stop playback and recording, and free conversion layers
2063  */
2064 void
2065 slot_detach(struct slot *s)
2066 {
2067 	struct slot **ps;
2068 	struct dev *d = s->opt->dev;
2069 	long long pos;
2070 
2071 	for (ps = &d->slot_list; *ps != s; ps = &(*ps)->next) {
2072 #ifdef DEBUG
2073 		if (*ps == NULL) {
2074 			slot_log(s);
2075 			log_puts(": can't detach, not on list\n");
2076 			panic();
2077 		}
2078 #endif
2079 	}
2080 	*ps = s->next;
2081 
2082 	/*
2083 	 * adjust clock, go back d->delta ticks so that slot_attach()
2084 	 * could be called with the resulting state
2085 	 */
2086 	pos = s->delta_rem +
2087 	    (long long)s->delta * d->round -
2088 	    (long long)d->delta * s->round;
2089 	s->delta = pos / (int)d->round;
2090 	s->delta_rem = pos % d->round;
2091 	if (s->delta_rem < 0) {
2092 		s->delta_rem += d->round;
2093 		s->delta--;
2094 	}
2095 
2096 #ifdef DEBUG
2097 	if (log_level >= 2) {
2098 		slot_log(s);
2099 		log_puts(": detached at ");
2100 		log_puti(s->delta);
2101 		log_puts(" + ");
2102 		log_puti(s->delta_rem);
2103 		log_puts("/");
2104 		log_puti(d->round);
2105 		log_puts("\n");
2106 	}
2107 #endif
2108 	if (s->mode & MODE_PLAY)
2109 		dev_mix_adjvol(d);
2110 
2111 	if (s->mode & MODE_RECMASK) {
2112 		if (s->sub.encbuf) {
2113 			xfree(s->sub.encbuf);
2114 			s->sub.encbuf = NULL;
2115 		}
2116 		if (s->sub.resampbuf) {
2117 			xfree(s->sub.resampbuf);
2118 			s->sub.resampbuf = NULL;
2119 		}
2120 	}
2121 
2122 	if (s->mode & MODE_PLAY) {
2123 		if (s->mix.decbuf) {
2124 			xfree(s->mix.decbuf);
2125 			s->mix.decbuf = NULL;
2126 		}
2127 		if (s->mix.resampbuf) {
2128 			xfree(s->mix.resampbuf);
2129 			s->mix.resampbuf = NULL;
2130 		}
2131 	}
2132 }
2133 
2134 /*
2135  * put the slot in stopping state (draining play buffers) or
2136  * stop & detach if no data to drain.
2137  */
2138 void
2139 slot_stop(struct slot *s, int drain)
2140 {
2141 #ifdef DEBUG
2142 	if (log_level >= 3) {
2143 		slot_log(s);
2144 		log_puts(": stopping\n");
2145 	}
2146 #endif
2147 	if (s->pstate == SLOT_START) {
2148 		/*
2149 		 * If in rec-only mode, we're already in the READY or
2150 		 * RUN states. We're here because the play buffer was
2151 		 * not full enough, try to start so it's drained.
2152 		 */
2153 		s->pstate = SLOT_READY;
2154 		slot_ready(s);
2155 	}
2156 
2157 	if (s->pstate == SLOT_RUN) {
2158 		if ((s->mode & MODE_PLAY) && drain) {
2159 			/*
2160 			 * Don't detach, dev_cycle() will do it for us
2161 			 * when the buffer is drained.
2162 			 */
2163 			s->pstate = SLOT_STOP;
2164 			return;
2165 		}
2166 		slot_detach(s);
2167 	} else if (s->pstate == SLOT_STOP) {
2168 		slot_detach(s);
2169 	} else {
2170 #ifdef DEBUG
2171 		if (log_level >= 3) {
2172 			slot_log(s);
2173 			log_puts(": not drained (blocked by mmc)\n");
2174 		}
2175 #endif
2176 	}
2177 
2178 	s->pstate = SLOT_INIT;
2179 	s->ops->eof(s->arg);
2180 	slot_freebufs(s);
2181 }
2182 
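/*
 * call the client's flush/fill call-backs for every cycle that
 * slot_skip() could skip, so the client's view of the stream
 * position stays consistent
 */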
2183 void
2184 slot_skip_update(struct slot *s)
2185 {
2186 	int skip;
2187 
2188 	skip = slot_skip(s);
2189 	while (skip > 0) {
2190 #ifdef DEBUG
2191 		if (log_level >= 4) {
2192 			slot_log(s);
2193 			log_puts(": catching skipped block\n");
2194 		}
2195 #endif
2196 		if (s->mode & MODE_RECMASK)
2197 			s->ops->flush(s->arg);
2198 		if (s->mode & MODE_PLAY)
2199 			s->ops->fill(s->arg);
2200 		skip--;
2201 	}
2202 }
2203 
2204 /*
2205  * notify the slot that we just wrote in the play buffer, must be called
2206  * after each write
2207  */
2208 void
2209 slot_write(struct slot *s)
2210 {
2211 	if (s->pstate == SLOT_START && s->mix.buf.used == s->mix.buf.len) {
2212 #ifdef DEBUG
2213 		if (log_level >= 4) {
2214 			slot_log(s);
2215 			log_puts(": switching to READY state\n");
2216 		}
2217 #endif
2218 		s->pstate = SLOT_READY;
2219 		slot_ready(s);
2220 	}
2221 	slot_skip_update(s);
2222 }
2223 
2224 /*
2225  * notify the slot that we freed some space in the rec buffer
2226  */
2227 void
2228 slot_read(struct slot *s)
2229 {
2230 	slot_skip_update(s);
2231 }
2232 
2233 /*
2234  * allocate a control slot
2235  */
2236 struct ctlslot *
2237 ctlslot_new(struct opt *o, struct ctlops *ops, void *arg)
2238 {
2239 	struct ctlslot *s;
2240 	struct ctl *c;
2241 	int i;
2242 
2243 	i = 0;
2244 	for (;;) {
2245 		if (i == DEV_NCTLSLOT)
2246 			return NULL;
2247 		s = ctlslot_array + i;
2248 		if (s->ops == NULL)
2249 			break;
2250 		i++;
2251 	}
2252 	s->opt = o;
2253 	s->self = 1 << i;
2254 	if (!opt_ref(o))
2255 		return NULL;
2256 	s->ops = ops;
2257 	s->arg = arg;
2258 	for (c = ctl_list; c != NULL; c = c->next) {
2259 		if (!ctlslot_visible(s, c))
2260 			continue;
2261 		c->refs_mask |= s->self;
2262 	}
2263 	return s;
2264 }
2265 
2266 /*
2267  * free control slot
2268  */
2269 void
2270 ctlslot_del(struct ctlslot *s)
2271 {
2272 	struct ctl *c, **pc;
2273 
2274 	pc = &ctl_list;
2275 	while ((c = *pc) != NULL) {
2276 		c->refs_mask &= ~s->self;
2277 		if (c->refs_mask == 0) {
2278 			*pc = c->next;
2279 			xfree(c);
2280 		} else
2281 			pc = &c->next;
2282 	}
2283 	s->ops = NULL;
2284 	opt_unref(s->opt);
2285 }
2286 
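/*
 * return true if the given control is visible from the given control
 * slot, i.e. concerns the device or sub-device the slot is attached to
 */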
2287 int
2288 ctlslot_visible(struct ctlslot *s, struct ctl *c)
2289 {
2290 	if (s->opt == NULL)
2291 		return 1;
2292 	switch (c->scope) {
2293 	case CTL_HW:
2294 	case CTL_DEV_MASTER:
2295 		return (s->opt->dev == c->u.any.arg0);
2296 	case CTL_OPT_DEV:
2297 		return (s->opt == c->u.any.arg0);
2298 	case CTL_SLOT_LEVEL:
2299 		return (s->opt->dev == c->u.slot_level.slot->opt->dev);
2300 	default:
2301 		return 0;
2302 	}
2303 }
2304 
2305 struct ctl *
2306 ctlslot_lookup(struct ctlslot *s, int addr)
2307 {
2308 	struct ctl *c;
2309 
2310 	c = ctl_list;
2311 	while (1) {
2312 		if (c == NULL)
2313 			return NULL;
2314 		if (c->type != CTL_NONE && c->addr == addr)
2315 			break;
2316 		c = c->next;
2317 	}
2318 	if (!ctlslot_visible(s, c))
2319 		return NULL;
2320 	return c;
2321 }
2322 
2323 void
2324 ctlslot_update(struct ctlslot *s)
2325 {
2326 	struct ctl *c;
2327 	unsigned int refs_mask;
2328 
2329 	for (c = ctl_list; c != NULL; c = c->next) {
2330 		if (c->type == CTL_NONE)
2331 			continue;
2332 		refs_mask = ctlslot_visible(s, c) ? s->self : 0;
2333 
2334 		/* nothing to do if no visibility change */
2335 		if (((c->refs_mask & s->self) ^ refs_mask) == 0)
2336 			continue;
2337 		/* if control becomes visible */
2338 		/* if control becomes visible */
2339 		if (refs_mask)
2340 			c->refs_mask |= s->self;
2341 		/* if control is hidden, drop the reference */
		else
			c->refs_mask &= ~s->self;
2342 		c->desc_mask |= s->self;
2343 }
2344 
2345 void
2346 ctl_node_log(struct ctl_node *c)
2347 {
2348 	log_puts(c->name);
2349 	if (c->unit >= 0)
2350 		log_putu(c->unit);
2351 }
2352 
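/*
 * log the given control: group, nodes, function, current value,
 * address and scope
 */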
2353 void
2354 ctl_log(struct ctl *c)
2355 {
2356 	if (c->group[0] != 0) {
2357 		log_puts(c->group);
2358 		log_puts("/");
2359 	}
2360 	ctl_node_log(&c->node0);
2361 	log_puts(".");
2362 	log_puts(c->func);
2363 	log_puts("=");
2364 	switch (c->type) {
2365 	case CTL_NONE:
2366 		log_puts("none");
2367 		break;
2368 	case CTL_NUM:
2369 	case CTL_SW:
2370 		log_putu(c->curval);
2371 		break;
2372 	case CTL_VEC:
2373 	case CTL_LIST:
2374 	case CTL_SEL:
2375 		ctl_node_log(&c->node1);
2376 		log_puts(":");
2377 		log_putu(c->curval);
2378 	}
2379 	log_puts(" at ");
2380 	log_putu(c->addr);
2381 	log_puts(" -> ");
2382 	switch (c->scope) {
2383 	case CTL_HW:
2384 		log_puts("hw:");
2385 		log_puts(c->u.hw.dev->name);
2386 		log_puts("/");
2387 		log_putu(c->u.hw.addr);
2388 		break;
2389 	case CTL_DEV_MASTER:
2390 		log_puts("dev_master:");
2391 		log_puts(c->u.dev_master.dev->name);
2392 		break;
2393 	case CTL_SLOT_LEVEL:
2394 		log_puts("slot_level:");
2395 		log_puts(c->u.slot_level.slot->name);
2396 		log_putu(c->u.slot_level.slot->unit);
2397 		break;
2398 	case CTL_OPT_DEV:
2399 		log_puts("opt_dev:");
2400 		log_puts(c->u.opt_dev.opt->name);
2401 		log_puts("/");
2402 		log_puts(c->u.opt_dev.dev->name);
2403 		break;
2404 	default:
2405 		log_puts("unknown");
2406 	}
2407 }
2408 
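/*
 * set the value of the given control; hardware controls are only
 * marked as dirty, the new value is written to the device later.
 * Return 0 on failure (value out of range or device unusable).
 */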
2409 int
2410 ctl_setval(struct ctl *c, int val)
2411 {
2412 	if (c->curval == val) {
2413 		if (log_level >= 3) {
2414 			ctl_log(c);
2415 			log_puts(": already set\n");
2416 		}
2417 		return 1;
2418 	}
2419 	if (val < 0 || val > c->maxval) {
2420 		if (log_level >= 3) {
2421 			log_putu(val);
2422 			log_puts(": ctl val out of bounds\n");
2423 		}
2424 		return 0;
2425 	}
2426 
2427 	switch (c->scope) {
2428 	case CTL_HW:
2429 		if (log_level >= 3) {
2430 			ctl_log(c);
2431 			log_puts(": marked as dirty\n");
2432 		}
2433 		c->curval = val;
2434 		c->dirty = 1;
2435 		return dev_ref(c->u.hw.dev);
2436 	case CTL_DEV_MASTER:
2437 		if (!c->u.dev_master.dev->master_enabled)
2438 			return 1;
2439 		dev_master(c->u.dev_master.dev, val);
2440 		dev_midi_master(c->u.dev_master.dev);
2441 		c->val_mask = ~0U;
2442 		c->curval = val;
2443 		return 1;
2444 	case CTL_SLOT_LEVEL:
2445 		slot_setvol(c->u.slot_level.slot, val);
2446 		/* XXX change dev_midi_vol() into slot_midi_vol() */
2447 		dev_midi_vol(c->u.slot_level.slot->opt->dev, c->u.slot_level.slot);
2448 		c->val_mask = ~0U;
2449 		c->curval = val;
2450 		return 1;
2451 	case CTL_OPT_DEV:
2452 		c->u.opt_dev.opt->alt_first = c->u.opt_dev.dev;
2453 		opt_setdev(c->u.opt_dev.opt, c->u.opt_dev.dev);
2454 		return 1;
2455 	default:
2456 		if (log_level >= 2) {
2457 			ctl_log(c);
2458 			log_puts(": not writable\n");
2459 		}
2460 		return 1;
2461 	}
2462 }
2463 
2464 /*
2465  * add a ctl
2466  */
2467 struct ctl *
2468 ctl_new(int scope, void *arg0, void *arg1,
2469     int type, char *gstr,
2470     char *str0, int unit0, char *func,
2471     char *str1, int unit1, int maxval, int val)
2472 {
2473 	struct ctl *c, **pc;
2474 	struct ctlslot *s;
2475 	int addr;
2476 	int i;
2477 
2478 	/*
2479 	 * find an unused addr number (one past the largest in use) and
2480 	 * the last position in the list
2481 	 */
2482 	addr = 0;
2483 	for (pc = &ctl_list; (c = *pc) != NULL; pc = &c->next) {
2484 		if (c->addr > addr)
2485 			addr = c->addr;
2486 	}
2487 	addr++;
2488 
2489 	c = xmalloc(sizeof(struct ctl));
2490 	c->type = type;
2491 	strlcpy(c->func, func, CTL_NAMEMAX);
2492 	strlcpy(c->group, gstr, CTL_NAMEMAX);
2493 	strlcpy(c->node0.name, str0, CTL_NAMEMAX);
2494 	c->node0.unit = unit0;
2495 	if (c->type == CTL_VEC || c->type == CTL_LIST || c->type == CTL_SEL) {
2496 		strlcpy(c->node1.name, str1, CTL_NAMEMAX);
2497 		c->node1.unit = unit1;
2498 	} else
2499 		memset(&c->node1, 0, sizeof(struct ctl_node));
2500 	c->scope = scope;
2501 	c->u.any.arg0 = arg0;
2502 	switch (scope) {
2503 	case CTL_HW:
2504 		c->u.hw.addr = *(unsigned int *)arg1;
2505 		break;
2506 	case CTL_OPT_DEV:
2507 		c->u.any.arg1 = arg1;
2508 		break;
2509 	default:
2510 		c->u.any.arg1 = NULL;
2511 	}
2512 	c->addr = addr;
2513 	c->maxval = maxval;
2514 	c->val_mask = ~0;
2515 	c->desc_mask = ~0;
2516 	c->curval = val;
2517 	c->dirty = 0;
2518 	c->refs_mask = CTL_DEVMASK;
2519 	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2520 		if (s->ops == NULL)
2521 			continue;
2522 		if (ctlslot_visible(s, c))
2523 			c->refs_mask |= 1 << i;
2524 	}
2525 	c->next = *pc;
2526 	*pc = c;
2527 #ifdef DEBUG
2528 	if (log_level >= 2) {
2529 		ctl_log(c);
2530 		log_puts(": added\n");
2531 	}
2532 #endif
2533 	return c;
2534 }
2535 
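/*
 * update the refs_mask and desc_mask bits of every active control
 * slot to match the current visibility of the given control
 */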
2536 void
2537 ctl_update(struct ctl *c)
2538 {
2539 	struct ctlslot *s;
2540 	unsigned int refs_mask;
2541 	int i;
2542 
2543 	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2544 		if (s->ops == NULL)
2545 			continue;
2546 		refs_mask = ctlslot_visible(s, c) ? s->self : 0;
2547 
2548 		/* nothing to do if no visibility change */
2549 		if (((c->refs_mask & s->self) ^ refs_mask) == 0)
2550 			continue;
2551 		/* if control becomes visible */
2552 		if (refs_mask)
2553 			c->refs_mask |= s->self;
2554 		/* in either case, mark the description as changed for this client */
2555 		c->desc_mask |= s->self;
2556 	}
2557 }
2558 
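/*
 * return 1 if the given control matches the given scope and
 * arguments; a NULL arg1 matches any value
 */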
2559 int
2560 ctl_match(struct ctl *c, int scope, void *arg0, void *arg1)
2561 {
2562 	if (c->type == CTL_NONE || c->scope != scope || c->u.any.arg0 != arg0)
2563 		return 0;
2564 	if (arg0 != NULL && c->u.any.arg0 != arg0)
2565 		return 0;
2566 	switch (scope) {
2567 	case CTL_HW:
2568 		if (arg1 != NULL && c->u.hw.addr != *(unsigned int *)arg1)
2569 			return 0;
2570 		break;
2571 	case CTL_OPT_DEV:
2572 		if (arg1 != NULL && c->u.any.arg1 != arg1)
2573 			return 0;
2574 		break;
2575 	}
2576 	return 1;
2577 }
2578 
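/*
 * return the first control matching the given scope and arguments,
 * or NULL if none exists
 */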
2579 struct ctl *
2580 ctl_find(int scope, void *arg0, void *arg1)
2581 {
2582 	struct ctl *c;
2583 
2584 	for (c = ctl_list; c != NULL; c = c->next) {
2585 		if (ctl_match(c, scope, arg0, arg1))
2586 			return c;
2587 	}
2588 	return NULL;
2589 }
2590 
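/*
 * set the current value of the control matching the given scope and
 * arguments and mark it so the new value is sent to clients; return 0
 * if no such control exists
 */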
2591 int
2592 ctl_onval(int scope, void *arg0, void *arg1, int val)
2593 {
2594 	struct ctl *c;
2595 
2596 	c = ctl_find(scope, arg0, arg1);
2597 	if (c == NULL)
2598 		return 0;
2599 	c->curval = val;
2600 	c->val_mask = ~0U;
2601 	return 1;
2602 }
2603 
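/*
 * delete all controls matching the given scope and arguments;
 * controls still referenced by clients are kept and marked as
 * CTL_NONE until the last reference is dropped
 */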
2604 void
2605 ctl_del(int scope, void *arg0, void *arg1)
2606 {
2607 	struct ctl *c, **pc;
2608 
2609 	pc = &ctl_list;
2610 	for (;;) {
2611 		c = *pc;
2612 		if (c == NULL)
2613 			return;
2614 		if (ctl_match(c, scope, arg0, arg1)) {
2615 #ifdef DEBUG
2616 			if (log_level >= 2) {
2617 				ctl_log(c);
2618 				log_puts(": removed\n");
2619 			}
2620 #endif
2621 			c->refs_mask &= ~CTL_DEVMASK;
2622 			if (c->refs_mask == 0) {
2623 				*pc = c->next;
2624 				xfree(c);
2625 				continue;
2626 			}
2627 			c->type = CTL_NONE;
2628 			c->desc_mask = ~0;
2629 		}
2630 		pc = &c->next;
2631 	}
2632 }
2633 
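/*
 * if the device exposes its own hardware output level control,
 * disable the software master control (and conversely enable it when
 * no such control is found), then tell the attached control slots to
 * resync
 */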
2634 void
2635 dev_ctlsync(struct dev *d)
2636 {
2637 	struct ctl *c;
2638 	struct ctlslot *s;
2639 	int found, i;
2640 
2641 	found = 0;
2642 	for (c = ctl_list; c != NULL; c = c->next) {
2643 		if (c->scope == CTL_HW &&
2644 		    c->u.hw.dev == d &&
2645 		    c->type == CTL_NUM &&
2646 		    strcmp(c->group, d->name) == 0 &&
2647 		    strcmp(c->node0.name, "output") == 0 &&
2648 		    strcmp(c->func, "level") == 0)
2649 			found = 1;
2650 	}
2651 
2652 	if (d->master_enabled && found) {
2653 		if (log_level >= 2) {
2654 			dev_log(d);
2655 			log_puts(": software master level control disabled\n");
2656 		}
2657 		d->master_enabled = 0;
2658 		ctl_del(CTL_DEV_MASTER, d, NULL);
2659 	} else if (!d->master_enabled && !found) {
2660 		if (log_level >= 2) {
2661 			dev_log(d);
2662 			log_puts(": software master level control enabled\n");
2663 		}
2664 		d->master_enabled = 1;
2665 		ctl_new(CTL_DEV_MASTER, d, NULL,
2666 		    CTL_NUM, d->name, "output", -1, "level",
2667 		    NULL, -1, 127, d->master);
2668 	}
2669 
2670 	for (s = ctlslot_array, i = 0; i < DEV_NCTLSLOT; i++, s++) {
2671 		if (s->ops == NULL)
2672 			continue;
2673 		if (s->opt->dev == d)
2674 			s->ops->sync(s->arg);
2675 	}
2676 }
2677