1 /* $OpenBSD: siofile.c,v 1.28 2024/12/20 07:35:56 ratchov Exp $ */
2 /*
3 * Copyright (c) 2008-2012 Alexandre Ratchov <alex@caoua.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17 #include <sys/time.h>
18 #include <sys/types.h>
19
20 #include <poll.h>
21 #include <sndio.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "abuf.h"
27 #include "defs.h"
28 #include "dev.h"
29 #include "dev_sioctl.h"
30 #include "dsp.h"
31 #include "fdpass.h"
32 #include "file.h"
33 #include "siofile.h"
34 #include "utils.h"
35
36 #define WATCHDOG_USEC 4000000 /* 4 seconds */
37
38 void dev_sio_onmove(void *, int);
39 void dev_sio_timeout(void *);
40 int dev_sio_pollfd(void *, struct pollfd *);
41 int dev_sio_revents(void *, struct pollfd *);
42 void dev_sio_run(void *);
43 void dev_sio_hup(void *);
44
45 extern struct fileops dev_sioctl_ops;
46
/*
 * Callback table registered with file_new() for the audio device
 * descriptor.  Positional initializer: the slots are, in order, the
 * label used in logs, the pollfd/revents handlers, the read and write
 * event handlers (both dispatch into the dev_sio_run() state machine,
 * which decides itself whether to read or write), and the hangup
 * handler.  NOTE(review): slot meaning inferred from the handlers'
 * signatures; confirm against struct fileops in file.h.
 */
struct fileops dev_sio_ops = {
	"sio",
	dev_sio_pollfd,
	dev_sio_revents,
	dev_sio_run,
	dev_sio_run,
	dev_sio_hup
};
55
/*
 * Called by the sndio layer whenever the device clock advanced by
 * "delta" frames; forward the tick to the device logic and, in DEBUG
 * builds, update the cpu-time and buffer-usage statistics.
 */
void
dev_sio_onmove(void *arg, int delta)
{
	struct dev *dev = arg;

#ifdef DEBUG
	logx(4, "%s: tick, delta = %d", dev->path, delta);

	/* account user/wall time elapsed since the previous tick */
	dev->sio.sum_utime += file_utime - dev->sio.utime;
	dev->sio.utime = file_utime;
	dev->sio.sum_wtime += file_wtime - dev->sio.wtime;
	dev->sio.wtime = file_wtime;

	/* the hardware consumed played frames and produced recorded ones */
	if (dev->mode & MODE_REC)
		dev->sio.rused += delta;
	if (dev->mode & MODE_PLAY)
		dev->sio.pused -= delta;
#endif
	dev_onmove(dev, delta);
}
75
76 void
dev_sio_timeout(void * arg)77 dev_sio_timeout(void *arg)
78 {
79 struct dev *d = arg;
80
81 logx(1, "%s: watchdog timeout", d->path);
82 dev_migrate(d);
83 dev_abort(d);
84 }
85
/*
 * open the device.
 *
 * Opens the audio device (falling back from full-duplex to
 * half-duplex if necessary), optionally opens the associated control
 * device, negotiates the audio parameters, and registers the poll
 * callbacks and the watchdog timeout.  Returns 1 on success, 0 on
 * failure.
 */
int
dev_sio_open(struct dev *d)
{
	struct sio_par par;
	unsigned int rate, mode = d->reqmode & (SIO_PLAY | SIO_REC);

	/*
	 * Try the requested mode first; if a full-duplex open fails,
	 * retry play-only then rec-only and warn about the downgrade.
	 */
	d->sio.hdl = fdpass_sio_open(d->num, mode);
	if (d->sio.hdl == NULL) {
		if (mode != (SIO_PLAY | SIO_REC))
			return 0;
		d->sio.hdl = fdpass_sio_open(d->num, SIO_PLAY);
		if (d->sio.hdl != NULL)
			mode = SIO_PLAY;
		else {
			d->sio.hdl = fdpass_sio_open(d->num, SIO_REC);
			if (d->sio.hdl != NULL)
				mode = SIO_REC;
			else
				return 0;
		}
		logx(1, "%s: warning, device opened in %s mode",
		    d->path, mode == SIO_PLAY ? "play-only" : "rec-only");
	}
	d->mode = mode;

	/* the control (mixer) device is optional */
	d->sioctl.hdl = fdpass_sioctl_open(d->num, SIOCTL_READ | SIOCTL_WRITE);
	if (d->sioctl.hdl == NULL)
		logx(1, "%s: no control device", d->path);

	/* negotiate the audio parameters starting from our request */
	sio_initpar(&par);
	par.bits = d->par.bits;
	par.bps = d->par.bps;
	par.sig = d->par.sig;
	par.le = d->par.le;
	par.msb = d->par.msb;
	if (d->mode & SIO_PLAY)
		par.pchan = d->pchan;
	if (d->mode & SIO_REC)
		par.rchan = d->rchan;
	par.appbufsz = d->bufsz;
	par.round = d->round;
	par.rate = d->rate;
	if (!sio_setpar(d->sio.hdl, &par))
		goto bad_close;
	if (!sio_getpar(d->sio.hdl, &par))
		goto bad_close;

	/*
	 * If the requested rate is not supported by the device,
	 * use the new one, but retry using a block size that would
	 * match the requested one
	 */
	rate = par.rate;
	if (rate != d->rate) {
		sio_initpar(&par);
		par.bits = d->par.bits;
		par.bps = d->par.bps;
		par.sig = d->par.sig;
		par.le = d->par.le;
		par.msb = d->par.msb;
		if (mode & SIO_PLAY)
			par.pchan = d->reqpchan;
		if (mode & SIO_REC)
			par.rchan = d->reqrchan;
		/* scale buffer and block sizes to keep the same latency */
		par.appbufsz = d->bufsz * rate / d->rate;
		par.round = d->round * rate / d->rate;
		par.rate = rate;
		if (!sio_setpar(d->sio.hdl, &par))
			goto bad_close;
		if (!sio_getpar(d->sio.hdl, &par))
			goto bad_close;
	}

#ifdef DEBUG
	/*
	 * We support any parameter combination exposed by the kernel,
	 * and we have no other choice than trusting the kernel for
	 * returning correct parameters. But let's check parameters
	 * early and nicely report kernel bugs rather than crashing
	 * later in memset(), malloc() or alike.
	 */

	if (par.bits > BITS_MAX) {
		logx(0, "%s: %u: unsupported number of bits", d->path, par.bits);
		goto bad_close;
	}
	if (par.bps > SIO_BPS(BITS_MAX)) {
		logx(0, "%s: %u: unsupported sample size", d->path, par.bps);
		goto bad_close;
	}
	if ((d->mode & SIO_PLAY) && par.pchan > NCHAN_MAX) {
		logx(0, "%s: %u: unsupported number of play channels", d->path, par.pchan);
		goto bad_close;
	}
	if ((d->mode & SIO_REC) && par.rchan > NCHAN_MAX) {
		logx(0, "%s: %u: unsupported number of rec channels", d->path, par.rchan);
		goto bad_close;
	}
	if (par.bufsz == 0 || par.bufsz > RATE_MAX) {
		logx(0, "%s: %u: unsupported buffer size", d->path, par.bufsz);
		goto bad_close;
	}
	if (par.round == 0 || par.round > par.bufsz ||
	    par.bufsz % par.round != 0) {
		logx(0, "%s: %u: unsupported block size", d->path, par.round);
		goto bad_close;
	}
	if (par.rate == 0 || par.rate > RATE_MAX) {
		logx(0, "%s: %u: unsupported rate", d->path, par.rate);
		goto bad_close;
	}
#endif
	/* commit the parameters the device actually accepted */
	d->par.bits = par.bits;
	d->par.bps = par.bps;
	d->par.sig = par.sig;
	d->par.le = par.le;
	d->par.msb = par.msb;
	if (d->mode & SIO_PLAY)
		d->pchan = par.pchan;
	if (d->mode & SIO_REC)
		d->rchan = par.rchan;
	d->bufsz = par.bufsz;
	d->round = par.round;
	d->rate = par.rate;
	/* playback devices also provide the monitoring stream */
	if (d->mode & MODE_PLAY)
		d->mode |= MODE_MON;
	/* register clock-tick callback and poll entries */
	sio_onmove(d->sio.hdl, dev_sio_onmove, d);
	d->sio.file = file_new(&dev_sio_ops, d, "dev", sio_nfds(d->sio.hdl));
	if (d->sioctl.hdl) {
		d->sioctl.file = file_new(&dev_sioctl_ops, d, "mix",
		    sioctl_nfds(d->sioctl.hdl));
	}
	timo_set(&d->sio.watchdog, dev_sio_timeout, d);
	dev_sioctl_open(d);
	return 1;
bad_close:
	/* undo both opens; the caller only checks the return value */
	sio_close(d->sio.hdl);
	if (d->sioctl.hdl) {
		sioctl_close(d->sioctl.hdl);
		d->sioctl.hdl = NULL;
	}
	return 0;
}
232
/*
 * Close the device and the optional control device, releasing the
 * watchdog and the file-descriptor entries created in dev_sio_open().
 */
void
dev_sio_close(struct dev *d)
{
	dev_sioctl_close(d);
#ifdef DEBUG
	logx(3, "%s: closed", d->path);
#endif
	timo_del(&d->sio.watchdog);
	file_del(d->sio.file);
	sio_close(d->sio.hdl);
	/* the control device may not have been opened, see dev_sio_open() */
	if (d->sioctl.hdl) {
		file_del(d->sioctl.file);
		sioctl_close(d->sioctl.hdl);
		d->sioctl.hdl = NULL;
	}
}
249
250 void
dev_sio_start(struct dev * d)251 dev_sio_start(struct dev *d)
252 {
253 if (!sio_start(d->sio.hdl)) {
254 logx(1, "%s: failed to start device", d->path);
255 return;
256 }
257 if (d->mode & MODE_PLAY) {
258 d->sio.cstate = DEV_SIO_CYCLE;
259 d->sio.todo = 0;
260 } else {
261 d->sio.cstate = DEV_SIO_READ;
262 d->sio.todo = d->round * d->rchan * d->par.bps;
263 }
264 #ifdef DEBUG
265 d->sio.pused = 0;
266 d->sio.rused = 0;
267 d->sio.sum_utime = 0;
268 d->sio.sum_wtime = 0;
269 d->sio.wtime = file_wtime;
270 d->sio.utime = file_utime;
271 logx(3, "%s: started", d->path);
272 #endif
273 timo_add(&d->sio.watchdog, WATCHDOG_USEC);
274 }
275
276 void
dev_sio_stop(struct dev * d)277 dev_sio_stop(struct dev *d)
278 {
279 if (!sio_eof(d->sio.hdl) && !sio_flush(d->sio.hdl)) {
280 logx(1, "%s: failed to stop device", d->path);
281 return;
282 }
283 #ifdef DEBUG
284 logx(3, "%s: stopped, load avg = %lld / %lld",
285 d->path, d->sio.sum_utime / 1000, d->sio.sum_wtime / 1000);
286 #endif
287 timo_del(&d->sio.watchdog);
288 }
289
290 int
dev_sio_pollfd(void * arg,struct pollfd * pfd)291 dev_sio_pollfd(void *arg, struct pollfd *pfd)
292 {
293 struct dev *d = arg;
294 int events;
295
296 events = (d->sio.cstate == DEV_SIO_READ) ? POLLIN : POLLOUT;
297 return sio_pollfd(d->sio.hdl, pfd, events);
298 }
299
300 int
dev_sio_revents(void * arg,struct pollfd * pfd)301 dev_sio_revents(void *arg, struct pollfd *pfd)
302 {
303 struct dev *d = arg;
304 int events;
305
306 events = sio_revents(d->sio.hdl, pfd);
307 #ifdef DEBUG
308 d->sio.events = events;
309 #endif
310 return events;
311 }
312
/*
 * Main i/o state machine, called on POLLIN/POLLOUT.  Cycles through
 * three states: DEV_SIO_READ (consume one recorded block),
 * DEV_SIO_CYCLE (run dev_cycle() at the block boundary) and
 * DEV_SIO_WRITE (produce one block to play).  "todo" counts the bytes
 * remaining in the current block; partial transfers return to poll().
 */
void
dev_sio_run(void *arg)
{
	struct dev *d = arg;
	unsigned char *data, *base;
	unsigned int n;

	/*
	 * sio_read() and sio_write() would block at the end of the
	 * cycle so we *must* return and restart poll()'ing. Otherwise
	 * we may trigger dev_cycle() which would make all clients
	 * underrun (ex, on a play-only device)
	 */
	for (;;) {
		if (d->pstate != DEV_RUN)
			return;
		switch (d->sio.cstate) {
		case DEV_SIO_READ:
#ifdef DEBUG
			if (!(d->sio.events & POLLIN)) {
				logx(0, "%s: recording, but POLLIN not set", d->path);
				panic();
			}
			if (d->sio.todo == 0) {
				logx(0, "%s: can't read data", d->path);
				panic();
			}
			if (d->prime > 0) {
				logx(0, "%s: unexpected data", d->path);
				panic();
			}
#endif
			/*
			 * read into the decode buffer if there is one,
			 * else straight into the record buffer; resume
			 * at the offset where the previous partial
			 * read stopped
			 */
			base = d->decbuf ? d->decbuf : (unsigned char *)d->rbuf;
			data = base +
			    d->rchan * d->round * d->par.bps -
			    d->sio.todo;
			n = sio_read(d->sio.hdl, data, d->sio.todo);
			d->sio.todo -= n;
#ifdef DEBUG
			logx(4, "%s: read %u bytes, todo %u / %u", d->path,
			    n, d->sio.todo, d->round * d->rchan * d->par.bps);
#endif
			/* block not complete yet: wait for more input */
			if (d->sio.todo > 0)
				return;
#ifdef DEBUG
			d->sio.rused -= d->round;
			if (d->sio.rused >= d->round) {
				logx(2, "%s: rec hw xrun, rused = %d / %d",
				    d->path, d->sio.rused, d->bufsz);
			}
#endif
			d->sio.cstate = DEV_SIO_CYCLE;
			break;
		case DEV_SIO_CYCLE:
			/* the device made progress: re-arm the watchdog */
			timo_del(&d->sio.watchdog);
			timo_add(&d->sio.watchdog, WATCHDOG_USEC);

#ifdef DEBUG
			/*
			 * check that we're called at cycle boundary:
			 * either after a recorded block, or when POLLOUT is
			 * raised
			 */
			if (!((d->mode & MODE_REC) && d->prime == 0) &&
			    !(d->sio.events & POLLOUT)) {
				logx(0, "%s: cycle not at block boundary", d->path);
				panic();
			}
#endif
			dev_cycle(d);
			if (d->mode & MODE_PLAY) {
				d->sio.cstate = DEV_SIO_WRITE;
				d->sio.todo = d->round * d->pchan * d->par.bps;
				break;
			} else {
				/* rec-only: next block needs fresh input */
				d->sio.cstate = DEV_SIO_READ;
				d->sio.todo = d->round * d->rchan * d->par.bps;
				return;
			}
		case DEV_SIO_WRITE:
#ifdef DEBUG
			if (d->sio.todo == 0) {
				logx(0, "%s: can't write data", d->path);
				panic();
			}
#endif
			/*
			 * write from the encode buffer if there is one,
			 * else from the current play buffer position;
			 * resume where the previous partial write stopped
			 */
			base = d->encbuf ? d->encbuf : (unsigned char *)DEV_PBUF(d);
			data = base +
			    d->pchan * d->round * d->par.bps -
			    d->sio.todo;
			n = sio_write(d->sio.hdl, data, d->sio.todo);
			d->sio.todo -= n;
#ifdef DEBUG
			logx(4, "%s: wrote %u bytes, todo %u / %u",
			    d->path, n, d->sio.todo, d->round * d->pchan * d->par.bps);
#endif
			/* block not complete yet: wait until writable */
			if (d->sio.todo > 0)
				return;
#ifdef DEBUG
			d->sio.pused += d->round;
			if (d->prime == 0 &&
			    d->sio.pused <= d->bufsz - d->round) {
				logx(2, "%s: play hw xrun, pused = %d / %d",
				    d->path, d->sio.pused, d->bufsz);
			}
			if (d->sio.pused < 0 ||
			    d->sio.pused > d->bufsz) {
				/* device driver or libsndio bug */
				logx(2, "%s: out of bounds pused = %d / %d",
				    d->path, d->sio.pused, d->bufsz);
			}
#endif
			/* advance the circular play buffer offset */
			d->poffs += d->round;
			if (d->poffs == d->psize)
				d->poffs = 0;
			if ((d->mode & MODE_REC) && d->prime == 0) {
				d->sio.cstate = DEV_SIO_READ;
				d->sio.todo = d->round * d->rchan * d->par.bps;
			} else
				d->sio.cstate = DEV_SIO_CYCLE;
			return;
		}
	}
}
437
/*
 * The device descriptor was hung up (e.g. the hardware went away):
 * move clients elsewhere and tear the device down.
 */
void
dev_sio_hup(void *arg)
{
	struct dev *dev = arg;

#ifdef DEBUG
	logx(2, "%s: disconnected", dev->path);
#endif
	dev_migrate(dev);
	dev_abort(dev);
}
449