xref: /dragonfly/sys/dev/sound/clone.c (revision 0db87cb7)
1 /*-
2  * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: head/sys/dev/sound/clone.c 193640 2009-06-07 19:12:08Z ariff $
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/conf.h>
32 #include <sys/kernel.h>
33 #include <sys/malloc.h>
34 #include <sys/proc.h>
35 #include <sys/devfs.h>
36 
37 #ifdef HAVE_KERNEL_OPTION_HEADERS
38 #include "opt_snd.h"
39 #endif
40 
41 #include <dev/sound/pcm/sound.h>
42 #include <dev/sound/clone.h>
43 
44 extern struct devfs_bitmap devfs_dsp_clone_bitmap;
45 
46 /*
47  * So here we go again, another clonedevs manager. Unlike default clonedevs,
48  * this clone manager is designed to withstand various abusive behavior
49  * (such as 'while : ; do ls /dev/whatever ; done', etc.), reusable object
50  * after reaching certain expiration threshold, aggressive garbage collector,
51  * transparent device allocator and concurrency handling across multiple
52  * thread/proc. Due to limited information given by dev_clone EVENTHANDLER,
53  * we don't have much clues whether the caller wants a real open() or simply
54  * making fun of us with things like stat(), mtime() etc. Assuming that:
55  * 1) Time window between dev_clone EH <-> real open() should be small
56  * enough and 2) mtime()/stat() etc. always looks like a half way / stalled
57  * operation, we can decide whether a new cdev must be created, old
58  * (expired) cdev can be reused or an existing cdev can be shared.
59  *
60  * Most of the operations and logics are generic enough and can be applied
61  * on other places (such as if_tap, snp, etc).  Perhaps this can be
62  * rearranged to complement clone_*(). However, due to this still being
63  * specific to the sound driver (and as a proof of concept on how it can be
64  * done), si_drv2 is used to keep the pointer of the clone list entry to
65  * avoid expensive lookup.
66  */
67 
68 /* clone entry */
struct snd_clone_entry {
	TAILQ_ENTRY(snd_clone_entry) link;	/* list linkage, sorted by type/unit */
	struct snd_clone *parent;	/* owning clone manager */
	struct cdev *devt;		/* cdev; NULL until snd_clone_register() */
	struct timespec tsp;		/* last reference timestamp */
	uint32_t flags;			/* SND_CLONE_* entry state flags */
	pid_t pid;			/* pid of last referencing proc, -1 if none */
	int unit;			/* device type bits | allocation unit */
};
78 
/* clone manager */
struct snd_clone {
	TAILQ_HEAD(link_head, snd_clone_entry) head;	/* sorted entry list */
	struct timespec tsp;	/* timestamp of last clone invocation */
	int refcount;		/* active open() references (snd_clone_ref) */
	int size;		/* number of entries on head */
	int typemask;		/* unit bits reserved for the device type */
	int maxunit;		/* largest allocatable unit */
	int deadline;		/* GC expiration threshold, milliseconds */
	uint32_t flags;		/* SND_CLONE_* manager flags */
};
90 
/*
 * SND_CLONE_ASSERT() : Unconditional panic when SND_DIAGNOSTIC is set;
 * otherwise a plain KASSERT() (compiled out without INVARIANTS).
 */
#ifdef SND_DIAGNOSTIC
#define SND_CLONE_ASSERT(x, y)		do {			\
	if (!(x))						\
		panic y;					\
} while (0)
#else
#define SND_CLONE_ASSERT(...)		KASSERT(__VA_ARGS__)
#endif
99 
100 /*
101  * Shamelessly ripped off from vfs_subr.c
102  * We need at least 1/HZ precision as default timestamping.
103  */
enum { SND_TSP_SEC, SND_TSP_HZ, SND_TSP_USEC, SND_TSP_NSEC };

/* Selected precision; adjustable via the hw.snd.timestamp_precision tunable. */
static int snd_timestamp_precision = SND_TSP_HZ;
TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision);
108 
109 void
110 snd_timestamp(struct timespec *tsp)
111 {
112 	struct timeval tv;
113 
114 	switch (snd_timestamp_precision) {
115 	case SND_TSP_SEC:
116 		tsp->tv_sec = time_second;
117 		tsp->tv_nsec = 0;
118 		break;
119 	case SND_TSP_HZ:
120 		getnanouptime(tsp);
121 		break;
122 	case SND_TSP_USEC:
123 		microuptime(&tv);
124 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
125 		break;
126 	case SND_TSP_NSEC:
127 		nanouptime(tsp);
128 		break;
129 	default:
130 		snd_timestamp_precision = SND_TSP_HZ;
131 		getnanouptime(tsp);
132 		break;
133 	}
134 }
135 
136 #if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
137 static int
138 sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
139 {
140 	int err, val;
141 
142 	val = snd_timestamp_precision;
143 	err = sysctl_handle_int(oidp, &val, 0, req);
144 	if (err == 0 && req->newptr != NULL) {
145 		switch (val) {
146 		case SND_TSP_SEC:
147 		case SND_TSP_HZ:
148 		case SND_TSP_USEC:
149 		case SND_TSP_NSEC:
150 			snd_timestamp_precision = val;
151 			break;
152 		default:
153 			break;
154 		}
155 	}
156 
157 	return (err);
158 }
/* Expose the precision knob as the hw.snd.timestamp_precision sysctl. */
SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
    "timestamp precision (0=s 1=hz 2=us 3=ns)");
162 #endif
163 
164 /*
165  * snd_clone_create() : Return opaque allocated clone manager.
166  */
167 struct snd_clone *
168 snd_clone_create(int typemask, int maxunit, int deadline, uint32_t flags)
169 {
170 	struct snd_clone *c;
171 
172 	SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
173 	    ("invalid typemask: 0x%08x", typemask));
174 	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
175 	    ("invalid clone flags=0x%08x", flags));
176 
177 	c = kmalloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
178 	c->refcount = 0;
179 	c->size = 0;
180 	c->typemask = typemask;
181 	c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
182 	    maxunit;
183 	c->deadline = deadline;
184 	c->flags = flags;
185 	snd_timestamp(&c->tsp);
186 	TAILQ_INIT(&c->head);
187 
188 	return (c);
189 }
190 
191 int
192 snd_clone_busy(struct snd_clone *c)
193 {
194 	struct snd_clone_entry *ce;
195 
196 	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
197 
198 	if (c->size == 0)
199 		return (0);
200 
201 	TAILQ_FOREACH(ce, &c->head, link) {
202 		if (ce->flags & SND_CLONE_BUSY)
203 			return (EBUSY);
204 	}
205 
206 	return (0);
207 }
208 
209 /*
210  * snd_clone_enable()/disable() : Suspend/resume clone allocation through
211  * snd_clone_alloc(). Everything else will not be affected by this.
212  */
213 int
214 snd_clone_enable(struct snd_clone *c)
215 {
216 	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
217 
218 	if (c->flags & SND_CLONE_ENABLE)
219 		return (EINVAL);
220 
221 	c->flags |= SND_CLONE_ENABLE;
222 
223 	return (0);
224 }
225 
226 int
227 snd_clone_disable(struct snd_clone *c)
228 {
229 	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
230 
231 	if (!(c->flags & SND_CLONE_ENABLE))
232 		return (EINVAL);
233 
234 	c->flags &= ~SND_CLONE_ENABLE;
235 
236 	return (0);
237 }
238 
239 /*
240  * Getters / Setters. Not worth explaining :)
241  */
/* Return the number of entries currently managed. */
int
snd_clone_getsize(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->size);
}
249 
/* Return the maximum allowable allocation unit. */
int
snd_clone_getmaxunit(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->maxunit);
}
257 
258 int
259 snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
260 {
261 	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
262 
263 	c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
264 	    maxunit;
265 
266 	return (c->maxunit);
267 }
268 
/* Return the GC expiration deadline, in milliseconds. */
int
snd_clone_getdeadline(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->deadline);
}
276 
/* Set the GC expiration deadline (milliseconds); returns the new value. */
int
snd_clone_setdeadline(struct snd_clone *c, int deadline)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	c->deadline = deadline;

	return (c->deadline);
}
286 
/* Copy the manager's last-invocation timestamp into *tsp. Always returns 0. */
int
snd_clone_gettime(struct snd_clone *c, struct timespec *tsp)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));

	*tsp = c->tsp;

	return (0);
}
297 
/* Return the manager-wide SND_CLONE_* flags. */
uint32_t
snd_clone_getflags(struct snd_clone *c)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	return (c->flags);
}
305 
/* Replace the manager flags wholesale; must stay within SND_CLONE_MASK. */
uint32_t
snd_clone_setflags(struct snd_clone *c, uint32_t flags)
{
	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
	    ("invalid clone flags=0x%08x", flags));

	c->flags = flags;

	return (c->flags);
}
317 
318 int
319 snd_clone_getdevtime(struct cdev *dev, struct timespec *tsp)
320 {
321 	struct snd_clone_entry *ce;
322 
323 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
324 	SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
325 
326 	ce = dev->si_drv2;
327 	if (ce == NULL)
328 		return (ENODEV);
329 
330 	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
331 
332 	*tsp = ce->tsp;
333 
334 	return (0);
335 }
336 
337 uint32_t
338 snd_clone_getdevflags(struct cdev *dev)
339 {
340 	struct snd_clone_entry *ce;
341 
342 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
343 
344 	ce = dev->si_drv2;
345 	if (ce == NULL)
346 		return (0xffffffff);
347 
348 	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
349 
350 	return (ce->flags);
351 }
352 
353 uint32_t
354 snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
355 {
356 	struct snd_clone_entry *ce;
357 
358 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
359 	SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
360 	    ("invalid clone dev flags=0x%08x", flags));
361 
362 	ce = dev->si_drv2;
363 	if (ce == NULL)
364 		return (0xffffffff);
365 
366 	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
367 
368 	ce->flags = flags;
369 
370 	return (ce->flags);
371 }
372 
/*
 * SND_CLONE_ELAPSED() : Elapsed time from y to x, converted to ms.
 * The nsec borrow case subtracts the extra second it implicitly added.
 */
#define SND_CLONE_ELAPSED(x, y)						\
	((((x)->tv_sec - (y)->tv_sec) * 1000) +				\
	(((y)->tv_nsec > (x)->tv_nsec) ?				\
	(((1000000000L + (x)->tv_nsec -					\
	(y)->tv_nsec) / 1000000) - 1000) :				\
	(((x)->tv_nsec - (y)->tv_nsec) / 1000000)))

/*
 * SND_CLONE_EXPIRED() : True when the elapsed time from timestamp z to
 * now (y) exceeds x->deadline (ms). A deadline below 1 means everything
 * is always considered expired; the coarse tv_sec check short-circuits
 * the ms conversion for clearly stale entries.
 */
#define SND_CLONE_EXPIRED(x, y, z)					\
	((x)->deadline < 1 ||						\
	((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||		\
	SND_CLONE_ELAPSED(y, z) > (x)->deadline)
385 
386 /*
387  * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
388  * clone.h for explanations on GC settings.
389  */
int
snd_clone_gc(struct snd_clone *c)
{
	struct snd_clone_entry *ce, *tce;
	struct timespec now;
	int pruned;
	int subunit;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));

	/* Nothing to do unless GC is enabled and entries exist. */
	if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
		return (0);

	snd_timestamp(&now);

	/*
	 * Bail out if the last clone handler was invoked below the deadline
	 * threshold.
	 */
	if ((c->flags & SND_CLONE_GC_EXPIRED) &&
	    !SND_CLONE_EXPIRED(c, &now, &c->tsp))
		return (0);

	pruned = 0;

	/*
	 * Visit each object in reverse order. If the object is still being
	 * referenced by a valid open(), skip it. Look for expired objects
	 * and either revoke its clone invocation status or mercilessly
	 * throw it away.
	 */
	TAILQ_FOREACH_REVERSE_MUTABLE(ce, &c->head, link_head, link, tce) {
		if (!(ce->flags & SND_CLONE_BUSY) &&
		    (!(ce->flags & SND_CLONE_INVOKE) ||
		    SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
			if (c->flags & SND_CLONE_GC_REVOKE) {
				/* Revoke-only mode: keep cdev, clear state. */
				ce->flags &= ~SND_CLONE_INVOKE;
				ce->pid = -1;
			} else {
				/*
				 * Destroy the cdev and return its subunit to
				 * the devfs clone bitmap for reuse.
				 */
				TAILQ_REMOVE(&c->head, ce, link);
				subunit = PCMSUBUNIT(ce->devt);
				destroy_dev(ce->devt);
				devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(dsp), subunit);
				kfree(ce, M_DEVBUF);
				c->size--;
			}
			pruned++;
		}
	}

	/* return total pruned objects */
	return (pruned);
}
443 
444 void
445 snd_clone_destroy(struct snd_clone *c)
446 {
447 	struct snd_clone_entry *ce, *tmp;
448 	int subunit;
449 
450 	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
451 
452 	ce = TAILQ_FIRST(&c->head);
453 	while (ce != NULL) {
454 		tmp = TAILQ_NEXT(ce, link);
455 		if (ce->devt != NULL) {
456 			subunit = PCMSUBUNIT(ce->devt);
457 			destroy_dev(ce->devt);
458 			devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(dsp), subunit);
459 		}
460 		kfree(ce, M_DEVBUF);
461 		ce = tmp;
462 	}
463 
464 	kfree(c, M_DEVBUF);
465 }
466 
467 /*
468  * snd_clone_acquire() : The vital part of concurrency management. Must be
469  * called somewhere at the beginning of open() handler. ENODEV is not really
470  * fatal since it just tell the caller that this is not cloned stuff.
471  * EBUSY is *real*, don't forget that!
472  */
473 int
474 snd_clone_acquire(struct cdev *dev)
475 {
476 	struct snd_clone_entry *ce;
477 
478 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
479 
480 	ce = dev->si_drv2;
481 	if (ce == NULL)
482 		return (ENODEV);
483 
484 	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
485 
486 	ce->flags &= ~SND_CLONE_INVOKE;
487 
488 	if (ce->flags & SND_CLONE_BUSY)
489 		return (EBUSY);
490 
491 	ce->flags |= SND_CLONE_BUSY;
492 
493 	return (0);
494 }
495 
496 /*
497  * snd_clone_release() : Release busy status. Must be called somewhere at
498  * the end of close() handler, or somewhere after fail open().
499  */
500 int
501 snd_clone_release(struct cdev *dev)
502 {
503 	struct snd_clone_entry *ce;
504 
505 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
506 
507 	ce = dev->si_drv2;
508 	if (ce == NULL)
509 		return (ENODEV);
510 
511 	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
512 
513 	ce->flags &= ~SND_CLONE_INVOKE;
514 
515 	if (!(ce->flags & SND_CLONE_BUSY))
516 		return (EBADF);
517 
518 	ce->flags &= ~SND_CLONE_BUSY;
519 	ce->pid = -1;
520 
521 	return (0);
522 }
523 
524 /*
525  * snd_clone_ref/unref() : Garbage collector reference counter. To make
526  * garbage collector run automatically, the sequence must be something like
527  * this (both in open() and close() handlers):
528  *
529  *  open() - 1) snd_clone_acquire()
530  *           2) .... check check ... if failed, snd_clone_release()
531  *           3) Success. Call snd_clone_ref()
532  *
533  * close() - 1) .... check check check ....
534  *           2) Success. snd_clone_release()
535  *           3) snd_clone_unref() . Garbage collector will run at this point
536  *              if this is the last referenced object.
537  */
538 int
539 snd_clone_ref(struct cdev *dev)
540 {
541 	struct snd_clone_entry *ce;
542 	struct snd_clone *c;
543 
544 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
545 
546 	ce = dev->si_drv2;
547 	if (ce == NULL)
548 		return (0);
549 
550 	c = ce->parent;
551 	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
552 	SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));
553 
554 	return (++c->refcount);
555 }
556 
557 int
558 snd_clone_unref(struct cdev *dev)
559 {
560 	struct snd_clone_entry *ce;
561 	struct snd_clone *c;
562 
563 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
564 
565 	ce = dev->si_drv2;
566 	if (ce == NULL)
567 		return (0);
568 
569 	c = ce->parent;
570 	SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
571 	SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));
572 
573 	c->refcount--;
574 
575 	/*
576 	 * Run automatic garbage collector, if needed.
577 	 */
578 	if ((c->flags & SND_CLONE_GC_UNREF) &&
579 	    (!(c->flags & SND_CLONE_GC_LASTREF) ||
580 	    (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
581 		(void)snd_clone_gc(c);
582 
583 	return (c->refcount);
584 }
585 
586 void
587 snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
588 {
589 	SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
590 	SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
591 	SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
592 	SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
593 	    ("invalid clone alloc flags=0x%08x", ce->flags));
594 	SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
595 #if 0	/* dev2unit doesn't make any sense on DragonFly */
596 	SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
597 	    ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
598 	    ce->unit, dev2unit(dev)));
599 #endif
600 
601 	SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
602 
603 	dev->si_drv2 = ce;
604 	ce->devt = dev;
605 	ce->flags &= ~SND_CLONE_ALLOC;
606 	ce->flags |= SND_CLONE_INVOKE;
607 }
608 
/*
 * snd_clone_alloc() : Allocate, revive or share a clone entry for the
 * requested device type (tmask) and unit (*unit, or -1 for "any").
 *
 * Two success shapes:
 *   - returns a new/fresh entry in ALLOC state; *unit holds the chosen
 *     allocation unit and the caller must make_dev() + snd_clone_register();
 *   - returns NULL with *dev set to an existing cdev to be reused/shared
 *     (the snd_clone_alloc_out path).
 * Returns NULL without touching *dev when cloning is disabled or no unit
 * can be provided.
 */
struct snd_clone_entry *
snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
{
	struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
	struct timespec now;
	int cunit, allocunit;
	pid_t curpid;

	SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
	SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
	SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
	    ("invalid tmask: typemask=0x%08x tmask=0x%08x",
	    c->typemask, tmask));
	SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
	SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
	    ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
	    c->typemask, tmask, *unit));

	if (!(c->flags & SND_CLONE_ENABLE) ||
	    (*unit != -1 && *unit > c->maxunit))
		return (NULL);

	ce = NULL;
	after = NULL;		/* insertion point for a new entry */
	bce = NULL;	/* "b"usy candidate */
	cce = NULL;	/* "c"urthread/proc candidate */
	nce = NULL;	/* "n"ull, totally unbusy candidate */
	tce = NULL;	/* Last "t"ry candidate */
	cunit = 0;
	allocunit = (*unit == -1) ? 0 : *unit;
	curpid = curthread->td_proc->p_pid;

	snd_timestamp(&now);

	TAILQ_FOREACH(ce, &c->head, link) {
		/*
		 * Sort incrementally according to device type.
		 */
		if (tmask > (ce->unit & c->typemask)) {
			if (cunit == 0)
				after = ce;
			continue;
		} else if (tmask < (ce->unit & c->typemask))
			break;

		/*
		 * Shoot.. this is where the grumpiness begins. The exact
		 * requested unit already exists: just return it immediately.
		 */
		if (*unit != -1 && *unit == (ce->unit & ~tmask))
			goto snd_clone_alloc_out;

		cunit++;
		/*
		 * Similar device type. Sort incrementally according
		 * to allocation unit. While here, look for free slot
		 * and possible collision for new / future allocation.
		 */
		if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
			allocunit++;
		if ((ce->unit & ~tmask) < allocunit)
			after = ce;
		/*
		 * Clone logic:
		 *   1. Look for non busy, but keep track of the best
		 *      possible busy cdev.
		 *   2. Look for the best (oldest referenced) entry that is
		 *      in a same process / thread.
		 *   3. Look for the best (oldest referenced), absolute free
		 *      entry.
		 *   4. Lastly, look for the best (oldest referenced)
		 *      any entries that doesn't fit with anything above.
		 */
		if (ce->flags & SND_CLONE_BUSY) {
			if (ce->devt != NULL && (bce == NULL ||
			    timespeccmp(&ce->tsp, &bce->tsp, <)))
				bce = ce;
			continue;
		}
		if (ce->pid == curpid &&
		    (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
			cce = ce;
		else if (!(ce->flags & SND_CLONE_INVOKE) &&
		    (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
			nce = ce;
		else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
			tce = ce;
	}
	if (*unit != -1)
		goto snd_clone_alloc_new;
	else if (cce != NULL) {
		/* Same proc entry found, go for it */
		ce = cce;
		goto snd_clone_alloc_out;
	} else if (nce != NULL) {
		/*
		 * Next, try absolute free entry. If the calculated
		 * allocunit is smaller, create new entry instead.
		 */
		if (allocunit < (nce->unit & ~tmask))
			goto snd_clone_alloc_new;
		ce = nce;
		goto snd_clone_alloc_out;
	} else if (allocunit > c->maxunit) {
		/*
		 * Maximum allowable unit reached. Try returning any
		 * available cdev and hope for the best. If the lookup is
		 * done for things like stat(), mtime() etc. , things should
		 * be ok. Otherwise, open() handler should do further checks
		 * and decide whether to return correct error code or not.
		 */
		if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}

snd_clone_alloc_new:
	/*
	 * No free entries found, and we still haven't reached maximum
	 * allowable units. Allocate, setup a minimal unique entry with busy
	 * status so nobody will monkey on this new entry. Unit magic is set
	 * right here to avoid collision with other contesting handler.
	 * The caller must be careful here to maintain its own
	 * synchronization, as long as it will not conflict with malloc(9)
	 * operations.
	 *
	 * That said, go figure.
	 */
	ce = kmalloc(sizeof(*ce), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * NOTE(review): with M_WAITOK, kmalloc should never return NULL on
	 * DragonFly, so this fallback branch looks like dead code — confirm.
	 */
	if (ce == NULL) {
		if (*unit != -1)
			return (NULL);
		/*
		 * We're being dense, ignorance is bliss,
		 * Super Regulatory Measure (TM).. TRY AGAIN!
		 */
		if (nce != NULL) {
			ce = nce;
			goto snd_clone_alloc_out;
		} else if (tce != NULL) {
			ce = tce;
			goto snd_clone_alloc_out;
		} else if (bce != NULL) {
			ce = bce;
			goto snd_clone_alloc_out;
		}
		return (NULL);
	}
	/* Setup new entry */
	ce->parent = c;
	ce->unit = tmask | allocunit;
	ce->pid = curpid;
	ce->tsp = now;
	ce->flags |= SND_CLONE_ALLOC;
	if (after != NULL) {
		TAILQ_INSERT_AFTER(&c->head, after, ce, link);
	} else {
		TAILQ_INSERT_HEAD(&c->head, ce, link);
	}
	c->size++;
	c->tsp = now;
	/*
	 * Save new allocation unit for caller which will be used
	 * by make_dev().
	 */
	*unit = allocunit;

	return (ce);

snd_clone_alloc_out:
	/*
	 * Set, mark, timestamp the entry if this is a truly free entry.
	 * Leave busy entry alone.
	 */
	if (!(ce->flags & SND_CLONE_BUSY)) {
		ce->pid = curpid;
		ce->tsp = now;
		ce->flags |= SND_CLONE_INVOKE;
	}
	c->tsp = now;
	*dev = ce->devt;

	return (NULL);
}
798