1 /* $NetBSD: subr_autoconf.c,v 1.314 2023/07/18 11:57:37 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 1996, 2000 Christopher G. Demetriou
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the
18 * NetBSD Project. See http://www.NetBSD.org/ for
19 * information about NetBSD.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * --(license Id: LICENSE.proto,v 1.1 2000/06/13 21:40:26 cgd Exp )--
35 */
36
37 /*
38 * Copyright (c) 1992, 1993
39 * The Regents of the University of California. All rights reserved.
40 *
41 * This software was developed by the Computer Systems Engineering group
42 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
43 * contributed to Berkeley.
44 *
45 * All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Lawrence Berkeley Laboratories.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. Neither the name of the University nor the names of its contributors
59 * may be used to endorse or promote products derived from this software
60 * without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * from: Header: subr_autoconf.c,v 1.12 93/02/01 19:31:48 torek Exp (LBL)
75 *
76 * @(#)subr_autoconf.c 8.3 (Berkeley) 5/17/94
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.314 2023/07/18 11:57:37 riastradh Exp $");
81
82 #ifdef _KERNEL_OPT
83 #include "opt_ddb.h"
84 #include "drvctl.h"
85 #endif
86
87 #include <sys/param.h>
88 #include <sys/device.h>
89 #include <sys/device_impl.h>
90 #include <sys/disklabel.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/kmem.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/errno.h>
97 #include <sys/proc.h>
98 #include <sys/reboot.h>
99 #include <sys/kthread.h>
100 #include <sys/buf.h>
101 #include <sys/dirent.h>
102 #include <sys/mount.h>
103 #include <sys/namei.h>
104 #include <sys/unistd.h>
105 #include <sys/fcntl.h>
106 #include <sys/lockf.h>
107 #include <sys/callout.h>
108 #include <sys/devmon.h>
109 #include <sys/cpu.h>
110 #include <sys/sysctl.h>
111 #include <sys/stdarg.h>
112 #include <sys/localcount.h>
113
114 #include <sys/disk.h>
115
116 #include <sys/rndsource.h>
117
118 #include <machine/limits.h>
119
120 /*
121 * Autoconfiguration subroutines.
122 */
123
124 /*
125 * Device autoconfiguration timings are mixed into the entropy pool.
126 */
127 static krndsource_t rnd_autoconf_source;
128
129 /*
130 * ioconf.c exports exactly two names: cfdata and cfroots. All system
131 * devices and drivers are found via these tables.
132 */
133 extern struct cfdata cfdata[];
134 extern const short cfroots[];
135
136 /*
137 * List of all cfdriver structures. We use this to detect duplicates
138 * when other cfdrivers are loaded.
139 */
140 struct cfdriverlist allcfdrivers = LIST_HEAD_INITIALIZER(&allcfdrivers);
141 extern struct cfdriver * const cfdriver_list_initial[];
142
143 /*
144 * Initial list of cfattach's.
145 */
146 extern const struct cfattachinit cfattachinit[];
147
148 /*
149 * List of cfdata tables. We always have one such list -- the one
150 * built statically when the kernel was configured.
151 */
152 struct cftablelist allcftables = TAILQ_HEAD_INITIALIZER(allcftables);
153 static struct cftable initcftable;
154
155 #define ROOT ((device_t)NULL)
156
157 struct matchinfo {
158 cfsubmatch_t fn;
159 device_t parent;
160 const int *locs;
161 void *aux;
162 struct cfdata *match;
163 int pri;
164 };
165
166 struct alldevs_foray {
167 int af_s;
168 struct devicelist af_garbage;
169 };
170
171 /*
172 * Internal version of the cfargs structure; all versions are
173 * canonicalized to this.
174 */
175 struct cfargs_internal {
176 union {
177 cfsubmatch_t submatch;/* submatch function (direct config) */
178 cfsearch_t search; /* search function (indirect config) */
179 };
180 const char * iattr; /* interface attribute */
181 const int * locators; /* locators array */
182 devhandle_t devhandle; /* devhandle_t (by value) */
183 };
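/*
 * Illustrative sketch (not part of the original source): callers never
 * build this internal structure directly.  They pass a versioned
 * struct cfargs, normally constructed with the CFARGS() macro from
 * <sys/device.h>, which cfargs_canonicalize() below folds into this
 * form.  A hypothetical call might look like:
 *
 *	config_found(self, &aa, busprint,
 *	    CFARGS(.iattr = "mybus", .locators = locs));
 *
 * where "mybus", aa, busprint and locs are made-up names for the
 * caller's interface attribute, attach args, print function and
 * locator array.
 */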
184
185 static char *number(char *, int);
186 static void mapply(struct matchinfo *, cfdata_t);
187 static void config_devdelete(device_t);
188 static void config_devunlink(device_t, struct devicelist *);
189 static void config_makeroom(int, struct cfdriver *);
190 static void config_devlink(device_t);
191 static void config_alldevs_enter(struct alldevs_foray *);
192 static void config_alldevs_exit(struct alldevs_foray *);
193 static void config_add_attrib_dict(device_t);
194 static device_t config_attach_internal(device_t, cfdata_t, void *,
195 cfprint_t, const struct cfargs_internal *);
196
197 static void config_collect_garbage(struct devicelist *);
198 static void config_dump_garbage(struct devicelist *);
199
200 static void pmflock_debug(device_t, const char *, int);
201
202 static device_t deviter_next1(deviter_t *);
203 static void deviter_reinit(deviter_t *);
204
205 struct deferred_config {
206 TAILQ_ENTRY(deferred_config) dc_queue;
207 device_t dc_dev;
208 void (*dc_func)(device_t);
209 };
210
211 TAILQ_HEAD(deferred_config_head, deferred_config);
212
213 static struct deferred_config_head deferred_config_queue =
214 TAILQ_HEAD_INITIALIZER(deferred_config_queue);
215 static struct deferred_config_head interrupt_config_queue =
216 TAILQ_HEAD_INITIALIZER(interrupt_config_queue);
217 static int interrupt_config_threads = 8;
218 static struct deferred_config_head mountroot_config_queue =
219 TAILQ_HEAD_INITIALIZER(mountroot_config_queue);
220 static int mountroot_config_threads = 2;
221 static lwp_t **mountroot_config_lwpids;
222 static size_t mountroot_config_lwpids_size;
223 bool root_is_mounted = false;
224
225 static void config_process_deferred(struct deferred_config_head *, device_t);
226
227 /* Hooks to finalize configuration once all real devices have been found. */
228 struct finalize_hook {
229 TAILQ_ENTRY(finalize_hook) f_list;
230 int (*f_func)(device_t);
231 device_t f_dev;
232 };
233 static TAILQ_HEAD(, finalize_hook) config_finalize_list =
234 TAILQ_HEAD_INITIALIZER(config_finalize_list);
235 static int config_finalize_done;
236
237 /* list of all devices */
238 static struct devicelist alldevs = TAILQ_HEAD_INITIALIZER(alldevs);
239 static kmutex_t alldevs_lock __cacheline_aligned;
240 static devgen_t alldevs_gen = 1;
241 static int alldevs_nread = 0;
242 static int alldevs_nwrite = 0;
243 static bool alldevs_garbage = false;
244
245 static struct devicelist config_pending =
246 TAILQ_HEAD_INITIALIZER(config_pending);
247 static kmutex_t config_misc_lock;
248 static kcondvar_t config_misc_cv;
249
250 static bool detachall = false;
251
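/*
 * Cheap string equality: compare the first characters inline as a
 * fast path before falling back to strcmp().
 */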
252 #define STREQ(s1, s2) \
253 (*(s1) == *(s2) && strcmp((s1), (s2)) == 0)
254
255 static bool config_initialized = false; /* config_init() has been called. */
256
257 static int config_do_twiddle;
258 static callout_t config_twiddle_ch;
259
260 static void sysctl_detach_setup(struct sysctllog **);
261
262 int no_devmon_insert(const char *, prop_dictionary_t);
263 int (*devmon_insert_vec)(const char *, prop_dictionary_t) = no_devmon_insert;
264
265 typedef int (*cfdriver_fn)(struct cfdriver *);
266 static int
267 frob_cfdrivervec(struct cfdriver * const *cfdriverv,
268 cfdriver_fn drv_do, cfdriver_fn drv_undo,
269 const char *style, bool dopanic)
270 {
271 void (*pr)(const char *, ...) __printflike(1, 2) =
272 dopanic ? panic : printf;
273 int i, error = 0, e2 __diagused;
274
275 for (i = 0; cfdriverv[i] != NULL; i++) {
276 if ((error = drv_do(cfdriverv[i])) != 0) {
277 pr("configure: `%s' driver %s failed: %d",
278 cfdriverv[i]->cd_name, style, error);
279 goto bad;
280 }
281 }
282
283 KASSERT(error == 0);
284 return 0;
285
286 bad:
287 printf("\n");
288 for (i--; i >= 0; i--) {
289 e2 = drv_undo(cfdriverv[i]);
290 KASSERT(e2 == 0);
291 }
292
293 return error;
294 }
295
296 typedef int (*cfattach_fn)(const char *, struct cfattach *);
297 static int
298 frob_cfattachvec(const struct cfattachinit *cfattachv,
299 cfattach_fn att_do, cfattach_fn att_undo,
300 const char *style, bool dopanic)
301 {
302 const struct cfattachinit *cfai = NULL;
303 void (*pr)(const char *, ...) __printflike(1, 2) =
304 dopanic ? panic : printf;
305 int j = 0, error = 0, e2 __diagused;
306
307 for (cfai = &cfattachv[0]; cfai->cfai_name != NULL; cfai++) {
308 for (j = 0; cfai->cfai_list[j] != NULL; j++) {
309 if ((error = att_do(cfai->cfai_name,
310 cfai->cfai_list[j])) != 0) {
311 pr("configure: attachment `%s' "
312 "of `%s' driver %s failed: %d",
313 cfai->cfai_list[j]->ca_name,
314 cfai->cfai_name, style, error);
315 goto bad;
316 }
317 }
318 }
319
320 KASSERT(error == 0);
321 return 0;
322
323 bad:
324 /*
325 * Roll back in reverse order. Probably not strictly necessary,
326 * but do it anyway. (The nested unwinding loop below looks a
327 * little like someone did a little integration, in the math sense.)
328 */
329 printf("\n");
330 if (cfai) {
331 bool last;
332
333 for (last = false; last == false; ) {
334 if (cfai == &cfattachv[0])
335 last = true;
336 for (j--; j >= 0; j--) {
337 e2 = att_undo(cfai->cfai_name,
338 cfai->cfai_list[j]);
339 KASSERT(e2 == 0);
340 }
341 if (!last) {
342 cfai--;
343 for (j = 0; cfai->cfai_list[j] != NULL; j++)
344 ;
345 }
346 }
347 }
348
349 return error;
350 }
351
352 /*
353 * Initialize the autoconfiguration data structures. Normally this
354 * is done by configure(), but some platforms need to do this very
355 * early (e.g. to initialize the console).
356 */
357 void
358 config_init(void)
359 {
360
361 KASSERT(config_initialized == false);
362
363 mutex_init(&alldevs_lock, MUTEX_DEFAULT, IPL_VM);
364
365 mutex_init(&config_misc_lock, MUTEX_DEFAULT, IPL_NONE);
366 cv_init(&config_misc_cv, "cfgmisc");
367
368 callout_init(&config_twiddle_ch, CALLOUT_MPSAFE);
369
370 frob_cfdrivervec(cfdriver_list_initial,
371 config_cfdriver_attach, NULL, "bootstrap", true);
372 frob_cfattachvec(cfattachinit,
373 config_cfattach_attach, NULL, "bootstrap", true);
374
375 initcftable.ct_cfdata = cfdata;
376 TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list);
377
378 rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN,
379 RND_FLAG_COLLECT_TIME);
380
381 config_initialized = true;
382 }
383
384 /*
385 * Init or fini drivers and attachments. Either all or none
386 * are processed (via rollback). It would be nice if this were
387 * atomic to outside consumers, but with the current state of
388 * locking ...
389 */
390 int
391 config_init_component(struct cfdriver * const *cfdriverv,
392 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
393 {
394 int error;
395
396 KERNEL_LOCK(1, NULL);
397
398 if ((error = frob_cfdrivervec(cfdriverv,
399 config_cfdriver_attach, config_cfdriver_detach, "init", false)) != 0)
400 goto out;
401 if ((error = frob_cfattachvec(cfattachv,
402 config_cfattach_attach, config_cfattach_detach,
403 "init", false)) != 0) {
404 frob_cfdrivervec(cfdriverv,
405 config_cfdriver_detach, NULL, "init rollback", true);
406 goto out;
407 }
408 if ((error = config_cfdata_attach(cfdatav, 1)) != 0) {
409 frob_cfattachvec(cfattachv,
410 config_cfattach_detach, NULL, "init rollback", true);
411 frob_cfdrivervec(cfdriverv,
412 config_cfdriver_detach, NULL, "init rollback", true);
413 goto out;
414 }
415
416 /* Success! */
417 error = 0;
418
419 out: KERNEL_UNLOCK_ONE(NULL);
420 return error;
421 }
422
423 int
424 config_fini_component(struct cfdriver * const *cfdriverv,
425 const struct cfattachinit *cfattachv, struct cfdata *cfdatav)
426 {
427 int error;
428
429 KERNEL_LOCK(1, NULL);
430
431 if ((error = config_cfdata_detach(cfdatav)) != 0)
432 goto out;
433 if ((error = frob_cfattachvec(cfattachv,
434 config_cfattach_detach, config_cfattach_attach,
435 "fini", false)) != 0) {
436 if (config_cfdata_attach(cfdatav, 0) != 0)
437 panic("config_cfdata fini rollback failed");
438 goto out;
439 }
440 if ((error = frob_cfdrivervec(cfdriverv,
441 config_cfdriver_detach, config_cfdriver_attach,
442 "fini", false)) != 0) {
443 frob_cfattachvec(cfattachv,
444 config_cfattach_attach, NULL, "fini rollback", true);
445 if (config_cfdata_attach(cfdatav, 0) != 0)
446 panic("config_cfdata fini rollback failed");
447 goto out;
448 }
449
450 /* Success! */
451 error = 0;
452
453 out: KERNEL_UNLOCK_ONE(NULL);
454 return error;
455 }
456
457 void
458 config_init_mi(void)
459 {
460
461 if (!config_initialized)
462 config_init();
463
464 sysctl_detach_setup(NULL);
465 }
466
467 void
468 config_deferred(device_t dev)
469 {
470
471 KASSERT(KERNEL_LOCKED_P());
472
473 config_process_deferred(&deferred_config_queue, dev);
474 config_process_deferred(&interrupt_config_queue, dev);
475 config_process_deferred(&mountroot_config_queue, dev);
476 }
477
478 static void
479 config_interrupts_thread(void *cookie)
480 {
481 struct deferred_config *dc;
482 device_t dev;
483
484 mutex_enter(&config_misc_lock);
485 while ((dc = TAILQ_FIRST(&interrupt_config_queue)) != NULL) {
486 TAILQ_REMOVE(&interrupt_config_queue, dc, dc_queue);
487 mutex_exit(&config_misc_lock);
488
489 dev = dc->dc_dev;
490 (*dc->dc_func)(dev);
491 if (!device_pmf_is_registered(dev))
492 aprint_debug_dev(dev,
493 "WARNING: power management not supported\n");
494 config_pending_decr(dev);
495 kmem_free(dc, sizeof(*dc));
496
497 mutex_enter(&config_misc_lock);
498 }
499 mutex_exit(&config_misc_lock);
500
501 kthread_exit(0);
502 }
503
504 void
505 config_create_interruptthreads(void)
506 {
507 int i;
508
509 for (i = 0; i < interrupt_config_threads; i++) {
510 (void)kthread_create(PRI_NONE, 0/*XXXSMP */, NULL,
511 config_interrupts_thread, NULL, NULL, "configintr");
512 }
513 }
514
515 static void
516 config_mountroot_thread(void *cookie)
517 {
518 struct deferred_config *dc;
519
520 mutex_enter(&config_misc_lock);
521 while ((dc = TAILQ_FIRST(&mountroot_config_queue)) != NULL) {
522 TAILQ_REMOVE(&mountroot_config_queue, dc, dc_queue);
523 mutex_exit(&config_misc_lock);
524
525 (*dc->dc_func)(dc->dc_dev);
526 kmem_free(dc, sizeof(*dc));
527
528 mutex_enter(&config_misc_lock);
529 }
530 mutex_exit(&config_misc_lock);
531
532 kthread_exit(0);
533 }
534
535 void
536 config_create_mountrootthreads(void)
537 {
538 int i;
539
540 if (!root_is_mounted)
541 root_is_mounted = true;
542
543 mountroot_config_lwpids_size = sizeof(mountroot_config_lwpids) *
544 mountroot_config_threads;
545 mountroot_config_lwpids = kmem_alloc(mountroot_config_lwpids_size,
546 KM_NOSLEEP);
547 KASSERT(mountroot_config_lwpids);
548 for (i = 0; i < mountroot_config_threads; i++) {
549 mountroot_config_lwpids[i] = 0;
550 (void)kthread_create(PRI_NONE, KTHREAD_MUSTJOIN/* XXXSMP */,
551 NULL, config_mountroot_thread, NULL,
552 &mountroot_config_lwpids[i],
553 "configroot");
554 }
555 }
556
557 void
558 config_finalize_mountroot(void)
559 {
560 int i, error;
561
562 for (i = 0; i < mountroot_config_threads; i++) {
563 if (mountroot_config_lwpids[i] == 0)
564 continue;
565
566 error = kthread_join(mountroot_config_lwpids[i]);
567 if (error)
568 printf("%s: thread %x joined with error %d\n",
569 __func__, i, error);
570 }
571 kmem_free(mountroot_config_lwpids, mountroot_config_lwpids_size);
572 }
573
574 /*
575 * Announce device attach/detach to userland listeners.
576 */
577
578 int
579 no_devmon_insert(const char *name, prop_dictionary_t p)
580 {
581
582 return ENODEV;
583 }
584
585 static void
586 devmon_report_device(device_t dev, bool isattach)
587 {
588 prop_dictionary_t ev, dict = device_properties(dev);
589 const char *parent;
590 const char *what;
591 const char *where;
592 device_t pdev = device_parent(dev);
593
594 /* If currently no drvctl device, just return */
595 if (devmon_insert_vec == no_devmon_insert)
596 return;
597
598 ev = prop_dictionary_create();
599 if (ev == NULL)
600 return;
601
602 what = (isattach ? "device-attach" : "device-detach");
603 parent = (pdev == NULL ? "root" : device_xname(pdev));
604 if (prop_dictionary_get_string(dict, "location", &where)) {
605 prop_dictionary_set_string(ev, "location", where);
606 aprint_debug("ev: %s %s at %s in [%s]\n",
607 what, device_xname(dev), parent, where);
608 }
609 if (!prop_dictionary_set_string(ev, "device", device_xname(dev)) ||
610 !prop_dictionary_set_string(ev, "parent", parent)) {
611 prop_object_release(ev);
612 return;
613 }
614
615 if ((*devmon_insert_vec)(what, ev) != 0)
616 prop_object_release(ev);
617 }
618
619 /*
620 * Add a cfdriver to the system.
621 */
622 int
623 config_cfdriver_attach(struct cfdriver *cd)
624 {
625 struct cfdriver *lcd;
626
627 /* Make sure this driver isn't already in the system. */
628 LIST_FOREACH(lcd, &allcfdrivers, cd_list) {
629 if (STREQ(lcd->cd_name, cd->cd_name))
630 return EEXIST;
631 }
632
633 LIST_INIT(&cd->cd_attach);
634 LIST_INSERT_HEAD(&allcfdrivers, cd, cd_list);
635
636 return 0;
637 }
638
639 /*
640 * Remove a cfdriver from the system.
641 */
642 int
643 config_cfdriver_detach(struct cfdriver *cd)
644 {
645 struct alldevs_foray af;
646 int i, rc = 0;
647
648 config_alldevs_enter(&af);
649 /* Make sure there are no active instances. */
650 for (i = 0; i < cd->cd_ndevs; i++) {
651 if (cd->cd_devs[i] != NULL) {
652 rc = EBUSY;
653 break;
654 }
655 }
656 config_alldevs_exit(&af);
657
658 if (rc != 0)
659 return rc;
660
661 /* ...and no attachments loaded. */
662 if (LIST_EMPTY(&cd->cd_attach) == 0)
663 return EBUSY;
664
665 LIST_REMOVE(cd, cd_list);
666
667 KASSERT(cd->cd_devs == NULL);
668
669 return 0;
670 }
671
672 /*
673 * Look up a cfdriver by name.
674 */
675 struct cfdriver *
676 config_cfdriver_lookup(const char *name)
677 {
678 struct cfdriver *cd;
679
680 LIST_FOREACH(cd, &allcfdrivers, cd_list) {
681 if (STREQ(cd->cd_name, name))
682 return cd;
683 }
684
685 return NULL;
686 }
687
688 /*
689 * Add a cfattach to the specified driver.
690 */
691 int
692 config_cfattach_attach(const char *driver, struct cfattach *ca)
693 {
694 struct cfattach *lca;
695 struct cfdriver *cd;
696
697 cd = config_cfdriver_lookup(driver);
698 if (cd == NULL)
699 return ESRCH;
700
701 /* Make sure this attachment isn't already on this driver. */
702 LIST_FOREACH(lca, &cd->cd_attach, ca_list) {
703 if (STREQ(lca->ca_name, ca->ca_name))
704 return EEXIST;
705 }
706
707 LIST_INSERT_HEAD(&cd->cd_attach, ca, ca_list);
708
709 return 0;
710 }
711
712 /*
713 * Remove a cfattach from the specified driver.
714 */
715 int
716 config_cfattach_detach(const char *driver, struct cfattach *ca)
717 {
718 struct alldevs_foray af;
719 struct cfdriver *cd;
720 device_t dev;
721 int i, rc = 0;
722
723 cd = config_cfdriver_lookup(driver);
724 if (cd == NULL)
725 return ESRCH;
726
727 config_alldevs_enter(&af);
728 /* Make sure there are no active instances. */
729 for (i = 0; i < cd->cd_ndevs; i++) {
730 if ((dev = cd->cd_devs[i]) == NULL)
731 continue;
732 if (dev->dv_cfattach == ca) {
733 rc = EBUSY;
734 break;
735 }
736 }
737 config_alldevs_exit(&af);
738
739 if (rc != 0)
740 return rc;
741
742 LIST_REMOVE(ca, ca_list);
743
744 return 0;
745 }
746
747 /*
748 * Look up a cfattach by name.
749 */
750 static struct cfattach *
751 config_cfattach_lookup_cd(struct cfdriver *cd, const char *atname)
752 {
753 struct cfattach *ca;
754
755 LIST_FOREACH(ca, &cd->cd_attach, ca_list) {
756 if (STREQ(ca->ca_name, atname))
757 return ca;
758 }
759
760 return NULL;
761 }
762
763 /*
764 * Look up a cfattach by driver/attachment name.
765 */
766 struct cfattach *
767 config_cfattach_lookup(const char *name, const char *atname)
768 {
769 struct cfdriver *cd;
770
771 cd = config_cfdriver_lookup(name);
772 if (cd == NULL)
773 return NULL;
774
775 return config_cfattach_lookup_cd(cd, atname);
776 }
777
778 /*
779 * Apply the matching function and choose the best. This is used
780 * a few times and we want to keep the code small.
781 */
782 static void
783 mapply(struct matchinfo *m, cfdata_t cf)
784 {
785 int pri;
786
787 if (m->fn != NULL) {
788 pri = (*m->fn)(m->parent, cf, m->locs, m->aux);
789 } else {
790 pri = config_match(m->parent, cf, m->aux);
791 }
792 if (pri > m->pri) {
793 m->match = cf;
794 m->pri = pri;
795 }
796 }
797
798 int
799 config_stdsubmatch(device_t parent, cfdata_t cf, const int *locs, void *aux)
800 {
801 const struct cfiattrdata *ci;
802 const struct cflocdesc *cl;
803 int nlocs, i;
804
805 ci = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
806 KASSERT(ci);
807 nlocs = ci->ci_loclen;
808 KASSERT(!nlocs || locs);
809 for (i = 0; i < nlocs; i++) {
810 cl = &ci->ci_locdesc[i];
811 if (cl->cld_defaultstr != NULL &&
812 cf->cf_loc[i] == cl->cld_default)
813 continue;
814 if (cf->cf_loc[i] == locs[i])
815 continue;
816 return 0;
817 }
818
819 return config_match(parent, cf, aux);
820 }
821
822 /*
823 * Helper function: check whether the driver supports the interface attribute
824 * and return its descriptor structure.
825 */
826 static const struct cfiattrdata *
827 cfdriver_get_iattr(const struct cfdriver *cd, const char *ia)
828 {
829 const struct cfiattrdata * const *cpp;
830
831 if (cd->cd_attrs == NULL)
832 return 0;
833
834 for (cpp = cd->cd_attrs; *cpp; cpp++) {
835 if (STREQ((*cpp)->ci_name, ia)) {
836 /* Match. */
837 return *cpp;
838 }
839 }
840 return 0;
841 }
842
843 static int __diagused
844 cfdriver_iattr_count(const struct cfdriver *cd)
845 {
846 const struct cfiattrdata * const *cpp;
847 int i;
848
849 if (cd->cd_attrs == NULL)
850 return 0;
851
852 for (i = 0, cpp = cd->cd_attrs; *cpp; cpp++) {
853 i++;
854 }
855 return i;
856 }
857
858 /*
859 * Look up an interface attribute description by name.
860 * If the driver is given, consider only its supported attributes.
861 */
862 const struct cfiattrdata *
863 cfiattr_lookup(const char *name, const struct cfdriver *cd)
864 {
865 const struct cfdriver *d;
866 const struct cfiattrdata *ia;
867
868 if (cd)
869 return cfdriver_get_iattr(cd, name);
870
871 LIST_FOREACH(d, &allcfdrivers, cd_list) {
872 ia = cfdriver_get_iattr(d, name);
873 if (ia)
874 return ia;
875 }
876 return 0;
877 }
878
879 /*
880 * Determine if `parent' is a potential parent for a device spec based
881 * on `cfp'.
882 */
883 static int
884 cfparent_match(const device_t parent, const struct cfparent *cfp)
885 {
886 struct cfdriver *pcd;
887
888 /* We don't match root nodes here. */
889 if (cfp == NULL)
890 return 0;
891
892 pcd = parent->dv_cfdriver;
893 KASSERT(pcd != NULL);
894
895 /*
896 * First, ensure this parent has the correct interface
897 * attribute.
898 */
899 if (!cfdriver_get_iattr(pcd, cfp->cfp_iattr))
900 return 0;
901
902 /*
903 * If no specific parent device instance was specified (i.e.
904 * we're attaching to the attribute only), we're done!
905 */
906 if (cfp->cfp_parent == NULL)
907 return 1;
908
909 /*
910 * Check the parent device's name.
911 */
912 if (STREQ(pcd->cd_name, cfp->cfp_parent) == 0)
913 return 0; /* not the same parent */
914
915 /*
916 * Make sure the unit number matches.
917 */
918 if (cfp->cfp_unit == DVUNIT_ANY || /* wildcard */
919 cfp->cfp_unit == parent->dv_unit)
920 return 1;
921
922 /* Unit numbers don't match. */
923 return 0;
924 }
925
926 /*
927 * Helper for config_cfdata_attach(): check all devices to see whether any
928 * could be a parent for an attachment in the config data table passed, and rescan.
929 */
930 static void
931 rescan_with_cfdata(const struct cfdata *cf)
932 {
933 device_t d;
934 const struct cfdata *cf1;
935 deviter_t di;
936
937 KASSERT(KERNEL_LOCKED_P());
938
939 /*
940 * "alldevs" is likely longer than a modules's cfdata, so make it
941 * the outer loop.
942 */
943 for (d = deviter_first(&di, 0); d != NULL; d = deviter_next(&di)) {
944
945 if (!(d->dv_cfattach->ca_rescan))
946 continue;
947
948 for (cf1 = cf; cf1->cf_name; cf1++) {
949
950 if (!cfparent_match(d, cf1->cf_pspec))
951 continue;
952
953 (*d->dv_cfattach->ca_rescan)(d,
954 cfdata_ifattr(cf1), cf1->cf_loc);
955
956 config_deferred(d);
957 }
958 }
959 deviter_release(&di);
960 }
961
962 /*
963 * Attach a supplemental config data table and rescan potential
964 * parent devices if required.
965 */
966 int
967 config_cfdata_attach(cfdata_t cf, int scannow)
968 {
969 struct cftable *ct;
970
971 KERNEL_LOCK(1, NULL);
972
973 ct = kmem_alloc(sizeof(*ct), KM_SLEEP);
974 ct->ct_cfdata = cf;
975 TAILQ_INSERT_TAIL(&allcftables, ct, ct_list);
976
977 if (scannow)
978 rescan_with_cfdata(cf);
979
980 KERNEL_UNLOCK_ONE(NULL);
981
982 return 0;
983 }
984
985 /*
986 * Helper for config_cfdata_detach: check whether a device is
987 * found through any attachment in the config data table.
988 */
989 static int
990 dev_in_cfdata(device_t d, cfdata_t cf)
991 {
992 const struct cfdata *cf1;
993
994 for (cf1 = cf; cf1->cf_name; cf1++)
995 if (d->dv_cfdata == cf1)
996 return 1;
997
998 return 0;
999 }
1000
1001 /*
1002 * Detach a supplemental config data table. First detach all devices found
1003 * through that table (and thus still holding references to it).
1004 */
1005 int
1006 config_cfdata_detach(cfdata_t cf)
1007 {
1008 device_t d;
1009 int error = 0;
1010 struct cftable *ct;
1011 deviter_t di;
1012
1013 KERNEL_LOCK(1, NULL);
1014
1015 for (d = deviter_first(&di, DEVITER_F_RW); d != NULL;
1016 d = deviter_next(&di)) {
1017 if (!dev_in_cfdata(d, cf))
1018 continue;
1019 if ((error = config_detach(d, 0)) != 0)
1020 break;
1021 }
1022 deviter_release(&di);
1023 if (error) {
1024 aprint_error_dev(d, "unable to detach instance\n");
1025 goto out;
1026 }
1027
1028 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1029 if (ct->ct_cfdata == cf) {
1030 TAILQ_REMOVE(&allcftables, ct, ct_list);
1031 kmem_free(ct, sizeof(*ct));
1032 error = 0;
1033 goto out;
1034 }
1035 }
1036
1037 /* not found -- shouldn't happen */
1038 error = EINVAL;
1039
1040 out: KERNEL_UNLOCK_ONE(NULL);
1041 return error;
1042 }
1043
1044 /*
1045 * Invoke the "match" routine for a cfdata entry on behalf of
1046 * an external caller, usually a direct config "submatch" routine.
1047 */
1048 int
1049 config_match(device_t parent, cfdata_t cf, void *aux)
1050 {
1051 struct cfattach *ca;
1052
1053 KASSERT(KERNEL_LOCKED_P());
1054
1055 ca = config_cfattach_lookup(cf->cf_name, cf->cf_atname);
1056 if (ca == NULL) {
1057 /* No attachment for this entry, oh well. */
1058 return 0;
1059 }
1060
1061 return (*ca->ca_match)(parent, cf, aux);
1062 }
1063
1064 /*
1065 * Invoke the "probe" routine for a cfdata entry on behalf of
1066 * an external caller, usually an indirect config "search" routine.
1067 */
1068 int
1069 config_probe(device_t parent, cfdata_t cf, void *aux)
1070 {
1071 /*
1072 * This is currently a synonym for config_match(), but this
1073 * is an implementation detail; "match" and "probe" routines
1074 * have different behaviors.
1075 *
1076 * XXX config_probe() should return a bool, because there is
1077 * XXX no match score for probe -- it's either there or it's
1078 * XXX not, but some ports abuse the return value as a way
1079 * XXX to attach "critical" devices before "non-critical"
1080 * XXX devices.
1081 */
1082 return config_match(parent, cf, aux);
1083 }
1084
1085 static struct cfargs_internal *
1086 cfargs_canonicalize(const struct cfargs * const cfargs,
1087 struct cfargs_internal * const store)
1088 {
1089 struct cfargs_internal *args = store;
1090
1091 memset(args, 0, sizeof(*args));
1092
1093 /* If none was specified, all-NULL pointers are what we want. */
1094 if (cfargs == NULL) {
1095 return args;
1096 }
1097
1098 /*
1099 * Only one version of the cfargs structure is recognized at this time.
1100 */
1101 if (cfargs->cfargs_version != CFARGS_VERSION) {
1102 panic("cfargs_canonicalize: unknown version %lu\n",
1103 (unsigned long)cfargs->cfargs_version);
1104 }
1105
1106 /*
1107 * submatch and search are mutually-exclusive.
1108 */
1109 if (cfargs->submatch != NULL && cfargs->search != NULL) {
1110 panic("cfargs_canonicalize: submatch and search are "
1111 "mutually-exclusive");
1112 }
1113 if (cfargs->submatch != NULL) {
1114 args->submatch = cfargs->submatch;
1115 } else if (cfargs->search != NULL) {
1116 args->search = cfargs->search;
1117 }
1118
1119 args->iattr = cfargs->iattr;
1120 args->locators = cfargs->locators;
1121 args->devhandle = cfargs->devhandle;
1122
1123 return args;
1124 }
1125
1126 /*
1127 * Iterate over all potential children of some device, calling the given
1128 * function (default being the child's match function) for each one.
1129 * Nonzero returns are matches; the highest value returned is considered
1130 * the best match. Return the `found child' if we got a match, or NULL
1131 * otherwise. The `aux' pointer is simply passed on through.
1132 *
1133 * Note that this function is designed so that it can be used to apply
1134 * an arbitrary function to all potential children (its return value
1135 * can be ignored).
1136 */
1137 static cfdata_t
1138 config_search_internal(device_t parent, void *aux,
1139 const struct cfargs_internal * const args)
1140 {
1141 struct cftable *ct;
1142 cfdata_t cf;
1143 struct matchinfo m;
1144
1145 KASSERT(config_initialized);
1146 KASSERTMSG((!args->iattr ||
1147 cfdriver_get_iattr(parent->dv_cfdriver, args->iattr)),
1148 "%s searched for child at interface attribute %s,"
1149 " but device %s(4) has no such interface attribute in config(5)",
1150 device_xname(parent), args->iattr,
1151 parent->dv_cfdriver->cd_name);
1152 KASSERTMSG((args->iattr ||
1153 cfdriver_iattr_count(parent->dv_cfdriver) < 2),
1154 "%s searched for child without interface attribute,"
1155 " needed to disambiguate among the %d declared for in %s(4)"
1156 " in config(5)",
1157 device_xname(parent),
1158 cfdriver_iattr_count(parent->dv_cfdriver),
1159 parent->dv_cfdriver->cd_name);
1160
1161 m.fn = args->submatch; /* N.B. union */
1162 m.parent = parent;
1163 m.locs = args->locators;
1164 m.aux = aux;
1165 m.match = NULL;
1166 m.pri = 0;
1167
1168 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1169 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1170
1171 /* We don't match root nodes here. */
1172 if (!cf->cf_pspec)
1173 continue;
1174
1175 /*
1176 * Skip cf if no longer eligible, otherwise scan
1177 * through parents for one matching `parent', and
1178 * try match function.
1179 */
1180 if (cf->cf_fstate == FSTATE_FOUND)
1181 continue;
1182 if (cf->cf_fstate == FSTATE_DNOTFOUND ||
1183 cf->cf_fstate == FSTATE_DSTAR)
1184 continue;
1185
1186 /*
1187 * If an interface attribute was specified,
1188 * consider only children which attach to
1189 * that attribute.
1190 */
1191 if (args->iattr != NULL &&
1192 !STREQ(args->iattr, cfdata_ifattr(cf)))
1193 continue;
1194
1195 if (cfparent_match(parent, cf->cf_pspec))
1196 mapply(&m, cf);
1197 }
1198 }
1199 rnd_add_uint32(&rnd_autoconf_source, 0);
1200 return m.match;
1201 }
1202
1203 cfdata_t
1204 config_search(device_t parent, void *aux, const struct cfargs *cfargs)
1205 {
1206 cfdata_t cf;
1207 struct cfargs_internal store;
1208
1209 cf = config_search_internal(parent, aux,
1210 cfargs_canonicalize(cfargs, &store));
1211
1212 return cf;
1213 }
1214
1215 /*
1216 * Find the given root device.
1217 * This is much like config_search, but there is no parent.
1218 * Don't bother with multiple cfdata tables; the root node
1219 * must always be in the initial table.
1220 */
1221 cfdata_t
1222 config_rootsearch(cfsubmatch_t fn, const char *rootname, void *aux)
1223 {
1224 cfdata_t cf;
1225 const short *p;
1226 struct matchinfo m;
1227
1228 m.fn = fn;
1229 m.parent = ROOT;
1230 m.aux = aux;
1231 m.match = NULL;
1232 m.pri = 0;
1233 m.locs = 0;
1234 /*
1235 * Look at root entries for matching name. We do not bother
1236 * with found-state here since only one root should ever be
1237 * searched (and it must be done first).
1238 */
1239 for (p = cfroots; *p >= 0; p++) {
1240 cf = &cfdata[*p];
1241 if (strcmp(cf->cf_name, rootname) == 0)
1242 mapply(&m, cf);
1243 }
1244 return m.match;
1245 }
1246
1247 static const char * const msgs[] = {
1248 [QUIET] = "",
1249 [UNCONF] = " not configured\n",
1250 [UNSUPP] = " unsupported\n",
1251 };
1252
1253 /*
1254 * The given `aux' argument describes a device that has been found
1255 * on the given parent, but not necessarily configured. Locate the
1256 * configuration data for that device (using the submatch function
1257 * provided, or using candidates' cd_match configuration driver
1258 * functions) and attach it, and return its device_t. If the device was
1259 * not configured, call the given `print' function and return NULL.
1260 */
1261 device_t
1262 config_found_acquire(device_t parent, void *aux, cfprint_t print,
1263 const struct cfargs * const cfargs)
1264 {
1265 cfdata_t cf;
1266 struct cfargs_internal store;
1267 const struct cfargs_internal * const args =
1268 cfargs_canonicalize(cfargs, &store);
1269 device_t dev;
1270
1271 KERNEL_LOCK(1, NULL);
1272
1273 cf = config_search_internal(parent, aux, args);
1274 if (cf != NULL) {
1275 dev = config_attach_internal(parent, cf, aux, print, args);
1276 goto out;
1277 }
1278
1279 if (print) {
1280 if (config_do_twiddle && cold)
1281 twiddle();
1282
1283 const int pret = (*print)(aux, device_xname(parent));
1284 KASSERT(pret >= 0);
1285 KASSERT(pret < __arraycount(msgs));
1286 KASSERT(msgs[pret] != NULL);
1287 aprint_normal("%s", msgs[pret]);
1288 }
1289
1290 dev = NULL;
1291
1292 out: KERNEL_UNLOCK_ONE(NULL);
1293 return dev;
1294 }
1295
1296 /*
1297 * config_found(parent, aux, print, cfargs)
1298 *
1299 * Legacy entry point for callers whose use of the returned
1300 * device_t is not delimited by device_release.
1301 *
1302 * The caller is required to hold the kernel lock as a fragile
1303 * defence against races.
1304 *
1305 * Callers should ignore the return value or be converted to
1306 * config_found_acquire with a matching device_release once they
1307 * have finished with the returned device_t.
1308 */
1309 device_t
1310 config_found(device_t parent, void *aux, cfprint_t print,
1311 const struct cfargs * const cfargs)
1312 {
1313 device_t dev;
1314
1315 KASSERT(KERNEL_LOCKED_P());
1316
1317 dev = config_found_acquire(parent, aux, print, cfargs);
1318 if (dev == NULL)
1319 return NULL;
1320 device_release(dev);
1321
1322 return dev;
1323 }
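/*
 * Sketch of intended usage (hypothetical caller, not from this file):
 * a parent that keeps using the returned child should prefer the
 * _acquire variant and drop its reference when done, e.g.
 *
 *	child = config_found_acquire(self, &aa, busprint, CFARGS_NONE);
 *	if (child != NULL) {
 *		...use child...
 *		device_release(child);
 *	}
 *
 * Here aa and busprint stand in for the caller's attach args and
 * print function.
 */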
1324
1325 /*
1326 * As above, but for root devices.
1327 */
1328 device_t
1329 config_rootfound(const char *rootname, void *aux)
1330 {
1331 cfdata_t cf;
1332 device_t dev = NULL;
1333
1334 KERNEL_LOCK(1, NULL);
1335 if ((cf = config_rootsearch(NULL, rootname, aux)) != NULL)
1336 dev = config_attach(ROOT, cf, aux, NULL, CFARGS_NONE);
1337 else
1338 aprint_error("root device %s not configured\n", rootname);
1339 KERNEL_UNLOCK_ONE(NULL);
1340 return dev;
1341 }
1342
1343 /* just like sprintf(buf, "%d") except that it works from the end */
1344 static char *
1345 number(char *ep, int n)
1346 {
1347
1348 *--ep = 0;
1349 while (n >= 10) {
1350 *--ep = (n % 10) + '0';
1351 n /= 10;
1352 }
1353 *--ep = n + '0';
1354 return ep;
1355 }
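/*
 * For example, with char num[10], number(&num[10], 42) writes "42\0"
 * at the end of the buffer and returns a pointer to the '4'.
 */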
1356
1357 /*
1358 * Expand the size of the cd_devs array if necessary.
1359 *
1360 * The caller must hold alldevs_lock. config_makeroom() may release and
1361 * re-acquire alldevs_lock, so callers should re-check conditions such
1362 * as alldevs_nwrite == 0 and alldevs_nread == 0 when config_makeroom()
1363 * returns.
1364 */
1365 static void
1366 config_makeroom(int n, struct cfdriver *cd)
1367 {
1368 int ondevs, nndevs;
1369 device_t *osp, *nsp;
1370
1371 KASSERT(mutex_owned(&alldevs_lock));
1372 alldevs_nwrite++;
1373
1374 /* XXX arithmetic overflow */
1375 for (nndevs = MAX(4, cd->cd_ndevs); nndevs <= n; nndevs += nndevs)
1376 ;
1377
1378 while (n >= cd->cd_ndevs) {
1379 /*
1380 * Need to expand the array.
1381 */
1382 ondevs = cd->cd_ndevs;
1383 osp = cd->cd_devs;
1384
1385 /*
1386 * Release alldevs_lock around allocation, which may
1387 * sleep.
1388 */
1389 mutex_exit(&alldevs_lock);
1390 nsp = kmem_alloc(sizeof(device_t) * nndevs, KM_SLEEP);
1391 mutex_enter(&alldevs_lock);
1392
1393 /*
1394 * If another thread moved the array while we did
1395 * not hold alldevs_lock, try again.
1396 */
1397 if (cd->cd_devs != osp || cd->cd_ndevs != ondevs) {
1398 mutex_exit(&alldevs_lock);
1399 kmem_free(nsp, sizeof(device_t) * nndevs);
1400 mutex_enter(&alldevs_lock);
1401 continue;
1402 }
1403
1404 memset(nsp + ondevs, 0, sizeof(device_t) * (nndevs - ondevs));
1405 if (ondevs != 0)
1406 memcpy(nsp, cd->cd_devs, sizeof(device_t) * ondevs);
1407
1408 cd->cd_ndevs = nndevs;
1409 cd->cd_devs = nsp;
1410 if (ondevs != 0) {
1411 mutex_exit(&alldevs_lock);
1412 kmem_free(osp, sizeof(device_t) * ondevs);
1413 mutex_enter(&alldevs_lock);
1414 }
1415 }
1416 KASSERT(mutex_owned(&alldevs_lock));
1417 alldevs_nwrite--;
1418 }
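/*
 * Worked example of the sizing loop above: with cd_ndevs == 4 and a
 * request for unit 9, nndevs doubles 4 -> 8 -> 16, so the replacement
 * cd_devs array holds 16 entries.
 */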
1419
1420 /*
1421 * Put dev into the devices list.
1422 */
1423 static void
1424 config_devlink(device_t dev)
1425 {
1426
1427 mutex_enter(&alldevs_lock);
1428
1429 KASSERT(device_cfdriver(dev)->cd_devs[dev->dv_unit] == dev);
1430
1431 dev->dv_add_gen = alldevs_gen;
1432 /* It is safe to add a device to the tail of the list while
1433 * readers and writers are in the list.
1434 */
1435 TAILQ_INSERT_TAIL(&alldevs, dev, dv_list);
1436 mutex_exit(&alldevs_lock);
1437 }
1438
1439 static void
1440 config_devfree(device_t dev)
1441 {
1442
1443 KASSERT(dev->dv_flags & DVF_PRIV_ALLOC);
1444 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1445
1446 if (dev->dv_cfattach->ca_devsize > 0)
1447 kmem_free(dev->dv_private, dev->dv_cfattach->ca_devsize);
1448 kmem_free(dev, sizeof(*dev));
1449 }
1450
1451 /*
1452 * Caller must hold alldevs_lock.
1453 */
1454 static void
1455 config_devunlink(device_t dev, struct devicelist *garbage)
1456 {
1457 struct device_garbage *dg = &dev->dv_garbage;
1458 cfdriver_t cd = device_cfdriver(dev);
1459 int i;
1460
1461 KASSERT(mutex_owned(&alldevs_lock));
1462 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1463
1464 /* Unlink from device list. Link to garbage list. */
1465 TAILQ_REMOVE(&alldevs, dev, dv_list);
1466 TAILQ_INSERT_TAIL(garbage, dev, dv_list);
1467
1468 /* Remove from cfdriver's array. */
1469 cd->cd_devs[dev->dv_unit] = NULL;
1470
1471 /*
1472 * If the device now has no units in use, unlink its softc array.
1473 */
1474 for (i = 0; i < cd->cd_ndevs; i++) {
1475 if (cd->cd_devs[i] != NULL)
1476 break;
1477 }
1478 /* Nothing found. Unlink, now. Deallocate, later. */
1479 if (i == cd->cd_ndevs) {
1480 dg->dg_ndevs = cd->cd_ndevs;
1481 dg->dg_devs = cd->cd_devs;
1482 cd->cd_devs = NULL;
1483 cd->cd_ndevs = 0;
1484 }
1485 }
1486
1487 static void
1488 config_devdelete(device_t dev)
1489 {
1490 struct device_garbage *dg = &dev->dv_garbage;
1491 device_lock_t dvl = device_getlock(dev);
1492
1493 KASSERTMSG(dev->dv_pending == 0, "%d", dev->dv_pending);
1494
1495 if (dg->dg_devs != NULL)
1496 kmem_free(dg->dg_devs, sizeof(device_t) * dg->dg_ndevs);
1497
1498 localcount_fini(dev->dv_localcount);
1499 kmem_free(dev->dv_localcount, sizeof(*dev->dv_localcount));
1500
1501 cv_destroy(&dvl->dvl_cv);
1502 mutex_destroy(&dvl->dvl_mtx);
1503
1504 KASSERT(dev->dv_properties != NULL);
1505 prop_object_release(dev->dv_properties);
1506
1507 if (dev->dv_activity_handlers)
1508 panic("%s with registered handlers", __func__);
1509
1510 if (dev->dv_locators) {
1511 size_t amount = *--dev->dv_locators;
1512 kmem_free(dev->dv_locators, amount);
1513 }
1514
1515 config_devfree(dev);
1516 }
1517
1518 static int
1519 config_unit_nextfree(cfdriver_t cd, cfdata_t cf)
1520 {
1521 int unit = cf->cf_unit;
1522
1523 KASSERT(mutex_owned(&alldevs_lock));
1524
1525 if (unit < 0)
1526 return -1;
1527 if (cf->cf_fstate == FSTATE_STAR) {
1528 for (; unit < cd->cd_ndevs; unit++)
1529 if (cd->cd_devs[unit] == NULL)
1530 break;
1531 /*
1532 * unit is now the unit of the first NULL device pointer,
1533 * or max(cd->cd_ndevs,cf->cf_unit).
1534 */
1535 } else {
1536 if (unit < cd->cd_ndevs && cd->cd_devs[unit] != NULL)
1537 unit = -1;
1538 }
1539 return unit;
1540 }
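/*
 * In config(5) terms (illustrative, not from this file): a wildcard
 * entry such as "sd* at scsibus?" has cf_fstate == FSTATE_STAR and
 * takes the first free unit at or above cf_unit, while a fixed entry
 * such as "sd0 at scsibus0" only succeeds if that exact unit is free.
 */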
1541
1542 static int
1543 config_unit_alloc(device_t dev, cfdriver_t cd, cfdata_t cf)
1544 {
1545 struct alldevs_foray af;
1546 int unit;
1547
1548 config_alldevs_enter(&af);
1549 for (;;) {
1550 unit = config_unit_nextfree(cd, cf);
1551 if (unit == -1)
1552 break;
1553 if (unit < cd->cd_ndevs) {
1554 cd->cd_devs[unit] = dev;
1555 dev->dv_unit = unit;
1556 break;
1557 }
1558 config_makeroom(unit, cd);
1559 }
1560 config_alldevs_exit(&af);
1561
1562 return unit;
1563 }
1564
1565 static device_t
1566 config_devalloc(const device_t parent, const cfdata_t cf,
1567 const struct cfargs_internal * const args)
1568 {
1569 cfdriver_t cd;
1570 cfattach_t ca;
1571 size_t lname, lunit;
1572 const char *xunit;
1573 int myunit;
1574 char num[10];
1575 device_t dev;
1576 void *dev_private;
1577 const struct cfiattrdata *ia;
1578 device_lock_t dvl;
1579
1580 cd = config_cfdriver_lookup(cf->cf_name);
1581 if (cd == NULL)
1582 return NULL;
1583
1584 ca = config_cfattach_lookup_cd(cd, cf->cf_atname);
1585 if (ca == NULL)
1586 return NULL;
1587
1588 /* get memory for all device vars */
1589 KASSERT(ca->ca_flags & DVF_PRIV_ALLOC);
1590 if (ca->ca_devsize > 0) {
1591 dev_private = kmem_zalloc(ca->ca_devsize, KM_SLEEP);
1592 } else {
1593 dev_private = NULL;
1594 }
1595 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
1596
1597 dev->dv_handle = args->devhandle;
1598
1599 dev->dv_class = cd->cd_class;
1600 dev->dv_cfdata = cf;
1601 dev->dv_cfdriver = cd;
1602 dev->dv_cfattach = ca;
1603 dev->dv_activity_count = 0;
1604 dev->dv_activity_handlers = NULL;
1605 dev->dv_private = dev_private;
1606 dev->dv_flags = ca->ca_flags; /* inherit flags from class */
1607 dev->dv_attaching = curlwp;
1608
1609 myunit = config_unit_alloc(dev, cd, cf);
1610 if (myunit == -1) {
1611 config_devfree(dev);
1612 return NULL;
1613 }
1614
1615 /* compute length of name and decimal expansion of unit number */
1616 lname = strlen(cd->cd_name);
1617 xunit = number(&num[sizeof(num)], myunit);
1618 lunit = &num[sizeof(num)] - xunit;
1619 if (lname + lunit > sizeof(dev->dv_xname))
1620 panic("config_devalloc: device name too long");
1621
1622 dvl = device_getlock(dev);
1623
1624 mutex_init(&dvl->dvl_mtx, MUTEX_DEFAULT, IPL_NONE);
1625 cv_init(&dvl->dvl_cv, "pmfsusp");
1626
1627 memcpy(dev->dv_xname, cd->cd_name, lname);
1628 memcpy(dev->dv_xname + lname, xunit, lunit);
1629 dev->dv_parent = parent;
1630 if (parent != NULL)
1631 dev->dv_depth = parent->dv_depth + 1;
1632 else
1633 dev->dv_depth = 0;
1634 dev->dv_flags |= DVF_ACTIVE; /* always initially active */
1635 if (args->locators) {
1636 KASSERT(parent); /* no locators at root */
1637 ia = cfiattr_lookup(cfdata_ifattr(cf), parent->dv_cfdriver);
1638 dev->dv_locators =
1639 kmem_alloc(sizeof(int) * (ia->ci_loclen + 1), KM_SLEEP);
1640 *dev->dv_locators++ = sizeof(int) * (ia->ci_loclen + 1);
1641 memcpy(dev->dv_locators, args->locators,
1642 sizeof(int) * ia->ci_loclen);
1643 }
1644 dev->dv_properties = prop_dictionary_create();
1645 KASSERT(dev->dv_properties != NULL);
1646
1647 prop_dictionary_set_string_nocopy(dev->dv_properties,
1648 "device-driver", dev->dv_cfdriver->cd_name);
1649 prop_dictionary_set_uint16(dev->dv_properties,
1650 "device-unit", dev->dv_unit);
1651 if (parent != NULL) {
1652 prop_dictionary_set_string(dev->dv_properties,
1653 "device-parent", device_xname(parent));
1654 }
1655
1656 dev->dv_localcount = kmem_zalloc(sizeof(*dev->dv_localcount),
1657 KM_SLEEP);
1658 localcount_init(dev->dv_localcount);
1659
1660 if (dev->dv_cfdriver->cd_attrs != NULL)
1661 config_add_attrib_dict(dev);
1662
1663 return dev;
1664 }
1665
1666 /*
1667 * Create an array of device attach attributes and add it
1668 * to the device's dv_properties dictionary.
1669 *
1670 * <key>interface-attributes</key>
1671 * <array>
1672 * <dict>
1673 * <key>attribute-name</key>
1674 * <string>foo</string>
1675 * <key>locators</key>
1676 * <array>
1677 * <dict>
1678 * <key>loc-name</key>
1679 * <string>foo-loc1</string>
1680 * </dict>
1681 * <dict>
1682 * <key>loc-name</key>
1683 * <string>foo-loc2</string>
1684 * <key>default</key>
1685 * <string>foo-loc2-default</string>
1686 * </dict>
1687 * ...
1688 * </array>
1689 * </dict>
1690 * ...
1691 * </array>
1692 */
1693
1694 static void
1695 config_add_attrib_dict(device_t dev)
1696 {
1697 int i, j;
1698 const struct cfiattrdata *ci;
1699 prop_dictionary_t attr_dict, loc_dict;
1700 prop_array_t attr_array, loc_array;
1701
1702 if ((attr_array = prop_array_create()) == NULL)
1703 return;
1704
1705 for (i = 0; ; i++) {
1706 if ((ci = dev->dv_cfdriver->cd_attrs[i]) == NULL)
1707 break;
1708 if ((attr_dict = prop_dictionary_create()) == NULL)
1709 break;
1710 prop_dictionary_set_string_nocopy(attr_dict, "attribute-name",
1711 ci->ci_name);
1712
1713 /* Create an array of the locator names and defaults */
1714
1715 if (ci->ci_loclen != 0 &&
1716 (loc_array = prop_array_create()) != NULL) {
1717 for (j = 0; j < ci->ci_loclen; j++) {
1718 loc_dict = prop_dictionary_create();
1719 if (loc_dict == NULL)
1720 continue;
1721 prop_dictionary_set_string_nocopy(loc_dict,
1722 "loc-name", ci->ci_locdesc[j].cld_name);
1723 if (ci->ci_locdesc[j].cld_defaultstr != NULL)
1724 prop_dictionary_set_string_nocopy(
1725 loc_dict, "default",
1726 ci->ci_locdesc[j].cld_defaultstr);
1727 prop_array_set(loc_array, j, loc_dict);
1728 prop_object_release(loc_dict);
1729 }
1730 prop_dictionary_set_and_rel(attr_dict, "locators",
1731 loc_array);
1732 }
1733 prop_array_add(attr_array, attr_dict);
1734 prop_object_release(attr_dict);
1735 }
1736 if (i == 0)
1737 prop_object_release(attr_array);
1738 else
1739 prop_dictionary_set_and_rel(dev->dv_properties,
1740 "interface-attributes", attr_array);
1741
1742 return;
1743 }
1744
1745 /*
1746 * Attach a found device.
1747 *
1748 * Returns the device referenced, to be released with device_release.
1749 */
1750 static device_t
1751 config_attach_internal(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1752 const struct cfargs_internal * const args)
1753 {
1754 device_t dev;
1755 struct cftable *ct;
1756 const char *drvname;
1757 bool deferred;
1758
1759 KASSERT(KERNEL_LOCKED_P());
1760
1761 dev = config_devalloc(parent, cf, args);
1762 if (!dev)
1763 panic("config_attach: allocation of device softc failed");
1764
1765 /* XXX redundant - see below? */
1766 if (cf->cf_fstate != FSTATE_STAR) {
1767 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1768 cf->cf_fstate = FSTATE_FOUND;
1769 }
1770
1771 config_devlink(dev);
1772
1773 if (config_do_twiddle && cold)
1774 twiddle();
1775 else
1776 aprint_naive("Found ");
1777 /*
1778 * We want the next two printfs for normal, verbose, and quiet,
1779 * but not silent (in which case, we're twiddling, instead).
1780 */
1781 if (parent == ROOT) {
1782 aprint_naive("%s (root)", device_xname(dev));
1783 aprint_normal("%s (root)", device_xname(dev));
1784 } else {
1785 aprint_naive("%s at %s", device_xname(dev),
1786 device_xname(parent));
1787 aprint_normal("%s at %s", device_xname(dev),
1788 device_xname(parent));
1789 if (print)
1790 (void) (*print)(aux, NULL);
1791 }
1792
1793 /*
1794 * Before attaching, clobber any unfound devices that are
1795 * otherwise identical.
1796 * XXX code above is redundant?
1797 */
1798 drvname = dev->dv_cfdriver->cd_name;
1799 TAILQ_FOREACH(ct, &allcftables, ct_list) {
1800 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
1801 if (STREQ(cf->cf_name, drvname) &&
1802 cf->cf_unit == dev->dv_unit) {
1803 if (cf->cf_fstate == FSTATE_NOTFOUND)
1804 cf->cf_fstate = FSTATE_FOUND;
1805 }
1806 }
1807 }
1808 device_register(dev, aux);
1809
1810 /* Let userland know */
1811 devmon_report_device(dev, true);
1812
1813 /*
1814 * Prevent detach until the driver's attach function, and all
1815 * deferred actions, have finished.
1816 */
1817 config_pending_incr(dev);
1818
1819 /*
1820 * Prevent concurrent detach from destroying the device_t until
1821 * the caller has released the device.
1822 */
1823 device_acquire(dev);
1824
1825 /* Call the driver's attach function. */
1826 (*dev->dv_cfattach->ca_attach)(parent, dev, aux);
1827
1828 /*
1829 * Allow other threads to acquire references to the device now
1830 * that the driver's attach function is done.
1831 */
1832 mutex_enter(&config_misc_lock);
1833 KASSERT(dev->dv_attaching == curlwp);
1834 dev->dv_attaching = NULL;
1835 cv_broadcast(&config_misc_cv);
1836 mutex_exit(&config_misc_lock);
1837
1838 /*
1839 * Synchronous parts of attach are done. Allow detach, unless
1840 * the driver's attach function scheduled deferred actions.
1841 */
1842 config_pending_decr(dev);
1843
1844 mutex_enter(&config_misc_lock);
1845 deferred = (dev->dv_pending != 0);
1846 mutex_exit(&config_misc_lock);
1847
1848 if (!deferred && !device_pmf_is_registered(dev))
1849 aprint_debug_dev(dev,
1850 "WARNING: power management not supported\n");
1851
1852 config_process_deferred(&deferred_config_queue, dev);
1853
1854 device_register_post_config(dev, aux);
1855 rnd_add_uint32(&rnd_autoconf_source, 0);
1856 return dev;
1857 }
1858
1859 device_t
1860 config_attach_acquire(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1861 const struct cfargs *cfargs)
1862 {
1863 struct cfargs_internal store;
1864 device_t dev;
1865
1866 KERNEL_LOCK(1, NULL);
1867 dev = config_attach_internal(parent, cf, aux, print,
1868 cfargs_canonicalize(cfargs, &store));
1869 KERNEL_UNLOCK_ONE(NULL);
1870
1871 return dev;
1872 }
1873
1874 /*
1875 * config_attach(parent, cf, aux, print, cfargs)
1876 *
1877 * Legacy entry point for callers whose use of the returned
1878 * device_t is not delimited by device_release.
1879 *
1880 * The caller is required to hold the kernel lock as a fragile
1881 * defence against races.
1882 *
1883 * Callers should ignore the return value or be converted to
1884 * config_attach_acquire with a matching device_release once they
1885 * have finished with the returned device_t.
1886 */
1887 device_t
1888 config_attach(device_t parent, cfdata_t cf, void *aux, cfprint_t print,
1889 const struct cfargs *cfargs)
1890 {
1891 device_t dev;
1892
1893 KASSERT(KERNEL_LOCKED_P());
1894
1895 dev = config_attach_acquire(parent, cf, aux, print, cfargs);
1896 if (dev == NULL)
1897 return NULL;
1898 device_release(dev);
1899
1900 return dev;
1901 }
1902
1903 /*
1904 * As above, but for pseudo-devices. Pseudo-devices attached in this
1905 * way are silently inserted into the device tree, and their children
1906 * attached.
1907 *
1908 * Note that because pseudo-devices are attached silently, any information
1909 * the attach routine wishes to print should be prefixed with the device
1910 * name by the attach routine.
1911 */
1912 device_t
1913 config_attach_pseudo_acquire(cfdata_t cf, void *aux)
1914 {
1915 device_t dev;
1916
1917 KERNEL_LOCK(1, NULL);
1918
1919 struct cfargs_internal args = { };
1920 dev = config_devalloc(ROOT, cf, &args);
1921 if (!dev)
1922 goto out;
1923
1924 /* XXX mark busy in cfdata */
1925
1926 if (cf->cf_fstate != FSTATE_STAR) {
1927 KASSERT(cf->cf_fstate == FSTATE_NOTFOUND);
1928 cf->cf_fstate = FSTATE_FOUND;
1929 }
1930
1931 config_devlink(dev);
1932
1933 #if 0 /* XXXJRT not yet */
1934 device_register(dev, NULL); /* like a root node */
1935 #endif
1936
1937 /* Let userland know */
1938 devmon_report_device(dev, true);
1939
1940 /*
1941 * Prevent detach until the driver's attach function, and all
1942 * deferred actions, have finished.
1943 */
1944 config_pending_incr(dev);
1945
1946 /*
1947 * Prevent concurrent detach from destroying the device_t until
1948 * the caller has released the device.
1949 */
1950 device_acquire(dev);
1951
1952 /* Call the driver's attach function. */
1953 (*dev->dv_cfattach->ca_attach)(ROOT, dev, aux);
1954
1955 /*
1956 * Allow other threads to acquire references to the device now
1957 * that the driver's attach function is done.
1958 */
1959 mutex_enter(&config_misc_lock);
1960 KASSERT(dev->dv_attaching == curlwp);
1961 dev->dv_attaching = NULL;
1962 cv_broadcast(&config_misc_cv);
1963 mutex_exit(&config_misc_lock);
1964
1965 /*
1966 * Synchronous parts of attach are done. Allow detach, unless
1967 * the driver's attach function scheduled deferred actions.
1968 */
1969 config_pending_decr(dev);
1970
1971 config_process_deferred(&deferred_config_queue, dev);
1972
1973 out: KERNEL_UNLOCK_ONE(NULL);
1974 return dev;
1975 }
1976
1977 /*
1978 * config_attach_pseudo(cf)
1979 *
1980 * Legacy entry point for callers whose use of the returned
1981 * device_t is not delimited by device_release.
1982 *
1983 * The caller is required to hold the kernel lock as a fragile
1984 * defence against races.
1985 *
1986 * Callers should ignore the return value or be converted to
1987 * config_attach_pseudo_acquire with a matching device_release
1988 * once they have finished with the returned device_t. As a
1989 * bonus, config_attach_pseudo_acquire can pass a non-null aux
1990 * argument into the driver's attach routine.
1991 */
1992 device_t
1993 config_attach_pseudo(cfdata_t cf)
1994 {
1995 device_t dev;
1996
1997 dev = config_attach_pseudo_acquire(cf, NULL);
1998 if (dev == NULL)
1999 return dev;
2000 device_release(dev);
2001
2002 return dev;
2003 }
2004
2005 /*
2006 * Caller must hold alldevs_lock.
2007 */
2008 static void
2009 config_collect_garbage(struct devicelist *garbage)
2010 {
2011 device_t dv;
2012
2013 KASSERT(!cpu_intr_p());
2014 KASSERT(!cpu_softintr_p());
2015 KASSERT(mutex_owned(&alldevs_lock));
2016
2017 while (alldevs_nwrite == 0 && alldevs_nread == 0 && alldevs_garbage) {
2018 TAILQ_FOREACH(dv, &alldevs, dv_list) {
2019 if (dv->dv_del_gen != 0)
2020 break;
2021 }
2022 if (dv == NULL) {
2023 alldevs_garbage = false;
2024 break;
2025 }
2026 config_devunlink(dv, garbage);
2027 }
2028 KASSERT(mutex_owned(&alldevs_lock));
2029 }
2030
2031 static void
2032 config_dump_garbage(struct devicelist *garbage)
2033 {
2034 device_t dv;
2035
2036 while ((dv = TAILQ_FIRST(garbage)) != NULL) {
2037 TAILQ_REMOVE(garbage, dv, dv_list);
2038 config_devdelete(dv);
2039 }
2040 }
2041
2042 static int
2043 config_detach_enter(device_t dev)
2044 {
2045 struct lwp *l __diagused;
2046 int error = 0;
2047
2048 mutex_enter(&config_misc_lock);
2049
2050 /*
2051 * Wait until attach has fully completed, and until any
2052 * concurrent detach (e.g., drvctl racing with USB event
2053 * thread) has completed.
2054 *
2055 * Caller must hold alldevs_nread or alldevs_nwrite (e.g., via
2056 * deviter) to ensure the winner of the race doesn't free the
2057 * device leading the loser of the race into use-after-free.
2058 *
2059 * XXX Not all callers do this!
2060 */
2061 while (dev->dv_pending || dev->dv_detaching) {
2062 KASSERTMSG(dev->dv_detaching != curlwp,
2063 "recursively detaching %s", device_xname(dev));
2064 error = cv_wait_sig(&config_misc_cv, &config_misc_lock);
2065 if (error)
2066 goto out;
2067 }
2068
2069 /*
2070 * Attach has completed, and no other concurrent detach is
2071 * running. Claim the device for detaching. This will cause
2072 * all new attempts to acquire references to block.
2073 */
2074 KASSERTMSG((l = dev->dv_attaching) == NULL,
2075 "lwp %ld [%s] @ %p attaching %s",
2076 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2077 device_xname(dev));
2078 KASSERTMSG((l = dev->dv_detaching) == NULL,
2079 "lwp %ld [%s] @ %p detaching %s",
2080 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2081 device_xname(dev));
2082 dev->dv_detaching = curlwp;
2083
2084 out: mutex_exit(&config_misc_lock);
2085 return error;
2086 }
2087
2088 static void
2089 config_detach_exit(device_t dev)
2090 {
2091 struct lwp *l __diagused;
2092
2093 mutex_enter(&config_misc_lock);
2094 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
2095 device_xname(dev));
2096 KASSERTMSG((l = dev->dv_detaching) == curlwp,
2097 "lwp %ld [%s] @ %p detaching %s",
2098 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2099 device_xname(dev));
2100 dev->dv_detaching = NULL;
2101 cv_broadcast(&config_misc_cv);
2102 mutex_exit(&config_misc_lock);
2103 }
2104
2105 /*
2106 * Detach a device. Optionally forced (e.g. because of hardware
2107 * removal) and quiet. Returns zero if successful, non-zero
2108 * (an error code) otherwise.
2109 *
2110 * Note that this code wants to be run from a process context, so
2111 * that the detach can sleep to allow processes which have a device
2112 * open to run and unwind their stacks.
2113 *
2114 * Caller must hold a reference with device_acquire or
2115 * device_lookup_acquire.
2116 */
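/*
 * Illustrative sketch: detach a unit found by driver and unit number.
 * config_detach_release consumes the reference taken by
 * device_lookup_acquire; `foo_cd' is a hypothetical cfdriver.
 *
 *    device_t dev;
 *    int error;
 *
 *    dev = device_lookup_acquire(&foo_cd, unit);
 *    if (dev == NULL)
 *        return ENXIO;
 *    error = config_detach_release(dev, 0);
 */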
2117 int
2118 config_detach_release(device_t dev, int flags)
2119 {
2120 struct alldevs_foray af;
2121 struct cftable *ct;
2122 cfdata_t cf;
2123 const struct cfattach *ca;
2124 struct cfdriver *cd;
2125 device_t d __diagused;
2126 int rv = 0;
2127
2128 KERNEL_LOCK(1, NULL);
2129
2130 cf = dev->dv_cfdata;
2131 KASSERTMSG((cf == NULL || cf->cf_fstate == FSTATE_FOUND ||
2132 cf->cf_fstate == FSTATE_STAR),
2133 "config_detach: %s: bad device fstate: %d",
2134 device_xname(dev), cf ? cf->cf_fstate : -1);
2135
2136 cd = dev->dv_cfdriver;
2137 KASSERT(cd != NULL);
2138
2139 ca = dev->dv_cfattach;
2140 KASSERT(ca != NULL);
2141
2142 /*
2143 * Only one detach at a time, please -- and not until fully
2144 * attached.
2145 */
2146 rv = config_detach_enter(dev);
2147 device_release(dev);
2148 if (rv) {
2149 KERNEL_UNLOCK_ONE(NULL);
2150 return rv;
2151 }
2152
2153 mutex_enter(&alldevs_lock);
2154 if (dev->dv_del_gen != 0) {
2155 mutex_exit(&alldevs_lock);
2156 #ifdef DIAGNOSTIC
2157 printf("%s: %s is already detached\n", __func__,
2158 device_xname(dev));
2159 #endif /* DIAGNOSTIC */
2160 config_detach_exit(dev);
2161 KERNEL_UNLOCK_ONE(NULL);
2162 return ENOENT;
2163 }
2164 alldevs_nwrite++;
2165 mutex_exit(&alldevs_lock);
2166
2167 /*
2168 * Call the driver's .ca_detach function, unless it has none or
2169 * we are skipping it because it's unforced shutdown time and
2170 * the driver didn't ask to detach on shutdown.
2171 */
2172 if (!detachall &&
2173 (flags & (DETACH_SHUTDOWN|DETACH_FORCE)) == DETACH_SHUTDOWN &&
2174 (dev->dv_flags & DVF_DETACH_SHUTDOWN) == 0) {
2175 rv = EOPNOTSUPP;
2176 } else if (ca->ca_detach != NULL) {
2177 rv = (*ca->ca_detach)(dev, flags);
2178 } else
2179 rv = EOPNOTSUPP;
2180
2181 KASSERTMSG(!dev->dv_detach_done, "%s detached twice, error=%d",
2182 device_xname(dev), rv);
2183
2184 /*
2185 * If it was not possible to detach the device, then we either
2186 * panic() (for the forced but failed case), or return an error.
2187 */
2188 if (rv) {
2189 /*
2190 * Detach failed -- likely EOPNOTSUPP or EBUSY. Driver
2191 * must not have called config_detach_commit.
2192 */
2193 KASSERTMSG(!dev->dv_detach_committed,
2194 "%s committed to detaching and then backed out, error=%d",
2195 device_xname(dev), rv);
2196 if (flags & DETACH_FORCE) {
2197 panic("config_detach: forced detach of %s failed (%d)",
2198 device_xname(dev), rv);
2199 }
2200 goto out;
2201 }
2202
2203 /*
2204 * The device has now been successfully detached.
2205 */
2206 dev->dv_detach_done = true;
2207
2208 /*
2209 * If .ca_detach didn't commit to detach, then do that for it.
2210 * This wakes any pending device_lookup_acquire calls so they
2211 * will fail.
2212 */
2213 config_detach_commit(dev);
2214
2215 /*
2216 * If it was possible to detach the device, ensure that the
2217 * device is deactivated.
2218 */
2219 dev->dv_flags &= ~DVF_ACTIVE; /* XXXSMP */
2220
2221 /*
2222 * Wait for all device_lookup_acquire references -- mostly, for
2223 * all attempts to open the device -- to drain. It is the
2224 * responsibility of .ca_detach to ensure anything with open
2225 * references will be interrupted and release them promptly,
2226 * not block indefinitely. All new attempts to acquire
2227 * references will fail, as config_detach_commit has arranged
2228 * by now.
2229 */
2230 mutex_enter(&config_misc_lock);
2231 localcount_drain(dev->dv_localcount,
2232 &config_misc_cv, &config_misc_lock);
2233 mutex_exit(&config_misc_lock);
2234
2235 /* Let userland know */
2236 devmon_report_device(dev, false);
2237
2238 #ifdef DIAGNOSTIC
2239 /*
2240 * Sanity: If you're successfully detached, you should have no
2241 * children. (Note that because children must be attached
2242 * after parents, we only need to search the latter part of
2243 * the list.)
2244 */
2245 mutex_enter(&alldevs_lock);
2246 for (d = TAILQ_NEXT(dev, dv_list); d != NULL;
2247 d = TAILQ_NEXT(d, dv_list)) {
2248 if (d->dv_parent == dev && d->dv_del_gen == 0) {
2249 printf("config_detach: detached device %s"
2250 " has children %s\n", device_xname(dev),
2251 device_xname(d));
2252 panic("config_detach");
2253 }
2254 }
2255 mutex_exit(&alldevs_lock);
2256 #endif
2257
2258 /* notify the parent that the child is gone */
2259 if (dev->dv_parent) {
2260 device_t p = dev->dv_parent;
2261 if (p->dv_cfattach->ca_childdetached)
2262 (*p->dv_cfattach->ca_childdetached)(p, dev);
2263 }
2264
2265 /*
2266 * Mark cfdata to show that the unit can be reused, if possible.
2267 */
2268 TAILQ_FOREACH(ct, &allcftables, ct_list) {
2269 for (cf = ct->ct_cfdata; cf->cf_name; cf++) {
2270 if (STREQ(cf->cf_name, cd->cd_name)) {
2271 if (cf->cf_fstate == FSTATE_FOUND &&
2272 cf->cf_unit == dev->dv_unit)
2273 cf->cf_fstate = FSTATE_NOTFOUND;
2274 }
2275 }
2276 }
2277
2278 if (dev->dv_cfdata != NULL && (flags & DETACH_QUIET) == 0)
2279 aprint_normal_dev(dev, "detached\n");
2280
2281 out:
2282 config_detach_exit(dev);
2283
2284 config_alldevs_enter(&af);
2285 KASSERT(alldevs_nwrite != 0);
2286 --alldevs_nwrite;
2287 if (rv == 0 && dev->dv_del_gen == 0) {
2288 if (alldevs_nwrite == 0 && alldevs_nread == 0)
2289 config_devunlink(dev, &af.af_garbage);
2290 else {
2291 dev->dv_del_gen = alldevs_gen;
2292 alldevs_garbage = true;
2293 }
2294 }
2295 config_alldevs_exit(&af);
2296
2297 KERNEL_UNLOCK_ONE(NULL);
2298
2299 return rv;
2300 }
2301
2302 /*
2303 * config_detach(dev, flags)
2304 *
2305 * Legacy entry point for callers that have not acquired a
2306 * reference to dev.
2307 *
2308 * The caller is required to hold the kernel lock as a fragile
2309 * defence against races.
2310 *
2311 * Callers should be converted to use device_acquire under a lock
2312 * taken also by .ca_childdetached to synchronize access to the
2313 * device_t, and then config_detach_release outside the lock.
2314 * Alternatively, most drivers detach children only in their own
2315 * detach routines, which can be done with config_detach_children
2316 * instead.
2317 */
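/*
 * Illustrative sketch of that conversion for a parent that tracks its
 * child in `sc_child', synchronized with .ca_childdetached by
 * `sc_lock' (both hypothetical):
 *
 *    mutex_enter(&sc->sc_lock);
 *    if ((dev = sc->sc_child) != NULL)
 *        device_acquire(dev);
 *    mutex_exit(&sc->sc_lock);
 *
 *    if (dev != NULL)
 *        error = config_detach_release(dev, flags);
 */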
2318 int
2319 config_detach(device_t dev, int flags)
2320 {
2321
2322 device_acquire(dev);
2323 return config_detach_release(dev, flags);
2324 }
2325
2326 /*
2327 * config_detach_commit(dev)
2328 *
2329 * Issued by a driver's .ca_detach routine to notify anyone
2330 * waiting in device_lookup_acquire that the driver is committed
2331 * to detaching the device, which allows device_lookup_acquire to
2332 * wake up and fail immediately.
2333 *
2334 * Safe to call multiple times -- idempotent. Must be called
2335 * during config_detach_enter/exit. Safe to use with
2336 * device_lookup because the device is not actually removed from
2337 * the table until after config_detach_exit.
2338 */
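/*
 * Illustrative sketch of a .ca_detach routine that commits once it can
 * no longer back out; `foo_detach', `foo_softc', and `foo_quiesce' are
 * hypothetical.
 *
 *    static int
 *    foo_detach(device_t self, int flags)
 *    {
 *        struct foo_softc *sc = device_private(self);
 *        int error;
 *
 *        if ((error = foo_quiesce(sc)) != 0)
 *            return error;
 *        config_detach_commit(self);
 *        return 0;
 *    }
 *
 * Returning nonzero before the commit backs the detach out; after the
 * commit the driver must revoke open instances and succeed.
 */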
2339 void
2340 config_detach_commit(device_t dev)
2341 {
2342 struct lwp *l __diagused;
2343
2344 mutex_enter(&config_misc_lock);
2345 KASSERTMSG(dev->dv_detaching != NULL, "not detaching %s",
2346 device_xname(dev));
2347 KASSERTMSG((l = dev->dv_detaching) == curlwp,
2348 "lwp %ld [%s] @ %p detaching %s",
2349 (long)l->l_lid, (l->l_name ? l->l_name : l->l_proc->p_comm), l,
2350 device_xname(dev));
2351 dev->dv_detach_committed = true;
2352 cv_broadcast(&config_misc_cv);
2353 mutex_exit(&config_misc_lock);
2354 }
2355
2356 int
2357 config_detach_children(device_t parent, int flags)
2358 {
2359 device_t dv;
2360 deviter_t di;
2361 int error = 0;
2362
2363 KASSERT(KERNEL_LOCKED_P());
2364
2365 for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
2366 dv = deviter_next(&di)) {
2367 if (device_parent(dv) != parent)
2368 continue;
2369 if ((error = config_detach(dv, flags)) != 0)
2370 break;
2371 }
2372 deviter_release(&di);
2373 return error;
2374 }
2375
2376 device_t
2377 shutdown_first(struct shutdown_state *s)
2378 {
2379 if (!s->initialized) {
2380 deviter_init(&s->di, DEVITER_F_SHUTDOWN|DEVITER_F_LEAVES_FIRST);
2381 s->initialized = true;
2382 }
2383 return shutdown_next(s);
2384 }
2385
2386 device_t
2387 shutdown_next(struct shutdown_state *s)
2388 {
2389 device_t dv;
2390
2391 while ((dv = deviter_next(&s->di)) != NULL && !device_is_active(dv))
2392 ;
2393
2394 if (dv == NULL)
2395 s->initialized = false;
2396
2397 return dv;
2398 }
2399
2400 bool
2401 config_detach_all(int how)
2402 {
2403 static struct shutdown_state s;
2404 device_t curdev;
2405 bool progress = false;
2406 int flags;
2407
2408 KERNEL_LOCK(1, NULL);
2409
2410 if ((how & (RB_NOSYNC|RB_DUMP)) != 0)
2411 goto out;
2412
2413 if ((how & RB_POWERDOWN) == RB_POWERDOWN)
2414 flags = DETACH_SHUTDOWN | DETACH_POWEROFF;
2415 else
2416 flags = DETACH_SHUTDOWN;
2417
2418 for (curdev = shutdown_first(&s); curdev != NULL;
2419 curdev = shutdown_next(&s)) {
2420 aprint_debug(" detaching %s, ", device_xname(curdev));
2421 if (config_detach(curdev, flags) == 0) {
2422 progress = true;
2423 aprint_debug("success.");
2424 } else
2425 aprint_debug("failed.");
2426 }
2427
2428 out: KERNEL_UNLOCK_ONE(NULL);
2429 return progress;
2430 }
2431
2432 static bool
2433 device_is_ancestor_of(device_t ancestor, device_t descendant)
2434 {
2435 device_t dv;
2436
2437 for (dv = descendant; dv != NULL; dv = device_parent(dv)) {
2438 if (device_parent(dv) == ancestor)
2439 return true;
2440 }
2441 return false;
2442 }
2443
2444 int
2445 config_deactivate(device_t dev)
2446 {
2447 deviter_t di;
2448 const struct cfattach *ca;
2449 device_t descendant;
2450 int s, rv = 0, oflags;
2451
2452 for (descendant = deviter_first(&di, DEVITER_F_ROOT_FIRST);
2453 descendant != NULL;
2454 descendant = deviter_next(&di)) {
2455 if (dev != descendant &&
2456 !device_is_ancestor_of(dev, descendant))
2457 continue;
2458
2459 if ((descendant->dv_flags & DVF_ACTIVE) == 0)
2460 continue;
2461
2462 ca = descendant->dv_cfattach;
2463 oflags = descendant->dv_flags;
2464
2465 descendant->dv_flags &= ~DVF_ACTIVE;
2466 if (ca->ca_activate == NULL)
2467 continue;
2468 s = splhigh();
2469 rv = (*ca->ca_activate)(descendant, DVACT_DEACTIVATE);
2470 splx(s);
2471 if (rv != 0)
2472 descendant->dv_flags = oflags;
2473 }
2474 deviter_release(&di);
2475 return rv;
2476 }
2477
2478 /*
2479 * Defer the configuration of the specified device until all
2480 * of its parent's devices have been attached.
2481 */
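/*
 * Illustrative sketch: a child driver defers the part of its setup
 * that needs its siblings to exist; `foo_attach' and `foo_deferred'
 * are hypothetical.
 *
 *    static void
 *    foo_attach(device_t parent, device_t self, void *aux)
 *    {
 *        ... order-independent setup ...
 *        config_defer(self, foo_deferred);
 *    }
 *
 *    static void
 *    foo_deferred(device_t self)
 *    {
 *        ... setup that needs the parent's other children ...
 *    }
 */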
2482 void
2483 config_defer(device_t dev, void (*func)(device_t))
2484 {
2485 struct deferred_config *dc;
2486
2487 if (dev->dv_parent == NULL)
2488 panic("config_defer: can't defer config of a root device");
2489
2490 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2491
2492 config_pending_incr(dev);
2493
2494 mutex_enter(&config_misc_lock);
2495 #ifdef DIAGNOSTIC
2496 struct deferred_config *odc;
2497 TAILQ_FOREACH(odc, &deferred_config_queue, dc_queue) {
2498 if (odc->dc_dev == dev)
2499 panic("config_defer: deferred twice");
2500 }
2501 #endif
2502 dc->dc_dev = dev;
2503 dc->dc_func = func;
2504 TAILQ_INSERT_TAIL(&deferred_config_queue, dc, dc_queue);
2505 mutex_exit(&config_misc_lock);
2506 }
2507
2508 /*
2509 * Defer some autoconfiguration for a device until after interrupts
2510 * are enabled.
2511 */
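/*
 * Illustrative sketch: work that needs working interrupts, such as
 * firmware download, is scheduled from the driver's attach routine;
 * `foo_attach_intr' is hypothetical.
 *
 *    config_interrupts(self, foo_attach_intr);
 */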
2512 void
2513 config_interrupts(device_t dev, void (*func)(device_t))
2514 {
2515 struct deferred_config *dc;
2516
2517 /*
2518 * If interrupts are enabled, callback now.
2519 */
2520 if (cold == 0) {
2521 (*func)(dev);
2522 return;
2523 }
2524
2525 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2526
2527 config_pending_incr(dev);
2528
2529 mutex_enter(&config_misc_lock);
2530 #ifdef DIAGNOSTIC
2531 struct deferred_config *odc;
2532 TAILQ_FOREACH(odc, &interrupt_config_queue, dc_queue) {
2533 if (odc->dc_dev == dev)
2534 panic("config_interrupts: deferred twice");
2535 }
2536 #endif
2537 dc->dc_dev = dev;
2538 dc->dc_func = func;
2539 TAILQ_INSERT_TAIL(&interrupt_config_queue, dc, dc_queue);
2540 mutex_exit(&config_misc_lock);
2541 }
2542
2543 /*
2544 * Defer some autoconfiguration for a device until after the root file
2545 * system is mounted (to load firmware etc).
2546 */
2547 void
2548 config_mountroot(device_t dev, void (*func)(device_t))
2549 {
2550 struct deferred_config *dc;
2551
2552 /*
2553 * If root file system is mounted, callback now.
2554 */
2555 if (root_is_mounted) {
2556 (*func)(dev);
2557 return;
2558 }
2559
2560 dc = kmem_alloc(sizeof(*dc), KM_SLEEP);
2561
2562 mutex_enter(&config_misc_lock);
2563 #ifdef DIAGNOSTIC
2564 struct deferred_config *odc;
2565 TAILQ_FOREACH(odc, &mountroot_config_queue, dc_queue) {
2566 if (odc->dc_dev == dev)
2567 panic("%s: deferred twice", __func__);
2568 }
2569 #endif
2570
2571 dc->dc_dev = dev;
2572 dc->dc_func = func;
2573 TAILQ_INSERT_TAIL(&mountroot_config_queue, dc, dc_queue);
2574 mutex_exit(&config_misc_lock);
2575 }
2576
2577 /*
2578 * Process a deferred configuration queue.
2579 */
2580 static void
2581 config_process_deferred(struct deferred_config_head *queue, device_t parent)
2582 {
2583 struct deferred_config *dc;
2584
2585 KASSERT(KERNEL_LOCKED_P());
2586
2587 mutex_enter(&config_misc_lock);
2588 dc = TAILQ_FIRST(queue);
2589 while (dc) {
2590 if (parent == NULL || dc->dc_dev->dv_parent == parent) {
2591 TAILQ_REMOVE(queue, dc, dc_queue);
2592 mutex_exit(&config_misc_lock);
2593
2594 (*dc->dc_func)(dc->dc_dev);
2595 config_pending_decr(dc->dc_dev);
2596 kmem_free(dc, sizeof(*dc));
2597
2598 mutex_enter(&config_misc_lock);
2599 /* Restart, queue might have changed */
2600 dc = TAILQ_FIRST(queue);
2601 } else {
2602 dc = TAILQ_NEXT(dc, dc_queue);
2603 }
2604 }
2605 mutex_exit(&config_misc_lock);
2606 }
2607
2608 /*
2609 * Manipulate the config_pending semaphore.
2610 */
2611 void
2612 config_pending_incr(device_t dev)
2613 {
2614
2615 mutex_enter(&config_misc_lock);
2616 KASSERTMSG(dev->dv_pending < INT_MAX,
2617 "%s: excess config_pending_incr", device_xname(dev));
2618 if (dev->dv_pending++ == 0)
2619 TAILQ_INSERT_TAIL(&config_pending, dev, dv_pending_list);
2620 #ifdef DEBUG_AUTOCONF
2621 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2622 #endif
2623 mutex_exit(&config_misc_lock);
2624 }
2625
2626 void
2627 config_pending_decr(device_t dev)
2628 {
2629
2630 mutex_enter(&config_misc_lock);
2631 KASSERTMSG(dev->dv_pending > 0,
2632 "%s: excess config_pending_decr", device_xname(dev));
2633 if (--dev->dv_pending == 0) {
2634 TAILQ_REMOVE(&config_pending, dev, dv_pending_list);
2635 cv_broadcast(&config_misc_cv);
2636 }
2637 #ifdef DEBUG_AUTOCONF
2638 printf("%s: %s %d\n", __func__, device_xname(dev), dev->dv_pending);
2639 #endif
2640 mutex_exit(&config_misc_lock);
2641 }
2642
2643 /*
2644 * Register a "finalization" routine. Finalization routines are
2645 * called iteratively once all real devices have been found during
2646 * autoconfiguration, for as long as any one finalizer has done
2647 * any work.
2648 */
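/*
 * Illustrative sketch: a bus driver registers a finalizer to pick up
 * devices it could not configure on the first pass.  `foo_finalize'
 * is hypothetical and must return nonzero only when it actually did
 * some work, so that the finalize loop terminates.
 *
 *    (void)config_finalize_register(self, foo_finalize);
 */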
2649 int
2650 config_finalize_register(device_t dev, int (*fn)(device_t))
2651 {
2652 struct finalize_hook *f;
2653 int error = 0;
2654
2655 KERNEL_LOCK(1, NULL);
2656
2657 /*
2658 * If finalization has already been done, invoke the
2659 * callback function now.
2660 */
2661 if (config_finalize_done) {
2662 while ((*fn)(dev) != 0)
2663 /* loop */ ;
2664 goto out;
2665 }
2666
2667 /* Ensure this isn't already on the list. */
2668 TAILQ_FOREACH(f, &config_finalize_list, f_list) {
2669 if (f->f_func == fn && f->f_dev == dev) {
2670 error = EEXIST;
2671 goto out;
2672 }
2673 }
2674
2675 f = kmem_alloc(sizeof(*f), KM_SLEEP);
2676 f->f_func = fn;
2677 f->f_dev = dev;
2678 TAILQ_INSERT_TAIL(&config_finalize_list, f, f_list);
2679
2680 /* Success! */
2681 error = 0;
2682
2683 out: KERNEL_UNLOCK_ONE(NULL);
2684 return error;
2685 }
2686
2687 void
2688 config_finalize(void)
2689 {
2690 struct finalize_hook *f;
2691 struct pdevinit *pdev;
2692 extern struct pdevinit pdevinit[];
2693 unsigned t0 = getticks();
2694 int errcnt, rv;
2695
2696 /*
2697 * Now that device driver threads have been created, wait for
2698 * them to finish any deferred autoconfiguration.
2699 */
2700 mutex_enter(&config_misc_lock);
2701 while (!TAILQ_EMPTY(&config_pending)) {
2702 const unsigned t1 = getticks();
2703
2704 if (t1 - t0 >= hz) {
2705 void (*pr)(const char *, ...) __printflike(1,2);
2706 device_t dev;
2707
2708 if (t1 - t0 >= 60*hz) {
2709 pr = aprint_normal;
2710 t0 = t1;
2711 } else {
2712 pr = aprint_debug;
2713 }
2714
2715 (*pr)("waiting for devices:");
2716 TAILQ_FOREACH(dev, &config_pending, dv_pending_list)
2717 (*pr)(" %s", device_xname(dev));
2718 (*pr)("\n");
2719 }
2720
2721 (void)cv_timedwait(&config_misc_cv, &config_misc_lock,
2722 mstohz(1000));
2723 }
2724 mutex_exit(&config_misc_lock);
2725
2726 KERNEL_LOCK(1, NULL);
2727
2728 /* Attach pseudo-devices. */
2729 for (pdev = pdevinit; pdev->pdev_attach != NULL; pdev++)
2730 (*pdev->pdev_attach)(pdev->pdev_count);
2731
2732 /* Run the hooks until none of them does any work. */
2733 do {
2734 rv = 0;
2735 TAILQ_FOREACH(f, &config_finalize_list, f_list)
2736 rv |= (*f->f_func)(f->f_dev);
2737 } while (rv != 0);
2738
2739 config_finalize_done = 1;
2740
2741 /* Now free all the hooks. */
2742 while ((f = TAILQ_FIRST(&config_finalize_list)) != NULL) {
2743 TAILQ_REMOVE(&config_finalize_list, f, f_list);
2744 kmem_free(f, sizeof(*f));
2745 }
2746
2747 KERNEL_UNLOCK_ONE(NULL);
2748
2749 errcnt = aprint_get_error_count();
2750 if ((boothowto & (AB_QUIET|AB_SILENT)) != 0 &&
2751 (boothowto & AB_VERBOSE) == 0) {
2752 mutex_enter(&config_misc_lock);
2753 if (config_do_twiddle) {
2754 config_do_twiddle = 0;
2755 printf_nolog(" done.\n");
2756 }
2757 mutex_exit(&config_misc_lock);
2758 }
2759 if (errcnt != 0) {
2760 printf("WARNING: %d error%s while detecting hardware; "
2761 "check system log.\n", errcnt,
2762 errcnt == 1 ? "" : "s");
2763 }
2764 }
2765
2766 void
2767 config_twiddle_init(void)
2768 {
2769
2770 if ((boothowto & (AB_SILENT|AB_VERBOSE)) == AB_SILENT) {
2771 config_do_twiddle = 1;
2772 }
2773 callout_setfunc(&config_twiddle_ch, config_twiddle_fn, NULL);
2774 }
2775
2776 void
2777 config_twiddle_fn(void *cookie)
2778 {
2779
2780 mutex_enter(&config_misc_lock);
2781 if (config_do_twiddle) {
2782 twiddle();
2783 callout_schedule(&config_twiddle_ch, mstohz(100));
2784 }
2785 mutex_exit(&config_misc_lock);
2786 }
2787
2788 static void
2789 config_alldevs_enter(struct alldevs_foray *af)
2790 {
2791 TAILQ_INIT(&af->af_garbage);
2792 mutex_enter(&alldevs_lock);
2793 config_collect_garbage(&af->af_garbage);
2794 }
2795
2796 static void
2797 config_alldevs_exit(struct alldevs_foray *af)
2798 {
2799 mutex_exit(&alldevs_lock);
2800 config_dump_garbage(&af->af_garbage);
2801 }
2802
2803 /*
2804 * device_lookup:
2805 *
2806 * Look up a device instance for a given driver.
2807 *
2808 * Caller is responsible for ensuring the device's state is
2809 * stable, either by holding a reference already obtained with
2810 * device_lookup_acquire or by otherwise ensuring the device is
2811 * attached and can't be detached (e.g., holding an open device
2812 * node and ensuring *_detach calls vdevgone).
2813 *
2814 * XXX Find a way to assert this.
2815 *
2816 * Safe for use up to and including interrupt context at IPL_VM.
2817 * Never sleeps.
2818 */
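/*
 * Illustrative sketch: a path that already holds the device open can
 * map unit number to softc directly; `foo_cd' is a hypothetical
 * cfdriver.
 *
 *    struct foo_softc *sc = device_lookup_private(&foo_cd, unit);
 *
 *    if (sc == NULL)
 *        return ENXIO;
 */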
2819 device_t
2820 device_lookup(cfdriver_t cd, int unit)
2821 {
2822 device_t dv;
2823
2824 mutex_enter(&alldevs_lock);
2825 if (unit < 0 || unit >= cd->cd_ndevs)
2826 dv = NULL;
2827 else if ((dv = cd->cd_devs[unit]) != NULL && dv->dv_del_gen != 0)
2828 dv = NULL;
2829 mutex_exit(&alldevs_lock);
2830
2831 return dv;
2832 }
2833
2834 /*
2835 * device_lookup_private:
2836 *
2837 * Look up a softc instance for a given driver.
2838 */
2839 void *
2840 device_lookup_private(cfdriver_t cd, int unit)
2841 {
2842
2843 return device_private(device_lookup(cd, unit));
2844 }
2845
2846 /*
2847 * device_lookup_acquire:
2848 *
2849 * Look up a device instance for a given driver, and return a
2850 * reference to it that must be released by device_release.
2851 *
2852 * => If the device is still attaching, blocks until *_attach has
2853 * returned.
2854 *
2855 * => If the device is detaching, blocks until *_detach has
2856 * returned. May succeed or fail in that case, depending on
2857 * whether *_detach has backed out (EBUSY) or committed to
2858 * detaching.
2859 *
2860 * May sleep.
2861 */
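/*
 * Illustrative sketch of the usual open-path pattern, holding the
 * reference across use of the softc; `foo_cd' is a hypothetical
 * cfdriver.
 *
 *    device_t dv;
 *    struct foo_softc *sc;
 *
 *    dv = device_lookup_acquire(&foo_cd, minor(dev));
 *    if (dv == NULL)
 *        return ENXIO;
 *    sc = device_private(dv);
 *    ... use sc ...
 *    device_release(dv);
 */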
2862 device_t
2863 device_lookup_acquire(cfdriver_t cd, int unit)
2864 {
2865 device_t dv;
2866
2867 ASSERT_SLEEPABLE();
2868
2869 /* XXX This should have a pserialized fast path -- TBD. */
2870 mutex_enter(&config_misc_lock);
2871 mutex_enter(&alldevs_lock);
2872 retry: if (unit < 0 || unit >= cd->cd_ndevs ||
2873 (dv = cd->cd_devs[unit]) == NULL ||
2874 dv->dv_del_gen != 0 ||
2875 dv->dv_detach_committed) {
2876 dv = NULL;
2877 } else {
2878 /*
2879 * Wait for the device to stabilize, if attaching or
2880 * detaching. Either way we must wait for *_attach or
2881 * *_detach to complete, and either way we must retry:
2882 * even if detaching, *_detach might fail (EBUSY) so
2883 * the device may still be there.
2884 */
2885 if ((dv->dv_attaching != NULL && dv->dv_attaching != curlwp) ||
2886 dv->dv_detaching != NULL) {
2887 mutex_exit(&alldevs_lock);
2888 cv_wait(&config_misc_cv, &config_misc_lock);
2889 mutex_enter(&alldevs_lock);
2890 goto retry;
2891 }
2892 device_acquire(dv);
2893 }
2894 mutex_exit(&alldevs_lock);
2895 mutex_exit(&config_misc_lock);
2896
2897 return dv;
2898 }
2899
2900 /*
2901 * device_acquire:
2902 *
2903 * Acquire a reference to a device. It is the caller's
2904 * responsibility to ensure that the device's .ca_detach routine
2905 * cannot return before calling this. Caller must release the
2906 * reference with device_release or config_detach_release.
2907 */
2908 void
2909 device_acquire(device_t dv)
2910 {
2911
2912 /*
2913 * No lock because the caller has promised that this can't
2914 * change concurrently with device_acquire.
2915 */
2916 KASSERTMSG(!dv->dv_detach_done, "%s",
2917 dv == NULL ? "(null)" : device_xname(dv));
2918 localcount_acquire(dv->dv_localcount);
2919 }
2920
2921 /*
2922 * device_release:
2923 *
2924 * Release a reference to a device acquired with device_acquire or
2925 * device_lookup_acquire.
2926 */
2927 void
2928 device_release(device_t dv)
2929 {
2930
2931 localcount_release(dv->dv_localcount,
2932 &config_misc_cv, &config_misc_lock);
2933 }
2934
2935 /*
2936 * device_find_by_xname:
2937 *
2938 * Returns the device of the given name or NULL if it doesn't exist.
2939 */
2940 device_t
2941 device_find_by_xname(const char *name)
2942 {
2943 device_t dv;
2944 deviter_t di;
2945
2946 for (dv = deviter_first(&di, 0); dv != NULL; dv = deviter_next(&di)) {
2947 if (strcmp(device_xname(dv), name) == 0)
2948 break;
2949 }
2950 deviter_release(&di);
2951
2952 return dv;
2953 }
2954
2955 /*
2956 * device_find_by_driver_unit:
2957 *
2958 * Returns the device of the given driver name and unit or
2959 * NULL if it doesn't exist.
2960 */
2961 device_t
2962 device_find_by_driver_unit(const char *name, int unit)
2963 {
2964 struct cfdriver *cd;
2965
2966 if ((cd = config_cfdriver_lookup(name)) == NULL)
2967 return NULL;
2968 return device_lookup(cd, unit);
2969 }
2970
2971 static bool
2972 match_strcmp(const char * const s1, const char * const s2)
2973 {
2974 return strcmp(s1, s2) == 0;
2975 }
2976
2977 static bool
2978 match_pmatch(const char * const s1, const char * const s2)
2979 {
2980 return pmatch(s1, s2, NULL) == 2;
2981 }
2982
2983 static bool
2984 strarray_match_internal(const char ** const strings,
2985 unsigned int const nstrings, const char * const str,
2986 unsigned int * const indexp,
2987 bool (*match_fn)(const char *, const char *))
2988 {
2989 unsigned int i;
2990
2991 if (strings == NULL || nstrings == 0) {
2992 return false;
2993 }
2994
2995 for (i = 0; i < nstrings; i++) {
2996 if ((*match_fn)(strings[i], str)) {
2997 *indexp = i;
2998 return true;
2999 }
3000 }
3001
3002 return false;
3003 }
3004
3005 static int
3006 strarray_match(const char ** const strings, unsigned int const nstrings,
3007 const char * const str)
3008 {
3009 unsigned int idx;
3010
3011 if (strarray_match_internal(strings, nstrings, str, &idx,
3012 match_strcmp)) {
3013 return (int)(nstrings - idx);
3014 }
3015 return 0;
3016 }
3017
3018 static int
3019 strarray_pmatch(const char ** const strings, unsigned int const nstrings,
3020 const char * const pattern)
3021 {
3022 unsigned int idx;
3023
3024 if (strarray_match_internal(strings, nstrings, pattern, &idx,
3025 match_pmatch)) {
3026 return (int)(nstrings - idx);
3027 }
3028 return 0;
3029 }
3030
3031 static int
3032 device_compatible_match_strarray_internal(
3033 const char **device_compats, int ndevice_compats,
3034 const struct device_compatible_entry *driver_compats,
3035 const struct device_compatible_entry **matching_entryp,
3036 int (*match_fn)(const char **, unsigned int, const char *))
3037 {
3038 const struct device_compatible_entry *dce = NULL;
3039 int rv;
3040
3041 if (ndevice_compats == 0 || device_compats == NULL ||
3042 driver_compats == NULL)
3043 return 0;
3044
3045 for (dce = driver_compats; dce->compat != NULL; dce++) {
3046 rv = (*match_fn)(device_compats, ndevice_compats, dce->compat);
3047 if (rv != 0) {
3048 if (matching_entryp != NULL) {
3049 *matching_entryp = dce;
3050 }
3051 return rv;
3052 }
3053 }
3054 return 0;
3055 }
3056
3057 /*
3058 * device_compatible_match:
3059 *
3060 * Match a driver's "compatible" data against a device's
3061 * "compatible" strings.  Returns a result weighted by
3062 * which device "compatible" string was matched.
3063 */
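/*
 * Illustrative sketch: a match routine scores the device against a
 * driver-supplied table.  The table and the "acme,widget" string are
 * hypothetical; DEVICE_COMPAT_EOL is the usual table terminator.
 *
 *    static const struct device_compatible_entry compat_data[] = {
 *        { .compat = "acme,widget" },
 *        DEVICE_COMPAT_EOL
 *    };
 *
 *    return device_compatible_match(device_compats, ndevice_compats,
 *        compat_data);
 */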
3064 int
3065 device_compatible_match(const char **device_compats, int ndevice_compats,
3066 const struct device_compatible_entry *driver_compats)
3067 {
3068 return device_compatible_match_strarray_internal(device_compats,
3069 ndevice_compats, driver_compats, NULL, strarray_match);
3070 }
3071
3072 /*
3073 * device_compatible_pmatch:
3074 *
3075 * Like device_compatible_match(), but uses pmatch(9) to compare
3076 * the device "compatible" strings against patterns in the
3077 * driver's "compatible" data.
3078 */
3079 int
3080 device_compatible_pmatch(const char **device_compats, int ndevice_compats,
3081 const struct device_compatible_entry *driver_compats)
3082 {
3083 return device_compatible_match_strarray_internal(device_compats,
3084 ndevice_compats, driver_compats, NULL, strarray_pmatch);
3085 }
3086
3087 static int
3088 device_compatible_match_strlist_internal(
3089 const char * const device_compats, size_t const device_compatsize,
3090 const struct device_compatible_entry *driver_compats,
3091 const struct device_compatible_entry **matching_entryp,
3092 int (*match_fn)(const char *, size_t, const char *))
3093 {
3094 const struct device_compatible_entry *dce = NULL;
3095 int rv;
3096
3097 if (device_compats == NULL || device_compatsize == 0 ||
3098 driver_compats == NULL)
3099 return 0;
3100
3101 for (dce = driver_compats; dce->compat != NULL; dce++) {
3102 rv = (*match_fn)(device_compats, device_compatsize,
3103 dce->compat);
3104 if (rv != 0) {
3105 if (matching_entryp != NULL) {
3106 *matching_entryp = dce;
3107 }
3108 return rv;
3109 }
3110 }
3111 return 0;
3112 }
3113
3114 /*
3115 * device_compatible_match_strlist:
3116 *
3117 * Like device_compatible_match(), but take the device
3118 * "compatible" strings as an OpenFirmware-style string
3119 * list.
3120 */
3121 int
3122 device_compatible_match_strlist(
3123 const char * const device_compats, size_t const device_compatsize,
3124 const struct device_compatible_entry *driver_compats)
3125 {
3126 return device_compatible_match_strlist_internal(device_compats,
3127 device_compatsize, driver_compats, NULL, strlist_match);
3128 }
3129
3130 /*
3131 * device_compatible_pmatch_strlist:
3132 *
3133 * Like device_compatible_pmatch(), but take the device
3134 * "compatible" strings as an OpenFirmware-style string
3135 * list.
3136 */
3137 int
3138 device_compatible_pmatch_strlist(
3139 const char * const device_compats, size_t const device_compatsize,
3140 const struct device_compatible_entry *driver_compats)
3141 {
3142 return device_compatible_match_strlist_internal(device_compats,
3143 device_compatsize, driver_compats, NULL, strlist_pmatch);
3144 }
3145
3146 static int
3147 device_compatible_match_id_internal(
3148 uintptr_t const id, uintptr_t const mask, uintptr_t const sentinel_id,
3149 const struct device_compatible_entry *driver_compats,
3150 const struct device_compatible_entry **matching_entryp)
3151 {
3152 const struct device_compatible_entry *dce = NULL;
3153
3154 if (mask == 0)
3155 return 0;
3156
3157 for (dce = driver_compats; dce->id != sentinel_id; dce++) {
3158 if ((id & mask) == dce->id) {
3159 if (matching_entryp != NULL) {
3160 *matching_entryp = dce;
3161 }
3162 return 1;
3163 }
3164 }
3165 return 0;
3166 }
3167
3168 /*
3169 * device_compatible_match_id:
3170 *
3171 * Like device_compatible_match(), but takes a single
3172 * unsigned integer device ID.
3173 */
3174 int
3175 device_compatible_match_id(
3176 uintptr_t const id, uintptr_t const sentinel_id,
3177 const struct device_compatible_entry *driver_compats)
3178 {
3179 return device_compatible_match_id_internal(id, (uintptr_t)-1,
3180 sentinel_id, driver_compats, NULL);
3181 }
3182
3183 /*
3184 * device_compatible_lookup:
3185 *
3186 * Look up and return the device_compatible_entry, using the
3187 * same matching criteria used by device_compatible_match().
3188 */
3189 const struct device_compatible_entry *
3190 device_compatible_lookup(const char **device_compats, int ndevice_compats,
3191 const struct device_compatible_entry *driver_compats)
3192 {
3193 const struct device_compatible_entry *dce;
3194
3195 if (device_compatible_match_strarray_internal(device_compats,
3196 ndevice_compats, driver_compats, &dce, strarray_match)) {
3197 return dce;
3198 }
3199 return NULL;
3200 }
3201
3202 /*
3203 * device_compatible_plookup:
3204 *
3205 * Look up and return the device_compatible_entry, using the
3206 * same matching criteria used by device_compatible_pmatch().
3207 */
3208 const struct device_compatible_entry *
3209 device_compatible_plookup(const char **device_compats, int ndevice_compats,
3210 const struct device_compatible_entry *driver_compats)
3211 {
3212 const struct device_compatible_entry *dce;
3213
3214 if (device_compatible_match_strarray_internal(device_compats,
3215 ndevice_compats, driver_compats, &dce, strarray_pmatch)) {
3216 return dce;
3217 }
3218 return NULL;
3219 }
3220
3221 /*
3222 * device_compatible_lookup_strlist:
3223 *
3224 * Like device_compatible_lookup(), but take the device
3225 * "compatible" strings as an OpenFirmware-style string
3226 * list.
3227 */
3228 const struct device_compatible_entry *
3229 device_compatible_lookup_strlist(
3230 const char * const device_compats, size_t const device_compatsize,
3231 const struct device_compatible_entry *driver_compats)
3232 {
3233 const struct device_compatible_entry *dce;
3234
3235 if (device_compatible_match_strlist_internal(device_compats,
3236 device_compatsize, driver_compats, &dce, strlist_match)) {
3237 return dce;
3238 }
3239 return NULL;
3240 }
3241
3242 /*
3243 * device_compatible_plookup_strlist:
3244 *
3245 * Like device_compatible_plookup(), but take the device
3246 * "compatible" strings as an OpenFirmware-style string
3247 * list.
3248 */
3249 const struct device_compatible_entry *
3250 device_compatible_plookup_strlist(
3251 const char * const device_compats, size_t const device_compatsize,
3252 const struct device_compatible_entry *driver_compats)
3253 {
3254 const struct device_compatible_entry *dce;
3255
3256 if (device_compatible_match_strlist_internal(device_compats,
3257 device_compatsize, driver_compats, &dce, strlist_pmatch)) {
3258 return dce;
3259 }
3260 return NULL;
3261 }
3262
3263 /*
3264 * device_compatible_lookup_id:
3265 *
3266 * Like device_compatible_lookup(), but takes a single
3267 * unsigned integer device ID.
3268 */
3269 const struct device_compatible_entry *
3270 device_compatible_lookup_id(
3271 uintptr_t const id, uintptr_t const sentinel_id,
3272 const struct device_compatible_entry *driver_compats)
3273 {
3274 const struct device_compatible_entry *dce;
3275
3276 if (device_compatible_match_id_internal(id, (uintptr_t)-1,
3277 sentinel_id, driver_compats, &dce)) {
3278 return dce;
3279 }
3280 return NULL;
3281 }
3282
3283 /*
3284 * Power management related functions.
3285 */
3286
3287 bool
3288 device_pmf_is_registered(device_t dev)
3289 {
3290 return (dev->dv_flags & DVF_POWER_HANDLERS) != 0;
3291 }
3292
3293 bool
3294 device_pmf_driver_suspend(device_t dev, const pmf_qual_t *qual)
3295 {
3296 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3297 return true;
3298 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3299 return false;
3300 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3301 dev->dv_driver_suspend != NULL &&
3302 !(*dev->dv_driver_suspend)(dev, qual))
3303 return false;
3304
3305 dev->dv_flags |= DVF_DRIVER_SUSPENDED;
3306 return true;
3307 }
3308
3309 bool
3310 device_pmf_driver_resume(device_t dev, const pmf_qual_t *qual)
3311 {
3312 if ((dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3313 return true;
3314 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3315 return false;
3316 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_DRIVER &&
3317 dev->dv_driver_resume != NULL &&
3318 !(*dev->dv_driver_resume)(dev, qual))
3319 return false;
3320
3321 dev->dv_flags &= ~DVF_DRIVER_SUSPENDED;
3322 return true;
3323 }
3324
3325 bool
3326 device_pmf_driver_shutdown(device_t dev, int how)
3327 {
3328
3329 if (dev->dv_driver_shutdown != NULL &&
3330 !(*dev->dv_driver_shutdown)(dev, how))
3331 return false;
3332 return true;
3333 }
3334
3335 void
3336 device_pmf_driver_register(device_t dev,
3337 bool (*suspend)(device_t, const pmf_qual_t *),
3338 bool (*resume)(device_t, const pmf_qual_t *),
3339 bool (*shutdown)(device_t, int))
3340 {
3341
3342 dev->dv_driver_suspend = suspend;
3343 dev->dv_driver_resume = resume;
3344 dev->dv_driver_shutdown = shutdown;
3345 dev->dv_flags |= DVF_POWER_HANDLERS;
3346 }
3347
3348 void
3349 device_pmf_driver_deregister(device_t dev)
3350 {
3351 device_lock_t dvl = device_getlock(dev);
3352
3353 dev->dv_driver_suspend = NULL;
3354 dev->dv_driver_resume = NULL;
3355
3356 mutex_enter(&dvl->dvl_mtx);
3357 dev->dv_flags &= ~DVF_POWER_HANDLERS;
3358 while (dvl->dvl_nlock > 0 || dvl->dvl_nwait > 0) {
3359 /* Wake a thread that waits for the lock. That
3360 * thread will fail to acquire the lock, and then
3361 * it will wake the next thread that waits for the
3362 * lock, or else it will wake us.
3363 */
3364 cv_signal(&dvl->dvl_cv);
3365 pmflock_debug(dev, __func__, __LINE__);
3366 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3367 pmflock_debug(dev, __func__, __LINE__);
3368 }
3369 mutex_exit(&dvl->dvl_mtx);
3370 }
3371
3372 void
3373 device_pmf_driver_child_register(device_t dev)
3374 {
3375 device_t parent = device_parent(dev);
3376
3377 if (parent == NULL || parent->dv_driver_child_register == NULL)
3378 return;
3379 (*parent->dv_driver_child_register)(dev);
3380 }
3381
3382 void
3383 device_pmf_driver_set_child_register(device_t dev,
3384 void (*child_register)(device_t))
3385 {
3386 dev->dv_driver_child_register = child_register;
3387 }
3388
3389 static void
3390 pmflock_debug(device_t dev, const char *func, int line)
3391 {
3392 #ifdef PMFLOCK_DEBUG
3393 device_lock_t dvl = device_getlock(dev);
3394 const char *curlwp_name;
3395
3396 if (curlwp->l_name != NULL)
3397 curlwp_name = curlwp->l_name;
3398 else
3399 curlwp_name = curlwp->l_proc->p_comm;
3400
3401 aprint_debug_dev(dev,
3402 "%s.%d, %s dvl_nlock %d dvl_nwait %d dv_flags %x\n", func, line,
3403 curlwp_name, dvl->dvl_nlock, dvl->dvl_nwait, dev->dv_flags);
3404 #endif /* PMFLOCK_DEBUG */
3405 }
3406
3407 static bool
3408 device_pmf_lock1(device_t dev)
3409 {
3410 device_lock_t dvl = device_getlock(dev);
3411
3412 while (device_pmf_is_registered(dev) &&
3413 dvl->dvl_nlock > 0 && dvl->dvl_holder != curlwp) {
3414 dvl->dvl_nwait++;
3415 pmflock_debug(dev, __func__, __LINE__);
3416 cv_wait(&dvl->dvl_cv, &dvl->dvl_mtx);
3417 pmflock_debug(dev, __func__, __LINE__);
3418 dvl->dvl_nwait--;
3419 }
3420 if (!device_pmf_is_registered(dev)) {
3421 pmflock_debug(dev, __func__, __LINE__);
3422 /* We could not acquire the lock, but some other thread may
3423 * wait for it, also. Wake that thread.
3424 */
3425 cv_signal(&dvl->dvl_cv);
3426 return false;
3427 }
3428 dvl->dvl_nlock++;
3429 dvl->dvl_holder = curlwp;
3430 pmflock_debug(dev, __func__, __LINE__);
3431 return true;
3432 }
3433
3434 bool
3435 device_pmf_lock(device_t dev)
3436 {
3437 bool rc;
3438 device_lock_t dvl = device_getlock(dev);
3439
3440 mutex_enter(&dvl->dvl_mtx);
3441 rc = device_pmf_lock1(dev);
3442 mutex_exit(&dvl->dvl_mtx);
3443
3444 return rc;
3445 }
3446
3447 void
3448 device_pmf_unlock(device_t dev)
3449 {
3450 device_lock_t dvl = device_getlock(dev);
3451
3452 KASSERT(dvl->dvl_nlock > 0);
3453 mutex_enter(&dvl->dvl_mtx);
3454 if (--dvl->dvl_nlock == 0)
3455 dvl->dvl_holder = NULL;
3456 cv_signal(&dvl->dvl_cv);
3457 pmflock_debug(dev, __func__, __LINE__);
3458 mutex_exit(&dvl->dvl_mtx);
3459 }
3460
3461 device_lock_t
3462 device_getlock(device_t dev)
3463 {
3464 return &dev->dv_lock;
3465 }
3466
3467 void *
3468 device_pmf_bus_private(device_t dev)
3469 {
3470 return dev->dv_bus_private;
3471 }
3472
3473 bool
3474 device_pmf_bus_suspend(device_t dev, const pmf_qual_t *qual)
3475 {
3476 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0)
3477 return true;
3478 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0 ||
3479 (dev->dv_flags & DVF_DRIVER_SUSPENDED) == 0)
3480 return false;
3481 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3482 dev->dv_bus_suspend != NULL &&
3483 !(*dev->dv_bus_suspend)(dev, qual))
3484 return false;
3485
3486 dev->dv_flags |= DVF_BUS_SUSPENDED;
3487 return true;
3488 }
3489
3490 bool
3491 device_pmf_bus_resume(device_t dev, const pmf_qual_t *qual)
3492 {
3493 if ((dev->dv_flags & DVF_BUS_SUSPENDED) == 0)
3494 return true;
3495 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_BUS &&
3496 dev->dv_bus_resume != NULL &&
3497 !(*dev->dv_bus_resume)(dev, qual))
3498 return false;
3499
3500 dev->dv_flags &= ~DVF_BUS_SUSPENDED;
3501 return true;
3502 }
3503
3504 bool
3505 device_pmf_bus_shutdown(device_t dev, int how)
3506 {
3507
3508 if (dev->dv_bus_shutdown != NULL &&
3509 !(*dev->dv_bus_shutdown)(dev, how))
3510 return false;
3511 return true;
3512 }
3513
3514 void
3515 device_pmf_bus_register(device_t dev, void *priv,
3516 bool (*suspend)(device_t, const pmf_qual_t *),
3517 bool (*resume)(device_t, const pmf_qual_t *),
3518 bool (*shutdown)(device_t, int), void (*deregister)(device_t))
3519 {
3520 dev->dv_bus_private = priv;
3521 dev->dv_bus_resume = resume;
3522 dev->dv_bus_suspend = suspend;
3523 dev->dv_bus_shutdown = shutdown;
3524 dev->dv_bus_deregister = deregister;
3525 }
3526
3527 void
3528 device_pmf_bus_deregister(device_t dev)
3529 {
3530 if (dev->dv_bus_deregister == NULL)
3531 return;
3532 (*dev->dv_bus_deregister)(dev);
3533 dev->dv_bus_private = NULL;
3534 dev->dv_bus_suspend = NULL;
3535 dev->dv_bus_resume = NULL;
3536 dev->dv_bus_deregister = NULL;
3537 }
3538
3539 void *
3540 device_pmf_class_private(device_t dev)
3541 {
3542 return dev->dv_class_private;
3543 }
3544
3545 bool
3546 device_pmf_class_suspend(device_t dev, const pmf_qual_t *qual)
3547 {
3548 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) != 0)
3549 return true;
3550 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3551 dev->dv_class_suspend != NULL &&
3552 !(*dev->dv_class_suspend)(dev, qual))
3553 return false;
3554
3555 dev->dv_flags |= DVF_CLASS_SUSPENDED;
3556 return true;
3557 }
3558
3559 bool
3560 device_pmf_class_resume(device_t dev, const pmf_qual_t *qual)
3561 {
3562 if ((dev->dv_flags & DVF_CLASS_SUSPENDED) == 0)
3563 return true;
3564 if ((dev->dv_flags & DVF_BUS_SUSPENDED) != 0 ||
3565 (dev->dv_flags & DVF_DRIVER_SUSPENDED) != 0)
3566 return false;
3567 if (pmf_qual_depth(qual) <= DEVACT_LEVEL_CLASS &&
3568 dev->dv_class_resume != NULL &&
3569 !(*dev->dv_class_resume)(dev, qual))
3570 return false;
3571
3572 dev->dv_flags &= ~DVF_CLASS_SUSPENDED;
3573 return true;
3574 }
3575
3576 void
3577 device_pmf_class_register(device_t dev, void *priv,
3578 bool (*suspend)(device_t, const pmf_qual_t *),
3579 bool (*resume)(device_t, const pmf_qual_t *),
3580 void (*deregister)(device_t))
3581 {
3582 dev->dv_class_private = priv;
3583 dev->dv_class_suspend = suspend;
3584 dev->dv_class_resume = resume;
3585 dev->dv_class_deregister = deregister;
3586 }
3587
3588 void
3589 device_pmf_class_deregister(device_t dev)
3590 {
3591 if (dev->dv_class_deregister == NULL)
3592 return;
3593 (*dev->dv_class_deregister)(dev);
3594 dev->dv_class_private = NULL;
3595 dev->dv_class_suspend = NULL;
3596 dev->dv_class_resume = NULL;
3597 dev->dv_class_deregister = NULL;
3598 }
3599
3600 bool
3601 device_active(device_t dev, devactive_t type)
3602 {
3603 size_t i;
3604
3605 if (dev->dv_activity_count == 0)
3606 return false;
3607
3608 for (i = 0; i < dev->dv_activity_count; ++i) {
3609 if (dev->dv_activity_handlers[i] == NULL)
3610 break;
3611 (*dev->dv_activity_handlers[i])(dev, type);
3612 }
3613
3614 return true;
3615 }
3616
3617 bool
3618 device_active_register(device_t dev, void (*handler)(device_t, devactive_t))
3619 {
3620 void (**new_handlers)(device_t, devactive_t);
3621 void (**old_handlers)(device_t, devactive_t);
3622 size_t i, old_size, new_size;
3623 int s;
3624
3625 old_handlers = dev->dv_activity_handlers;
3626 old_size = dev->dv_activity_count;
3627
3628 KASSERT(old_size == 0 || old_handlers != NULL);
3629
3630 for (i = 0; i < old_size; ++i) {
3631 KASSERT(old_handlers[i] != handler);
3632 if (old_handlers[i] == NULL) {
3633 old_handlers[i] = handler;
3634 return true;
3635 }
3636 }
3637
3638 new_size = old_size + 4;
3639 new_handlers = kmem_alloc(sizeof(void *) * new_size, KM_SLEEP);
3640
3641 for (i = 0; i < old_size; ++i)
3642 new_handlers[i] = old_handlers[i];
3643 new_handlers[old_size] = handler;
3644 for (i = old_size+1; i < new_size; ++i)
3645 new_handlers[i] = NULL;
3646
3647 s = splhigh();
3648 dev->dv_activity_count = new_size;
3649 dev->dv_activity_handlers = new_handlers;
3650 splx(s);
3651
3652 if (old_size > 0)
3653 kmem_free(old_handlers, sizeof(void *) * old_size);
3654
3655 return true;
3656 }
3657
3658 void
3659 device_active_deregister(device_t dev, void (*handler)(device_t, devactive_t))
3660 {
3661 void (**old_handlers)(device_t, devactive_t);
3662 size_t i, old_size;
3663 int s;
3664
3665 old_handlers = dev->dv_activity_handlers;
3666 old_size = dev->dv_activity_count;
3667
3668 for (i = 0; i < old_size; ++i) {
3669 if (old_handlers[i] == handler)
3670 break;
3671 if (old_handlers[i] == NULL)
3672 return; /* XXX panic? */
3673 }
3674
3675 if (i == old_size)
3676 return; /* XXX panic? */
3677
3678 for (; i < old_size - 1; ++i) {
3679 if ((old_handlers[i] = old_handlers[i + 1]) != NULL)
3680 continue;
3681
3682 if (i == 0) {
3683 s = splhigh();
3684 dev->dv_activity_count = 0;
3685 dev->dv_activity_handlers = NULL;
3686 splx(s);
3687 kmem_free(old_handlers, sizeof(void *) * old_size);
3688 }
3689 return;
3690 }
3691 old_handlers[i] = NULL;
3692 }
3693
3694 /* Return true iff the device_t `dv' exists at generation `gen'. */
3695 static bool
3696 device_exists_at(device_t dv, devgen_t gen)
3697 {
3698 return (dv->dv_del_gen == 0 || dv->dv_del_gen > gen) &&
3699 dv->dv_add_gen <= gen;
3700 }
3701
3702 static bool
3703 deviter_visits(const deviter_t *di, device_t dv)
3704 {
3705 return device_exists_at(dv, di->di_gen);
3706 }
3707
3708 /*
3709 * Device Iteration
3710 *
3711 * deviter_t: a device iterator. Holds state for a "walk" visiting
3712 * each device_t's in the device tree.
3713 *
3714 * deviter_init(di, flags): initialize the device iterator `di'
3715 * to "walk" the device tree. deviter_next(di) will return
3716 * the first device_t in the device tree, or NULL if there are
3717 * no devices.
3718 *
3719 * `flags' is one or more of DEVITER_F_RW, indicating that the
3720 * caller intends to modify the device tree by calling
3721 * config_detach(9) on devices in the order that the iterator
3722 * returns them; DEVITER_F_ROOT_FIRST, asking for the devices
3723 * nearest the "root" of the device tree to be returned, first;
3724 * DEVITER_F_LEAVES_FIRST, asking for the devices furthest from
3725 * the root of the device tree, first; and DEVITER_F_SHUTDOWN,
3726 * indicating both that deviter_init() should not respect any
3727 * locks on the device tree, and that deviter_next(di) may run
3728 * in more than one LWP before the walk has finished.
3729 *
3730 * Only one DEVITER_F_RW iterator may be in the device tree at
3731 * once.
3732 *
3733 * DEVITER_F_SHUTDOWN implies DEVITER_F_RW.
3734 *
3735 * Results are undefined if the flags DEVITER_F_ROOT_FIRST and
3736 * DEVITER_F_LEAVES_FIRST are used in combination.
3737 *
3738 * deviter_first(di, flags): initialize the device iterator `di'
3739 * and return the first device_t in the device tree, or NULL
3740 * if there are no devices. The statement
3741 *
3742 * dv = deviter_first(di);
3743 *
3744 * is shorthand for
3745 *
3746 * deviter_init(di);
3747 * dv = deviter_next(di);
3748 *
3749 * deviter_next(di): return the next device_t in the device tree,
3750 * or NULL if there are no more devices. deviter_next(di)
3751 * is undefined if `di' was not initialized with deviter_init() or
3752 * deviter_first().
3753 *
3754 * deviter_release(di): stops iteration (subsequent calls to
3755 * deviter_next() will return NULL), releases any locks and
3756 * resources held by the device iterator.
3757 *
3758 * Device iteration does not return device_t's in any particular
3759 * order. An iterator will never return the same device_t twice.
3760 * Device iteration is guaranteed to complete---i.e., if deviter_next(di)
3761 * is called repeatedly on the same `di', it will eventually return
3762 * NULL. It is ok to attach/detach devices during device iteration.
3763 */
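/*
 * Illustrative sketch: visit every device and detach the children of
 * `self', essentially what config_detach_children() above does.
 *
 *    deviter_t di;
 *    device_t dv;
 *
 *    for (dv = deviter_first(&di, DEVITER_F_RW); dv != NULL;
 *         dv = deviter_next(&di)) {
 *        if (device_parent(dv) == self)
 *            (void)config_detach(dv, flags);
 *    }
 *    deviter_release(&di);
 */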
3764 void
3765 deviter_init(deviter_t *di, deviter_flags_t flags)
3766 {
3767 device_t dv;
3768
3769 memset(di, 0, sizeof(*di));
3770
3771 if ((flags & DEVITER_F_SHUTDOWN) != 0)
3772 flags |= DEVITER_F_RW;
3773
3774 mutex_enter(&alldevs_lock);
3775 if ((flags & DEVITER_F_RW) != 0)
3776 alldevs_nwrite++;
3777 else
3778 alldevs_nread++;
3779 di->di_gen = alldevs_gen++;
3780 di->di_flags = flags;
3781
3782 switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
3783 case DEVITER_F_LEAVES_FIRST:
3784 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3785 if (!deviter_visits(di, dv))
3786 continue;
3787 di->di_curdepth = MAX(di->di_curdepth, dv->dv_depth);
3788 }
3789 break;
3790 case DEVITER_F_ROOT_FIRST:
3791 TAILQ_FOREACH(dv, &alldevs, dv_list) {
3792 if (!deviter_visits(di, dv))
3793 continue;
3794 di->di_maxdepth = MAX(di->di_maxdepth, dv->dv_depth);
3795 }
3796 break;
3797 default:
3798 break;
3799 }
3800
3801 deviter_reinit(di);
3802 mutex_exit(&alldevs_lock);
3803 }

static void
deviter_reinit(deviter_t *di)
{

	KASSERT(mutex_owned(&alldevs_lock));
	if ((di->di_flags & DEVITER_F_RW) != 0)
		di->di_prev = TAILQ_LAST(&alldevs, devicelist);
	else
		di->di_prev = TAILQ_FIRST(&alldevs);
}

device_t
deviter_first(deviter_t *di, deviter_flags_t flags)
{

	deviter_init(di, flags);
	return deviter_next(di);
}

static device_t
deviter_next2(deviter_t *di)
{
	device_t dv;

	KASSERT(mutex_owned(&alldevs_lock));

	dv = di->di_prev;

	if (dv == NULL)
		return NULL;

	if ((di->di_flags & DEVITER_F_RW) != 0)
		di->di_prev = TAILQ_PREV(dv, devicelist, dv_list);
	else
		di->di_prev = TAILQ_NEXT(dv, dv_list);

	return dv;
}

static device_t
deviter_next1(deviter_t *di)
{
	device_t dv;

	KASSERT(mutex_owned(&alldevs_lock));

	do {
		dv = deviter_next2(di);
	} while (dv != NULL && !deviter_visits(di, dv));

	return dv;
}

device_t
deviter_next(deviter_t *di)
{
	device_t dv = NULL;

	mutex_enter(&alldevs_lock);
	switch (di->di_flags & (DEVITER_F_LEAVES_FIRST|DEVITER_F_ROOT_FIRST)) {
	case 0:
		dv = deviter_next1(di);
		break;
	case DEVITER_F_LEAVES_FIRST:
		/*
		 * Depth-ordered walks rescan the device list once per
		 * depth level: skip devices until one at the current
		 * depth is found, and when a pass over the whole list
		 * finishes, move to the next depth and start over.
		 */
		while (di->di_curdepth >= 0) {
			if ((dv = deviter_next1(di)) == NULL) {
				di->di_curdepth--;
				deviter_reinit(di);
			} else if (dv->dv_depth == di->di_curdepth)
				break;
		}
		break;
	case DEVITER_F_ROOT_FIRST:
		/* As above, but from depth 0 up to di_maxdepth. */
		while (di->di_curdepth <= di->di_maxdepth) {
			if ((dv = deviter_next1(di)) == NULL) {
				di->di_curdepth++;
				deviter_reinit(di);
			} else if (dv->dv_depth == di->di_curdepth)
				break;
		}
		break;
	default:
		break;
	}
	mutex_exit(&alldevs_lock);

	return dv;
}

void
deviter_release(deviter_t *di)
{
	bool rw = (di->di_flags & DEVITER_F_RW) != 0;

	mutex_enter(&alldevs_lock);
	if (rw)
		--alldevs_nwrite;
	else
		--alldevs_nread;
	/* XXX wake a garbage-collection thread */
	mutex_exit(&alldevs_lock);
}
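
/*
 * Example (an illustrative sketch of the DEVITER_F_RW case described
 * in the comment above, not code from this file): a caller that
 * detaches devices as it visits them uses DEVITER_F_RW, here combined
 * with DEVITER_F_LEAVES_FIRST so that children are returned before
 * their parents.  Error handling is elided.
 *
 *	deviter_t di;
 *	device_t dv;
 *
 *	for (dv = deviter_first(&di,
 *	         DEVITER_F_RW | DEVITER_F_LEAVES_FIRST);
 *	     dv != NULL; dv = deviter_next(&di))
 *		(void)config_detach(dv, DETACH_FORCE);
 *	deviter_release(&di);
 */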

const char *
cfdata_ifattr(const struct cfdata *cf)
{
	return cf->cf_pspec->cfp_iattr;
}

bool
ifattr_match(const char *snull, const char *t)
{
	return (snull == NULL) || strcmp(snull, t) == 0;
}
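
/*
 * Example (an illustrative sketch, not code from this file): the two
 * helpers above are typically used together when scanning candidate
 * cfdata entries for a given interface attribute `ifattr', where a
 * NULL `ifattr' acts as a wildcard and matches any attribute:
 *
 *	if (!ifattr_match(ifattr, cfdata_ifattr(cf)))
 *		continue;
 */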

void
null_childdetached(device_t self, device_t child)
{
	/* do nothing */
}

static void
sysctl_detach_setup(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "detachall",
	    SYSCTL_DESCR("Detach all devices at shutdown"),
	    NULL, 0, &detachall, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
}
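
/*
 * Usage note (illustrative, not from this file): the node created
 * above appears as the boolean kern.detachall, so an administrator
 * can request that every device be detached at shutdown with, e.g.,
 *
 *	sysctl -w kern.detachall=1
 */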