1 /*	$NetBSD: subr_devsw.c,v 1.34 2016/02/01 05:05:43 riz Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Overview
34  *
35  *	subr_devsw.c: registers device drivers by name and by major
36  *	number, and provides wrapper methods for performing I/O and
37  *	other tasks on device drivers, keying on the device number
38  *	(dev_t).
39  *
40  *	When the system is built, the config(8) command generates
41  *	static tables of device drivers built into the kernel image
42  *	along with their associated methods.  These are recorded in
43  *	the cdevsw0 and bdevsw0 tables.  Drivers can also be added to
44  *	and removed from the system dynamically.
45  *
46  * Allocation
47  *
48  *	When the system initially boots only the statically allocated
49  *	indexes (bdevsw0, cdevsw0) are used.  If these overflow due to
50  *	allocation, we allocate a fixed block of memory to hold the new,
51  *	expanded index.  This "fork" of the table is only ever performed
52  *	once in order to guarantee that other threads may safely access
53  *	the device tables:
54  *
55  *	o Once a thread has a "reference" to the table via an earlier
56  *	  open() call, we know that the entry in the table must exist
57  *	  and so it is safe to access it.
58  *
59  *	o Regardless of whether other threads see the old or new
60  *	  pointers, they will point to a correct device switch
61  *	  structure for the operation being performed.
62  *
63  *	XXX Currently, the wrapper methods such as cdev_read() verify
64  *	that a device driver does in fact exist before calling the
65  *	associated driver method.  This should be changed so that
66  *	once the device is has been referenced by a vnode (opened),
67  *	calling	the other methods should be valid until that reference
68  *	is dropped.
69  */
70 
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.34 2016/02/01 05:05:43 riz Exp $");
73 
74 #ifdef _KERNEL_OPT
75 #include "opt_dtrace.h"
76 #endif
77 
78 #include <sys/param.h>
79 #include <sys/conf.h>
80 #include <sys/kmem.h>
81 #include <sys/systm.h>
82 #include <sys/poll.h>
83 #include <sys/tty.h>
84 #include <sys/cpu.h>
85 #include <sys/buf.h>
86 #include <sys/reboot.h>
87 #include <sys/sdt.h>
88 
89 #ifdef DEVSW_DEBUG
90 #define	DPRINTF(x)	printf x
91 #else /* DEVSW_DEBUG */
92 #define	DPRINTF(x)
93 #endif /* DEVSW_DEBUG */
94 
95 #define	MAXDEVSW	512	/* the maximum of major device number */
96 #define	BDEVSW_SIZE	(sizeof(struct bdevsw *))
97 #define	CDEVSW_SIZE	(sizeof(struct cdevsw *))
98 #define	DEVSWCONV_SIZE	(sizeof(struct devsw_conv))
99 
100 extern const struct bdevsw **bdevsw, *bdevsw0[];
101 extern const struct cdevsw **cdevsw, *cdevsw0[];
102 extern struct devsw_conv *devsw_conv, devsw_conv0[];
103 extern const int sys_bdevsws, sys_cdevsws;
104 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
105 
106 static int bdevsw_attach(const struct bdevsw *, devmajor_t *);
107 static int cdevsw_attach(const struct cdevsw *, devmajor_t *);
108 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
109 
110 kmutex_t device_lock;
111 
112 void (*biodone_vfs)(buf_t *) = (void *)nullop;
113 
/*
 * devsw_init:
 *
 *	One-time initialization of the device switch machinery.
 *	Sanity-checks the statically configured table sizes and
 *	creates the lock that serializes all table updates.
 */
void
devsw_init(void)
{

	/* The static tables must leave room within the major space. */
	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
	mutex_init(&device_lock, MUTEX_DEFAULT, IPL_NONE);
}
122 
123 int
devsw_attach(const char * devname,const struct bdevsw * bdev,devmajor_t * bmajor,const struct cdevsw * cdev,devmajor_t * cmajor)124 devsw_attach(const char *devname,
125 	     const struct bdevsw *bdev, devmajor_t *bmajor,
126 	     const struct cdevsw *cdev, devmajor_t *cmajor)
127 {
128 	struct devsw_conv *conv;
129 	char *name;
130 	int error, i;
131 	size_t len;
132 
133 	if (devname == NULL || cdev == NULL)
134 		return (EINVAL);
135 
136 	mutex_enter(&device_lock);
137 
138 	for (i = 0 ; i < max_devsw_convs ; i++) {
139 		conv = &devsw_conv[i];
140 		if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
141 			continue;
142 
143 		if (*bmajor < 0)
144 			*bmajor = conv->d_bmajor;
145 		if (*cmajor < 0)
146 			*cmajor = conv->d_cmajor;
147 
148 		if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
149 			error = EINVAL;
150 			goto fail;
151 		}
152 		if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
153 			error = EINVAL;
154 			goto fail;
155 		}
156 
157 		if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
158 		    cdevsw[*cmajor] != NULL) {
159 			error = EEXIST;
160 			goto fail;
161 		}
162 
163 		if (bdev != NULL)
164 			bdevsw[*bmajor] = bdev;
165 		cdevsw[*cmajor] = cdev;
166 
167 		mutex_exit(&device_lock);
168 		return (0);
169 	}
170 
171 	error = bdevsw_attach(bdev, bmajor);
172 	if (error != 0)
173 		goto fail;
174 	error = cdevsw_attach(cdev, cmajor);
175 	if (error != 0) {
176 		devsw_detach_locked(bdev, NULL);
177 		goto fail;
178 	}
179 
180 	for (i = 0 ; i < max_devsw_convs ; i++) {
181 		if (devsw_conv[i].d_name == NULL)
182 			break;
183 	}
184 	if (i == max_devsw_convs) {
185 		struct devsw_conv *newptr;
186 		int old_convs, new_convs;
187 
188 		old_convs = max_devsw_convs;
189 		new_convs = old_convs + 1;
190 
191 		newptr = kmem_zalloc(new_convs * DEVSWCONV_SIZE, KM_NOSLEEP);
192 		if (newptr == NULL) {
193 			devsw_detach_locked(bdev, cdev);
194 			error = ENOMEM;
195 			goto fail;
196 		}
197 		newptr[old_convs].d_name = NULL;
198 		newptr[old_convs].d_bmajor = -1;
199 		newptr[old_convs].d_cmajor = -1;
200 		memcpy(newptr, devsw_conv, old_convs * DEVSWCONV_SIZE);
201 		if (devsw_conv != devsw_conv0)
202 			kmem_free(devsw_conv, old_convs * DEVSWCONV_SIZE);
203 		devsw_conv = newptr;
204 		max_devsw_convs = new_convs;
205 	}
206 
207 	len = strlen(devname) + 1;
208 	name = kmem_alloc(len, KM_NOSLEEP);
209 	if (name == NULL) {
210 		devsw_detach_locked(bdev, cdev);
211 		error = ENOMEM;
212 		goto fail;
213 	}
214 	strlcpy(name, devname, len);
215 
216 	devsw_conv[i].d_name = name;
217 	devsw_conv[i].d_bmajor = *bmajor;
218 	devsw_conv[i].d_cmajor = *cmajor;
219 
220 	mutex_exit(&device_lock);
221 	return (0);
222  fail:
223 	mutex_exit(&device_lock);
224 	return (error);
225 }
226 
/*
 * bdevsw_attach:
 *
 *	Attach a block device switch at *devmajor, choosing a free
 *	dynamic major if *devmajor is negative.  Grows the bdevsw
 *	table (once, from the static bdevsw0 to a full MAXDEVSW-entry
 *	table) if the chosen major does not fit.
 *
 *	=> Called with device_lock held.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, devmajor_t *devmajor)
{
	const struct bdevsw **newptr;
	devmajor_t bmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	/* No block device to attach is trivially successful. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Dynamic allocation: find a major that has no switch
		 * entry and is not reserved by any name mapping.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		/* NOTE: may be == max_bdevsws, forcing table growth below. */
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		KASSERT(bdevsw == bdevsw0);
		/*
		 * "Fork" the table exactly once (see Allocation at the
		 * top of this file).  The old static table is
		 * deliberately not freed: lockless readers may still
		 * hold the old pointer, and both copies remain valid.
		 */
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}
276 
/*
 * cdevsw_attach:
 *
 *	Attach a character device switch at *devmajor, choosing a free
 *	dynamic major if *devmajor is negative.  Grows the cdevsw
 *	table (once, from the static cdevsw0 to a full MAXDEVSW-entry
 *	table) if the chosen major does not fit.  Unlike
 *	bdevsw_attach(), devsw is never NULL here: devsw_attach()
 *	rejects a NULL cdev before calling us.
 *
 *	=> Called with device_lock held.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, devmajor_t *devmajor)
{
	const struct cdevsw **newptr;
	devmajor_t cmajor;
	int i;

	KASSERT(mutex_owned(&device_lock));

	if (*devmajor < 0) {
		/*
		 * Dynamic allocation: find a major that has no switch
		 * entry and is not reserved by any name mapping.
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		/* NOTE: may be == max_cdevsws, forcing table growth below. */
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		KASSERT(cdevsw == cdevsw0);
		/*
		 * "Fork" the table exactly once (see Allocation at the
		 * top of this file).  The old static table is
		 * deliberately not freed: lockless readers may still
		 * hold the old pointer, and both copies remain valid.
		 */
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}
323 
324 static void
devsw_detach_locked(const struct bdevsw * bdev,const struct cdevsw * cdev)325 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
326 {
327 	int i;
328 
329 	KASSERT(mutex_owned(&device_lock));
330 
331 	if (bdev != NULL) {
332 		for (i = 0 ; i < max_bdevsws ; i++) {
333 			if (bdevsw[i] != bdev)
334 				continue;
335 			bdevsw[i] = NULL;
336 			break;
337 		}
338 	}
339 	if (cdev != NULL) {
340 		for (i = 0 ; i < max_cdevsws ; i++) {
341 			if (cdevsw[i] != cdev)
342 				continue;
343 			cdevsw[i] = NULL;
344 			break;
345 		}
346 	}
347 }
348 
/*
 * devsw_detach:
 *
 *	Remove the given block and/or character device switch entries
 *	from the tables, serializing with attach via device_lock.
 *	Always succeeds and returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&device_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&device_lock);
	return 0;
}
358 
359 /*
360  * Look up a block device by number.
361  *
362  * => Caller must ensure that the device is attached.
363  */
364 const struct bdevsw *
bdevsw_lookup(dev_t dev)365 bdevsw_lookup(dev_t dev)
366 {
367 	devmajor_t bmajor;
368 
369 	if (dev == NODEV)
370 		return (NULL);
371 	bmajor = major(dev);
372 	if (bmajor < 0 || bmajor >= max_bdevsws)
373 		return (NULL);
374 
375 	return (bdevsw[bmajor]);
376 }
377 
378 /*
379  * Look up a character device by number.
380  *
381  * => Caller must ensure that the device is attached.
382  */
383 const struct cdevsw *
cdevsw_lookup(dev_t dev)384 cdevsw_lookup(dev_t dev)
385 {
386 	devmajor_t cmajor;
387 
388 	if (dev == NODEV)
389 		return (NULL);
390 	cmajor = major(dev);
391 	if (cmajor < 0 || cmajor >= max_cdevsws)
392 		return (NULL);
393 
394 	return (cdevsw[cmajor]);
395 }
396 
397 /*
398  * Look up a block device by reference to its operations set.
399  *
400  * => Caller must ensure that the device is not detached, and therefore
401  *    that the returned major is still valid when dereferenced.
402  */
403 devmajor_t
bdevsw_lookup_major(const struct bdevsw * bdev)404 bdevsw_lookup_major(const struct bdevsw *bdev)
405 {
406 	devmajor_t bmajor;
407 
408 	for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
409 		if (bdevsw[bmajor] == bdev)
410 			return (bmajor);
411 	}
412 
413 	return (NODEVMAJOR);
414 }
415 
416 /*
417  * Look up a character device by reference to its operations set.
418  *
419  * => Caller must ensure that the device is not detached, and therefore
420  *    that the returned major is still valid when dereferenced.
421  */
422 devmajor_t
cdevsw_lookup_major(const struct cdevsw * cdev)423 cdevsw_lookup_major(const struct cdevsw *cdev)
424 {
425 	devmajor_t cmajor;
426 
427 	for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
428 		if (cdevsw[cmajor] == cdev)
429 			return (cmajor);
430 	}
431 
432 	return (NODEVMAJOR);
433 }
434 
435 /*
436  * Convert from block major number to name.
437  *
438  * => Caller must ensure that the device is not detached, and therefore
439  *    that the name pointer is still valid when dereferenced.
440  */
441 const char *
devsw_blk2name(devmajor_t bmajor)442 devsw_blk2name(devmajor_t bmajor)
443 {
444 	const char *name;
445 	devmajor_t cmajor;
446 	int i;
447 
448 	name = NULL;
449 	cmajor = -1;
450 
451 	mutex_enter(&device_lock);
452 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
453 		mutex_exit(&device_lock);
454 		return (NULL);
455 	}
456 	for (i = 0 ; i < max_devsw_convs; i++) {
457 		if (devsw_conv[i].d_bmajor == bmajor) {
458 			cmajor = devsw_conv[i].d_cmajor;
459 			break;
460 		}
461 	}
462 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
463 		name = devsw_conv[i].d_name;
464 	mutex_exit(&device_lock);
465 
466 	return (name);
467 }
468 
469 /*
470  * Convert char major number to device driver name.
471  */
472 const char *
cdevsw_getname(devmajor_t major)473 cdevsw_getname(devmajor_t major)
474 {
475 	const char *name;
476 	int i;
477 
478 	name = NULL;
479 
480 	if (major < 0)
481 		return (NULL);
482 
483 	mutex_enter(&device_lock);
484 	for (i = 0 ; i < max_devsw_convs; i++) {
485 		if (devsw_conv[i].d_cmajor == major) {
486 			name = devsw_conv[i].d_name;
487 			break;
488 		}
489 	}
490 	mutex_exit(&device_lock);
491 	return (name);
492 }
493 
494 /*
495  * Convert block major number to device driver name.
496  */
497 const char *
bdevsw_getname(devmajor_t major)498 bdevsw_getname(devmajor_t major)
499 {
500 	const char *name;
501 	int i;
502 
503 	name = NULL;
504 
505 	if (major < 0)
506 		return (NULL);
507 
508 	mutex_enter(&device_lock);
509 	for (i = 0 ; i < max_devsw_convs; i++) {
510 		if (devsw_conv[i].d_bmajor == major) {
511 			name = devsw_conv[i].d_name;
512 			break;
513 		}
514 	}
515 	mutex_exit(&device_lock);
516 	return (name);
517 }
518 
519 /*
520  * Convert from device name to block major number.
521  *
522  * => Caller must ensure that the device is not detached, and therefore
523  *    that the major number is still valid when dereferenced.
524  */
525 devmajor_t
devsw_name2blk(const char * name,char * devname,size_t devnamelen)526 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
527 {
528 	struct devsw_conv *conv;
529 	devmajor_t bmajor;
530 	int i;
531 
532 	if (name == NULL)
533 		return (NODEVMAJOR);
534 
535 	mutex_enter(&device_lock);
536 	for (i = 0 ; i < max_devsw_convs ; i++) {
537 		size_t len;
538 
539 		conv = &devsw_conv[i];
540 		if (conv->d_name == NULL)
541 			continue;
542 		len = strlen(conv->d_name);
543 		if (strncmp(conv->d_name, name, len) != 0)
544 			continue;
545 		if (*(name +len) && !isdigit(*(name + len)))
546 			continue;
547 		bmajor = conv->d_bmajor;
548 		if (bmajor < 0 || bmajor >= max_bdevsws ||
549 		    bdevsw[bmajor] == NULL)
550 			break;
551 		if (devname != NULL) {
552 #ifdef DEVSW_DEBUG
553 			if (strlen(conv->d_name) >= devnamelen)
554 				printf("devsw_name2blk: too short buffer");
555 #endif /* DEVSW_DEBUG */
556 			strncpy(devname, conv->d_name, devnamelen);
557 			devname[devnamelen - 1] = '\0';
558 		}
559 		mutex_exit(&device_lock);
560 		return (bmajor);
561 	}
562 
563 	mutex_exit(&device_lock);
564 	return (NODEVMAJOR);
565 }
566 
567 /*
568  * Convert from device name to char major number.
569  *
570  * => Caller must ensure that the device is not detached, and therefore
571  *    that the major number is still valid when dereferenced.
572  */
573 devmajor_t
devsw_name2chr(const char * name,char * devname,size_t devnamelen)574 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
575 {
576 	struct devsw_conv *conv;
577 	devmajor_t cmajor;
578 	int i;
579 
580 	if (name == NULL)
581 		return (NODEVMAJOR);
582 
583 	mutex_enter(&device_lock);
584 	for (i = 0 ; i < max_devsw_convs ; i++) {
585 		size_t len;
586 
587 		conv = &devsw_conv[i];
588 		if (conv->d_name == NULL)
589 			continue;
590 		len = strlen(conv->d_name);
591 		if (strncmp(conv->d_name, name, len) != 0)
592 			continue;
593 		if (*(name +len) && !isdigit(*(name + len)))
594 			continue;
595 		cmajor = conv->d_cmajor;
596 		if (cmajor < 0 || cmajor >= max_cdevsws ||
597 		    cdevsw[cmajor] == NULL)
598 			break;
599 		if (devname != NULL) {
600 #ifdef DEVSW_DEBUG
601 			if (strlen(conv->d_name) >= devnamelen)
602 				printf("devsw_name2chr: too short buffer");
603 #endif /* DEVSW_DEBUG */
604 			strncpy(devname, conv->d_name, devnamelen);
605 			devname[devnamelen - 1] = '\0';
606 		}
607 		mutex_exit(&device_lock);
608 		return (cmajor);
609 	}
610 
611 	mutex_exit(&device_lock);
612 	return (NODEVMAJOR);
613 }
614 
615 /*
616  * Convert from character dev_t to block dev_t.
617  *
618  * => Caller must ensure that the device is not detached, and therefore
619  *    that the major number is still valid when dereferenced.
620  */
621 dev_t
devsw_chr2blk(dev_t cdev)622 devsw_chr2blk(dev_t cdev)
623 {
624 	devmajor_t bmajor, cmajor;
625 	int i;
626 	dev_t rv;
627 
628 	cmajor = major(cdev);
629 	bmajor = NODEVMAJOR;
630 	rv = NODEV;
631 
632 	mutex_enter(&device_lock);
633 	if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
634 		mutex_exit(&device_lock);
635 		return (NODEV);
636 	}
637 	for (i = 0 ; i < max_devsw_convs ; i++) {
638 		if (devsw_conv[i].d_cmajor == cmajor) {
639 			bmajor = devsw_conv[i].d_bmajor;
640 			break;
641 		}
642 	}
643 	if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
644 		rv = makedev(bmajor, minor(cdev));
645 	mutex_exit(&device_lock);
646 
647 	return (rv);
648 }
649 
650 /*
651  * Convert from block dev_t to character dev_t.
652  *
653  * => Caller must ensure that the device is not detached, and therefore
654  *    that the major number is still valid when dereferenced.
655  */
656 dev_t
devsw_blk2chr(dev_t bdev)657 devsw_blk2chr(dev_t bdev)
658 {
659 	devmajor_t bmajor, cmajor;
660 	int i;
661 	dev_t rv;
662 
663 	bmajor = major(bdev);
664 	cmajor = NODEVMAJOR;
665 	rv = NODEV;
666 
667 	mutex_enter(&device_lock);
668 	if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
669 		mutex_exit(&device_lock);
670 		return (NODEV);
671 	}
672 	for (i = 0 ; i < max_devsw_convs ; i++) {
673 		if (devsw_conv[i].d_bmajor == bmajor) {
674 			cmajor = devsw_conv[i].d_cmajor;
675 			break;
676 		}
677 	}
678 	if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
679 		rv = makedev(cmajor, minor(bdev));
680 	mutex_exit(&device_lock);
681 
682 	return (rv);
683 }
684 
685 /*
686  * Device access methods.
687  */
688 
/*
 * Take the kernel lock around a driver method unless the driver is
 * marked D_MPSAFE.  The decision is latched into the caller's local
 * "mpflag" so that DEV_UNLOCK makes the matching choice even if
 * d_flag were to change in between.  Wrapped in do { } while (0) so
 * each macro behaves as a single statement when used in an unbraced
 * if body (as in bdev_size()).
 */
#define	DEV_LOCK(d)						\
	do {							\
		if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) {	\
			KERNEL_LOCK(1, NULL);			\
		}						\
	} while (/*CONSTCOND*/0)

#define	DEV_UNLOCK(d)						\
	do {							\
		if (mpflag == 0) {				\
			KERNEL_UNLOCK_ONE(NULL);		\
		}						\
	} while (/*CONSTCOND*/0)
698 
699 int
bdev_open(dev_t dev,int flag,int devtype,lwp_t * l)700 bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
701 {
702 	const struct bdevsw *d;
703 	int rv, mpflag;
704 
705 	/*
706 	 * For open we need to lock, in order to synchronize
707 	 * with attach/detach.
708 	 */
709 	mutex_enter(&device_lock);
710 	d = bdevsw_lookup(dev);
711 	mutex_exit(&device_lock);
712 	if (d == NULL)
713 		return ENXIO;
714 
715 	DEV_LOCK(d);
716 	rv = (*d->d_open)(dev, flag, devtype, l);
717 	DEV_UNLOCK(d);
718 
719 	return rv;
720 }
721 
722 int
bdev_close(dev_t dev,int flag,int devtype,lwp_t * l)723 bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
724 {
725 	const struct bdevsw *d;
726 	int rv, mpflag;
727 
728 	if ((d = bdevsw_lookup(dev)) == NULL)
729 		return ENXIO;
730 
731 	DEV_LOCK(d);
732 	rv = (*d->d_close)(dev, flag, devtype, l);
733 	DEV_UNLOCK(d);
734 
735 	return rv;
736 }
737 
738 SDT_PROVIDER_DECLARE(io);
739 SDT_PROBE_DEFINE1(io, kernel, , start, "struct buf *"/*bp*/);
740 
741 void
bdev_strategy(struct buf * bp)742 bdev_strategy(struct buf *bp)
743 {
744 	const struct bdevsw *d;
745 	int mpflag;
746 
747 	SDT_PROBE1(io, kernel, , start, bp);
748 
749 	if ((d = bdevsw_lookup(bp->b_dev)) == NULL) {
750 		bp->b_error = ENXIO;
751 		bp->b_resid = bp->b_bcount;
752 		biodone_vfs(bp); /* biodone() iff vfs present */
753 		return;
754 	}
755 
756 	DEV_LOCK(d);
757 	(*d->d_strategy)(bp);
758 	DEV_UNLOCK(d);
759 }
760 
761 int
bdev_ioctl(dev_t dev,u_long cmd,void * data,int flag,lwp_t * l)762 bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
763 {
764 	const struct bdevsw *d;
765 	int rv, mpflag;
766 
767 	if ((d = bdevsw_lookup(dev)) == NULL)
768 		return ENXIO;
769 
770 	DEV_LOCK(d);
771 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
772 	DEV_UNLOCK(d);
773 
774 	return rv;
775 }
776 
/*
 * bdev_dump:
 *
 *	Call the block driver's dump method, used to write a crash
 *	dump to the device.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
797 
798 int
bdev_type(dev_t dev)799 bdev_type(dev_t dev)
800 {
801 	const struct bdevsw *d;
802 
803 	if ((d = bdevsw_lookup(dev)) == NULL)
804 		return D_OTHER;
805 	return d->d_flag & D_TYPEMASK;
806 }
807 
/*
 * bdev_size:
 *
 *	Return the size of the device (in DEV_BSIZE units, per the
 *	d_psize contract), or -1 if no driver or no d_psize method is
 *	attached.
 */
int
bdev_size(dev_t dev)
{
	const struct bdevsw *d;
	/*
	 * mpflag is pre-initialized so it has a defined value even on
	 * the dump path where DEV_LOCK is skipped.
	 */
	int rv, mpflag = 0;

	if ((d = bdevsw_lookup(dev)) == NULL ||
	    d->d_psize == NULL)
		return -1;

	/*
	 * Don't to try lock the device if we're dumping.
	 * XXX: is there a better way to test this?
	 */
	if ((boothowto & RB_DUMP) == 0)
		DEV_LOCK(d);
	rv = (*d->d_psize)(dev);
	if ((boothowto & RB_DUMP) == 0)
		DEV_UNLOCK(d);

	return rv;
}
830 
831 int
bdev_discard(dev_t dev,off_t pos,off_t len)832 bdev_discard(dev_t dev, off_t pos, off_t len)
833 {
834 	const struct bdevsw *d;
835 	int rv, mpflag;
836 
837 	if ((d = bdevsw_lookup(dev)) == NULL)
838 		return ENXIO;
839 
840 	DEV_LOCK(d);
841 	rv = (*d->d_discard)(dev, pos, len);
842 	DEV_UNLOCK(d);
843 
844 	return rv;
845 }
846 
847 int
cdev_open(dev_t dev,int flag,int devtype,lwp_t * l)848 cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
849 {
850 	const struct cdevsw *d;
851 	int rv, mpflag;
852 
853 	/*
854 	 * For open we need to lock, in order to synchronize
855 	 * with attach/detach.
856 	 */
857 	mutex_enter(&device_lock);
858 	d = cdevsw_lookup(dev);
859 	mutex_exit(&device_lock);
860 	if (d == NULL)
861 		return ENXIO;
862 
863 	DEV_LOCK(d);
864 	rv = (*d->d_open)(dev, flag, devtype, l);
865 	DEV_UNLOCK(d);
866 
867 	return rv;
868 }
869 
870 int
cdev_close(dev_t dev,int flag,int devtype,lwp_t * l)871 cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
872 {
873 	const struct cdevsw *d;
874 	int rv, mpflag;
875 
876 	if ((d = cdevsw_lookup(dev)) == NULL)
877 		return ENXIO;
878 
879 	DEV_LOCK(d);
880 	rv = (*d->d_close)(dev, flag, devtype, l);
881 	DEV_UNLOCK(d);
882 
883 	return rv;
884 }
885 
886 int
cdev_read(dev_t dev,struct uio * uio,int flag)887 cdev_read(dev_t dev, struct uio *uio, int flag)
888 {
889 	const struct cdevsw *d;
890 	int rv, mpflag;
891 
892 	if ((d = cdevsw_lookup(dev)) == NULL)
893 		return ENXIO;
894 
895 	DEV_LOCK(d);
896 	rv = (*d->d_read)(dev, uio, flag);
897 	DEV_UNLOCK(d);
898 
899 	return rv;
900 }
901 
902 int
cdev_write(dev_t dev,struct uio * uio,int flag)903 cdev_write(dev_t dev, struct uio *uio, int flag)
904 {
905 	const struct cdevsw *d;
906 	int rv, mpflag;
907 
908 	if ((d = cdevsw_lookup(dev)) == NULL)
909 		return ENXIO;
910 
911 	DEV_LOCK(d);
912 	rv = (*d->d_write)(dev, uio, flag);
913 	DEV_UNLOCK(d);
914 
915 	return rv;
916 }
917 
918 int
cdev_ioctl(dev_t dev,u_long cmd,void * data,int flag,lwp_t * l)919 cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
920 {
921 	const struct cdevsw *d;
922 	int rv, mpflag;
923 
924 	if ((d = cdevsw_lookup(dev)) == NULL)
925 		return ENXIO;
926 
927 	DEV_LOCK(d);
928 	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
929 	DEV_UNLOCK(d);
930 
931 	return rv;
932 }
933 
934 void
cdev_stop(struct tty * tp,int flag)935 cdev_stop(struct tty *tp, int flag)
936 {
937 	const struct cdevsw *d;
938 	int mpflag;
939 
940 	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
941 		return;
942 
943 	DEV_LOCK(d);
944 	(*d->d_stop)(tp, flag);
945 	DEV_UNLOCK(d);
946 }
947 
948 struct tty *
cdev_tty(dev_t dev)949 cdev_tty(dev_t dev)
950 {
951 	const struct cdevsw *d;
952 
953 	if ((d = cdevsw_lookup(dev)) == NULL)
954 		return NULL;
955 
956 	/* XXX Check if necessary. */
957 	if (d->d_tty == NULL)
958 		return NULL;
959 
960 	return (*d->d_tty)(dev);
961 }
962 
963 int
cdev_poll(dev_t dev,int flag,lwp_t * l)964 cdev_poll(dev_t dev, int flag, lwp_t *l)
965 {
966 	const struct cdevsw *d;
967 	int rv, mpflag;
968 
969 	if ((d = cdevsw_lookup(dev)) == NULL)
970 		return POLLERR;
971 
972 	DEV_LOCK(d);
973 	rv = (*d->d_poll)(dev, flag, l);
974 	DEV_UNLOCK(d);
975 
976 	return rv;
977 }
978 
979 paddr_t
cdev_mmap(dev_t dev,off_t off,int flag)980 cdev_mmap(dev_t dev, off_t off, int flag)
981 {
982 	const struct cdevsw *d;
983 	paddr_t rv;
984 	int mpflag;
985 
986 	if ((d = cdevsw_lookup(dev)) == NULL)
987 		return (paddr_t)-1LL;
988 
989 	DEV_LOCK(d);
990 	rv = (*d->d_mmap)(dev, off, flag);
991 	DEV_UNLOCK(d);
992 
993 	return rv;
994 }
995 
996 int
cdev_kqfilter(dev_t dev,struct knote * kn)997 cdev_kqfilter(dev_t dev, struct knote *kn)
998 {
999 	const struct cdevsw *d;
1000 	int rv, mpflag;
1001 
1002 	if ((d = cdevsw_lookup(dev)) == NULL)
1003 		return ENXIO;
1004 
1005 	DEV_LOCK(d);
1006 	rv = (*d->d_kqfilter)(dev, kn);
1007 	DEV_UNLOCK(d);
1008 
1009 	return rv;
1010 }
1011 
1012 int
cdev_discard(dev_t dev,off_t pos,off_t len)1013 cdev_discard(dev_t dev, off_t pos, off_t len)
1014 {
1015 	const struct cdevsw *d;
1016 	int rv, mpflag;
1017 
1018 	if ((d = cdevsw_lookup(dev)) == NULL)
1019 		return ENXIO;
1020 
1021 	DEV_LOCK(d);
1022 	rv = (*d->d_discard)(dev, pos, len);
1023 	DEV_UNLOCK(d);
1024 
1025 	return rv;
1026 }
1027 
1028 int
cdev_type(dev_t dev)1029 cdev_type(dev_t dev)
1030 {
1031 	const struct cdevsw *d;
1032 
1033 	if ((d = cdevsw_lookup(dev)) == NULL)
1034 		return D_OTHER;
1035 	return d->d_flag & D_TYPEMASK;
1036 }
1037