xref: /dragonfly/sys/kern/subr_disk.c (revision cb740add)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * and Alex Hornung <ahornung@gmail.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * ----------------------------------------------------------------------------
36  * "THE BEER-WARE LICENSE" (Revision 42):
37  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
38  * can do whatever you want with this stuff. If we meet some day, and you think
39  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
40  * ----------------------------------------------------------------------------
41  *
42  * Copyright (c) 1982, 1986, 1988, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
75  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
76  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
77  */
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc.h>
83 #include <sys/sysctl.h>
84 #include <sys/buf.h>
85 #include <sys/conf.h>
86 #include <sys/disklabel.h>
87 #include <sys/disklabel32.h>
88 #include <sys/disklabel64.h>
89 #include <sys/diskslice.h>
90 #include <sys/diskmbr.h>
91 #include <sys/disk.h>
92 #include <sys/kerneldump.h>
93 #include <sys/malloc.h>
94 #include <machine/md_var.h>
95 #include <sys/ctype.h>
96 #include <sys/syslog.h>
97 #include <sys/device.h>
98 #include <sys/msgport.h>
99 #include <sys/devfs.h>
100 #include <sys/thread.h>
101 #include <sys/dsched.h>
102 #include <sys/queue.h>
103 #include <sys/lock.h>
104 #include <sys/udev.h>
105 #include <sys/uuid.h>
106 
107 #include <sys/buf2.h>
108 #include <sys/msgport2.h>
109 #include <sys/thread2.h>
110 
111 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
112 static int disk_debug_enable = 0;
113 
114 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
115 static void disk_msg_core(void *);
116 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
117 static void disk_probe(struct disk *dp, int reprobe);
118 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
119 static void bioqwritereorder(struct bio_queue_head *bioq);
120 static void disk_cleanserial(char *serno);
121 static int disk_debug(int, char *, ...) __printflike(2, 3);
122 static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
123     struct dev_ops *raw_ops, int clone);
124 
125 static d_open_t diskopen;
126 static d_close_t diskclose;
127 static d_ioctl_t diskioctl;
128 static d_strategy_t diskstrategy;
129 static d_psize_t diskpsize;
130 static d_dump_t diskdump;
131 
132 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
133 static struct lwkt_token disklist_token;
134 static struct lwkt_token ds_token;
135 
136 static struct dev_ops disk_ops = {
137 	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE },
138 	.d_open = diskopen,
139 	.d_close = diskclose,
140 	.d_read = physread,
141 	.d_write = physwrite,
142 	.d_ioctl = diskioctl,
143 	.d_strategy = diskstrategy,
144 	.d_dump = diskdump,
145 	.d_psize = diskpsize,
146 };
147 
148 static struct objcache 	*disk_msg_cache;
149 
150 struct objcache_malloc_args disk_msg_malloc_args = {
151 	sizeof(struct disk_msg), M_DISK };
152 
153 static struct lwkt_port disk_dispose_port;
154 static struct lwkt_port disk_msg_port;
155 
156 static int
157 disk_debug(int level, char *fmt, ...)
158 {
159 	__va_list ap;
160 
161 	__va_start(ap, fmt);
162 	if (level <= disk_debug_enable)
163 		kvprintf(fmt, ap);
164 	__va_end(ap);
165 
166 	return 0;
167 }
168 
169 static int
170 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
171 {
172 	struct disk_info *info = &dp->d_info;
173 	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
174 	disklabel_ops_t ops;
175 	struct partinfo part;
176 	const char *msg;
177 	char uuid_buf[128];
178 	cdev_t ndev;
179 	int sno;
180 	u_int i;
181 
182 	disk_debug(2, "disk_probe_slice (begin): %s (%s)\n",
183 		   dev->si_name, dp->d_cdev->si_name);
184 
185 	sno = slice ? slice - 1 : 0;
186 
187 	ops = &disklabel32_ops;
188 	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
189 	if (msg && !strcmp(msg, "no disk label")) {
190 		ops = &disklabel64_ops;
191 		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
192 	}
193 
194 	if (msg == NULL) {
195 		if (slice != WHOLE_DISK_SLICE)
196 			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
197 		else
198 			sp->ds_reserved = 0;
199 
200 		sp->ds_ops = ops;
201 		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
202 			ops->op_loadpartinfo(sp->ds_label, i, &part);
203 			if (part.fstype) {
204 				if (reprobe &&
205 				    (ndev = devfs_find_device_by_name("%s%c",
206 						dev->si_name, 'a' + i))
207 				) {
208 					/*
209 					 * Device already exists and
210 					 * is still valid.
211 					 */
212 					ndev->si_flags |= SI_REPROBE_TEST;
213 
214 					/*
215 					 * Destroy old UUID alias
216 					 */
217 					destroy_dev_alias(ndev, "part-by-uuid/*");
218 
219 					/* Create UUID alias */
220 					if (!kuuid_is_nil(&part.storage_uuid)) {
221 						snprintf_uuid(uuid_buf,
222 						    sizeof(uuid_buf),
223 						    &part.storage_uuid);
224 						make_dev_alias(ndev,
225 						    "part-by-uuid/%s",
226 						    uuid_buf);
227 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
228 					}
229 				} else {
230 					ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
231 						dkmakeminor(dkunit(dp->d_cdev),
232 							    slice, i),
233 						UID_ROOT, GID_OPERATOR, 0640,
234 						"%s%c", dev->si_name, 'a'+ i);
235 					ndev->si_parent = dev;
236 					ndev->si_iosize_max = dev->si_iosize_max;
237 					ndev->si_disk = dp;
238 					udev_dict_set_cstr(ndev, "subsystem", "disk");
239 					/* Inherit parent's disk type */
240 					if (dp->d_disktype) {
241 						udev_dict_set_cstr(ndev, "disk-type",
242 						    __DECONST(char *, dp->d_disktype));
243 					}
244 
245 					/* Create serno alias */
246 					if (dp->d_info.d_serialno) {
247 						make_dev_alias(ndev,
248 						    "serno/%s.s%d%c",
249 						    dp->d_info.d_serialno,
250 						    sno, 'a' + i);
251 					}
252 
253 					/* Create UUID alias */
254 					if (!kuuid_is_nil(&part.storage_uuid)) {
255 						snprintf_uuid(uuid_buf,
256 						    sizeof(uuid_buf),
257 						    &part.storage_uuid);
258 						make_dev_alias(ndev,
259 						    "part-by-uuid/%s",
260 						    uuid_buf);
261 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
262 					}
263 					ndev->si_flags |= SI_REPROBE_TEST;
264 				}
265 			}
266 		}
267 	} else if (info->d_dsflags & DSO_COMPATLABEL) {
268 		msg = NULL;
269 		if (sp->ds_size >= 0x100000000ULL)
270 			ops = &disklabel64_ops;
271 		else
272 			ops = &disklabel32_ops;
273 		sp->ds_label = ops->op_clone_label(info, sp);
274 	} else {
275 		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
276 		    sp->ds_type == DOSPTYP_NETBSD ||
277 		    sp->ds_type == DOSPTYP_OPENBSD) {
278 			log(LOG_WARNING, "%s: cannot find label (%s)\n",
279 			    dev->si_name, msg);
280 		}
281 
282 		if (sp->ds_label.opaque != NULL && sp->ds_ops != NULL) {
283 			/* Clear out old label - it's not around anymore */
284 			disk_debug(2,
285 			    "disk_probe_slice: clear out old disklabel on %s\n",
286 			    dev->si_name);
287 
288 			sp->ds_ops->op_freedisklabel(&sp->ds_label);
289 			sp->ds_ops = NULL;
290 		}
291 	}
292 
293 	if (msg == NULL) {
294 		sp->ds_wlabel = FALSE;
295 	}
296 
297 	return (msg ? EINVAL : 0);
298 }
299 
300 /*
301  * This routine is only called for newly minted drives or to reprobe
302  * a drive with no open slices.  disk_probe_slice() is called directly
303  * when reprobing partition changes within slices.
304  */
305 static void
306 disk_probe(struct disk *dp, int reprobe)
307 {
308 	struct disk_info *info = &dp->d_info;
309 	cdev_t dev = dp->d_cdev;
310 	cdev_t ndev;
311 	int error, i, sno;
312 	struct diskslices *osp;
313 	struct diskslice *sp;
314 	char uuid_buf[128];
315 
316 	KKASSERT (info->d_media_blksize != 0);
317 
318 	osp = dp->d_slice;
319 	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
320 	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);
321 
322 	error = mbrinit(dev, info, &(dp->d_slice));
323 	if (error) {
324 		dsgone(&osp);
325 		return;
326 	}
327 
328 	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
329 		/*
330 		 * Ignore the whole-disk slice, it has already been created.
331 		 */
332 		if (i == WHOLE_DISK_SLICE)
333 			continue;
334 
335 #if 1
336 		/*
337 		 * Ignore the compatibility slice s0 if it's a device mapper
338 		 * volume.
339 		 */
340 		if ((i == COMPATIBILITY_SLICE) &&
341 		    (info->d_dsflags & DSO_DEVICEMAPPER))
342 			continue;
343 #endif
344 
345 		sp = &dp->d_slice->dss_slices[i];
346 
347 		/*
348 		 * Handle s0.  s0 is a compatibility slice if there are no
349 		 * other slices and it has not otherwise been set up, else
350 		 * we ignore it.
351 		 */
352 		if (i == COMPATIBILITY_SLICE) {
353 			sno = 0;
354 			if (sp->ds_type == 0 &&
355 			    dp->d_slice->dss_nslices == BASE_SLICE) {
356 				sp->ds_size = info->d_media_blocks;
357 				sp->ds_reserved = 0;
358 			}
359 		} else {
360 			sno = i - 1;
361 			sp->ds_reserved = 0;
362 		}
363 
364 		/*
365 		 * Ignore 0-length slices
366 		 */
367 		if (sp->ds_size == 0)
368 			continue;
369 
370 		if (reprobe &&
371 		    (ndev = devfs_find_device_by_name("%ss%d",
372 						      dev->si_name, sno))) {
373 			/*
374 			 * Device already exists and is still valid
375 			 */
376 			ndev->si_flags |= SI_REPROBE_TEST;
377 
378 			/*
379 			 * Destroy old UUID alias
380 			 */
381 			destroy_dev_alias(ndev, "slice-by-uuid/*");
382 
383 			/* Create UUID alias */
384 			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
385 				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
386 				    &sp->ds_stor_uuid);
387 				make_dev_alias(ndev, "slice-by-uuid/%s",
388 				    uuid_buf);
389 			}
390 		} else {
391 			/*
392 			 * Else create new device
393 			 */
394 			ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
395 					dkmakewholeslice(dkunit(dev), i),
396 					UID_ROOT, GID_OPERATOR, 0640,
397 					(info->d_dsflags & DSO_DEVICEMAPPER)?
398 					"%s.s%d" : "%ss%d", dev->si_name, sno);
399 			ndev->si_parent = dev;
400 			ndev->si_iosize_max = dev->si_iosize_max;
401 			udev_dict_set_cstr(ndev, "subsystem", "disk");
402 			/* Inherit parent's disk type */
403 			if (dp->d_disktype) {
404 				udev_dict_set_cstr(ndev, "disk-type",
405 				    __DECONST(char *, dp->d_disktype));
406 			}
407 
408 			/* Create serno alias */
409 			if (dp->d_info.d_serialno) {
410 				make_dev_alias(ndev, "serno/%s.s%d",
411 					       dp->d_info.d_serialno, sno);
412 			}
413 
414 			/* Create UUID alias */
415 			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
416 				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
417 				    &sp->ds_stor_uuid);
418 				make_dev_alias(ndev, "slice-by-uuid/%s",
419 				    uuid_buf);
420 			}
421 
422 			ndev->si_disk = dp;
423 			ndev->si_flags |= SI_REPROBE_TEST;
424 		}
425 		sp->ds_dev = ndev;
426 
427 		/*
428 		 * Probe appropriate slices for a disklabel
429 		 *
430 		 * XXX slice type 1 used by our gpt probe code.
431 		 * XXX slice type 0 used by mbr compat slice.
432 		 */
433 		if (sp->ds_type == DOSPTYP_386BSD ||
434 		    sp->ds_type == DOSPTYP_NETBSD ||
435 		    sp->ds_type == DOSPTYP_OPENBSD ||
436 		    sp->ds_type == 0 ||
437 		    sp->ds_type == 1) {
438 			if (dp->d_slice->dss_first_bsd_slice == 0)
439 				dp->d_slice->dss_first_bsd_slice = i;
440 			disk_probe_slice(dp, ndev, i, reprobe);
441 		}
442 	}
443 	dsgone(&osp);
444 	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
445 }
446 
447 
448 static void
449 disk_msg_core(void *arg)
450 {
451 	struct disk	*dp;
452 	struct diskslice *sp;
453 	disk_msg_t msg;
454 	int run;
455 
456 	lwkt_gettoken(&disklist_token);
457 	lwkt_initport_thread(&disk_msg_port, curthread);
458 	wakeup(curthread);	/* synchronous startup */
459 	lwkt_reltoken(&disklist_token);
460 
461 	lwkt_gettoken(&ds_token);
462 	run = 1;
463 
464 	while (run) {
465 		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
466 
467 		switch (msg->hdr.u.ms_result) {
468 		case DISK_DISK_PROBE:
469 			dp = (struct disk *)msg->load;
470 			disk_debug(1,
471 				    "DISK_DISK_PROBE: %s\n",
472 					dp->d_cdev->si_name);
473 			disk_iocom_update(dp);
474 			disk_probe(dp, 0);
475 			break;
476 		case DISK_DISK_DESTROY:
477 			dp = (struct disk *)msg->load;
478 			disk_debug(1,
479 				    "DISK_DISK_DESTROY: %s\n",
480 					dp->d_cdev->si_name);
481 			disk_iocom_uninit(dp);
482 
483 			/*
484 			 * Interlock against struct disk enumerations.
485 			 * Wait for enumerations to complete then remove
486 			 * the dp from the list before tearing it down.
487 			 * This avoids numerous races.
488 			 */
489 			lwkt_gettoken(&disklist_token);
490 			while (dp->d_refs)
491 				tsleep(&dp->d_refs, 0, "diskdel", hz / 10);
492 			LIST_REMOVE(dp, d_list);
493 
494 			dsched_disk_destroy(dp);
495 			devfs_destroy_related(dp->d_cdev);
496 			destroy_dev(dp->d_cdev);
497 			destroy_only_dev(dp->d_rawdev);
498 
499 			lwkt_reltoken(&disklist_token);
500 
501 			if (dp->d_info.d_serialno) {
502 				kfree(dp->d_info.d_serialno, M_TEMP);
503 				dp->d_info.d_serialno = NULL;
504 			}
505 			break;
506 		case DISK_UNPROBE:
507 			dp = (struct disk *)msg->load;
508 			disk_debug(1,
509 				    "DISK_DISK_UNPROBE: %s\n",
510 					dp->d_cdev->si_name);
511 			devfs_destroy_related(dp->d_cdev);
512 			break;
513 		case DISK_SLICE_REPROBE:
514 			dp = (struct disk *)msg->load;
515 			sp = (struct diskslice *)msg->load2;
516 			devfs_clr_related_flag(sp->ds_dev,
517 						SI_REPROBE_TEST);
518 			disk_debug(1,
519 				    "DISK_SLICE_REPROBE: %s\n",
520 				    sp->ds_dev->si_name);
521 			disk_probe_slice(dp, sp->ds_dev,
522 					 dkslice(sp->ds_dev), 1);
523 			devfs_destroy_related_without_flag(
524 					sp->ds_dev, SI_REPROBE_TEST);
525 			break;
526 		case DISK_DISK_REPROBE:
527 			dp = (struct disk *)msg->load;
528 			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
529 			disk_debug(1,
530 				    "DISK_DISK_REPROBE: %s\n",
531 				    dp->d_cdev->si_name);
532 			disk_probe(dp, 1);
533 			devfs_destroy_related_without_flag(
534 					dp->d_cdev, SI_REPROBE_TEST);
535 			break;
536 		case DISK_SYNC:
537 			disk_debug(1, "DISK_SYNC\n");
538 			break;
539 		default:
540 			devfs_debug(DEVFS_DEBUG_WARNING,
541 				    "disk_msg_core: unknown message "
542 				    "received at core\n");
543 			break;
544 		}
545 		lwkt_replymsg(&msg->hdr, 0);
546 	}
547 	lwkt_reltoken(&ds_token);
548 	lwkt_exit();
549 }
550 
551 
552 /*
553  * Acts as a message drain. Any message that is replied to here gets
554  * destroyed and the memory freed.
555  */
556 static void
557 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
558 {
559 	objcache_put(disk_msg_cache, msg);
560 }
561 
562 
563 void
564 disk_msg_send(uint32_t cmd, void *load, void *load2)
565 {
566 	disk_msg_t disk_msg;
567 	lwkt_port_t port = &disk_msg_port;
568 
569 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
570 
571 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
572 
573 	disk_msg->hdr.u.ms_result = cmd;
574 	disk_msg->load = load;
575 	disk_msg->load2 = load2;
576 	KKASSERT(port);
577 	lwkt_sendmsg(port, &disk_msg->hdr);
578 }
579 
580 void
581 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
582 {
583 	struct lwkt_port rep_port;
584 	disk_msg_t disk_msg;
585 	lwkt_port_t port;
586 
587 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
588 	port = &disk_msg_port;
589 
590 	/* XXX could probably use curthread's built-in msgport */
591 	lwkt_initport_thread(&rep_port, curthread);
592 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
593 
594 	disk_msg->hdr.u.ms_result = cmd;
595 	disk_msg->load = load;
596 	disk_msg->load2 = load2;
597 
598 	lwkt_domsg(port, &disk_msg->hdr, 0);
599 	objcache_put(disk_msg_cache, disk_msg);
600 }
601 
602 /*
603  * Create a raw device for the dev_ops template (which is returned).  Also
604  * create a slice and unit managed disk and overload the user visible
605  * device space with it.
606  *
607  * NOTE: The returned raw device is NOT a slice and unit managed device.
608  * It is an actual raw device representing the raw disk as specified by
609  * the passed dev_ops.  The disk layer not only returns such a raw device,
610  * it also uses it internally when passing (modified) commands through.
611  */
612 cdev_t
613 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
614 {
615 	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
616 }
617 
618 cdev_t
619 disk_create_clone(int unit, struct disk *dp,
620 		  struct dev_ops *raw_ops)
621 {
622 	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
623 }
624 
625 cdev_t
626 disk_create_named(const char *name, int unit, struct disk *dp,
627 		  struct dev_ops *raw_ops)
628 {
629 	return _disk_create_named(name, unit, dp, raw_ops, 0);
630 }
631 
632 cdev_t
633 disk_create_named_clone(const char *name, int unit, struct disk *dp,
634 			struct dev_ops *raw_ops)
635 {
636 	return _disk_create_named(name, unit, dp, raw_ops, 1);
637 }
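
/*
 * Example (illustrative sketch only): a driver would typically create
 * its managed disk from its attach routine and tear it down again from
 * its detach routine with disk_destroy().  The "foo" softc, unit and
 * dev_ops names below are hypothetical.
 *
 *	struct foo_softc *sc = device_get_softc(dev);
 *
 *	sc->rawdev = disk_create(unit, &sc->disk, &foo_raw_ops);
 *	sc->rawdev->si_drv1 = sc;
 *	...
 *	disk_destroy(&sc->disk);	(later, from foo_detach())
 */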
638 
639 static cdev_t
640 _disk_create_named(const char *name, int unit, struct disk *dp,
641 		   struct dev_ops *raw_ops, int clone)
642 {
643 	cdev_t rawdev;
644 
645 	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
646 
647 	if (name) {
648 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
649 		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
650 	} else {
651 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
652 		    UID_ROOT, GID_OPERATOR, 0640,
653 		    "%s%d", raw_ops->head.name, unit);
654 	}
655 
656 	bzero(dp, sizeof(*dp));
657 
658 	dp->d_rawdev = rawdev;
659 	dp->d_raw_ops = raw_ops;
660 	dp->d_dev_ops = &disk_ops;
661 
662 	if (name) {
663 		if (clone) {
664 			dp->d_cdev = make_only_dev_covering(
665 					&disk_ops, dp->d_rawdev->si_ops,
666 					dkmakewholedisk(unit),
667 					UID_ROOT, GID_OPERATOR, 0640,
668 					"%s", name);
669 		} else {
670 			dp->d_cdev = make_dev_covering(
671 					&disk_ops, dp->d_rawdev->si_ops,
672 					dkmakewholedisk(unit),
673 					UID_ROOT, GID_OPERATOR, 0640,
674 					"%s", name);
675 		}
676 	} else {
677 		if (clone) {
678 			dp->d_cdev = make_only_dev_covering(
679 					&disk_ops, dp->d_rawdev->si_ops,
680 					dkmakewholedisk(unit),
681 					UID_ROOT, GID_OPERATOR, 0640,
682 					"%s%d", raw_ops->head.name, unit);
683 		} else {
684 			dp->d_cdev = make_dev_covering(
685 					&disk_ops, dp->d_rawdev->si_ops,
686 					dkmakewholedisk(unit),
687 					UID_ROOT, GID_OPERATOR, 0640,
688 					"%s%d", raw_ops->head.name, unit);
689 		}
690 	}
691 
692 	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
693 	dp->d_cdev->si_disk = dp;
694 
695 	if (name)
696 		dsched_disk_create(dp, name, unit);
697 	else
698 		dsched_disk_create(dp, raw_ops->head.name, unit);
699 
700 	lwkt_gettoken(&disklist_token);
701 	LIST_INSERT_HEAD(&disklist, dp, d_list);
702 	lwkt_reltoken(&disklist_token);
703 
704 	disk_iocom_init(dp);
705 
706 	disk_debug(1, "disk_create (end): %s%d\n",
707 		   (name != NULL)?(name):(raw_ops->head.name), unit);
708 
709 	return (dp->d_rawdev);
710 }
711 
712 int
713 disk_setdisktype(struct disk *disk, const char *type)
714 {
715 	int error;
716 
717 	KKASSERT(disk != NULL);
718 
719 	disk->d_disktype = type;
720 	error = udev_dict_set_cstr(disk->d_cdev, "disk-type",
721 				   __DECONST(char *, type));
722 	return error;
723 }
724 
725 int
726 disk_getopencount(struct disk *disk)
727 {
728 	return disk->d_opencount;
729 }
730 
731 static void
732 _setdiskinfo(struct disk *disk, struct disk_info *info)
733 {
734 	char *oldserialno;
735 
736 	oldserialno = disk->d_info.d_serialno;
737 	bcopy(info, &disk->d_info, sizeof(disk->d_info));
738 	info = &disk->d_info;
739 
740 	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);
741 
742 	/*
743 	 * The serial number is duplicated so the caller can throw
744 	 * their copy away.
745 	 */
746 	if (info->d_serialno && info->d_serialno[0] &&
747 	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
748 		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
749 		disk_cleanserial(info->d_serialno);
750 		if (disk->d_cdev) {
751 			make_dev_alias(disk->d_cdev, "serno/%s",
752 				       info->d_serialno);
753 		}
754 	} else {
755 		info->d_serialno = NULL;
756 	}
757 	if (oldserialno)
758 		kfree(oldserialno, M_TEMP);
759 
760 	dsched_disk_update(disk, info);
761 
762 	/*
763 	 * The caller may set d_media_size or d_media_blocks and we
764 	 * calculate the other.
765 	 */
766 	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
767 	if (info->d_media_size == 0 && info->d_media_blocks) {
768 		info->d_media_size = (u_int64_t)info->d_media_blocks *
769 				     info->d_media_blksize;
770 	} else if (info->d_media_size && info->d_media_blocks == 0 &&
771 		   info->d_media_blksize) {
772 		info->d_media_blocks = info->d_media_size /
773 				       info->d_media_blksize;
774 	}
775 
776 	/*
777 	 * The si_* fields for rawdev are not set until after the
778 	 * disk_create() call, so someone using the cooked version
779 	 * of the raw device (e.g. da0s0) will not get the right
780 	 * si_iosize_max unless we fix it up here.
781 	 */
782 	if (disk->d_cdev && disk->d_rawdev &&
783 	    disk->d_cdev->si_iosize_max == 0) {
784 		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
785 		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
786 		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
787 	}
788 
789 	/* Add the serial number to the udev_dictionary */
790 	if (info->d_serialno)
791 		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
792 }
793 
794 /*
795  * Disk drivers must call this routine when media parameters are available
796  * or have changed.
797  */
798 void
799 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
800 {
801 	_setdiskinfo(disk, info);
802 	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
803 	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
804 		   disk->d_cdev->si_name);
805 }
806 
807 void
808 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
809 {
810 	_setdiskinfo(disk, info);
811 	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
812 	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
813 		   disk->d_cdev->si_name);
814 }
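
/*
 * Example (illustrative sketch, hypothetical "foo"/"sc" names): a
 * driver fills in a struct disk_info once the media parameters are
 * known and hands it to the disk layer.  Only one of d_media_size or
 * d_media_blocks needs to be set (see _setdiskinfo() above); the
 * serial number, if any, is copied so the caller may discard it.
 *
 *	struct disk_info info;
 *
 *	bzero(&info, sizeof(info));
 *	info.d_media_blksize = 512;
 *	info.d_media_blocks = sc->total_sectors;
 *	info.d_dsflags = DSO_COMPATLABEL;
 *	info.d_serialno = sc->serialno;
 *	disk_setdiskinfo(&sc->disk, &info);
 */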
815 
816 /*
817  * This routine is called when an adapter detaches.  The higher level
818  * managed disk device is destroyed while the lower level raw device is
819  * released.
820  */
821 void
822 disk_destroy(struct disk *disk)
823 {
824 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
825 	return;
826 }
827 
828 int
829 disk_dumpcheck(cdev_t dev, u_int64_t *size,
830 	       u_int64_t *blkno, u_int32_t *secsize)
831 {
832 	struct partinfo pinfo;
833 	int error;
834 
835 	if (size)
836 		*size = 0;	/* avoid gcc warnings */
837 	if (secsize)
838 		*secsize = 512;	/* avoid gcc warnings */
839 	bzero(&pinfo, sizeof(pinfo));
840 
841 	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
842 			   proc0.p_ucred, NULL, NULL);
843 	if (error)
844 		return (error);
845 
846 	if (pinfo.media_blksize == 0)
847 		return (ENXIO);
848 
849 	if (blkno) /* XXX: make sure this reserved stuff is right */
850 		*blkno = pinfo.reserved_blocks +
851 			pinfo.media_offset / pinfo.media_blksize;
852 	if (secsize)
853 		*secsize = pinfo.media_blksize;
854 	if (size)
855 		*size = (pinfo.media_blocks - pinfo.reserved_blocks);
856 
857 	return (0);
858 }
859 
860 int
861 disk_dumpconf(cdev_t dev, u_int onoff)
862 {
863 	struct dumperinfo di;
864 	u_int64_t	size, blkno;
865 	u_int32_t	secsize;
866 	int error;
867 
868 	if (!onoff)
869 		return set_dumper(NULL);
870 
871 	error = disk_dumpcheck(dev, &size, &blkno, &secsize);
872 
873 	if (error)
874 		return ENXIO;
875 
876 	bzero(&di, sizeof(struct dumperinfo));
877 	di.dumper = diskdump;
878 	di.priv = dev;
879 	di.blocksize = secsize;
880 	di.maxiosize = dev->si_iosize_max;
881 	di.mediaoffset = blkno * DEV_BSIZE;
882 	di.mediasize = size * DEV_BSIZE;
883 
884 	return set_dumper(&di);
885 }
886 
887 void
888 disk_unprobe(struct disk *disk)
889 {
890 	if (disk == NULL)
891 		return;
892 
893 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
894 }
895 
896 void
897 disk_invalidate (struct disk *disk)
898 {
899 	dsgone(&disk->d_slice);
900 }
901 
902 /*
903  * Enumerate disks, pass a marker and an initial NULL dp to initialize,
904  * then loop with the previously returned dp.
905  *
906  * The returned dp will be referenced, preventing its destruction.  When
907  * you pass the returned dp back into the loop the ref is dropped.
908  *
909  * WARNING: If terminating your loop early you must call
910  *	    disk_enumerate_stop().
911  */
912 struct disk *
913 disk_enumerate(struct disk *marker, struct disk *dp)
914 {
915 	lwkt_gettoken(&disklist_token);
916 	if (dp) {
917 		--dp->d_refs;
918 		dp = LIST_NEXT(marker, d_list);
919 		LIST_REMOVE(marker, d_list);
920 	} else {
921 		bzero(marker, sizeof(*marker));
922 		marker->d_flags = DISKFLAG_MARKER;
923 		dp = LIST_FIRST(&disklist);
924 	}
925 	while (dp) {
926 		if ((dp->d_flags & DISKFLAG_MARKER) == 0)
927 			break;
928 		dp = LIST_NEXT(dp, d_list);
929 	}
930 	if (dp) {
931 		++dp->d_refs;
932 		LIST_INSERT_AFTER(dp, marker, d_list);
933 	}
934 	lwkt_reltoken(&disklist_token);
935 	return (dp);
936 }
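
/*
 * Example (sketch of the enumeration pattern described above; see
 * sysctl_disks() below for a real user).  foo_wants_disk() is a
 * hypothetical predicate:
 *
 *	struct disk marker;
 *	struct disk *dp;
 *
 *	dp = NULL;
 *	while ((dp = disk_enumerate(&marker, dp)) != NULL) {
 *		if (foo_wants_disk(dp)) {
 *			disk_enumerate_stop(&marker, dp);
 *			break;
 *		}
 *	}
 */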
937 
938 /*
939  * Terminate an enumeration early.  Do not call this function if the
940  * enumeration ended normally.  dp can be NULL, indicating that you
941  * wish to retain the ref count on dp.
942  *
943  * This function removes the marker.
944  */
945 void
946 disk_enumerate_stop(struct disk *marker, struct disk *dp)
947 {
948 	lwkt_gettoken(&disklist_token);
949 	LIST_REMOVE(marker, d_list);
950 	if (dp)
951 		--dp->d_refs;
952 	lwkt_reltoken(&disklist_token);
953 }
954 
955 static
956 int
957 sysctl_disks(SYSCTL_HANDLER_ARGS)
958 {
959 	struct disk marker;
960 	struct disk *dp;
961 	int error, first;
962 
963 	first = 1;
964 	error = 0;
965 	dp = NULL;
966 
967 	while ((dp = disk_enumerate(&marker, dp))) {
968 		if (!first) {
969 			error = SYSCTL_OUT(req, " ", 1);
970 			if (error) {
971 				disk_enumerate_stop(&marker, dp);
972 				break;
973 			}
974 		} else {
975 			first = 0;
976 		}
977 		error = SYSCTL_OUT(req, dp->d_rawdev->si_name,
978 				   strlen(dp->d_rawdev->si_name));
979 		if (error) {
980 			disk_enumerate_stop(&marker, dp);
981 			break;
982 		}
983 	}
984 	if (error == 0)
985 		error = SYSCTL_OUT(req, "", 1);
986 	return error;
987 }
988 
989 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
990     sysctl_disks, "A", "names of available disks");
991 
992 /*
993  * Open a disk device or partition.
994  */
995 static
996 int
997 diskopen(struct dev_open_args *ap)
998 {
999 	cdev_t dev = ap->a_head.a_dev;
1000 	struct disk *dp;
1001 	int error;
1002 
1003 	/*
1004 	 * dp can't be NULL here XXX.
1005 	 *
1006 	 * d_slice will be NULL if setdiskinfo() has not been called yet.
1007 	 * setdiskinfo() is typically called whether the disk is present
1008 	 * or not (e.g. CD), but the base disk device is created first
1009 	 * and there may be a race.
1010 	 */
1011 	dp = dev->si_disk;
1012 	if (dp == NULL || dp->d_slice == NULL)
1013 		return (ENXIO);
1014 	error = 0;
1015 
1016 	/*
1017 	 * Deal with open races
1018 	 */
1019 	lwkt_gettoken(&ds_token);
1020 	while (dp->d_flags & DISKFLAG_LOCK) {
1021 		dp->d_flags |= DISKFLAG_WANTED;
1022 		error = tsleep(dp, PCATCH, "diskopen", hz);
1023 		if (error) {
1024 			lwkt_reltoken(&ds_token);
1025 			return (error);
1026 		}
1027 	}
1028 	dp->d_flags |= DISKFLAG_LOCK;
1029 
1030 	/*
1031 	 * Open the underlying raw device.
1032 	 */
1033 	if (!dsisopen(dp->d_slice)) {
1034 #if 0
1035 		if (!pdev->si_iosize_max)
1036 			pdev->si_iosize_max = dev->si_iosize_max;
1037 #endif
1038 		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
1039 				  ap->a_devtype, ap->a_cred, NULL);
1040 	}
1041 
1042 	if (error)
1043 		goto out;
1044 	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
1045 		       &dp->d_slice, &dp->d_info);
1046 	if (!dsisopen(dp->d_slice)) {
1047 		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype, NULL);
1048 	}
1049 out:
1050 	dp->d_flags &= ~DISKFLAG_LOCK;
1051 	if (dp->d_flags & DISKFLAG_WANTED) {
1052 		dp->d_flags &= ~DISKFLAG_WANTED;
1053 		wakeup(dp);
1054 	}
1055 	lwkt_reltoken(&ds_token);
1056 
1057 	KKASSERT(dp->d_opencount >= 0);
1058 	/* If the open was successful, bump open count */
1059 	if (error == 0)
1060 		atomic_add_int(&dp->d_opencount, 1);
1061 
1062 	return(error);
1063 }
1064 
1065 /*
1066  * Close a disk device or partition
1067  */
1068 static
1069 int
1070 diskclose(struct dev_close_args *ap)
1071 {
1072 	cdev_t dev = ap->a_head.a_dev;
1073 	struct disk *dp;
1074 	int error;
1075 	int lcount;
1076 
1077 	error = 0;
1078 	dp = dev->si_disk;
1079 
1080 	/*
1081 	 * The cdev_t represents the disk/slice/part.  The shared
1082 	 * dp structure governs all cdevs associated with the disk.
1083 	 *
1084 	 * As a safety, only close the underlying raw device on the last
1085 	 * close of the disk device, and only if our tracking of the
1086 	 * slices/partitions also indicates nothing is open.
1087 	 */
1088 	KKASSERT(dp->d_opencount >= 1);
1089 	lcount = atomic_fetchadd_int(&dp->d_opencount, -1);
1090 
1091 	lwkt_gettoken(&ds_token);
1092 	dsclose(dev, ap->a_devtype, dp->d_slice);
1093 	if (lcount <= 1 && !dsisopen(dp->d_slice)) {
1094 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype, NULL);
1095 	}
1096 	lwkt_reltoken(&ds_token);
1097 
1098 	return (error);
1099 }
1100 
1101 /*
1102  * First execute the ioctl on the disk device, and if it isn't supported
1103  * try running it on the backing device.
1104  */
1105 static
1106 int
1107 diskioctl(struct dev_ioctl_args *ap)
1108 {
1109 	cdev_t dev = ap->a_head.a_dev;
1110 	struct disk *dp;
1111 	int error;
1112 	u_int u;
1113 
1114 	dp = dev->si_disk;
1115 	if (dp == NULL)
1116 		return (ENXIO);
1117 
1118 	devfs_debug(DEVFS_DEBUG_DEBUG,
1119 		    "diskioctl: cmd is: %lx (name: %s)\n",
1120 		    ap->a_cmd, dev->si_name);
1121 	devfs_debug(DEVFS_DEBUG_DEBUG,
1122 		    "diskioctl: &dp->d_slice is: %p, %p\n",
1123 		    &dp->d_slice, dp->d_slice);
1124 
1125 	if (ap->a_cmd == DIOCGKERNELDUMP) {
1126 		u = *(u_int *)ap->a_data;
1127 		return disk_dumpconf(dev, u);
1128 	}
1129 
1130 	if (ap->a_cmd == DIOCRECLUSTER && dev == dp->d_cdev) {
1131 		error = disk_iocom_ioctl(dp, ap->a_cmd, ap->a_data);
1132 		return error;
1133 	}
1134 
1135 	if (dp->d_slice == NULL ||
1136 	    ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
1137 	     dkslice(dev) == WHOLE_DISK_SLICE)) {
1138 		error = ENOIOCTL;
1139 	} else {
1140 		lwkt_gettoken(&ds_token);
1141 		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
1142 				&dp->d_slice, &dp->d_info);
1143 		lwkt_reltoken(&ds_token);
1144 	}
1145 
1146 	if (error == ENOIOCTL) {
1147 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
1148 				   ap->a_fflag, ap->a_cred, NULL, NULL);
1149 	}
1150 	return (error);
1151 }
1152 
1153 /*
1154  * Execute strategy routine
1155  */
1156 static
1157 int
1158 diskstrategy(struct dev_strategy_args *ap)
1159 {
1160 	cdev_t dev = ap->a_head.a_dev;
1161 	struct bio *bio = ap->a_bio;
1162 	struct bio *nbio;
1163 	struct disk *dp;
1164 
1165 	dp = dev->si_disk;
1166 
1167 	if (dp == NULL) {
1168 		bio->bio_buf->b_error = ENXIO;
1169 		bio->bio_buf->b_flags |= B_ERROR;
1170 		biodone(bio);
1171 		return(0);
1172 	}
1173 	KKASSERT(dev->si_disk == dp);
1174 
1175 	/*
1176 	 * The dscheck() function will also transform the slice relative
1177 	 * block number i.e. bio->bio_offset into a block number that can be
1178 	 * passed directly to the underlying raw device.  If dscheck()
1179 	 * returns NULL it will have handled the bio for us (e.g. EOF
1180 	 * or error due to being beyond the device size).
1181 	 */
1182 	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
1183 		dev_dstrategy(dp->d_rawdev, nbio);
1184 	} else {
1185 		biodone(bio);
1186 	}
1187 	return(0);
1188 }
1189 
1190 /*
1191  * Return the partition size in ?blocks?
1192  */
1193 static
1194 int
1195 diskpsize(struct dev_psize_args *ap)
1196 {
1197 	cdev_t dev = ap->a_head.a_dev;
1198 	struct disk *dp;
1199 
1200 	dp = dev->si_disk;
1201 	if (dp == NULL)
1202 		return(ENODEV);
1203 
1204 	ap->a_result = dssize(dev, &dp->d_slice);
1205 
1206 	if ((ap->a_result == -1) &&
1207 	   (dp->d_info.d_dsflags & DSO_RAWPSIZE)) {
1208 		ap->a_head.a_dev = dp->d_rawdev;
1209 		return dev_doperate(&ap->a_head);
1210 	}
1211 	return(0);
1212 }
1213 
1214 static int
1215 diskdump(struct dev_dump_args *ap)
1216 {
1217 	cdev_t dev = ap->a_head.a_dev;
1218 	struct disk *dp = dev->si_disk;
1219 	u_int64_t size, offset;
1220 	int error;
1221 
1222 	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
1223 	/* XXX: this should probably go in disk_dumpcheck somehow */
1224 	if (ap->a_length != 0) {
1225 		size *= DEV_BSIZE;
1226 		offset = ap->a_blkno * DEV_BSIZE;
1227 		if ((ap->a_offset < offset) ||
1228 		    (ap->a_offset + ap->a_length - offset > size)) {
1229 			kprintf("Attempt to write outside dump "
1230 				"device boundaries.\n");
1231 			error = ENOSPC;
1232 		}
1233 	}
1234 
1235 	if (error == 0) {
1236 		ap->a_head.a_dev = dp->d_rawdev;
1237 		error = dev_doperate(&ap->a_head);
1238 	}
1239 
1240 	return(error);
1241 }
1242 
1243 
1244 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
1245 	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");
1246 
1247 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
1248 	   0, sizeof(struct disk), "sizeof(struct disk)");
1249 
1250 /*
1251  * Reorder interval for burst write allowance and minor write
1252  * allowance.
1253  *
1254  * We always want to trickle some writes in to make use of the
1255  * disk's zone cache.  Bursting occurs on a longer interval and only
1256  * when runningbufspace is well over the hirunningspace limit.
1257  */
1258 int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
1259 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
1260 	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
1261 int bioq_reorder_minor_interval = 5;
1262 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
1263 	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");
1264 
1265 int bioq_reorder_burst_bytes = 3000000;
1266 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
1267 	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
1268 int bioq_reorder_minor_bytes = 262144;
1269 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
1270 	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1271 
1272 
1273 /*
1274  * Order I/Os.  Generally speaking this code is designed to make better
1275  * use of drive zone caches.  A drive zone cache can typically track linear
1276  * reads or writes for around 16 zones simultaneously.
1277  *
1278  * Read prioritization issues:  It is possible for hundreds of megabytes worth
1279  * of writes to be queued asynchronously.  This creates a huge bottleneck
1280  * for reads, which reduces read bandwidth to a trickle.
1281  *
1282  * To solve this problem we generally reorder reads before writes.
1283  *
1284  * However, a large number of random reads can also starve writes and
1285  * make poor use of the drive zone cache so we allow writes to trickle
1286  * in every N reads.
1287  */
1288 void
1289 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
1290 {
1291 #if 0
1292 	/*
1293 	 * The BIO wants to be ordered.  Adding to the tail also
1294 	 * causes transition to be set to NULL, forcing the ordering
1295 	 * of all prior I/O's.
1296 	 */
1297 	if (bio->bio_buf->b_flags & B_ORDERED) {
1298 		bioq_insert_tail(bioq, bio);
1299 		return;
1300 	}
1301 #endif
1302 
1303 	switch(bio->bio_buf->b_cmd) {
1304 	case BUF_CMD_READ:
1305 		if (bioq->transition) {
1306 			/*
1307 			 * Insert before the first write.  Bleedover writes
1308 			 * based on reorder intervals to prevent starvation.
1309 			 */
1310 			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
1311 			++bioq->reorder;
1312 			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
1313 				bioqwritereorder(bioq);
1314 				if (bioq->reorder >=
1315 				    bioq_reorder_burst_interval) {
1316 					bioq->reorder = 0;
1317 				}
1318 			}
1319 		} else {
1320 			/*
1321 			 * No writes queued (or ordering was forced),
1322 			 * insert at tail.
1323 			 */
1324 			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1325 		}
1326 		break;
1327 	case BUF_CMD_WRITE:
1328 		/*
1329 		 * Writes are always appended.  If no writes were previously
1330 		 * queued or an ordered tail insertion occurred the transition
1331 		 * field will be NULL.
1332 		 */
1333 		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1334 		if (bioq->transition == NULL)
1335 			bioq->transition = bio;
1336 		break;
1337 	default:
1338 		/*
1339 		 * All other request types are forced to be ordered.
1340 		 */
1341 		bioq_insert_tail(bioq, bio);
1342 		break;
1343 	}
1344 }
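
/*
 * Example (illustrative sketch): a driver typically queues incoming
 * BIOs through bioqdisksort() from its strategy routine and drains
 * the queue from its start routine.  The bioq_first()/bioq_remove()
 * helpers and the foo_* names are assumptions for illustration only.
 *
 *	bioqdisksort(&sc->bio_queue, bio);	(in foo_strategy())
 *	foo_start(sc);
 *
 *	while ((bio = bioq_first(&sc->bio_queue)) != NULL) {
 *		bioq_remove(&sc->bio_queue, bio);	(in foo_start())
 *		foo_issue(sc, bio);
 *	}
 */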
1345 
1346 /*
1347  * Move the read-write transition point to prevent reads from
1348  * completely starving our writes.  This brings a number of writes into
1349  * the fold every N reads.
1350  *
1351  * We bring a few linear writes into the fold on a minor interval
1352  * and we bring a non-linear burst of writes into the fold on a major
1353  * interval.  Bursting only occurs if runningbufspace is really high
1354  * (typically from syncs, fsyncs, or HAMMER flushes).
1355  */
1356 static
1357 void
1358 bioqwritereorder(struct bio_queue_head *bioq)
1359 {
1360 	struct bio *bio;
1361 	off_t next_offset;
1362 	size_t left;
1363 	size_t n;
1364 	int check_off;
1365 
1366 	if (bioq->reorder < bioq_reorder_burst_interval ||
1367 	    !buf_runningbufspace_severe()) {
1368 		left = (size_t)bioq_reorder_minor_bytes;
1369 		check_off = 1;
1370 	} else {
1371 		left = (size_t)bioq_reorder_burst_bytes;
1372 		check_off = 0;
1373 	}
1374 
1375 	next_offset = bioq->transition->bio_offset;
1376 	while ((bio = bioq->transition) != NULL &&
1377 	       (check_off == 0 || next_offset == bio->bio_offset)
1378 	) {
1379 		n = bio->bio_buf->b_bcount;
1380 		next_offset = bio->bio_offset + n;
1381 		bioq->transition = TAILQ_NEXT(bio, bio_act);
1382 		if (left < n)
1383 			break;
1384 		left -= n;
1385 	}
1386 }
1387 
1388 /*
1389  * Bounds checking against the media size, used for the raw partition.
1390  * secsize, mediasize and b_blkno must all be in the same units.
1391  * Possibly this has to be DEV_BSIZE (512).
1392  */
1393 int
1394 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
1395 {
1396 	struct buf *bp = bio->bio_buf;
1397 	int64_t sz;
1398 
1399 	sz = howmany(bp->b_bcount, secsize);
1400 
1401 	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
1402 		sz = mediasize - bio->bio_offset/DEV_BSIZE;
1403 		if (sz == 0) {
1404 			/* If exactly at end of disk, return EOF. */
1405 			bp->b_resid = bp->b_bcount;
1406 			return 0;
1407 		}
1408 		if (sz < 0) {
1409 			/* If past end of disk, return EINVAL. */
1410 			bp->b_error = EINVAL;
1411 			return 0;
1412 		}
1413 		/* Otherwise, truncate request. */
1414 		bp->b_bcount = sz * secsize;
1415 	}
1416 
1417 	return 1;
1418 }
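
/*
 * Example (illustrative sketch): a raw-device strategy routine would
 * typically terminate the BIO itself when 0 is returned, since
 * b_resid/b_error have already been set up above.  The foo_* and
 * sc->media_blocks names are hypothetical.
 *
 *	if (bounds_check_with_mediasize(bio, DEV_BSIZE,
 *					sc->media_blocks) == 0) {
 *		if (bio->bio_buf->b_error)
 *			bio->bio_buf->b_flags |= B_ERROR;
 *		biodone(bio);
 *		return (0);
 *	}
 *	foo_issue(sc, bio);
 */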
1419 
1420 /*
1421  * Disk error is the preface to plaintive error messages
1422  * about failing disk transfers.  It prints messages of the form
1423 
1424 da0s1a: hard error reading offset 000000001000 for 8192
1425 
1426  * using the device name, the caller-supplied "what" string, and the
1427  * offset and byte count taken from the bio.  donecnt should be 0 if
1428  * the amount transferred before the error is unknown; when non-zero
1429  * it is reported as the number of bytes completed.  pri is currently
1430  * unused; the message is always printed with kprintf.  The message
1431  * should be completed (with at least a newline) by the caller with
1432  * kprintf.  There is no trailing space.
1433  */
1434 void
1435 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1436 {
1437 	struct buf *bp = bio->bio_buf;
1438 	const char *term;
1439 
1440 	switch(bp->b_cmd) {
1441 	case BUF_CMD_READ:
1442 		term = "read";
1443 		break;
1444 	case BUF_CMD_WRITE:
1445 		term = "write";
1446 		break;
1447 	default:
1448 		term = "access";
1449 		break;
1450 	}
1451 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1452 	kprintf("offset %012llx for %d",
1453 		(long long)bio->bio_offset,
1454 		bp->b_bcount);
1455 
1456 	if (donecnt)
1457 		kprintf(" (%d bytes completed)", donecnt);
1458 }
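
/*
 * Example (sketch): an error path in a driver might report a failed
 * transfer like this, supplying the trailing newline itself:
 *
 *	diskerr(bio, dev, "hard error", LOG_PRINTF, 0);
 *	kprintf("\n");
 */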
1459 
1460 /*
1461  * Locate a disk device
1462  */
1463 cdev_t
1464 disk_locate(const char *devname)
1465 {
1466 	return devfs_find_device_by_name("%s", devname);
1467 }
1468 
1469 void
1470 disk_config(void *arg)
1471 {
1472 	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
1473 }
1474 
1475 static void
1476 disk_init(void)
1477 {
1478 	struct thread* td_core;
1479 
1480 	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1481 					 NULL, NULL, NULL,
1482 					 objcache_malloc_alloc,
1483 					 objcache_malloc_free,
1484 					 &disk_msg_malloc_args);
1485 
1486 	lwkt_token_init(&disklist_token, "disks");
1487 	lwkt_token_init(&ds_token, "ds");
1488 
1489 	/*
1490 	 * Initialize the reply-only port which acts as a message drain
1491 	 */
1492 	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1493 
1494 	lwkt_gettoken(&disklist_token);
1495 	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1496 		    0, -1, "disk_msg_core");
1497 	tsleep(td_core, 0, "diskcore", 0);
1498 	lwkt_reltoken(&disklist_token);
1499 }
1500 
1501 static void
1502 disk_uninit(void)
1503 {
1504 	objcache_destroy(disk_msg_cache);
1505 }
1506 
1507 /*
1508  * Clean out illegal characters in serial numbers.
1509  */
1510 static void
1511 disk_cleanserial(char *serno)
1512 {
1513 	char c;
1514 
1515 	while ((c = *serno) != 0) {
1516 		if (c >= 'a' && c <= 'z')
1517 			;
1518 		else if (c >= 'A' && c <= 'Z')
1519 			;
1520 		else if (c >= '0' && c <= '9')
1521 			;
1522 		else if (c == '-' || c == '@' || c == '+' || c == '.')
1523 			;
1524 		else
1525 			c = '_';
1526 		*serno++= c;
1527 	}
1528 }
1529 
1530 TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
1531 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
1532 	   0, "Enable subr_disk debugging");
1533 
1534 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1535 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1536