xref: /dragonfly/sys/kern/subr_disk.c (revision 32efd857)
1 /*
2  * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * and Alex Hornung <ahornung@gmail.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  * 3. Neither the name of The DragonFly Project nor the names of its
19  *    contributors may be used to endorse or promote products derived
20  *    from this software without specific, prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * ----------------------------------------------------------------------------
36  * "THE BEER-WARE LICENSE" (Revision 42):
37  * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
38  * can do whatever you want with this stuff. If we meet some day, and you think
39  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
40  * ----------------------------------------------------------------------------
41  *
42  * Copyright (c) 1982, 1986, 1988, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
75  * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
76  * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
77  */
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc.h>
83 #include <sys/sysctl.h>
84 #include <sys/buf.h>
85 #include <sys/conf.h>
86 #include <sys/disklabel.h>
87 #include <sys/disklabel32.h>
88 #include <sys/disklabel64.h>
89 #include <sys/diskslice.h>
90 #include <sys/diskmbr.h>
91 #include <sys/disk.h>
92 #include <sys/kerneldump.h>
93 #include <sys/malloc.h>
94 #include <machine/md_var.h>
95 #include <sys/ctype.h>
96 #include <sys/syslog.h>
97 #include <sys/device.h>
98 #include <sys/msgport.h>
99 #include <sys/devfs.h>
100 #include <sys/thread.h>
101 #include <sys/dsched.h>
102 #include <sys/queue.h>
103 #include <sys/lock.h>
104 #include <sys/udev.h>
105 #include <sys/uuid.h>
106 
107 #include <sys/buf2.h>
108 #include <sys/msgport2.h>
109 
/* Allocation type for struct disk related bookkeeping */
static MALLOC_DEFINE(M_DISK, "disk", "disk data");
/* Verbosity knob for disk_debug(); higher enables more messages */
static int disk_debug_enable = 0;

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);
static int disk_debug(int, char *, ...) __printflike(2, 3);
static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
    struct dev_ops *raw_ops, int clone);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_dump_t diskdump;

/* Global list of all disks; manipulated under disklist_token */
static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;
/* Serializes slice/label probes against open/close activity */
static struct lwkt_token ds_token;

/* dev_ops used to cover disks whose raw driver lacks D_NOEMERGPGR */
static struct dev_ops disk1_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

/* Same as disk1_ops but propagates the driver's D_NOEMERGPGR flag */
static struct dev_ops disk2_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO |
		     D_NOEMERGPGR },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

/* objcache backing disk_msg allocations (see disk_msg_send*) */
static struct objcache 	*disk_msg_cache;

static struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg),
	M_DISK
};

/* Reply port that recycles messages; request port of the disk thread */
static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;
169 
170 static int
171 disk_debug(int level, char *fmt, ...)
172 {
173 	__va_list ap;
174 
175 	__va_start(ap, fmt);
176 	if (level <= disk_debug_enable)
177 		kvprintf(fmt, ap);
178 	__va_end(ap);
179 
180 	return 0;
181 }
182 
183 static int
184 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
185 {
186 	struct disk_info *info = &dp->d_info;
187 	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
188 	disklabel_ops_t ops;
189 	struct dev_ops *dops;
190 	struct partinfo part;
191 	const char *msg;
192 	char uuid_buf[128];
193 	cdev_t ndev;
194 	int sno;
195 	u_int i;
196 
197 	disk_debug(2, "disk_probe_slice (begin): %s (%s)\n",
198 		   dev->si_name, dp->d_cdev->si_name);
199 
200 	sno = slice ? slice - 1 : 0;
201 	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
202 		&disk2_ops : &disk1_ops;
203 
204 	ops = &disklabel32_ops;
205 	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
206 	if (msg && !strcmp(msg, "no disk label")) {
207 		ops = &disklabel64_ops;
208 		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
209 	}
210 
211 	if (msg == NULL) {
212 		char packname[DISKLABEL_MAXPACKNAME];
213 
214 		if (slice != WHOLE_DISK_SLICE)
215 			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
216 		else
217 			sp->ds_reserved = 0;
218 
219 		ops->op_getpackname(sp->ds_label, packname, sizeof(packname));
220 
221 		destroy_dev_alias(dev, "by-label/*");
222 		if (packname[0])
223 			make_dev_alias(dev, "by-label/%s", packname);
224 
225 		sp->ds_ops = ops;
226 		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
227 			ops->op_loadpartinfo(sp->ds_label, i, &part);
228 
229 			if (part.fstype) {
230 				if (reprobe &&
231 				    (ndev = devfs_find_device_by_name("%s%c",
232 						dev->si_name, 'a' + i))
233 				) {
234 					/*
235 					 * Device already exists and
236 					 * is still valid.
237 					 */
238 					ndev->si_flags |= SI_REPROBE_TEST;
239 
240 					/*
241 					 * Destroy old UUID alias
242 					 */
243 					destroy_dev_alias(ndev,
244 							  "part-by-uuid/*");
245 					destroy_dev_alias(ndev,
246 							  "part-by-label/*");
247 
248 					/* Create UUID alias */
249 					if (!kuuid_is_nil(&part.storage_uuid)) {
250 						snprintf_uuid(uuid_buf,
251 						    sizeof(uuid_buf),
252 						    &part.storage_uuid);
253 						make_dev_alias(ndev,
254 						    "part-by-uuid/%s",
255 						    uuid_buf);
256 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
257 					}
258 					if (packname[0]) {
259 						make_dev_alias(ndev,
260 						    "part-by-label/%s.%c",
261 						    packname, 'a' + i);
262 					}
263 				} else {
264 					ndev = make_dev_covering(dops,
265 						dp->d_rawdev->si_ops,
266 						dkmakeminor(dkunit(dp->d_cdev),
267 							    slice, i),
268 						UID_ROOT, GID_OPERATOR, 0640,
269 						"%s%c", dev->si_name, 'a'+ i);
270 					ndev->si_parent = dev;
271 					ndev->si_iosize_max = dev->si_iosize_max;
272 					ndev->si_disk = dp;
273 					udev_dict_set_cstr(ndev, "subsystem", "disk");
274 					/* Inherit parent's disk type */
275 					if (dp->d_disktype) {
276 						udev_dict_set_cstr(ndev, "disk-type",
277 						    __DECONST(char *, dp->d_disktype));
278 					}
279 
280 					/* Create serno alias */
281 					if (dp->d_info.d_serialno) {
282 						make_dev_alias(ndev,
283 						    "serno/%s.s%d%c",
284 						    dp->d_info.d_serialno,
285 						    sno, 'a' + i);
286 					}
287 
288 					/* Create UUID alias */
289 					if (!kuuid_is_nil(&part.storage_uuid)) {
290 						snprintf_uuid(uuid_buf,
291 						    sizeof(uuid_buf),
292 						    &part.storage_uuid);
293 						make_dev_alias(ndev,
294 						    "part-by-uuid/%s",
295 						    uuid_buf);
296 						udev_dict_set_cstr(ndev, "uuid", uuid_buf);
297 					}
298 					if (packname[0]) {
299 						make_dev_alias(ndev,
300 						    "part-by-label/%s.%c",
301 						    packname, 'a' + i);
302 					}
303 					ndev->si_flags |= SI_REPROBE_TEST;
304 				}
305 			}
306 		}
307 	} else if (info->d_dsflags & DSO_COMPATLABEL) {
308 		msg = NULL;
309 		if (sp->ds_size >= 0x100000000ULL)
310 			ops = &disklabel64_ops;
311 		else
312 			ops = &disklabel32_ops;
313 		sp->ds_label = ops->op_clone_label(info, sp);
314 	} else {
315 		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
316 		    sp->ds_type == DOSPTYP_NETBSD ||
317 		    sp->ds_type == DOSPTYP_OPENBSD ||
318 		    sp->ds_type == DOSPTYP_DFLYBSD) {
319 			log(LOG_WARNING, "%s: cannot find label (%s)\n",
320 			    dev->si_name, msg);
321 		}
322 
323 		if (sp->ds_label.opaque != NULL && sp->ds_ops != NULL) {
324 			/* Clear out old label - it's not around anymore */
325 			disk_debug(2,
326 			    "disk_probe_slice: clear out old diskabel on %s\n",
327 			    dev->si_name);
328 
329 			sp->ds_ops->op_freedisklabel(&sp->ds_label);
330 			sp->ds_ops = NULL;
331 		}
332 	}
333 
334 	if (msg == NULL) {
335 		sp->ds_wlabel = FALSE;
336 	}
337 
338 	return (msg ? EINVAL : 0);
339 }
340 
341 /*
342  * This routine is only called for newly minted drives or to reprobe
343  * a drive with no open slices.  disk_probe_slice() is called directly
344  * when reprobing partition changes within slices.
345  */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslices *osp;
	struct diskslice *sp;
	struct dev_ops *dops;
	char uuid_buf[128];

	/*
	 * d_media_blksize can be 0 for non-disk storage devices such
	 * as audio CDs.
	 */
	if (info->d_media_blksize == 0)
		return;

	/*
	 * Build a fresh slice table; the old one (osp) is destroyed at
	 * the end, or immediately if mbrinit() fails.
	 */
	osp = dp->d_slice;
	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error) {
		dsgone(&osp);
		return;
	}

	/* Propagate the raw driver's emergency-pager capability */
	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
		&disk2_ops : &disk1_ops;

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;

#if 1
		/*
		 * Ignore the compatibility slice s0 if it's a device mapper
		 * volume.
		 */
		if ((i == COMPATIBILITY_SLICE) &&
		    (info->d_dsflags & DSO_DEVICEMAPPER))
			continue;
#endif

		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;

			/*
			 * Destroy old UUID alias
			 */
			destroy_dev_alias(ndev, "slice-by-uuid/*");

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
				    &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
				    uuid_buf);
			}
		} else {
			/*
			 * Else create new device
			 */
			ndev = make_dev_covering(dops, dp->d_rawdev->si_ops,
					dkmakewholeslice(dkunit(dev), i),
					UID_ROOT, GID_OPERATOR, 0640,
					(info->d_dsflags & DSO_DEVICEMAPPER)?
					"%s.s%d" : "%ss%d", dev->si_name, sno);
			ndev->si_parent = dev;
			ndev->si_iosize_max = dev->si_iosize_max;
			udev_dict_set_cstr(ndev, "subsystem", "disk");
			/* Inherit parent's disk type */
			if (dp->d_disktype) {
				udev_dict_set_cstr(ndev, "disk-type",
				    __DECONST(char *, dp->d_disktype));
			}

			/* Create serno alias */
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
				    &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
				    uuid_buf);
			}

			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == DOSPTYP_DFLYBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	dsgone(&osp);
	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
}
497 
498 
/*
 * Disk management thread.  All disk topology changes (probe, reprobe,
 * unprobe, destroy) are funneled through disk_msg_port and executed
 * serially here under ds_token, avoiding races between them.
 */
static void
disk_msg_core(void *arg)
{
	struct disk	*dp;
	struct diskslice *sp;
	disk_msg_t msg;
	int run;

	lwkt_gettoken(&disklist_token);
	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);	/* synchronous startup */
	lwkt_reltoken(&disklist_token);

	lwkt_gettoken(&ds_token);
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_PROBE: %s\n",
					dp->d_cdev->si_name);
			disk_iocom_update(dp);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_DESTROY: %s\n",
					dp->d_cdev->si_name);
			disk_iocom_uninit(dp);

			/*
			 * Interlock against struct disk enumerations.
			 * Wait for enumerations to complete then remove
			 * the dp from the list before tearing it down.
			 * This avoids numerous races.
			 */
			lwkt_gettoken(&disklist_token);
			while (dp->d_refs)
				tsleep(&dp->d_refs, 0, "diskdel", hz / 10);
			LIST_REMOVE(dp, d_list);

			dsched_disk_destroy(dp);
			devfs_destroy_related(dp->d_cdev);
			destroy_dev(dp->d_cdev);
			destroy_only_dev(dp->d_rawdev);

			lwkt_reltoken(&disklist_token);

			/* Serial number was kstrdup'd in _setdiskinfo() */
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_UNPROBE: %s\n",
					dp->d_cdev->si_name);
			devfs_destroy_related(dp->d_cdev);
			break;
		case DISK_SLICE_REPROBE:
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			/*
			 * Clear SI_REPROBE_TEST, reprobe, then destroy any
			 * related device that was not re-flagged (stale).
			 */
			devfs_clr_related_flag(sp->ds_dev,
						SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_SLICE_REPROBE: %s\n",
				    sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_related_without_flag(
					sp->ds_dev, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			dp = (struct disk *)msg->load;
			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_DISK_REPROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_related_without_flag(
					dp->d_cdev, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			/* No-op; lets callers drain the message queue */
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		lwkt_replymsg(&msg->hdr, 0);
	}
	lwkt_reltoken(&ds_token);
	lwkt_exit();
}
601 
602 
603 /*
604  * Acts as a message drain. Any message that is replied to here gets
605  * destroyed and the memory freed.
606  */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	/* port is unused; replied messages simply return to the cache */
	objcache_put(disk_msg_cache, msg);
}
612 
613 
614 void
615 disk_msg_send(uint32_t cmd, void *load, void *load2)
616 {
617 	disk_msg_t disk_msg;
618 	lwkt_port_t port = &disk_msg_port;
619 
620 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
621 
622 	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
623 
624 	disk_msg->hdr.u.ms_result = cmd;
625 	disk_msg->load = load;
626 	disk_msg->load2 = load2;
627 	KKASSERT(port);
628 	lwkt_sendmsg(port, &disk_msg->hdr);
629 }
630 
631 void
632 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
633 {
634 	struct lwkt_port rep_port;
635 	disk_msg_t disk_msg;
636 	lwkt_port_t port;
637 
638 	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
639 	port = &disk_msg_port;
640 
641 	/* XXX could probably use curthread's built-in msgport */
642 	lwkt_initport_thread(&rep_port, curthread);
643 	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
644 
645 	disk_msg->hdr.u.ms_result = cmd;
646 	disk_msg->load = load;
647 	disk_msg->load2 = load2;
648 
649 	lwkt_domsg(port, &disk_msg->hdr, 0);
650 	objcache_put(disk_msg_cache, disk_msg);
651 }
652 
653 /*
654  * Create a raw device for the dev_ops template (which is returned).  Also
655  * create a slice and unit managed disk and overload the user visible
656  * device space with it.
657  *
658  * NOTE: The returned raw device is NOT a slice and unit managed device.
659  * It is an actual raw device representing the raw disk as specified by
660  * the passed dev_ops.  The disk layer not only returns such a raw device,
661  * it also uses it internally when passing (modified) commands through.
662  */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	/* Non-cloning disk; name derived from raw_ops->head.name + unit */
	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
}
668 
cdev_t
disk_create_clone(int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	/* Cloning disk; name derived from raw_ops->head.name + unit */
	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
}
675 
cdev_t
disk_create_named(const char *name, int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	/* Non-cloning disk with an explicit device name */
	return _disk_create_named(name, unit, dp, raw_ops, 0);
}
682 
cdev_t
disk_create_named_clone(const char *name, int unit, struct disk *dp,
			struct dev_ops *raw_ops)
{
	/* Cloning disk with an explicit device name */
	return _disk_create_named(name, unit, dp, raw_ops, 1);
}
689 
690 static cdev_t
691 _disk_create_named(const char *name, int unit, struct disk *dp,
692 		   struct dev_ops *raw_ops, int clone)
693 {
694 	cdev_t rawdev;
695 	struct dev_ops *dops;
696 
697 	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
698 
699 	if (name) {
700 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
701 		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
702 	} else {
703 		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
704 		    UID_ROOT, GID_OPERATOR, 0640,
705 		    "%s%d", raw_ops->head.name, unit);
706 	}
707 
708 	bzero(dp, sizeof(*dp));
709 
710 	dops = (raw_ops->head.flags & D_NOEMERGPGR) ? &disk2_ops : &disk1_ops;
711 
712 	dp->d_rawdev = rawdev;
713 	dp->d_raw_ops = raw_ops;
714 	dp->d_dev_ops = dops;
715 
716 	if (name) {
717 		if (clone) {
718 			dp->d_cdev = make_only_dev_covering(
719 					dops, dp->d_rawdev->si_ops,
720 					dkmakewholedisk(unit),
721 					UID_ROOT, GID_OPERATOR, 0640,
722 					"%s", name);
723 		} else {
724 			dp->d_cdev = make_dev_covering(
725 					dops, dp->d_rawdev->si_ops,
726 					dkmakewholedisk(unit),
727 					UID_ROOT, GID_OPERATOR, 0640,
728 					"%s", name);
729 		}
730 	} else {
731 		if (clone) {
732 			dp->d_cdev = make_only_dev_covering(
733 					dops, dp->d_rawdev->si_ops,
734 					dkmakewholedisk(unit),
735 					UID_ROOT, GID_OPERATOR, 0640,
736 					"%s%d", raw_ops->head.name, unit);
737 		} else {
738 			dp->d_cdev = make_dev_covering(
739 					dops, dp->d_rawdev->si_ops,
740 					dkmakewholedisk(unit),
741 					UID_ROOT, GID_OPERATOR, 0640,
742 					"%s%d", raw_ops->head.name, unit);
743 		}
744 	}
745 
746 	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
747 	dp->d_cdev->si_disk = dp;
748 
749 	if (name)
750 		dsched_disk_create(dp, name, unit);
751 	else
752 		dsched_disk_create(dp, raw_ops->head.name, unit);
753 
754 	lwkt_gettoken(&disklist_token);
755 	LIST_INSERT_HEAD(&disklist, dp, d_list);
756 	lwkt_reltoken(&disklist_token);
757 
758 	disk_iocom_init(dp);
759 
760 	disk_debug(1, "disk_create (end): %s%d\n",
761 		   (name != NULL)?(name):(raw_ops->head.name), unit);
762 
763 	return (dp->d_rawdev);
764 }
765 
766 int
767 disk_setdisktype(struct disk *disk, const char *type)
768 {
769 	int error;
770 
771 	KKASSERT(disk != NULL);
772 
773 	disk->d_disktype = type;
774 	error = udev_dict_set_cstr(disk->d_cdev, "disk-type",
775 				   __DECONST(char *, type));
776 	return error;
777 }
778 
int
disk_getopencount(struct disk *disk)
{
	/* Snapshot of active opens; read without locking by design */
	return disk->d_opencount;
}
784 
/*
 * Copy the caller's disk_info into the disk structure and normalize
 * it: duplicate the serial number (the caller may free its copy),
 * derive the missing one of d_media_size/d_media_blocks, and
 * propagate si_* parameters from the raw device to the managed one.
 */
static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;

	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.  All-blank or empty serial numbers are
	 * treated as absent.
	 */
	if (info->d_serialno && info->d_serialno[0] &&
	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
				       info->d_serialno);
		}
	} else {
		info->d_serialno = NULL;
	}
	/* Release the serial number duplicated by a previous call */
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	dsched_disk_update(disk, info);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	/* Add the serial number to the udev_dictionary */
	if (info->d_serialno)
		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
}
847 
848 /*
849  * Disk drivers must call this routine when media parameters are available
850  * or have changed.
851  */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	/* Queue an asynchronous probe of the slices/partitions */
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
		   disk->d_cdev->si_name);
}
860 
void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	/* Like disk_setdiskinfo() but waits for the probe to complete */
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
		   disk->d_cdev->si_name);
}
869 
870 /*
871  * This routine is called when an adapter detaches.  The higher level
872  * managed disk device is destroyed while the lower level raw device is
873  * released.
874  */
875 void
876 disk_destroy(struct disk *disk)
877 {
878 	disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
879 	return;
880 }
881 
882 int
883 disk_dumpcheck(cdev_t dev, u_int64_t *size,
884 	       u_int64_t *blkno, u_int32_t *secsize)
885 {
886 	struct partinfo pinfo;
887 	int error;
888 
889 	if (size)
890 		*size = 0;	/* avoid gcc warnings */
891 	if (secsize)
892 		*secsize = 512;	/* avoid gcc warnings */
893 	bzero(&pinfo, sizeof(pinfo));
894 
895 	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
896 			   proc0.p_ucred, NULL, NULL);
897 	if (error)
898 		return (error);
899 
900 	if (pinfo.media_blksize == 0)
901 		return (ENXIO);
902 
903 	if (blkno) /* XXX: make sure this reserved stuff is right */
904 		*blkno = pinfo.reserved_blocks +
905 			pinfo.media_offset / pinfo.media_blksize;
906 	if (secsize)
907 		*secsize = pinfo.media_blksize;
908 	if (size)
909 		*size = (pinfo.media_blocks - pinfo.reserved_blocks);
910 
911 	return (0);
912 }
913 
914 int
915 disk_dumpconf(cdev_t dev, u_int onoff)
916 {
917 	struct dumperinfo di;
918 	u_int64_t	size, blkno;
919 	u_int32_t	secsize;
920 	int error;
921 
922 	if (!onoff)
923 		return set_dumper(NULL);
924 
925 	error = disk_dumpcheck(dev, &size, &blkno, &secsize);
926 
927 	if (error)
928 		return ENXIO;
929 
930 	bzero(&di, sizeof(struct dumperinfo));
931 	di.dumper = diskdump;
932 	di.priv = dev;
933 	di.blocksize = secsize;
934 	di.maxiosize = dev->si_iosize_max;
935 	di.mediaoffset = blkno * DEV_BSIZE;
936 	di.mediasize = size * DEV_BSIZE;
937 
938 	return set_dumper(&di);
939 }
940 
941 void
942 disk_unprobe(struct disk *disk)
943 {
944 	if (disk == NULL)
945 		return;
946 
947 	disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
948 }
949 
void
disk_invalidate (struct disk *disk)
{
	/* Throw away the slice/label topology; a later probe rebuilds it */
	dsgone(&disk->d_slice);
}
955 
956 /*
957  * Enumerate disks, pass a marker and an initial NULL dp to initialize,
958  * then loop with the previously returned dp.
959  *
960  * The returned dp will be referenced, preventing its destruction.  When
961  * you pass the returned dp back into the loop the ref is dropped.
962  *
963  * WARNING: If terminating your loop early you must call
964  *	    disk_enumerate_stop().
965  */
struct disk *
disk_enumerate(struct disk *marker, struct disk *dp)
{
	lwkt_gettoken(&disklist_token);
	if (dp) {
		/* Continuing: drop ref on prior dp, step past our marker */
		--dp->d_refs;
		dp = LIST_NEXT(marker, d_list);
		LIST_REMOVE(marker, d_list);
	} else {
		/* First call: initialize the marker, start at the head */
		bzero(marker, sizeof(*marker));
		marker->d_flags = DISKFLAG_MARKER;
		dp = LIST_FIRST(&disklist);
	}
	/* Skip other enumerators' markers interleaved in the list */
	while (dp) {
		if ((dp->d_flags & DISKFLAG_MARKER) == 0)
			break;
		dp = LIST_NEXT(dp, d_list);
	}
	if (dp) {
		/* Reference the result and park our marker after it */
		++dp->d_refs;
		LIST_INSERT_AFTER(dp, marker, d_list);
	}
	lwkt_reltoken(&disklist_token);
	return (dp);
}
991 
992 /*
993  * Terminate an enumeration early.  Do not call this function if the
994  * enumeration ended normally.  dp can be NULL, indicating that you
995  * wish to retain the ref count on dp.
996  *
997  * This function removes the marker.
998  */
999 void
1000 disk_enumerate_stop(struct disk *marker, struct disk *dp)
1001 {
1002 	lwkt_gettoken(&disklist_token);
1003 	LIST_REMOVE(marker, d_list);
1004 	if (dp)
1005 		--dp->d_refs;
1006 	lwkt_reltoken(&disklist_token);
1007 }
1008 
1009 static
1010 int
1011 sysctl_disks(SYSCTL_HANDLER_ARGS)
1012 {
1013 	struct disk marker;
1014 	struct disk *dp;
1015 	int error, first;
1016 
1017 	first = 1;
1018 	error = 0;
1019 	dp = NULL;
1020 
1021 	while ((dp = disk_enumerate(&marker, dp))) {
1022 		if (!first) {
1023 			error = SYSCTL_OUT(req, " ", 1);
1024 			if (error) {
1025 				disk_enumerate_stop(&marker, dp);
1026 				break;
1027 			}
1028 		} else {
1029 			first = 0;
1030 		}
1031 		error = SYSCTL_OUT(req, dp->d_rawdev->si_name,
1032 				   strlen(dp->d_rawdev->si_name));
1033 		if (error) {
1034 			disk_enumerate_stop(&marker, dp);
1035 			break;
1036 		}
1037 	}
1038 	if (error == 0)
1039 		error = SYSCTL_OUT(req, "", 1);
1040 	return error;
1041 }
1042 
/* kern.disks: read-only string listing the available raw disk devices */
SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_disks, "A", "names of available disks");
1045 
1046 /*
1047  * Open a disk device or partition.
1048  */
1049 static
1050 int
1051 diskopen(struct dev_open_args *ap)
1052 {
1053 	cdev_t dev = ap->a_head.a_dev;
1054 	struct disk *dp;
1055 	int error;
1056 
1057 	/*
1058 	 * dp can't be NULL here XXX.
1059 	 *
1060 	 * d_slice will be NULL if setdiskinfo() has not been called yet.
1061 	 * setdiskinfo() is typically called whether the disk is present
1062 	 * or not (e.g. CD), but the base disk device is created first
1063 	 * and there may be a race.
1064 	 */
1065 	dp = dev->si_disk;
1066 	if (dp == NULL || dp->d_slice == NULL)
1067 		return (ENXIO);
1068 	error = 0;
1069 
1070 	/*
1071 	 * Deal with open races
1072 	 */
1073 	lwkt_gettoken(&ds_token);
1074 	while (dp->d_flags & DISKFLAG_LOCK) {
1075 		dp->d_flags |= DISKFLAG_WANTED;
1076 		error = tsleep(dp, PCATCH, "diskopen", hz);
1077 		if (error) {
1078 			lwkt_reltoken(&ds_token);
1079 			return (error);
1080 		}
1081 	}
1082 	dp->d_flags |= DISKFLAG_LOCK;
1083 
1084 	/*
1085 	 * Open the underlying raw device.
1086 	 */
1087 	if (!dsisopen(dp->d_slice)) {
1088 #if 0
1089 		if (!pdev->si_iosize_max)
1090 			pdev->si_iosize_max = dev->si_iosize_max;
1091 #endif
1092 		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
1093 				  ap->a_devtype, ap->a_cred, NULL, NULL);
1094 	}
1095 
1096 	if (error)
1097 		goto out;
1098 	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
1099 		       &dp->d_slice, &dp->d_info);
1100 	if (!dsisopen(dp->d_slice)) {
1101 		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype, NULL);
1102 	}
1103 out:
1104 	dp->d_flags &= ~DISKFLAG_LOCK;
1105 	if (dp->d_flags & DISKFLAG_WANTED) {
1106 		dp->d_flags &= ~DISKFLAG_WANTED;
1107 		wakeup(dp);
1108 	}
1109 	lwkt_reltoken(&ds_token);
1110 
1111 	KKASSERT(dp->d_opencount >= 0);
1112 	/* If the open was successful, bump open count */
1113 	if (error == 0)
1114 		atomic_add_int(&dp->d_opencount, 1);
1115 
1116 	return(error);
1117 }
1118 
1119 /*
1120  * Close a disk device or partition
1121  */
1122 static
1123 int
1124 diskclose(struct dev_close_args *ap)
1125 {
1126 	cdev_t dev = ap->a_head.a_dev;
1127 	struct disk *dp;
1128 	int error;
1129 	int lcount;
1130 
1131 	error = 0;
1132 	dp = dev->si_disk;
1133 
1134 	/*
1135 	 * The cdev_t represents the disk/slice/part.  The shared
1136 	 * dp structure governs all cdevs associated with the disk.
1137 	 *
1138 	 * As a safety only close the underlying raw device on the last
1139 	 * close the disk device if our tracking of the slices/partitions
1140 	 * also indicates nothing is open.
1141 	 */
1142 	KKASSERT(dp->d_opencount >= 1);
1143 	lcount = atomic_fetchadd_int(&dp->d_opencount, -1);
1144 
1145 	lwkt_gettoken(&ds_token);
1146 	dsclose(dev, ap->a_devtype, dp->d_slice);
1147 	if (lcount <= 1 && !dsisopen(dp->d_slice)) {
1148 		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype, NULL);
1149 	}
1150 	lwkt_reltoken(&ds_token);
1151 
1152 	return (error);
1153 }
1154 
1155 /*
1156  * First execute the ioctl on the disk device, and if it isn't supported
1157  * try running it on the backing device.
1158  */
1159 static
1160 int
1161 diskioctl(struct dev_ioctl_args *ap)
1162 {
1163 	cdev_t dev = ap->a_head.a_dev;
1164 	struct disk *dp;
1165 	int error;
1166 	u_int u;
1167 
1168 	dp = dev->si_disk;
1169 	if (dp == NULL)
1170 		return (ENXIO);
1171 
1172 	devfs_debug(DEVFS_DEBUG_DEBUG,
1173 		    "diskioctl: cmd is: %lx (name: %s)\n",
1174 		    ap->a_cmd, dev->si_name);
1175 	devfs_debug(DEVFS_DEBUG_DEBUG,
1176 		    "diskioctl: &dp->d_slice is: %p, %p\n",
1177 		    &dp->d_slice, dp->d_slice);
1178 
1179 	if (ap->a_cmd == DIOCGKERNELDUMP) {
1180 		u = *(u_int *)ap->a_data;
1181 		return disk_dumpconf(dev, u);
1182 	}
1183 
1184 	if (ap->a_cmd == DIOCRECLUSTER && dev == dp->d_cdev) {
1185 		error = disk_iocom_ioctl(dp, ap->a_cmd, ap->a_data);
1186 		return error;
1187 	}
1188 
1189 	if (&dp->d_slice == NULL || dp->d_slice == NULL ||
1190 	    ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
1191 	     dkslice(dev) == WHOLE_DISK_SLICE)) {
1192 		error = ENOIOCTL;
1193 	} else {
1194 		lwkt_gettoken(&ds_token);
1195 		error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
1196 				&dp->d_slice, &dp->d_info);
1197 		lwkt_reltoken(&ds_token);
1198 	}
1199 
1200 	if (error == ENOIOCTL) {
1201 		error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
1202 				   ap->a_fflag, ap->a_cred, NULL, NULL);
1203 	}
1204 	return (error);
1205 }
1206 
1207 /*
1208  * Execute strategy routine
1209  *
1210  * WARNING! We are using the KVABIO API and must not access memory
1211  *         through bp->b_data without first calling bkvasync(bp).
1212  */
1213 static
1214 int
1215 diskstrategy(struct dev_strategy_args *ap)
1216 {
1217 	cdev_t dev = ap->a_head.a_dev;
1218 	struct bio *bio = ap->a_bio;
1219 	struct bio *nbio;
1220 	struct disk *dp;
1221 
1222 	dp = dev->si_disk;
1223 
1224 	if (dp == NULL) {
1225 		bio->bio_buf->b_error = ENXIO;
1226 		bio->bio_buf->b_flags |= B_ERROR;
1227 		biodone(bio);
1228 		return(0);
1229 	}
1230 	KKASSERT(dev->si_disk == dp);
1231 
1232 	/*
1233 	 * The dscheck() function will also transform the slice relative
1234 	 * block number i.e. bio->bio_offset into a block number that can be
1235 	 * passed directly to the underlying raw device.  If dscheck()
1236 	 * returns NULL it will have handled the bio for us (e.g. EOF
1237 	 * or error due to being beyond the device size).
1238 	 */
1239 	if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
1240 		dev_dstrategy(dp->d_rawdev, nbio);
1241 	} else {
1242 		biodone(bio);
1243 	}
1244 	return(0);
1245 }
1246 
1247 /*
1248  * Return the partition size in ?blocks?
1249  */
1250 static
1251 int
1252 diskpsize(struct dev_psize_args *ap)
1253 {
1254 	cdev_t dev = ap->a_head.a_dev;
1255 	struct disk *dp;
1256 
1257 	dp = dev->si_disk;
1258 	if (dp == NULL)
1259 		return(ENODEV);
1260 
1261 	ap->a_result = dssize(dev, &dp->d_slice);
1262 
1263 	if ((ap->a_result == -1) &&
1264 	   (dp->d_info.d_dsflags & DSO_RAWPSIZE)) {
1265 		ap->a_head.a_dev = dp->d_rawdev;
1266 		return dev_doperate(&ap->a_head);
1267 	}
1268 	return(0);
1269 }
1270 
1271 static int
1272 diskdump(struct dev_dump_args *ap)
1273 {
1274 	cdev_t dev = ap->a_head.a_dev;
1275 	struct disk *dp = dev->si_disk;
1276 	u_int64_t size, offset;
1277 	int error;
1278 
1279 	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
1280 	/* XXX: this should probably go in disk_dumpcheck somehow */
1281 	if (ap->a_length != 0) {
1282 		size *= DEV_BSIZE;
1283 		offset = ap->a_blkno * DEV_BSIZE;
1284 		if ((ap->a_offset < offset) ||
1285 		    (ap->a_offset + ap->a_length - offset > size)) {
1286 			kprintf("Attempt to write outside dump "
1287 				"device boundaries.\n");
1288 			error = ENOSPC;
1289 		}
1290 	}
1291 
1292 	if (error == 0) {
1293 		ap->a_head.a_dev = dp->d_rawdev;
1294 		error = dev_doperate(&ap->a_head);
1295 	}
1296 
1297 	return(error);
1298 }
1299 
1300 
/* Expose structure sizes for debugging via debug.sizeof.* */
SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
	   0, sizeof(struct disk), "sizeof(struct disk)");

/*
 * Reorder interval for burst write allowance and minor write
 * allowance.
 *
 * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * when runningbufspace is well over the hirunningspace limit.
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

/* Byte budgets for writes pulled across the read/write transition */
int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1328 
1329 
1330 /*
1331  * Order I/Os.  Generally speaking this code is designed to make better
1332  * use of drive zone caches.  A drive zone cache can typically track linear
1333  * reads or writes for around 16 zones simultaniously.
1334  *
1335  * Read prioritization issues:  It is possible for hundreds of megabytes worth
1336  * of writes to be queued asynchronously.  This creates a huge bottleneck
1337  * for reads which reduce read bandwidth to a trickle.
1338  *
1339  * To solve this problem we generally reorder reads before writes.
1340  *
1341  * However, a large number of random reads can also starve writes and
1342  * make poor use of the drive zone cache so we allow writes to trickle
1343  * in every N reads.
1344  */
1345 void
1346 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
1347 {
1348 #if 0
1349 	/*
1350 	 * The BIO wants to be ordered.  Adding to the tail also
1351 	 * causes transition to be set to NULL, forcing the ordering
1352 	 * of all prior I/O's.
1353 	 */
1354 	if (bio->bio_buf->b_flags & B_ORDERED) {
1355 		bioq_insert_tail(bioq, bio);
1356 		return;
1357 	}
1358 #endif
1359 
1360 	switch(bio->bio_buf->b_cmd) {
1361 	case BUF_CMD_READ:
1362 		if (bioq->transition) {
1363 			/*
1364 			 * Insert before the first write.  Bleedover writes
1365 			 * based on reorder intervals to prevent starvation.
1366 			 */
1367 			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
1368 			++bioq->reorder;
1369 			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
1370 				bioqwritereorder(bioq);
1371 				if (bioq->reorder >=
1372 				    bioq_reorder_burst_interval) {
1373 					bioq->reorder = 0;
1374 				}
1375 			}
1376 		} else {
1377 			/*
1378 			 * No writes queued (or ordering was forced),
1379 			 * insert at tail.
1380 			 */
1381 			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1382 		}
1383 		break;
1384 	case BUF_CMD_WRITE:
1385 		/*
1386 		 * Writes are always appended.  If no writes were previously
1387 		 * queued or an ordered tail insertion occured the transition
1388 		 * field will be NULL.
1389 		 */
1390 		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1391 		if (bioq->transition == NULL)
1392 			bioq->transition = bio;
1393 		break;
1394 	default:
1395 		/*
1396 		 * All other request types are forced to be ordered.
1397 		 */
1398 		bioq_insert_tail(bioq, bio);
1399 		break;
1400 	}
1401 }
1402 
1403 /*
1404  * Move the read-write transition point to prevent reads from
1405  * completely starving our writes.  This brings a number of writes into
1406  * the fold every N reads.
1407  *
1408  * We bring a few linear writes into the fold on a minor interval
1409  * and we bring a non-linear burst of writes into the fold on a major
1410  * interval.  Bursting only occurs if runningbufspace is really high
1411  * (typically from syncs, fsyncs, or HAMMER flushes).
1412  */
1413 static
1414 void
1415 bioqwritereorder(struct bio_queue_head *bioq)
1416 {
1417 	struct bio *bio;
1418 	off_t next_offset;
1419 	size_t left;
1420 	size_t n;
1421 	int check_off;
1422 
1423 	if (bioq->reorder < bioq_reorder_burst_interval ||
1424 	    !buf_runningbufspace_severe()) {
1425 		left = (size_t)bioq_reorder_minor_bytes;
1426 		check_off = 1;
1427 	} else {
1428 		left = (size_t)bioq_reorder_burst_bytes;
1429 		check_off = 0;
1430 	}
1431 
1432 	next_offset = bioq->transition->bio_offset;
1433 	while ((bio = bioq->transition) != NULL &&
1434 	       (check_off == 0 || next_offset == bio->bio_offset)
1435 	) {
1436 		n = bio->bio_buf->b_bcount;
1437 		next_offset = bio->bio_offset + n;
1438 		bioq->transition = TAILQ_NEXT(bio, bio_act);
1439 		if (left < n)
1440 			break;
1441 		left -= n;
1442 	}
1443 }
1444 
1445 /*
1446  * Bounds checking against the media size, used for the raw partition.
1447  * secsize, mediasize and b_blkno must all be the same units.
1448  * Possibly this has to be DEV_BSIZE (512).
1449  */
1450 int
1451 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
1452 {
1453 	struct buf *bp = bio->bio_buf;
1454 	int64_t sz;
1455 
1456 	sz = howmany(bp->b_bcount, secsize);
1457 
1458 	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
1459 		sz = mediasize - bio->bio_offset/DEV_BSIZE;
1460 		if (sz == 0) {
1461 			/* If exactly at end of disk, return EOF. */
1462 			bp->b_resid = bp->b_bcount;
1463 			return 0;
1464 		}
1465 		if (sz < 0) {
1466 			/* If past end of disk, return EINVAL. */
1467 			bp->b_error = EINVAL;
1468 			return 0;
1469 		}
1470 		/* Otherwise, truncate request. */
1471 		bp->b_bcount = sz * secsize;
1472 	}
1473 
1474 	return 1;
1475 }
1476 
1477 /*
1478  * Disk error is the preface to plaintive error messages
1479  * about failing disk transfers.  It prints messages of the form
1480 
1481 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1482 
1483  * if the offset of the error in the transfer and a disk label
1484  * are both available.  blkdone should be -1 if the position of the error
1485  * is unknown; the disklabel pointer may be null from drivers that have not
1486  * been converted to use them.  The message is printed with kprintf
1487  * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1488  * The message should be completed (with at least a newline) with kprintf
1489  * or log(-1, ...), respectively.  There is no trailing space.
1490  */
1491 void
1492 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1493 {
1494 	struct buf *bp = bio->bio_buf;
1495 	const char *term;
1496 
1497 	switch(bp->b_cmd) {
1498 	case BUF_CMD_READ:
1499 		term = "read";
1500 		break;
1501 	case BUF_CMD_WRITE:
1502 		term = "write";
1503 		break;
1504 	default:
1505 		term = "access";
1506 		break;
1507 	}
1508 	kprintf("%s: %s %sing ", dev->si_name, what, term);
1509 	kprintf("offset %012llx for %d",
1510 		(long long)bio->bio_offset,
1511 		bp->b_bcount);
1512 
1513 	if (donecnt)
1514 		kprintf(" (%d bytes completed)", donecnt);
1515 }
1516 
1517 /*
1518  * Locate a disk device
1519  */
1520 cdev_t
1521 disk_locate(const char *devname)
1522 {
1523 	return devfs_find_device_by_name("%s", devname);
1524 }
1525 
/*
 * Issue a synchronous DISK_SYNC message to the disk messaging core.
 * 'arg' is unused (present for callback signature compatibility).
 */
void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}
1531 
/*
 * Subsystem initialization: set up the disk-message objcache, the
 * tokens protecting the disk list and the slice layer, the reply-only
 * message drain port, and the disk_msg_core worker thread.
 */
static void
disk_init(void)
{
	struct thread* td_core;

	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token, "disks");
	lwkt_token_init(&ds_token, "ds");

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	lwkt_gettoken(&disklist_token);
	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, -1, "disk_msg_core");
	/* Wait for disk_msg_core to wake us once it is running */
	tsleep(td_core, 0, "diskcore", 0);
	lwkt_reltoken(&disklist_token);
}
1557 
/*
 * Subsystem teardown: destroy the disk-message objcache created by
 * disk_init().
 */
static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}
1563 
1564 /*
1565  * Clean out illegal characters in serial numbers.
1566  */
1567 static void
1568 disk_cleanserial(char *serno)
1569 {
1570 	char c;
1571 
1572 	while ((c = *serno) != 0) {
1573 		if (c >= 'a' && c <= 'z')
1574 			;
1575 		else if (c >= 'A' && c <= 'Z')
1576 			;
1577 		else if (c >= '0' && c <= '9')
1578 			;
1579 		else if (c == '-' || c == '@' || c == '+' || c == '.')
1580 			;
1581 		else
1582 			c = '_';
1583 		*serno++= c;
1584 	}
1585 }
1586 
/* Tunable/sysctl kern.disk_debug: enables subr_disk debug output */
TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
	   0, "Enable subr_disk debugging");

/* Hook subsystem init/uninit into the kernel boot/shutdown sequence */
SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1593