1 /*
2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 * and Alex Hornung <ahornung@gmail.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
17 * distribution.
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * ----------------------------------------------------------------------------
36 * "THE BEER-WARE LICENSE" (Revision 42):
37 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
38 * can do whatever you want with this stuff. If we meet some day, and you think
39 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
40 * ----------------------------------------------------------------------------
41 *
42 * Copyright (c) 1982, 1986, 1988, 1993
43 * The Regents of the University of California. All rights reserved.
44 * (c) UNIX System Laboratories, Inc.
45 * All or some portions of this file are derived from material licensed
46 * to the University of California by American Telephone and Telegraph
47 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48 * the permission of UNIX System Laboratories, Inc.
49 *
50 * Redistribution and use in source and binary forms, with or without
51 * modification, are permitted provided that the following conditions
52 * are met:
53 * 1. Redistributions of source code must retain the above copyright
54 * notice, this list of conditions and the following disclaimer.
55 * 2. Redistributions in binary form must reproduce the above copyright
56 * notice, this list of conditions and the following disclaimer in the
57 * documentation and/or other materials provided with the distribution.
58 * 3. Neither the name of the University nor the names of its contributors
59 * may be used to endorse or promote products derived from this software
60 * without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94
75 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
76 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
77 */
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/proc.h>
83 #include <sys/sysctl.h>
84 #include <sys/buf.h>
85 #include <sys/caps.h>
86 #include <sys/conf.h>
87 #include <sys/disklabel.h>
88 #include <sys/disklabel32.h>
89 #include <sys/disklabel64.h>
90 #include <sys/diskslice.h>
91 #include <sys/diskmbr.h>
92 #include <sys/disk.h>
93 #include <sys/kerneldump.h>
94 #include <sys/malloc.h>
95 #include <machine/md_var.h>
96 #include <sys/ctype.h>
97 #include <sys/syslog.h>
98 #include <sys/device.h>
99 #include <sys/msgport.h>
100 #include <sys/devfs.h>
101 #include <sys/thread.h>
102 #include <sys/dsched.h>
103 #include <sys/queue.h>
104 #include <sys/lock.h>
105 #include <sys/udev.h>
106 #include <sys/uuid.h>
107
108 #include <sys/buf2.h>
109 #include <sys/msgport2.h>
110
/* Malloc bucket for all disk-layer allocations */
static MALLOC_DEFINE(M_DISK, "disk", "disk data");

/* disk_debug() prints messages whose level is <= this value */
static int disk_debug_enable = 0;

static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
static void disk_msg_core(void *);
static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
static void disk_probe(struct disk *dp, int reprobe);
static void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);
static int disk_debug(int, char *, ...) __printflike(2, 3);
static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
			struct dev_ops *raw_ops, int clone);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_dump_t diskdump;

/* All registered struct disk instances; guarded by disklist_token */
static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;
/* Held across probe/open paths (see disk_msg_core() and diskopen()) */
static struct lwkt_token ds_token;

/* dev_ops used for the managed (slice/partition) cooked devices */
static struct dev_ops disk1_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

/* Same as disk1_ops, but for raw devices flagged D_NOEMERGPGR */
static struct dev_ops disk2_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE | D_KVABIO |
		     D_NOEMERGPGR },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

/* Objcache backing disk_msg allocations (see disk_msg_malloc_args) */
static struct objcache *disk_msg_cache;

static struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg),
	M_DISK
};

/* Replies routed here are recycled by disk_msg_autofree_reply() */
static struct lwkt_port disk_dispose_port;
/* Request port serviced by the disk_msg_core() thread */
static struct lwkt_port disk_msg_port;
170
171 static int
disk_debug(int level,char * fmt,...)172 disk_debug(int level, char *fmt, ...)
173 {
174 __va_list ap;
175
176 __va_start(ap, fmt);
177 if (level <= disk_debug_enable)
178 kvprintf(fmt, ap);
179 __va_end(ap);
180
181 return 0;
182 }
183
184 static int
disk_probe_slice(struct disk * dp,cdev_t dev,int slice,int reprobe)185 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
186 {
187 struct disk_info *info = &dp->d_info;
188 struct diskslice *sp = &dp->d_slice->dss_slices[slice];
189 disklabel_ops_t ops;
190 struct dev_ops *dops;
191 struct partinfo part;
192 const char *msg;
193 char uuid_buf[128];
194 cdev_t ndev;
195 int sno;
196 u_int i;
197
198 disk_debug(2, "disk_probe_slice (begin): %s (%s)\n",
199 dev->si_name, dp->d_cdev->si_name);
200
201 sno = slice ? slice - 1 : 0;
202 dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
203 &disk2_ops : &disk1_ops;
204
205 ops = &disklabel32_ops;
206 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
207 if (msg && !strcmp(msg, "no disk label")) {
208 ops = &disklabel64_ops;
209 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
210 }
211
212 if (msg == NULL) {
213 char packname[DISKLABEL_MAXPACKNAME];
214
215 if (slice != WHOLE_DISK_SLICE)
216 ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
217 else
218 sp->ds_reserved = 0;
219
220 ops->op_getpackname(sp->ds_label, packname, sizeof(packname));
221
222 destroy_dev_alias(dev, "by-label/*");
223 if (packname[0])
224 make_dev_alias(dev, "by-label/%s", packname);
225
226 sp->ds_ops = ops;
227 for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
228 ops->op_loadpartinfo(sp->ds_label, i, &part);
229
230 if (part.fstype) {
231 if (reprobe &&
232 (ndev = devfs_find_device_by_name("%s%c",
233 dev->si_name, 'a' + i))
234 ) {
235 /*
236 * Device already exists and
237 * is still valid.
238 */
239 ndev->si_flags |= SI_REPROBE_TEST;
240
241 /*
242 * Destroy old UUID alias
243 */
244 destroy_dev_alias(ndev,
245 "part-by-uuid/*");
246 destroy_dev_alias(ndev,
247 "part-by-label/*");
248
249 /* Create UUID alias */
250 if (!kuuid_is_nil(&part.storage_uuid)) {
251 snprintf_uuid(uuid_buf,
252 sizeof(uuid_buf),
253 &part.storage_uuid);
254 make_dev_alias(ndev,
255 "part-by-uuid/%s",
256 uuid_buf);
257 udev_dict_set_cstr(ndev, "uuid", uuid_buf);
258 }
259 if (packname[0]) {
260 make_dev_alias(ndev,
261 "part-by-label/%s.%c",
262 packname, 'a' + i);
263 }
264 } else {
265 ndev = make_dev_covering(dops,
266 dp->d_rawdev->si_ops,
267 dkmakeminor(dkunit(dp->d_cdev),
268 slice, i),
269 UID_ROOT, GID_OPERATOR, 0640,
270 "%s%c", dev->si_name, 'a'+ i);
271 ndev->si_parent = dev;
272 ndev->si_iosize_max = dev->si_iosize_max;
273 ndev->si_disk = dp;
274 udev_dict_set_cstr(ndev, "subsystem", "disk");
275 /* Inherit parent's disk type */
276 if (dp->d_disktype) {
277 udev_dict_set_cstr(ndev, "disk-type",
278 __DECONST(char *, dp->d_disktype));
279 }
280
281 /* Create serno alias */
282 if (dp->d_info.d_serialno) {
283 make_dev_alias(ndev,
284 "serno/%s.s%d%c",
285 dp->d_info.d_serialno,
286 sno, 'a' + i);
287 }
288
289 /* Create UUID alias */
290 if (!kuuid_is_nil(&part.storage_uuid)) {
291 snprintf_uuid(uuid_buf,
292 sizeof(uuid_buf),
293 &part.storage_uuid);
294 make_dev_alias(ndev,
295 "part-by-uuid/%s",
296 uuid_buf);
297 udev_dict_set_cstr(ndev, "uuid", uuid_buf);
298 }
299 if (packname[0]) {
300 make_dev_alias(ndev,
301 "part-by-label/%s.%c",
302 packname, 'a' + i);
303 }
304 ndev->si_flags |= SI_REPROBE_TEST;
305 }
306 }
307 }
308 } else if (info->d_dsflags & DSO_COMPATLABEL) {
309 msg = NULL;
310 if (sp->ds_size >= 0x100000000ULL)
311 ops = &disklabel64_ops;
312 else
313 ops = &disklabel32_ops;
314 sp->ds_label = ops->op_clone_label(info, sp);
315 } else {
316 if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
317 sp->ds_type == DOSPTYP_NETBSD ||
318 sp->ds_type == DOSPTYP_OPENBSD ||
319 sp->ds_type == DOSPTYP_DFLYBSD) {
320 log(LOG_WARNING, "%s: cannot find label (%s)\n",
321 dev->si_name, msg);
322 }
323
324 if (sp->ds_label.opaque != NULL && sp->ds_ops != NULL) {
325 /* Clear out old label - it's not around anymore */
326 disk_debug(2,
327 "disk_probe_slice: clear out old diskabel on %s\n",
328 dev->si_name);
329
330 sp->ds_ops->op_freedisklabel(&sp->ds_label);
331 sp->ds_ops = NULL;
332 }
333 }
334
335 if (msg == NULL) {
336 sp->ds_wlabel = FALSE;
337 }
338
339 return (msg ? EINVAL : 0);
340 }
341
/*
 * This routine is only called for newly minted drives or to reprobe
 * a drive with no open slices. disk_probe_slice() is called directly
 * when reprobing partition changes within slices.
 *
 * Builds a fresh slice table from the MBR, creates (or on reprobe
 * revalidates) a cdev per slice plus serno/uuid aliases, then probes
 * candidate slices for disklabels.
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslices *osp;
	struct diskslice *sp;
	struct dev_ops *dops;
	char uuid_buf[128];

	/*
	 * d_media_blksize can be 0 for non-disk storage devices such
	 * as audio CDs.
	 */
	if (info->d_media_blksize == 0)
		return;

	/* Replace the slice table; the old one is destroyed on exit */
	osp = dp->d_slice;
	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error) {
		dsgone(&osp);
		return;
	}

	/* Raw devices flagged D_NOEMERGPGR get the matching cooked ops */
	dops = (dp->d_rawdev->si_ops->head.flags & D_NOEMERGPGR) ?
		&disk2_ops : &disk1_ops;

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;

#if 1
		/*
		 * Ignore the compatibility slice s0 if it's a device mapper
		 * volume.
		 */
		if ((i == COMPATIBILITY_SLICE) &&
		    (info->d_dsflags & DSO_DEVICEMAPPER))
			continue;
#endif

		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0. s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;

			/*
			 * Destroy old UUID alias
			 */
			destroy_dev_alias(ndev, "slice-by-uuid/*");

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
					      &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
					       uuid_buf);
			}
		} else {
			/*
			 * Else create new device.  Device-mapper volumes
			 * use "<name>.s<n>" naming, regular disks
			 * "<name>s<n>".
			 */
			ndev = make_dev_covering(dops, dp->d_rawdev->si_ops,
				dkmakewholeslice(dkunit(dev), i),
				UID_ROOT, GID_OPERATOR, 0640,
				(info->d_dsflags & DSO_DEVICEMAPPER)?
				"%s.s%d" : "%ss%d", dev->si_name, sno);
			ndev->si_parent = dev;
			ndev->si_iosize_max = dev->si_iosize_max;
			udev_dict_set_cstr(ndev, "subsystem", "disk");
			/* Inherit parent's disk type */
			if (dp->d_disktype) {
				udev_dict_set_cstr(ndev, "disk-type",
				    __DECONST(char *, dp->d_disktype));
			}

			/* Create serno alias */
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
					       dp->d_info.d_serialno, sno);
			}

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
					      &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
					       uuid_buf);
			}

			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == DOSPTYP_DFLYBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	dsgone(&osp);
	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
}
498
499
/*
 * Main loop of the disk management kernel thread.  All probe, reprobe,
 * unprobe and destroy operations are serialized here via messages sent
 * to disk_msg_port (see disk_msg_send()/disk_msg_send_sync()).
 */
static void
disk_msg_core(void *arg)
{
	struct disk	*dp;
	struct diskslice *sp;
	disk_msg_t msg;
	int run;

	/* Publish our request port, then wake the spawning thread */
	lwkt_gettoken(&disklist_token);
	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);	/* synchronous startup */
	lwkt_reltoken(&disklist_token);

	lwkt_gettoken(&ds_token);
	run = 1;

	/* NOTE: run is never cleared below; the loop runs for good */
	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				   "DISK_DISK_PROBE: %s\n",
				   dp->d_cdev->si_name);
			disk_iocom_update(dp);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				   "DISK_DISK_DESTROY: %s\n",
				   dp->d_cdev->si_name);
			disk_iocom_uninit(dp);

			/*
			 * Interlock against struct disk enumerations.
			 * Wait for enumerations to complete then remove
			 * the dp from the list before tearing it down.
			 * This avoids numerous races.
			 */
			lwkt_gettoken(&disklist_token);
			while (dp->d_refs)
				tsleep(&dp->d_refs, 0, "diskdel", hz / 10);
			LIST_REMOVE(dp, d_list);

			dsched_disk_destroy(dp);
			devfs_destroy_related(dp->d_cdev);
			destroy_dev(dp->d_cdev);
			destroy_only_dev(dp->d_rawdev);

			lwkt_reltoken(&disklist_token);

			/* Free the duplicated serial number, if any */
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				   "DISK_DISK_UNPROBE: %s\n",
				   dp->d_cdev->si_name);
			devfs_destroy_related(dp->d_cdev);
			break;
		case DISK_SLICE_REPROBE:
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			/*
			 * Clear the test flag, reprobe, then destroy any
			 * related device the reprobe did not revalidate.
			 */
			devfs_clr_related_flag(sp->ds_dev,
						SI_REPROBE_TEST);
			disk_debug(1,
				   "DISK_SLICE_REPROBE: %s\n",
				   sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_related_without_flag(
					sp->ds_dev, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			dp = (struct disk *)msg->load;
			/* Same clear/reprobe/sweep pattern as above */
			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
			disk_debug(1,
				   "DISK_DISK_REPROBE: %s\n",
				   dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_related_without_flag(
					dp->d_cdev, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			/* No-op message used to drain the queue */
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
				    "disk_msg_core: unknown message "
				    "received at core\n");
			break;
		}
		lwkt_replymsg(&msg->hdr, 0);
	}
	lwkt_reltoken(&ds_token);
	lwkt_exit();
}
602
603
/*
 * Acts as a message drain. Any message that is replied to here gets
 * destroyed and the memory freed.
 *
 * Used as the reply port for asynchronous disk_msg_send() messages.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}
613
614
615 void
disk_msg_send(uint32_t cmd,void * load,void * load2)616 disk_msg_send(uint32_t cmd, void *load, void *load2)
617 {
618 disk_msg_t disk_msg;
619 lwkt_port_t port = &disk_msg_port;
620
621 disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
622
623 lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
624
625 disk_msg->hdr.u.ms_result = cmd;
626 disk_msg->load = load;
627 disk_msg->load2 = load2;
628 KKASSERT(port);
629 lwkt_sendmsg(port, &disk_msg->hdr);
630 }
631
632 void
disk_msg_send_sync(uint32_t cmd,void * load,void * load2)633 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
634 {
635 struct lwkt_port rep_port;
636 disk_msg_t disk_msg;
637 lwkt_port_t port;
638
639 disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
640 port = &disk_msg_port;
641
642 /* XXX could probably use curthread's built-in msgport */
643 lwkt_initport_thread(&rep_port, curthread);
644 lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
645
646 disk_msg->hdr.u.ms_result = cmd;
647 disk_msg->load = load;
648 disk_msg->load2 = load2;
649
650 lwkt_domsg(port, &disk_msg->hdr, 0);
651 objcache_put(disk_msg_cache, disk_msg);
652 }
653
/*
 * Create a raw device for the dev_ops template (which is returned). Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops. The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	/* Default naming ("<ops-name><unit>"), non-clone variant */
	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
}
669
/*
 * As disk_create(), but the managed device is created via
 * make_only_dev_covering() (clone=1 path in _disk_create_named()).
 */
cdev_t
disk_create_clone(int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
}
676
/*
 * As disk_create(), but with an explicit device name instead of the
 * default "<ops-name><unit>" naming.
 */
cdev_t
disk_create_named(const char *name, int unit, struct disk *dp,
		  struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 0);
}
683
/*
 * Combination of disk_create_named() and disk_create_clone(): explicit
 * name, managed device created via make_only_dev_covering().
 */
cdev_t
disk_create_named_clone(const char *name, int unit, struct disk *dp,
			struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 1);
}
690
691 static cdev_t
_disk_create_named(const char * name,int unit,struct disk * dp,struct dev_ops * raw_ops,int clone)692 _disk_create_named(const char *name, int unit, struct disk *dp,
693 struct dev_ops *raw_ops, int clone)
694 {
695 cdev_t rawdev;
696 struct dev_ops *dops;
697
698 disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
699
700 if (name) {
701 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
702 UID_ROOT, GID_OPERATOR, 0640, "%s", name);
703 } else {
704 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
705 UID_ROOT, GID_OPERATOR, 0640,
706 "%s%d", raw_ops->head.name, unit);
707 }
708
709 bzero(dp, sizeof(*dp));
710
711 dops = (raw_ops->head.flags & D_NOEMERGPGR) ? &disk2_ops : &disk1_ops;
712
713 dp->d_rawdev = rawdev;
714 dp->d_raw_ops = raw_ops;
715 dp->d_dev_ops = dops;
716
717 if (name) {
718 if (clone) {
719 dp->d_cdev = make_only_dev_covering(
720 dops, dp->d_rawdev->si_ops,
721 dkmakewholedisk(unit),
722 UID_ROOT, GID_OPERATOR, 0640,
723 "%s", name);
724 } else {
725 dp->d_cdev = make_dev_covering(
726 dops, dp->d_rawdev->si_ops,
727 dkmakewholedisk(unit),
728 UID_ROOT, GID_OPERATOR, 0640,
729 "%s", name);
730 }
731 } else {
732 if (clone) {
733 dp->d_cdev = make_only_dev_covering(
734 dops, dp->d_rawdev->si_ops,
735 dkmakewholedisk(unit),
736 UID_ROOT, GID_OPERATOR, 0640,
737 "%s%d", raw_ops->head.name, unit);
738 } else {
739 dp->d_cdev = make_dev_covering(
740 dops, dp->d_rawdev->si_ops,
741 dkmakewholedisk(unit),
742 UID_ROOT, GID_OPERATOR, 0640,
743 "%s%d", raw_ops->head.name, unit);
744 }
745 }
746
747 udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
748 dp->d_cdev->si_disk = dp;
749
750 if (name)
751 dsched_disk_create(dp, name, unit);
752 else
753 dsched_disk_create(dp, raw_ops->head.name, unit);
754
755 lwkt_gettoken(&disklist_token);
756 LIST_INSERT_HEAD(&disklist, dp, d_list);
757 lwkt_reltoken(&disklist_token);
758
759 disk_iocom_init(dp);
760
761 disk_debug(1, "disk_create (end): %s%d\n",
762 (name != NULL)?(name):(raw_ops->head.name), unit);
763
764 return (dp->d_rawdev);
765 }
766
767 int
disk_setdisktype(struct disk * disk,const char * type)768 disk_setdisktype(struct disk *disk, const char *type)
769 {
770 int error;
771
772 KKASSERT(disk != NULL);
773
774 disk->d_disktype = type;
775 error = udev_dict_set_cstr(disk->d_cdev, "disk-type",
776 __DECONST(char *, type));
777 return error;
778 }
779
/* Return the current open count of the managed disk device. */
int
disk_getopencount(struct disk *disk)
{
	return disk->d_opencount;
}
785
/*
 * Copy *info into the disk, normalize the serial number and the media
 * size/block fields, and refresh the related udev/alias state.  Common
 * backend for disk_setdiskinfo() and disk_setdiskinfo_sync().
 */
static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	/* From here on operate on the disk's own copy */
	info = &disk->d_info;

	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.
	 */
	if (info->d_serialno && info->d_serialno[0] &&
	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
				       info->d_serialno);
		}
	} else {
		/* Empty or all-blank serial numbers are discarded */
		info->d_serialno = NULL;
	}
	/* Free the previous duplicate, if any */
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	dsched_disk_update(disk, info);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	/* Add the serial number to the udev_dictionary */
	if (info->d_serialno)
		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
}
848
/*
 * Disk drivers must call this routine when media parameters are available
 * or have changed.
 *
 * Asynchronous variant: queues a probe to the disk management thread
 * and returns immediately.
 */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
		   disk->d_cdev->si_name);
}
861
/*
 * As disk_setdiskinfo(), but blocks until the probe message has been
 * processed by the disk management thread.
 */
void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
		   disk->d_cdev->si_name);
}
870
871 /*
872 * This routine is called when an adapter detaches. The higher level
873 * managed disk device is destroyed while the lower level raw device is
874 * released.
875 */
876 void
disk_destroy(struct disk * disk)877 disk_destroy(struct disk *disk)
878 {
879 disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
880 return;
881 }
882
883 int
disk_dumpcheck(cdev_t dev,u_int64_t * size,u_int64_t * blkno,u_int32_t * secsize)884 disk_dumpcheck(cdev_t dev, u_int64_t *size,
885 u_int64_t *blkno, u_int32_t *secsize)
886 {
887 struct partinfo pinfo;
888 int error;
889
890 if (size)
891 *size = 0; /* avoid gcc warnings */
892 if (secsize)
893 *secsize = 512; /* avoid gcc warnings */
894 bzero(&pinfo, sizeof(pinfo));
895
896 error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
897 proc0.p_ucred, NULL, NULL);
898 if (error)
899 return (error);
900
901 if (pinfo.media_blksize == 0)
902 return (ENXIO);
903
904 if (blkno) /* XXX: make sure this reserved stuff is right */
905 *blkno = pinfo.reserved_blocks +
906 pinfo.media_offset / pinfo.media_blksize;
907 if (secsize)
908 *secsize = pinfo.media_blksize;
909 if (size)
910 *size = (pinfo.media_blocks - pinfo.reserved_blocks);
911
912 return (0);
913 }
914
915 int
disk_dumpconf(cdev_t dev,u_int onoff)916 disk_dumpconf(cdev_t dev, u_int onoff)
917 {
918 struct dumperinfo di;
919 u_int64_t size, blkno;
920 u_int32_t secsize;
921 int error;
922
923 if (!onoff)
924 return set_dumper(NULL);
925
926 error = disk_dumpcheck(dev, &size, &blkno, &secsize);
927
928 if (error)
929 return ENXIO;
930
931 bzero(&di, sizeof(struct dumperinfo));
932 di.dumper = diskdump;
933 di.priv = dev;
934 di.blocksize = secsize;
935 di.maxiosize = dev->si_iosize_max;
936 di.mediaoffset = blkno * DEV_BSIZE;
937 di.mediasize = size * DEV_BSIZE;
938
939 return set_dumper(&di);
940 }
941
942 void
disk_unprobe(struct disk * disk)943 disk_unprobe(struct disk *disk)
944 {
945 if (disk == NULL)
946 return;
947
948 disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
949 }
950
/* Throw away the disk's slice table (e.g. when the media goes away). */
void
disk_invalidate (struct disk *disk)
{
	dsgone(&disk->d_slice);
}
956
/*
 * Enumerate disks, pass a marker and an initial NULL dp to initialize,
 * then loop with the previously returned dp.
 *
 * The returned dp will be referenced, preventing its destruction. When
 * you pass the returned dp back into the loop the ref is dropped.
 *
 * WARNING: If terminating your loop early you must call
 *	    disk_enumerate_stop().
 */
struct disk *
disk_enumerate(struct disk *marker, struct disk *dp)
{
	lwkt_gettoken(&disklist_token);
	if (dp) {
		/* Continue: drop the previous ref and step past the marker */
		--dp->d_refs;
		dp = LIST_NEXT(marker, d_list);
		LIST_REMOVE(marker, d_list);
	} else {
		/* First call: set up the marker and start at the head */
		bzero(marker, sizeof(*marker));
		marker->d_flags = DISKFLAG_MARKER;
		dp = LIST_FIRST(&disklist);
	}
	/* Skip other enumerators' markers */
	while (dp) {
		if ((dp->d_flags & DISKFLAG_MARKER) == 0)
			break;
		dp = LIST_NEXT(dp, d_list);
	}
	/* Reference the result and park the marker right after it */
	if (dp) {
		++dp->d_refs;
		LIST_INSERT_AFTER(dp, marker, d_list);
	}
	lwkt_reltoken(&disklist_token);
	return (dp);
}
992
993 /*
994 * Terminate an enumeration early. Do not call this function if the
995 * enumeration ended normally. dp can be NULL, indicating that you
996 * wish to retain the ref count on dp.
997 *
998 * This function removes the marker.
999 */
1000 void
disk_enumerate_stop(struct disk * marker,struct disk * dp)1001 disk_enumerate_stop(struct disk *marker, struct disk *dp)
1002 {
1003 lwkt_gettoken(&disklist_token);
1004 LIST_REMOVE(marker, d_list);
1005 if (dp)
1006 --dp->d_refs;
1007 lwkt_reltoken(&disklist_token);
1008 }
1009
1010 static
1011 int
sysctl_disks(SYSCTL_HANDLER_ARGS)1012 sysctl_disks(SYSCTL_HANDLER_ARGS)
1013 {
1014 struct disk marker;
1015 struct disk *dp;
1016 int error, first;
1017
1018 first = 1;
1019 error = 0;
1020 dp = NULL;
1021
1022 while ((dp = disk_enumerate(&marker, dp))) {
1023 if (!first) {
1024 error = SYSCTL_OUT(req, " ", 1);
1025 if (error) {
1026 disk_enumerate_stop(&marker, dp);
1027 break;
1028 }
1029 } else {
1030 first = 0;
1031 }
1032 error = SYSCTL_OUT(req, dp->d_rawdev->si_name,
1033 strlen(dp->d_rawdev->si_name));
1034 if (error) {
1035 disk_enumerate_stop(&marker, dp);
1036 break;
1037 }
1038 }
1039 if (error == 0)
1040 error = SYSCTL_OUT(req, "", 1);
1041 return error;
1042 }
1043
/* kern.disks: read-only string listing the available raw disk names */
SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
	    sysctl_disks, "A", "names of available disks");
1046
/*
 * Open a disk device or partition.
 *
 * Serializes against concurrent opens via DISKFLAG_LOCK, opens the
 * underlying raw device on the first open, then the slice/partition
 * layer.  On success the shared disk open count is bumped.
 */
static
int
diskopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;

	/*
	 * dp can't be NULL here XXX.
	 *
	 * d_slice will be NULL if setdiskinfo() has not been called yet.
	 * setdiskinfo() is typically called whether the disk is present
	 * or not (e.g. CD), but the base disk device is created first
	 * and there may be a race.
	 */
	dp = dev->si_disk;
	if (dp == NULL || dp->d_slice == NULL)
		return (ENXIO);

	/*
	 * Disallow access to disk volumes if RESTRICTEDROOT
	 */
	if (caps_priv_check_self(SYSCAP_RESTRICTEDROOT))
		return (EPERM);

	error = 0;

	/*
	 * Deal with open races
	 */
	lwkt_gettoken(&ds_token);
	while (dp->d_flags & DISKFLAG_LOCK) {
		dp->d_flags |= DISKFLAG_WANTED;
		error = tsleep(dp, PCATCH, "diskopen", hz);
		if (error) {
			lwkt_reltoken(&ds_token);
			return (error);
		}
	}
	dp->d_flags |= DISKFLAG_LOCK;

	/*
	 * Open the underlying raw device.
	 */
	if (!dsisopen(dp->d_slice)) {
#if 0
		if (!pdev->si_iosize_max)
			pdev->si_iosize_max = dev->si_iosize_max;
#endif
		error = dev_dopen(dp->d_rawdev, ap->a_oflags,
				  ap->a_devtype, ap->a_cred, NULL, NULL);
	}

	if (error)
		goto out;
	error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
		       &dp->d_slice, &dp->d_info);
	/* If the slice layer still reports closed, undo the raw open */
	if (!dsisopen(dp->d_slice)) {
		dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype, NULL);
	}
out:
	/* Release the open interlock and wake any waiters */
	dp->d_flags &= ~DISKFLAG_LOCK;
	if (dp->d_flags & DISKFLAG_WANTED) {
		dp->d_flags &= ~DISKFLAG_WANTED;
		wakeup(dp);
	}
	lwkt_reltoken(&ds_token);

	KKASSERT(dp->d_opencount >= 0);
	/* If the open was successful, bump open count */
	if (error == 0)
		atomic_add_int(&dp->d_opencount, 1);

	return(error);
}
1126
/*
 * Close a disk device or partition
 */
static
int
diskclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp;
	int error;
	int lcount;

	error = 0;
	dp = dev->si_disk;

	/*
	 * The cdev_t represents the disk/slice/part.  The shared
	 * dp structure governs all cdevs associated with the disk.
	 *
	 * As a safety only close the underlying raw device on the last
	 * close the disk device if our tracking of the slices/partitions
	 * also indicates nothing is open.
	 */
	KKASSERT(dp->d_opencount >= 1);
	/* lcount is the open count prior to this decrement */
	lcount = atomic_fetchadd_int(&dp->d_opencount, -1);

	lwkt_gettoken(&ds_token);
	dsclose(dev, ap->a_devtype, dp->d_slice);
	if (lcount <= 1 && !dsisopen(dp->d_slice)) {
		/* last close and no slices/partitions remain open */
		error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype, NULL);
	}
	lwkt_reltoken(&ds_token);

	return (error);
}
1162
1163 /*
1164 * First execute the ioctl on the disk device, and if it isn't supported
1165 * try running it on the backing device.
1166 */
1167 static
1168 int
diskioctl(struct dev_ioctl_args * ap)1169 diskioctl(struct dev_ioctl_args *ap)
1170 {
1171 cdev_t dev = ap->a_head.a_dev;
1172 struct disk *dp;
1173 int error;
1174 u_int u;
1175
1176 dp = dev->si_disk;
1177 if (dp == NULL)
1178 return (ENXIO);
1179
1180 devfs_debug(DEVFS_DEBUG_DEBUG,
1181 "diskioctl: cmd is: %lx (name: %s)\n",
1182 ap->a_cmd, dev->si_name);
1183 devfs_debug(DEVFS_DEBUG_DEBUG,
1184 "diskioctl: &dp->d_slice is: %p, %p\n",
1185 &dp->d_slice, dp->d_slice);
1186
1187 if (ap->a_cmd == DIOCGKERNELDUMP) {
1188 u = *(u_int *)ap->a_data;
1189 return disk_dumpconf(dev, u);
1190 }
1191
1192 if (ap->a_cmd == DIOCRECLUSTER && dev == dp->d_cdev) {
1193 error = disk_iocom_ioctl(dp, ap->a_cmd, ap->a_data);
1194 return error;
1195 }
1196
1197 if (&dp->d_slice == NULL || dp->d_slice == NULL ||
1198 ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) &&
1199 dkslice(dev) == WHOLE_DISK_SLICE)) {
1200 error = ENOIOCTL;
1201 } else {
1202 lwkt_gettoken(&ds_token);
1203 error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
1204 &dp->d_slice, &dp->d_info);
1205 lwkt_reltoken(&ds_token);
1206 }
1207
1208 if (error == ENOIOCTL) {
1209 error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
1210 ap->a_fflag, ap->a_cred, NULL, NULL);
1211 }
1212 return (error);
1213 }
1214
1215 /*
1216 * Execute strategy routine
1217 *
1218 * WARNING! We are using the KVABIO API and must not access memory
1219 * through bp->b_data without first calling bkvasync(bp).
1220 */
1221 static
1222 int
diskstrategy(struct dev_strategy_args * ap)1223 diskstrategy(struct dev_strategy_args *ap)
1224 {
1225 cdev_t dev = ap->a_head.a_dev;
1226 struct bio *bio = ap->a_bio;
1227 struct bio *nbio;
1228 struct disk *dp;
1229
1230 dp = dev->si_disk;
1231
1232 if (dp == NULL) {
1233 bio->bio_buf->b_error = ENXIO;
1234 bio->bio_buf->b_flags |= B_ERROR;
1235 biodone(bio);
1236 return(0);
1237 }
1238 KKASSERT(dev->si_disk == dp);
1239
1240 /*
1241 * The dscheck() function will also transform the slice relative
1242 * block number i.e. bio->bio_offset into a block number that can be
1243 * passed directly to the underlying raw device. If dscheck()
1244 * returns NULL it will have handled the bio for us (e.g. EOF
1245 * or error due to being beyond the device size).
1246 */
1247 if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
1248 dev_dstrategy(dp->d_rawdev, nbio);
1249 } else {
1250 biodone(bio);
1251 }
1252 return(0);
1253 }
1254
1255 /*
1256 * Return the partition size in ?blocks?
1257 */
1258 static
1259 int
diskpsize(struct dev_psize_args * ap)1260 diskpsize(struct dev_psize_args *ap)
1261 {
1262 cdev_t dev = ap->a_head.a_dev;
1263 struct disk *dp;
1264
1265 dp = dev->si_disk;
1266 if (dp == NULL)
1267 return(ENODEV);
1268
1269 ap->a_result = dssize(dev, &dp->d_slice);
1270
1271 if ((ap->a_result == -1) &&
1272 (dp->d_info.d_dsflags & DSO_RAWPSIZE)) {
1273 ap->a_head.a_dev = dp->d_rawdev;
1274 return dev_doperate(&ap->a_head);
1275 }
1276 return(0);
1277 }
1278
/*
 * Dump handler: validate the request against the dump device bounds
 * and forward it to the underlying raw device.
 */
static int
diskdump(struct dev_dump_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct disk *dp = dev->si_disk;
	u_int64_t size, offset;
	int error;

	error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
	/* XXX: this should probably go in disk_dumpcheck somehow */
	if (ap->a_length != 0) {
		/* convert block-denominated size/offset to bytes */
		size *= DEV_BSIZE;
		offset = ap->a_blkno * DEV_BSIZE;
		if ((ap->a_offset < offset) ||
		    (ap->a_offset + ap->a_length - offset > size)) {
			kprintf("Attempt to write outside dump "
				"device boundaries.\n");
			error = ENOSPC;
		}
	}

	if (error == 0) {
		/* forward the dump operation to the raw device */
		ap->a_head.a_dev = dp->d_rawdev;
		error = dev_doperate(&ap->a_head);
	}

	return(error);
}
1307
1308
/* Export structure sizes for debugging (debug.sizeof.*) */
SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
	   0, sizeof(struct diskslices), "sizeof(struct diskslices)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
	   0, sizeof(struct disk), "sizeof(struct disk)");

/*
 * Reorder interval for burst write allowance and minor write
 * allowance.
 *
 * We always want to trickle some writes in to make use of the
 * disk's zone cache.  Bursting occurs on a longer interval and only
 * when runningbufspace is well over the hirunningspace limit.
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

/* byte budgets consumed by bioqwritereorder() per reorder pass */
int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1336
1337
/*
 * Order I/Os.  Generally speaking this code is designed to make better
 * use of drive zone caches.  A drive zone cache can typically track linear
 * reads or writes for around 16 zones simultaneously.
 *
 * Read prioritization issues:  It is possible for hundreds of megabytes worth
 * of writes to be queued asynchronously.  This creates a huge bottleneck
 * for reads which reduce read bandwidth to a trickle.
 *
 * To solve this problem we generally reorder reads before writes.
 *
 * However, a large number of random reads can also starve writes and
 * make poor use of the drive zone cache so we allow writes to trickle
 * in every N reads.
 */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
#if 0
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}
#endif

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		/*
		 * bioq->transition points at the first queued write (or
		 * is NULL when none are queued / ordering was forced).
		 */
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				/* reset the counter after a burst cycle */
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occurred the transition
		 * field will be NULL.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}
1410
/*
 * Move the read-write transition point to prevent reads from
 * completely starving our writes.  This brings a number of writes into
 * the fold every N reads.
 *
 * We bring a few linear writes into the fold on a minor interval
 * and we bring a non-linear burst of writes into the fold on a major
 * interval.  Bursting only occurs if runningbufspace is really high
 * (typically from syncs, fsyncs, or HAMMER flushes).
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;		/* remaining byte budget for this pass */
	size_t n;
	int check_off;		/* require contiguous (linear) writes? */

	/*
	 * Minor reorder: small byte budget, contiguous writes only.
	 * Burst reorder: large byte budget, any writes; used only when
	 * the reorder counter is high AND runningbufspace is severe.
	 */
	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	/*
	 * Advance the transition pointer past qualifying writes.
	 * NOTE(review): the write that first exceeds the remaining
	 * budget is still brought in before the loop breaks -- the
	 * budget is a soft limit.
	 */
	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}
1452
/*
 * Bounds checking against the media size, used for the raw partition.
 * secsize, mediasize and b_blkno must all be the same units.
 * Possibly this has to be DEV_BSIZE (512).
 *
 * Returns 1 if the (possibly truncated) request may proceed, 0 if the
 * caller should complete the bio itself (EOF or error recorded in bp).
 */
int
bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
{
	struct buf *bp = bio->bio_buf;
	int64_t sz;

	/* number of sectors spanned by the request, rounded up */
	sz = howmany(bp->b_bcount, secsize);

	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
		sz = mediasize - bio->bio_offset/DEV_BSIZE;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			return 0;
		}
		if (sz < 0) {
			/*
			 * If past end of disk, return EINVAL.
			 * NOTE(review): only b_error is set here; B_ERROR
			 * is presumably set by the caller -- confirm.
			 */
			bp->b_error = EINVAL;
			return 0;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz * secsize;
	}

	return 1;
}
1484
1485 /*
1486 * Disk error is the preface to plaintive error messages
1487 * about failing disk transfers. It prints messages of the form
1488
1489 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1490
1491 * if the offset of the error in the transfer and a disk label
1492 * are both available. blkdone should be -1 if the position of the error
1493 * is unknown; the disklabel pointer may be null from drivers that have not
1494 * been converted to use them. The message is printed with kprintf
1495 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1496 * The message should be completed (with at least a newline) with kprintf
1497 * or log(-1, ...), respectively. There is no trailing space.
1498 */
1499 void
diskerr(struct bio * bio,cdev_t dev,const char * what,int pri,int donecnt)1500 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1501 {
1502 struct buf *bp = bio->bio_buf;
1503 const char *term;
1504
1505 switch(bp->b_cmd) {
1506 case BUF_CMD_READ:
1507 term = "read";
1508 break;
1509 case BUF_CMD_WRITE:
1510 term = "write";
1511 break;
1512 default:
1513 term = "access";
1514 break;
1515 }
1516 kprintf("%s: %s %sing ", dev->si_name, what, term);
1517 kprintf("offset %012llx for %d",
1518 (long long)bio->bio_offset,
1519 bp->b_bcount);
1520
1521 if (donecnt)
1522 kprintf(" (%d bytes completed)", donecnt);
1523 }
1524
/*
 * Locate a disk device by its devfs name.
 */
cdev_t
disk_locate(const char *devname)
{
	return devfs_find_device_by_name("%s", devname);
}
1533
/*
 * Issue a synchronous DISK_SYNC message to the disk messaging core
 * (arg is unused).
 */
void
disk_config(void *arg)
{
	disk_msg_send_sync(DISK_SYNC, NULL, NULL);
}
1539
/*
 * Subsystem initialization: set up the message cache, tokens, the
 * reply-only message drain port, and the disk_msg_core thread.
 */
static void
disk_init(void)
{
	struct thread* td_core;

	/* object cache for messages handed to the disk_msg_core thread */
	disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
					 NULL, NULL, NULL,
					 objcache_malloc_alloc,
					 objcache_malloc_free,
					 &disk_msg_malloc_args);

	lwkt_token_init(&disklist_token, "disks");
	lwkt_token_init(&ds_token, "ds");

	/*
	 * Initialize the reply-only port which acts as a message drain
	 */
	lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);

	/*
	 * Spawn the message-handling core thread, then sleep on it
	 * (presumably until the new thread wakes us after its own
	 * startup -- see disk_msg_core).
	 */
	lwkt_gettoken(&disklist_token);
	lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
		    0, -1, "disk_msg_core");
	tsleep(td_core, 0, "diskcore", 0);
	lwkt_reltoken(&disklist_token);
}
1565
/*
 * Subsystem teardown: destroy the message cache created in disk_init().
 */
static void
disk_uninit(void)
{
	objcache_destroy(disk_msg_cache);
}
1571
/*
 * Clean out illegal characters in serial numbers.
 *
 * Any character other than [a-zA-Z0-9], '-', '@', '+' or '.' is
 * replaced in-place with an underscore.
 */
static void
disk_cleanserial(char *serno)
{
	char ch;

	for (; (ch = *serno) != 0; ++serno) {
		int legal = (ch >= 'a' && ch <= 'z') ||
			    (ch >= 'A' && ch <= 'Z') ||
			    (ch >= '0' && ch <= '9') ||
			    ch == '-' || ch == '@' ||
			    ch == '+' || ch == '.';
		if (!legal)
			*serno = '_';
	}
}
1594
/* kern.disk_debug tunable/sysctl controls subr_disk debug output */
TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
	   0, "Enable subr_disk debugging");

/* register init/uninit hooks early in the boot sequence */
SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);
1601