1 /* 2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * and Alex Hornung <ahornung@gmail.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * ---------------------------------------------------------------------------- 36 * "THE BEER-WARE LICENSE" (Revision 42): 37 * <phk@FreeBSD.ORG> wrote this file. 
As long as you retain this notice you 38 * can do whatever you want with this stuff. If we meet some day, and you think 39 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 40 * ---------------------------------------------------------------------------- 41 * 42 * Copyright (c) 1982, 1986, 1988, 1993 43 * The Regents of the University of California. All rights reserved. 44 * (c) UNIX System Laboratories, Inc. 45 * All or some portions of this file are derived from material licensed 46 * to the University of California by American Telephone and Telegraph 47 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 48 * the permission of UNIX System Laboratories, Inc. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. All advertising materials mentioning features or use of this software 59 * must display the following acknowledgement: 60 * This product includes software developed by the University of 61 * California, Berkeley and its contributors. 62 * 4. Neither the name of the University nor the names of its contributors 63 * may be used to endorse or promote products derived from this software 64 * without specific prior written permission. 65 * 66 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 69 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 76 * SUCH DAMAGE. 77 * 78 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94 79 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $ 80 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $ 81 */ 82 83 #include <sys/param.h> 84 #include <sys/systm.h> 85 #include <sys/kernel.h> 86 #include <sys/proc.h> 87 #include <sys/sysctl.h> 88 #include <sys/buf.h> 89 #include <sys/conf.h> 90 #include <sys/disklabel.h> 91 #include <sys/disklabel32.h> 92 #include <sys/disklabel64.h> 93 #include <sys/diskslice.h> 94 #include <sys/diskmbr.h> 95 #include <sys/disk.h> 96 #include <sys/kerneldump.h> 97 #include <sys/malloc.h> 98 #include <machine/md_var.h> 99 #include <sys/ctype.h> 100 #include <sys/syslog.h> 101 #include <sys/device.h> 102 #include <sys/msgport.h> 103 #include <sys/devfs.h> 104 #include <sys/thread.h> 105 #include <sys/dsched.h> 106 #include <sys/queue.h> 107 #include <sys/lock.h> 108 #include <sys/udev.h> 109 #include <sys/uuid.h> 110 111 #include <sys/buf2.h> 112 #include <sys/mplock2.h> 113 #include <sys/msgport2.h> 114 #include <sys/thread2.h> 115 116 static MALLOC_DEFINE(M_DISK, "disk", "disk data"); 117 static int disk_debug_enable = 0; 118 119 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t); 120 static void disk_msg_core(void *); 121 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe); 122 static void disk_probe(struct disk *dp, int reprobe); 123 static 
void _setdiskinfo(struct disk *disk, struct disk_info *info);
static void bioqwritereorder(struct bio_queue_head *bioq);
static void disk_cleanserial(char *serno);
static int disk_debug(int, char *, ...) __printflike(2, 3);
static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp,
    struct dev_ops *raw_ops, int clone);

static d_open_t diskopen;
static d_close_t diskclose;
static d_ioctl_t diskioctl;
static d_strategy_t diskstrategy;
static d_psize_t diskpsize;
static d_dump_t diskdump;

/* Global list of all registered disks, guarded by disklist_token. */
static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
static struct lwkt_token disklist_token;

/*
 * dev_ops template for the cooked (slice/partition managed) disk devices.
 * Reads and writes go through the generic physio helpers; everything else
 * is handled by the disk layer before being forwarded to the raw device.
 */
static struct dev_ops disk_ops = {
	{ "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE },
	.d_open = diskopen,
	.d_close = diskclose,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = diskioctl,
	.d_strategy = diskstrategy,
	.d_dump = diskdump,
	.d_psize = diskpsize,
};

/* Object cache backing disk_msg allocations for the message thread. */
static struct objcache *disk_msg_cache;

struct objcache_malloc_args disk_msg_malloc_args = {
	sizeof(struct disk_msg), M_DISK };

/* disk_dispose_port auto-frees replies; disk_msg_port feeds disk_msg_core. */
static struct lwkt_port disk_dispose_port;
static struct lwkt_port disk_msg_port;

/*
 * kprintf-style debug output, emitted only when the message's level is
 * at or below the disk_debug_enable sysctl/tunable.  Always returns 0.
 */
static int
disk_debug(int level, char *fmt, ...)
{
	__va_list ap;

	__va_start(ap, fmt);
	if (level <= disk_debug_enable)
		kvprintf(fmt, ap);
	__va_end(ap);

	return 0;
}

/*
 * Probe one slice for a disklabel and (re)create the partition devices
 * ('a'..) underneath it, including serno/ and part-by-uuid/ aliases.
 *
 * Tries the 32-bit label format first and falls back to the 64-bit format
 * when the 32-bit probe reports "no disk label".  If no label is found but
 * DSO_COMPATLABEL is set, a compatibility label is synthesized instead.
 *
 * dp      - disk this slice belongs to
 * dev     - cdev representing the slice being probed
 * slice   - slice index within dp->d_slice
 * reprobe - non-zero to revalidate existing devfs nodes rather than
 *           create new ones (SI_REPROBE_TEST marks survivors)
 *
 * Returns 0 on success, EINVAL if no usable label was found.
 */
static int
disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	struct diskslice *sp = &dp->d_slice->dss_slices[slice];
	disklabel_ops_t ops;
	struct partinfo part;
	const char *msg;
	char uuid_buf[128];
	cdev_t ndev;
	int sno;
	u_int i;

	disk_debug(2,
		    "disk_probe_slice (begin): %s (%s)\n",
		    dev->si_name, dp->d_cdev->si_name);

	/* User-visible slice number: sN names start at s0 for slice 1. */
	sno = slice ? slice - 1 : 0;

	/* Try the 32-bit label first, fall back to the 64-bit format. */
	ops = &disklabel32_ops;
	msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	if (msg && !strcmp(msg, "no disk label")) {
		ops = &disklabel64_ops;
		msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
	}
	if (msg == NULL) {
		/* Label found: account for its reserved area. */
		if (slice != WHOLE_DISK_SLICE)
			ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
		else
			sp->ds_reserved = 0;

		sp->ds_ops = ops;
		for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
			ops->op_loadpartinfo(sp->ds_label, i, &part);
			if (part.fstype) {
				if (reprobe &&
				    (ndev = devfs_find_device_by_name("%s%c",
						dev->si_name, 'a' + i))
				) {
					/*
					 * Device already exists and
					 * is still valid.
					 */
					ndev->si_flags |= SI_REPROBE_TEST;

					/*
					 * Destroy old UUID alias
					 */
					destroy_dev_alias(ndev,
					    "part-by-uuid/*");

					/* Create UUID alias */
					if (!kuuid_is_nil(&part.storage_uuid)) {
						snprintf_uuid(uuid_buf,
						    sizeof(uuid_buf),
						    &part.storage_uuid);
						make_dev_alias(ndev,
						    "part-by-uuid/%s",
						    uuid_buf);
					}
				} else {
					/* New partition device (e.g. da0s1a) */
					ndev = make_dev_covering(&disk_ops,
					    dp->d_rawdev->si_ops,
					    dkmakeminor(dkunit(dp->d_cdev),
							slice, i),
					    UID_ROOT, GID_OPERATOR, 0640,
					    "%s%c", dev->si_name, 'a' + i);
					ndev->si_parent = dev;
					ndev->si_disk = dp;
					udev_dict_set_cstr(ndev, "subsystem",
					    "disk");
					/* Inherit parent's disk type */
					if (dp->d_disktype) {
						udev_dict_set_cstr(ndev,
						    "disk-type",
						    __DECONST(char *,
						    dp->d_disktype));
					}

					/* Create serno alias */
					if (dp->d_info.d_serialno) {
						make_dev_alias(ndev,
						    "serno/%s.s%d%c",
						    dp->d_info.d_serialno,
						    sno, 'a' + i);
					}

					/* Create UUID alias */
					if (!kuuid_is_nil(&part.storage_uuid)) {
						snprintf_uuid(uuid_buf,
						    sizeof(uuid_buf),
						    &part.storage_uuid);
						make_dev_alias(ndev,
						    "part-by-uuid/%s",
						    uuid_buf);
					}
					ndev->si_flags |= SI_REPROBE_TEST;
				}
			}
		}
	} else if (info->d_dsflags & DSO_COMPATLABEL) {
		/*
		 * No on-disk label, but the driver asked for a synthesized
		 * compatibility label covering the slice.  Pick the 64-bit
		 * format when the slice exceeds 2^32 blocks.
		 */
		msg = NULL;
		if (sp->ds_size >= 0x100000000ULL)
			ops = &disklabel64_ops;
		else
			ops = &disklabel32_ops;
		sp->ds_label = ops->op_clone_label(info, sp);
	} else {
		/* Warn only for slice types expected to carry a BSD label. */
		if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD) {
			log(LOG_WARNING, "%s: cannot find label (%s)\n",
			    dev->si_name, msg);
		}
	}

	/* A (possibly synthesized) label starts out write-protected. */
	if (msg == NULL) {
		sp->ds_wlabel = FALSE;
	}

	return (msg ? EINVAL : 0);
}

/*
 * This routine is only called for newly minted drives or to reprobe
 * a drive with no open slices.  disk_probe_slice() is called directly
 * when reprobing partition changes within slices.
 *
 * Rebuilds dp->d_slice from the MBR/GPT, (re)creates the per-slice devfs
 * nodes and aliases, and probes BSD-labelable slices for partitions.
 */
static void
disk_probe(struct disk *dp, int reprobe)
{
	struct disk_info *info = &dp->d_info;
	cdev_t dev = dp->d_cdev;
	cdev_t ndev;
	int error, i, sno;
	struct diskslices *osp;
	struct diskslice *sp;
	char uuid_buf[128];

	KKASSERT (info->d_media_blksize != 0);

	/* Keep the old slice table around until the new one is built. */
	osp = dp->d_slice;
	dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
	disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);

	error = mbrinit(dev, info, &(dp->d_slice));
	if (error) {
		dsgone(&osp);
		return;
	}

	for (i = 0; i < dp->d_slice->dss_nslices; i++) {
		/*
		 * Ignore the whole-disk slice, it has already been created.
		 */
		if (i == WHOLE_DISK_SLICE)
			continue;

#if 1
		/*
		 * Ignore the compatibility slice s0 if it's a device mapper
		 * volume.
		 */
		if ((i == COMPATIBILITY_SLICE) &&
		    (info->d_dsflags & DSO_DEVICEMAPPER))
			continue;
#endif

		sp = &dp->d_slice->dss_slices[i];

		/*
		 * Handle s0.  s0 is a compatibility slice if there are no
		 * other slices and it has not otherwise been set up, else
		 * we ignore it.
		 */
		if (i == COMPATIBILITY_SLICE) {
			sno = 0;
			if (sp->ds_type == 0 &&
			    dp->d_slice->dss_nslices == BASE_SLICE) {
				sp->ds_size = info->d_media_blocks;
				sp->ds_reserved = 0;
			}
		} else {
			sno = i - 1;
			sp->ds_reserved = 0;
		}

		/*
		 * Ignore 0-length slices
		 */
		if (sp->ds_size == 0)
			continue;

		if (reprobe &&
		    (ndev = devfs_find_device_by_name("%ss%d",
						      dev->si_name, sno))) {
			/*
			 * Device already exists and is still valid
			 */
			ndev->si_flags |= SI_REPROBE_TEST;

			/*
			 * Destroy old UUID alias
			 */
			destroy_dev_alias(ndev, "slice-by-uuid/*");

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
				    &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
				    uuid_buf);
			}
		} else {
			/*
			 * Else create new device.  Device-mapper volumes
			 * use a "name.sN" naming scheme instead of "nameSN".
			 */
			ndev = make_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops,
			    dkmakewholeslice(dkunit(dev), i),
			    UID_ROOT, GID_OPERATOR, 0640,
			    (info->d_dsflags & DSO_DEVICEMAPPER)?
			    "%s.s%d" : "%ss%d", dev->si_name, sno);
			ndev->si_parent = dev;
			udev_dict_set_cstr(ndev, "subsystem", "disk");
			/* Inherit parent's disk type */
			if (dp->d_disktype) {
				udev_dict_set_cstr(ndev, "disk-type",
				    __DECONST(char *, dp->d_disktype));
			}

			/* Create serno alias */
			if (dp->d_info.d_serialno) {
				make_dev_alias(ndev, "serno/%s.s%d",
				    dp->d_info.d_serialno, sno);
			}

			/* Create UUID alias */
			if (!kuuid_is_nil(&sp->ds_stor_uuid)) {
				snprintf_uuid(uuid_buf, sizeof(uuid_buf),
				    &sp->ds_stor_uuid);
				make_dev_alias(ndev, "slice-by-uuid/%s",
				    uuid_buf);
			}

			ndev->si_disk = dp;
			ndev->si_flags |= SI_REPROBE_TEST;
		}
		sp->ds_dev = ndev;

		/*
		 * Probe appropriate slices for a disklabel
		 *
		 * XXX slice type 1 used by our gpt probe code.
		 * XXX slice type 0 used by mbr compat slice.
		 */
		if (sp->ds_type == DOSPTYP_386BSD ||
		    sp->ds_type == DOSPTYP_NETBSD ||
		    sp->ds_type == DOSPTYP_OPENBSD ||
		    sp->ds_type == 0 ||
		    sp->ds_type == 1) {
			if (dp->d_slice->dss_first_bsd_slice == 0)
				dp->d_slice->dss_first_bsd_slice = i;
			disk_probe_slice(dp, ndev, i, reprobe);
		}
	}
	dsgone(&osp);
	disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
}


/*
 * Main loop of the disk messaging kernel thread.  Serializes all probe,
 * reprobe and destroy operations so they never run concurrently.  Each
 * message is replied to when its work is complete, which is what the
 * synchronous senders (disk_msg_send_sync) block on.
 */
static void
disk_msg_core(void *arg)
{
	struct disk *dp;
	struct diskslice *sp;
	disk_msg_t msg;
	int run;

	lwkt_gettoken(&disklist_token);
	lwkt_initport_thread(&disk_msg_port, curthread);
	wakeup(curthread);	/* synchronous startup */
	lwkt_reltoken(&disklist_token);

	get_mplock();	/* not mpsafe yet? */
	run = 1;

	while (run) {
		msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);

		switch (msg->hdr.u.ms_result) {
		case DISK_DISK_PROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_PROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 0);
			break;
		case DISK_DISK_DESTROY:
			/*
			 * Tear down devfs nodes and unlink the disk from
			 * the global list; the serial number string was
			 * kstrdup'd in _setdiskinfo() and is freed here.
			 */
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_DESTROY: %s\n",
				    dp->d_cdev->si_name);
			devfs_destroy_related(dp->d_cdev);
			destroy_dev(dp->d_cdev);
			destroy_only_dev(dp->d_rawdev);
			lwkt_gettoken(&disklist_token);
			LIST_REMOVE(dp, d_list);
			lwkt_reltoken(&disklist_token);
			if (dp->d_info.d_serialno) {
				kfree(dp->d_info.d_serialno, M_TEMP);
				dp->d_info.d_serialno = NULL;
			}
			break;
		case DISK_UNPROBE:
			dp = (struct disk *)msg->load;
			disk_debug(1,
				    "DISK_DISK_UNPROBE: %s\n",
				    dp->d_cdev->si_name);
			devfs_destroy_related(dp->d_cdev);
			break;
		case DISK_SLICE_REPROBE:
			/*
			 * Reprobe a single slice.  Nodes not re-marked with
			 * SI_REPROBE_TEST by the probe are stale and get
			 * destroyed afterwards.
			 */
			dp = (struct disk *)msg->load;
			sp = (struct diskslice *)msg->load2;
			devfs_clr_related_flag(sp->ds_dev,
			    SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_SLICE_REPROBE: %s\n",
				    sp->ds_dev->si_name);
			disk_probe_slice(dp, sp->ds_dev,
					 dkslice(sp->ds_dev), 1);
			devfs_destroy_related_without_flag(
				sp->ds_dev, SI_REPROBE_TEST);
			break;
		case DISK_DISK_REPROBE:
			/* Same flag dance as above, for the whole disk. */
			dp = (struct disk *)msg->load;
			devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST);
			disk_debug(1,
				    "DISK_DISK_REPROBE: %s\n",
				    dp->d_cdev->si_name);
			disk_probe(dp, 1);
			devfs_destroy_related_without_flag(
				dp->d_cdev, SI_REPROBE_TEST);
			break;
		case DISK_SYNC:
			/* No-op message used to drain the queue. */
			disk_debug(1, "DISK_SYNC\n");
			break;
		default:
			devfs_debug(DEVFS_DEBUG_WARNING,
			    "disk_msg_core: unknown message "
			    "received at core\n");
			break;
		}
		lwkt_replymsg(&msg->hdr, 0);
	}
	lwkt_exit();
}


/*
 * Acts as a message drain. Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}


/*
 * Asynchronously send a command to the disk messaging thread.  The reply
 * is routed to disk_dispose_port, which frees the message automatically.
 */
void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, &disk_msg->hdr);
}

/*
 * Synchronous variant of disk_msg_send(): blocks on a private reply port
 * until disk_msg_core() has completed the request, then frees the message.
 */
void
disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
{
	struct lwkt_port rep_port;
	disk_msg_t disk_msg;
	lwkt_port_t port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
	port = &disk_msg_port;

	/* XXX could probably use curthread's built-in msgport */
	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;

	lwkt_sendmsg(port, &disk_msg->hdr);
	lwkt_waitmsg(&disk_msg->hdr, 0);
	objcache_put(disk_msg_cache, disk_msg);
}
576 577 /* 578 * Create a raw device for the dev_ops template (which is returned). Also 579 * create a slice and unit managed disk and overload the user visible 580 * device space with it. 581 * 582 * NOTE: The returned raw device is NOT a slice and unit managed device. 583 * It is an actual raw device representing the raw disk as specified by 584 * the passed dev_ops. The disk layer not only returns such a raw device, 585 * it also uses it internally when passing (modified) commands through. 586 */ 587 cdev_t 588 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops) 589 { 590 return _disk_create_named(NULL, unit, dp, raw_ops, 0); 591 } 592 593 cdev_t 594 disk_create_clone(int unit, struct disk *dp, struct dev_ops *raw_ops) 595 { 596 return _disk_create_named(NULL, unit, dp, raw_ops, 1); 597 } 598 599 cdev_t 600 disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops) 601 { 602 return _disk_create_named(name, unit, dp, raw_ops, 0); 603 } 604 605 cdev_t 606 disk_create_named_clone(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops) 607 { 608 return _disk_create_named(name, unit, dp, raw_ops, 1); 609 } 610 611 static cdev_t 612 _disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops, int clone) 613 { 614 cdev_t rawdev; 615 616 disk_debug(1, "disk_create (begin): %s%d\n", name, unit); 617 618 if (name) { 619 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit), 620 UID_ROOT, GID_OPERATOR, 0640, "%s", name); 621 } else { 622 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit), 623 UID_ROOT, GID_OPERATOR, 0640, 624 "%s%d", raw_ops->head.name, unit); 625 } 626 627 bzero(dp, sizeof(*dp)); 628 629 dp->d_rawdev = rawdev; 630 dp->d_raw_ops = raw_ops; 631 dp->d_dev_ops = &disk_ops; 632 633 if (name) { 634 if (clone) { 635 dp->d_cdev = make_only_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 636 dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640, 637 "%s", name); 638 } else { 639 
dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 640 dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640, 641 "%s", name); 642 } 643 } else { 644 if (clone) { 645 dp->d_cdev = make_only_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 646 dkmakewholedisk(unit), 647 UID_ROOT, GID_OPERATOR, 0640, 648 "%s%d", raw_ops->head.name, unit); 649 } else { 650 dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 651 dkmakewholedisk(unit), 652 UID_ROOT, GID_OPERATOR, 0640, 653 "%s%d", raw_ops->head.name, unit); 654 } 655 } 656 657 udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk"); 658 dp->d_cdev->si_disk = dp; 659 660 if (name) 661 dsched_disk_create_callback(dp, name, unit); 662 else 663 dsched_disk_create_callback(dp, raw_ops->head.name, unit); 664 665 lwkt_gettoken(&disklist_token); 666 LIST_INSERT_HEAD(&disklist, dp, d_list); 667 lwkt_reltoken(&disklist_token); 668 669 disk_debug(1, "disk_create (end): %s%d\n", 670 (name != NULL)?(name):(raw_ops->head.name), unit); 671 672 return (dp->d_rawdev); 673 } 674 675 int 676 disk_setdisktype(struct disk *disk, const char *type) 677 { 678 KKASSERT(disk != NULL); 679 680 disk->d_disktype = type; 681 return udev_dict_set_cstr(disk->d_cdev, "disk-type", __DECONST(char *, type)); 682 } 683 684 int 685 disk_getopencount(struct disk *disk) 686 { 687 return disk->d_opencount; 688 } 689 690 static void 691 _setdiskinfo(struct disk *disk, struct disk_info *info) 692 { 693 char *oldserialno; 694 695 oldserialno = disk->d_info.d_serialno; 696 bcopy(info, &disk->d_info, sizeof(disk->d_info)); 697 info = &disk->d_info; 698 699 disk_debug(1, 700 "_setdiskinfo: %s\n", 701 disk->d_cdev->si_name); 702 703 /* 704 * The serial number is duplicated so the caller can throw 705 * their copy away. 
706 */ 707 if (info->d_serialno && info->d_serialno[0] && 708 (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) { 709 info->d_serialno = kstrdup(info->d_serialno, M_TEMP); 710 disk_cleanserial(info->d_serialno); 711 if (disk->d_cdev) { 712 make_dev_alias(disk->d_cdev, "serno/%s", 713 info->d_serialno); 714 } 715 } else { 716 info->d_serialno = NULL; 717 } 718 if (oldserialno) 719 kfree(oldserialno, M_TEMP); 720 721 dsched_disk_update_callback(disk, info); 722 723 /* 724 * The caller may set d_media_size or d_media_blocks and we 725 * calculate the other. 726 */ 727 KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0); 728 if (info->d_media_size == 0 && info->d_media_blocks) { 729 info->d_media_size = (u_int64_t)info->d_media_blocks * 730 info->d_media_blksize; 731 } else if (info->d_media_size && info->d_media_blocks == 0 && 732 info->d_media_blksize) { 733 info->d_media_blocks = info->d_media_size / 734 info->d_media_blksize; 735 } 736 737 /* 738 * The si_* fields for rawdev are not set until after the 739 * disk_create() call, so someone using the cooked version 740 * of the raw device (i.e. da0s0) will not get the right 741 * si_iosize_max unless we fix it up here. 742 */ 743 if (disk->d_cdev && disk->d_rawdev && 744 disk->d_cdev->si_iosize_max == 0) { 745 disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max; 746 disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys; 747 disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best; 748 } 749 750 /* Add the serial number to the udev_dictionary */ 751 if (info->d_serialno) 752 udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno); 753 } 754 755 /* 756 * Disk drivers must call this routine when media parameters are available 757 * or have changed. 
758 */ 759 void 760 disk_setdiskinfo(struct disk *disk, struct disk_info *info) 761 { 762 _setdiskinfo(disk, info); 763 disk_msg_send(DISK_DISK_PROBE, disk, NULL); 764 disk_debug(1, 765 "disk_setdiskinfo: sent probe for %s\n", 766 disk->d_cdev->si_name); 767 } 768 769 void 770 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info) 771 { 772 _setdiskinfo(disk, info); 773 disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL); 774 disk_debug(1, 775 "disk_setdiskinfo_sync: sent probe for %s\n", 776 disk->d_cdev->si_name); 777 } 778 779 /* 780 * This routine is called when an adapter detaches. The higher level 781 * managed disk device is destroyed while the lower level raw device is 782 * released. 783 */ 784 void 785 disk_destroy(struct disk *disk) 786 { 787 dsched_disk_destroy_callback(disk); 788 disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL); 789 return; 790 } 791 792 int 793 disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno, u_int32_t *secsize) 794 { 795 struct partinfo pinfo; 796 int error; 797 798 bzero(&pinfo, sizeof(pinfo)); 799 error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0, 800 proc0.p_ucred, NULL); 801 if (error) 802 return (error); 803 804 if (pinfo.media_blksize == 0) 805 return (ENXIO); 806 807 if (blkno) /* XXX: make sure this reserved stuff is right */ 808 *blkno = pinfo.reserved_blocks + 809 pinfo.media_offset / pinfo.media_blksize; 810 if (secsize) 811 *secsize = pinfo.media_blksize; 812 if (size) 813 *size = (pinfo.media_blocks - pinfo.reserved_blocks); 814 815 return (0); 816 } 817 818 int 819 disk_dumpconf(cdev_t dev, u_int onoff) 820 { 821 struct dumperinfo di; 822 u_int64_t size, blkno; 823 u_int32_t secsize; 824 int error; 825 826 if (!onoff) 827 return set_dumper(NULL); 828 829 error = disk_dumpcheck(dev, &size, &blkno, &secsize); 830 831 if (error) 832 return ENXIO; 833 834 bzero(&di, sizeof(struct dumperinfo)); 835 di.dumper = diskdump; 836 di.priv = dev; 837 di.blocksize = secsize; 838 di.mediaoffset = blkno * 
DEV_BSIZE; 839 di.mediasize = size * DEV_BSIZE; 840 841 return set_dumper(&di); 842 } 843 844 void 845 disk_unprobe(struct disk *disk) 846 { 847 if (disk == NULL) 848 return; 849 850 disk_msg_send_sync(DISK_UNPROBE, disk, NULL); 851 } 852 853 void 854 disk_invalidate (struct disk *disk) 855 { 856 dsgone(&disk->d_slice); 857 } 858 859 struct disk * 860 disk_enumerate(struct disk *disk) 861 { 862 struct disk *dp; 863 864 lwkt_gettoken(&disklist_token); 865 if (!disk) 866 dp = (LIST_FIRST(&disklist)); 867 else 868 dp = (LIST_NEXT(disk, d_list)); 869 lwkt_reltoken(&disklist_token); 870 871 return dp; 872 } 873 874 static 875 int 876 sysctl_disks(SYSCTL_HANDLER_ARGS) 877 { 878 struct disk *disk; 879 int error, first; 880 881 disk = NULL; 882 first = 1; 883 884 while ((disk = disk_enumerate(disk))) { 885 if (!first) { 886 error = SYSCTL_OUT(req, " ", 1); 887 if (error) 888 return error; 889 } else { 890 first = 0; 891 } 892 error = SYSCTL_OUT(req, disk->d_rawdev->si_name, 893 strlen(disk->d_rawdev->si_name)); 894 if (error) 895 return error; 896 } 897 error = SYSCTL_OUT(req, "", 1); 898 return error; 899 } 900 901 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, 902 sysctl_disks, "A", "names of available disks"); 903 904 /* 905 * Open a disk device or partition. 906 */ 907 static 908 int 909 diskopen(struct dev_open_args *ap) 910 { 911 cdev_t dev = ap->a_head.a_dev; 912 struct disk *dp; 913 int error; 914 915 /* 916 * dp can't be NULL here XXX. 917 * 918 * d_slice will be NULL if setdiskinfo() has not been called yet. 919 * setdiskinfo() is typically called whether the disk is present 920 * or not (e.g. CD), but the base disk device is created first 921 * and there may be a race. 
922 */ 923 dp = dev->si_disk; 924 if (dp == NULL || dp->d_slice == NULL) 925 return (ENXIO); 926 error = 0; 927 928 /* 929 * Deal with open races 930 */ 931 get_mplock(); 932 while (dp->d_flags & DISKFLAG_LOCK) { 933 dp->d_flags |= DISKFLAG_WANTED; 934 error = tsleep(dp, PCATCH, "diskopen", hz); 935 if (error) { 936 rel_mplock(); 937 return (error); 938 } 939 } 940 dp->d_flags |= DISKFLAG_LOCK; 941 942 /* 943 * Open the underlying raw device. 944 */ 945 if (!dsisopen(dp->d_slice)) { 946 #if 0 947 if (!pdev->si_iosize_max) 948 pdev->si_iosize_max = dev->si_iosize_max; 949 #endif 950 error = dev_dopen(dp->d_rawdev, ap->a_oflags, 951 ap->a_devtype, ap->a_cred); 952 } 953 954 if (error) 955 goto out; 956 error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags, 957 &dp->d_slice, &dp->d_info); 958 if (!dsisopen(dp->d_slice)) { 959 dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype); 960 } 961 out: 962 dp->d_flags &= ~DISKFLAG_LOCK; 963 if (dp->d_flags & DISKFLAG_WANTED) { 964 dp->d_flags &= ~DISKFLAG_WANTED; 965 wakeup(dp); 966 } 967 rel_mplock(); 968 969 KKASSERT(dp->d_opencount >= 0); 970 /* If the open was successful, bump open count */ 971 if (error == 0) 972 atomic_add_int(&dp->d_opencount, 1); 973 974 return(error); 975 } 976 977 /* 978 * Close a disk device or partition 979 */ 980 static 981 int 982 diskclose(struct dev_close_args *ap) 983 { 984 cdev_t dev = ap->a_head.a_dev; 985 struct disk *dp; 986 int error; 987 int lcount; 988 989 error = 0; 990 dp = dev->si_disk; 991 992 /* 993 * The cdev_t represents the disk/slice/part. The shared 994 * dp structure governs all cdevs associated with the disk. 995 * 996 * As a safety only close the underlying raw device on the last 997 * close the disk device if our tracking of the slices/partitions 998 * also indicates nothing is open. 
999 */ 1000 KKASSERT(dp->d_opencount >= 1); 1001 lcount = atomic_fetchadd_int(&dp->d_opencount, -1); 1002 1003 get_mplock(); 1004 dsclose(dev, ap->a_devtype, dp->d_slice); 1005 if (lcount <= 1 && !dsisopen(dp->d_slice)) { 1006 error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype); 1007 } 1008 rel_mplock(); 1009 return (error); 1010 } 1011 1012 /* 1013 * First execute the ioctl on the disk device, and if it isn't supported 1014 * try running it on the backing device. 1015 */ 1016 static 1017 int 1018 diskioctl(struct dev_ioctl_args *ap) 1019 { 1020 cdev_t dev = ap->a_head.a_dev; 1021 struct disk *dp; 1022 int error; 1023 u_int u; 1024 1025 dp = dev->si_disk; 1026 if (dp == NULL) 1027 return (ENXIO); 1028 1029 devfs_debug(DEVFS_DEBUG_DEBUG, 1030 "diskioctl: cmd is: %lx (name: %s)\n", 1031 ap->a_cmd, dev->si_name); 1032 devfs_debug(DEVFS_DEBUG_DEBUG, 1033 "diskioctl: &dp->d_slice is: %p, %p\n", 1034 &dp->d_slice, dp->d_slice); 1035 1036 if (ap->a_cmd == DIOCGKERNELDUMP) { 1037 u = *(u_int *)ap->a_data; 1038 return disk_dumpconf(dev, u); 1039 } 1040 1041 if (&dp->d_slice == NULL || dp->d_slice == NULL || 1042 ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) && 1043 dkslice(dev) == WHOLE_DISK_SLICE)) { 1044 error = ENOIOCTL; 1045 } else { 1046 get_mplock(); 1047 error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag, 1048 &dp->d_slice, &dp->d_info); 1049 rel_mplock(); 1050 } 1051 1052 if (error == ENOIOCTL) { 1053 error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data, 1054 ap->a_fflag, ap->a_cred, NULL); 1055 } 1056 return (error); 1057 } 1058 1059 /* 1060 * Execute strategy routine 1061 */ 1062 static 1063 int 1064 diskstrategy(struct dev_strategy_args *ap) 1065 { 1066 cdev_t dev = ap->a_head.a_dev; 1067 struct bio *bio = ap->a_bio; 1068 struct bio *nbio; 1069 struct disk *dp; 1070 1071 dp = dev->si_disk; 1072 1073 if (dp == NULL) { 1074 bio->bio_buf->b_error = ENXIO; 1075 bio->bio_buf->b_flags |= B_ERROR; 1076 biodone(bio); 1077 return(0); 1078 } 1079 
KKASSERT(dev->si_disk == dp); 1080 1081 /* 1082 * The dscheck() function will also transform the slice relative 1083 * block number i.e. bio->bio_offset into a block number that can be 1084 * passed directly to the underlying raw device. If dscheck() 1085 * returns NULL it will have handled the bio for us (e.g. EOF 1086 * or error due to being beyond the device size). 1087 */ 1088 if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) { 1089 dsched_queue(dp, nbio); 1090 } else { 1091 biodone(bio); 1092 } 1093 return(0); 1094 } 1095 1096 /* 1097 * Return the partition size in ?blocks? 1098 */ 1099 static 1100 int 1101 diskpsize(struct dev_psize_args *ap) 1102 { 1103 cdev_t dev = ap->a_head.a_dev; 1104 struct disk *dp; 1105 1106 dp = dev->si_disk; 1107 if (dp == NULL) 1108 return(ENODEV); 1109 1110 ap->a_result = dssize(dev, &dp->d_slice); 1111 1112 if ((ap->a_result == -1) && 1113 (dp->d_info.d_dsflags & DSO_DEVICEMAPPER)) { 1114 ap->a_head.a_dev = dp->d_rawdev; 1115 return dev_doperate(&ap->a_head); 1116 } 1117 return(0); 1118 } 1119 1120 int 1121 diskdump(struct dev_dump_args *ap) 1122 { 1123 cdev_t dev = ap->a_head.a_dev; 1124 struct disk *dp = dev->si_disk; 1125 u_int64_t size, offset; 1126 int error; 1127 1128 error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize); 1129 /* XXX: this should probably go in disk_dumpcheck somehow */ 1130 if (ap->a_length != 0) { 1131 size *= DEV_BSIZE; 1132 offset = ap->a_blkno * DEV_BSIZE; 1133 if ((ap->a_offset < offset) || 1134 (ap->a_offset + ap->a_length - offset > size)) { 1135 kprintf("Attempt to write outside dump device boundaries.\n"); 1136 error = ENOSPC; 1137 } 1138 } 1139 1140 if (error == 0) { 1141 ap->a_head.a_dev = dp->d_rawdev; 1142 error = dev_doperate(&ap->a_head); 1143 } 1144 1145 return(error); 1146 } 1147 1148 1149 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD, 1150 0, sizeof(struct diskslices), "sizeof(struct diskslices)"); 1151 1152 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD, 
1153 0, sizeof(struct disk), "sizeof(struct disk)"); 1154 1155 /* 1156 * Reorder interval for burst write allowance and minor write 1157 * allowance. 1158 * 1159 * We always want to trickle some writes in to make use of the 1160 * disk's zone cache. Bursting occurs on a longer interval and only 1161 * runningbufspace is well over the hirunningspace limit. 1162 */ 1163 int bioq_reorder_burst_interval = 60; /* should be multiple of minor */ 1164 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval, 1165 CTLFLAG_RW, &bioq_reorder_burst_interval, 0, ""); 1166 int bioq_reorder_minor_interval = 5; 1167 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval, 1168 CTLFLAG_RW, &bioq_reorder_minor_interval, 0, ""); 1169 1170 int bioq_reorder_burst_bytes = 3000000; 1171 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes, 1172 CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, ""); 1173 int bioq_reorder_minor_bytes = 262144; 1174 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes, 1175 CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, ""); 1176 1177 1178 /* 1179 * Order I/Os. Generally speaking this code is designed to make better 1180 * use of drive zone caches. A drive zone cache can typically track linear 1181 * reads or writes for around 16 zones simultaniously. 1182 * 1183 * Read prioritization issues: It is possible for hundreds of megabytes worth 1184 * of writes to be queued asynchronously. This creates a huge bottleneck 1185 * for reads which reduce read bandwidth to a trickle. 1186 * 1187 * To solve this problem we generally reorder reads before writes. 1188 * 1189 * However, a large number of random reads can also starve writes and 1190 * make poor use of the drive zone cache so we allow writes to trickle 1191 * in every N reads. 1192 */ 1193 void 1194 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio) 1195 { 1196 /* 1197 * The BIO wants to be ordered. Adding to the tail also 1198 * causes transition to be set to NULL, forcing the ordering 1199 * of all prior I/O's. 
1200 */ 1201 if (bio->bio_buf->b_flags & B_ORDERED) { 1202 bioq_insert_tail(bioq, bio); 1203 return; 1204 } 1205 1206 switch(bio->bio_buf->b_cmd) { 1207 case BUF_CMD_READ: 1208 if (bioq->transition) { 1209 /* 1210 * Insert before the first write. Bleedover writes 1211 * based on reorder intervals to prevent starvation. 1212 */ 1213 TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act); 1214 ++bioq->reorder; 1215 if (bioq->reorder % bioq_reorder_minor_interval == 0) { 1216 bioqwritereorder(bioq); 1217 if (bioq->reorder >= 1218 bioq_reorder_burst_interval) { 1219 bioq->reorder = 0; 1220 } 1221 } 1222 } else { 1223 /* 1224 * No writes queued (or ordering was forced), 1225 * insert at tail. 1226 */ 1227 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act); 1228 } 1229 break; 1230 case BUF_CMD_WRITE: 1231 /* 1232 * Writes are always appended. If no writes were previously 1233 * queued or an ordered tail insertion occured the transition 1234 * field will be NULL. 1235 */ 1236 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act); 1237 if (bioq->transition == NULL) 1238 bioq->transition = bio; 1239 break; 1240 default: 1241 /* 1242 * All other request types are forced to be ordered. 1243 */ 1244 bioq_insert_tail(bioq, bio); 1245 break; 1246 } 1247 } 1248 1249 /* 1250 * Move the read-write transition point to prevent reads from 1251 * completely starving our writes. This brings a number of writes into 1252 * the fold every N reads. 1253 * 1254 * We bring a few linear writes into the fold on a minor interval 1255 * and we bring a non-linear burst of writes into the fold on a major 1256 * interval. Bursting only occurs if runningbufspace is really high 1257 * (typically from syncs, fsyncs, or HAMMER flushes). 
1258 */ 1259 static 1260 void 1261 bioqwritereorder(struct bio_queue_head *bioq) 1262 { 1263 struct bio *bio; 1264 off_t next_offset; 1265 size_t left; 1266 size_t n; 1267 int check_off; 1268 1269 if (bioq->reorder < bioq_reorder_burst_interval || 1270 !buf_runningbufspace_severe()) { 1271 left = (size_t)bioq_reorder_minor_bytes; 1272 check_off = 1; 1273 } else { 1274 left = (size_t)bioq_reorder_burst_bytes; 1275 check_off = 0; 1276 } 1277 1278 next_offset = bioq->transition->bio_offset; 1279 while ((bio = bioq->transition) != NULL && 1280 (check_off == 0 || next_offset == bio->bio_offset) 1281 ) { 1282 n = bio->bio_buf->b_bcount; 1283 next_offset = bio->bio_offset + n; 1284 bioq->transition = TAILQ_NEXT(bio, bio_act); 1285 if (left < n) 1286 break; 1287 left -= n; 1288 } 1289 } 1290 1291 /* 1292 * Bounds checking against the media size, used for the raw partition. 1293 * secsize, mediasize and b_blkno must all be the same units. 1294 * Possibly this has to be DEV_BSIZE (512). 1295 */ 1296 int 1297 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize) 1298 { 1299 struct buf *bp = bio->bio_buf; 1300 int64_t sz; 1301 1302 sz = howmany(bp->b_bcount, secsize); 1303 1304 if (bio->bio_offset/DEV_BSIZE + sz > mediasize) { 1305 sz = mediasize - bio->bio_offset/DEV_BSIZE; 1306 if (sz == 0) { 1307 /* If exactly at end of disk, return EOF. */ 1308 bp->b_resid = bp->b_bcount; 1309 return 0; 1310 } 1311 if (sz < 0) { 1312 /* If past end of disk, return EINVAL. */ 1313 bp->b_error = EINVAL; 1314 return 0; 1315 } 1316 /* Otherwise, truncate request. */ 1317 bp->b_bcount = sz * secsize; 1318 } 1319 1320 return 1; 1321 } 1322 1323 /* 1324 * Disk error is the preface to plaintive error messages 1325 * about failing disk transfers. It prints messages of the form 1326 1327 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d) 1328 1329 * if the offset of the error in the transfer and a disk label 1330 * are both available. 
blkdone should be -1 if the position of the error 1331 * is unknown; the disklabel pointer may be null from drivers that have not 1332 * been converted to use them. The message is printed with kprintf 1333 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority. 1334 * The message should be completed (with at least a newline) with kprintf 1335 * or log(-1, ...), respectively. There is no trailing space. 1336 */ 1337 void 1338 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt) 1339 { 1340 struct buf *bp = bio->bio_buf; 1341 const char *term; 1342 1343 switch(bp->b_cmd) { 1344 case BUF_CMD_READ: 1345 term = "read"; 1346 break; 1347 case BUF_CMD_WRITE: 1348 term = "write"; 1349 break; 1350 default: 1351 term = "access"; 1352 break; 1353 } 1354 kprintf("%s: %s %sing ", dev->si_name, what, term); 1355 kprintf("offset %012llx for %d", 1356 (long long)bio->bio_offset, 1357 bp->b_bcount); 1358 1359 if (donecnt) 1360 kprintf(" (%d bytes completed)", donecnt); 1361 } 1362 1363 /* 1364 * Locate a disk device 1365 */ 1366 cdev_t 1367 disk_locate(const char *devname) 1368 { 1369 return devfs_find_device_by_name(devname); 1370 } 1371 1372 void 1373 disk_config(void *arg) 1374 { 1375 disk_msg_send_sync(DISK_SYNC, NULL, NULL); 1376 } 1377 1378 static void 1379 disk_init(void) 1380 { 1381 struct thread* td_core; 1382 1383 disk_msg_cache = objcache_create("disk-msg-cache", 0, 0, 1384 NULL, NULL, NULL, 1385 objcache_malloc_alloc, 1386 objcache_malloc_free, 1387 &disk_msg_malloc_args); 1388 1389 lwkt_token_init(&disklist_token, "disks"); 1390 1391 /* 1392 * Initialize the reply-only port which acts as a message drain 1393 */ 1394 lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply); 1395 1396 lwkt_gettoken(&disklist_token); 1397 lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL, 1398 0, -1, "disk_msg_core"); 1399 tsleep(td_core, 0, "diskcore", 0); 1400 lwkt_reltoken(&disklist_token); 1401 } 1402 1403 static void 1404 
disk_uninit(void) 1405 { 1406 objcache_destroy(disk_msg_cache); 1407 } 1408 1409 /* 1410 * Clean out illegal characters in serial numbers. 1411 */ 1412 static void 1413 disk_cleanserial(char *serno) 1414 { 1415 char c; 1416 1417 while ((c = *serno) != 0) { 1418 if (c >= 'a' && c <= 'z') 1419 ; 1420 else if (c >= 'A' && c <= 'Z') 1421 ; 1422 else if (c >= '0' && c <= '9') 1423 ; 1424 else if (c == '-' || c == '@' || c == '+' || c == '.') 1425 ; 1426 else 1427 c = '_'; 1428 *serno++= c; 1429 } 1430 } 1431 1432 TUNABLE_INT("kern.disk_debug", &disk_debug_enable); 1433 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable, 1434 0, "Enable subr_disk debugging"); 1435 1436 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL); 1437 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL); 1438