1 /* 2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * and Alex Hornung <ahornung@gmail.com> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * 3. Neither the name of The DragonFly Project nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific, prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * ---------------------------------------------------------------------------- 36 * "THE BEER-WARE LICENSE" (Revision 42): 37 * <phk@FreeBSD.ORG> wrote this file. 
As long as you retain this notice you 38 * can do whatever you want with this stuff. If we meet some day, and you think 39 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 40 * ---------------------------------------------------------------------------- 41 * 42 * Copyright (c) 1982, 1986, 1988, 1993 43 * The Regents of the University of California. All rights reserved. 44 * (c) UNIX System Laboratories, Inc. 45 * All or some portions of this file are derived from material licensed 46 * to the University of California by American Telephone and Telegraph 47 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 48 * the permission of UNIX System Laboratories, Inc. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 3. All advertising materials mentioning features or use of this software 59 * must display the following acknowledgement: 60 * This product includes software developed by the University of 61 * California, Berkeley and its contributors. 62 * 4. Neither the name of the University nor the names of its contributors 63 * may be used to endorse or promote products derived from this software 64 * without specific prior written permission. 65 * 66 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 69 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 76 * SUCH DAMAGE. 77 * 78 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94 79 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $ 80 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $ 81 */ 82 83 #include <sys/param.h> 84 #include <sys/systm.h> 85 #include <sys/kernel.h> 86 #include <sys/proc.h> 87 #include <sys/sysctl.h> 88 #include <sys/buf.h> 89 #include <sys/conf.h> 90 #include <sys/disklabel.h> 91 #include <sys/disklabel32.h> 92 #include <sys/disklabel64.h> 93 #include <sys/diskslice.h> 94 #include <sys/diskmbr.h> 95 #include <sys/disk.h> 96 #include <sys/kerneldump.h> 97 #include <sys/malloc.h> 98 #include <machine/md_var.h> 99 #include <sys/ctype.h> 100 #include <sys/syslog.h> 101 #include <sys/device.h> 102 #include <sys/msgport.h> 103 #include <sys/devfs.h> 104 #include <sys/thread.h> 105 #include <sys/dsched.h> 106 #include <sys/queue.h> 107 #include <sys/lock.h> 108 #include <sys/udev.h> 109 #include <sys/uuid.h> 110 111 #include <sys/buf2.h> 112 #include <sys/mplock2.h> 113 #include <sys/msgport2.h> 114 #include <sys/thread2.h> 115 116 static MALLOC_DEFINE(M_DISK, "disk", "disk data"); 117 static int disk_debug_enable = 0; 118 119 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t); 120 static void disk_msg_core(void *); 121 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe); 122 static void disk_probe(struct disk *dp, int reprobe); 123 static 
void _setdiskinfo(struct disk *disk, struct disk_info *info); 124 static void bioqwritereorder(struct bio_queue_head *bioq); 125 static void disk_cleanserial(char *serno); 126 static int disk_debug(int, char *, ...) __printflike(2, 3); 127 static cdev_t _disk_create_named(const char *name, int unit, struct disk *dp, 128 struct dev_ops *raw_ops, int clone); 129 130 static d_open_t diskopen; 131 static d_close_t diskclose; 132 static d_ioctl_t diskioctl; 133 static d_strategy_t diskstrategy; 134 static d_psize_t diskpsize; 135 static d_dump_t diskdump; 136 137 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist); 138 static struct lwkt_token disklist_token; 139 140 static struct dev_ops disk_ops = { 141 { "disk", 0, D_DISK | D_MPSAFE | D_TRACKCLOSE }, 142 .d_open = diskopen, 143 .d_close = diskclose, 144 .d_read = physread, 145 .d_write = physwrite, 146 .d_ioctl = diskioctl, 147 .d_strategy = diskstrategy, 148 .d_dump = diskdump, 149 .d_psize = diskpsize, 150 }; 151 152 static struct objcache *disk_msg_cache; 153 154 struct objcache_malloc_args disk_msg_malloc_args = { 155 sizeof(struct disk_msg), M_DISK }; 156 157 static struct lwkt_port disk_dispose_port; 158 static struct lwkt_port disk_msg_port; 159 160 static int 161 disk_debug(int level, char *fmt, ...) 162 { 163 __va_list ap; 164 165 __va_start(ap, fmt); 166 if (level <= disk_debug_enable) 167 kvprintf(fmt, ap); 168 __va_end(ap); 169 170 return 0; 171 } 172 173 static int 174 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe) 175 { 176 struct disk_info *info = &dp->d_info; 177 struct diskslice *sp = &dp->d_slice->dss_slices[slice]; 178 disklabel_ops_t ops; 179 struct partinfo part; 180 const char *msg; 181 char uuid_buf[128]; 182 cdev_t ndev; 183 int sno; 184 u_int i; 185 186 disk_debug(2, 187 "disk_probe_slice (begin): %s (%s)\n", 188 dev->si_name, dp->d_cdev->si_name); 189 190 sno = slice ? 
slice - 1 : 0; 191 192 ops = &disklabel32_ops; 193 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info); 194 if (msg && !strcmp(msg, "no disk label")) { 195 ops = &disklabel64_ops; 196 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info); 197 } 198 199 if (msg == NULL) { 200 if (slice != WHOLE_DISK_SLICE) 201 ops->op_adjust_label_reserved(dp->d_slice, slice, sp); 202 else 203 sp->ds_reserved = 0; 204 205 sp->ds_ops = ops; 206 for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) { 207 ops->op_loadpartinfo(sp->ds_label, i, &part); 208 if (part.fstype) { 209 if (reprobe && 210 (ndev = devfs_find_device_by_name("%s%c", 211 dev->si_name, 'a' + i)) 212 ) { 213 /* 214 * Device already exists and 215 * is still valid. 216 */ 217 ndev->si_flags |= SI_REPROBE_TEST; 218 219 /* 220 * Destroy old UUID alias 221 */ 222 destroy_dev_alias(ndev, "part-by-uuid/*"); 223 224 /* Create UUID alias */ 225 if (!kuuid_is_nil(&part.storage_uuid)) { 226 snprintf_uuid(uuid_buf, 227 sizeof(uuid_buf), 228 &part.storage_uuid); 229 make_dev_alias(ndev, 230 "part-by-uuid/%s", 231 uuid_buf); 232 udev_dict_set_cstr(ndev, "uuid", uuid_buf); 233 } 234 } else { 235 ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 236 dkmakeminor(dkunit(dp->d_cdev), 237 slice, i), 238 UID_ROOT, GID_OPERATOR, 0640, 239 "%s%c", dev->si_name, 'a'+ i); 240 ndev->si_parent = dev; 241 ndev->si_iosize_max = dev->si_iosize_max; 242 ndev->si_disk = dp; 243 udev_dict_set_cstr(ndev, "subsystem", "disk"); 244 /* Inherit parent's disk type */ 245 if (dp->d_disktype) { 246 udev_dict_set_cstr(ndev, "disk-type", 247 __DECONST(char *, dp->d_disktype)); 248 } 249 250 /* Create serno alias */ 251 if (dp->d_info.d_serialno) { 252 make_dev_alias(ndev, 253 "serno/%s.s%d%c", 254 dp->d_info.d_serialno, 255 sno, 'a' + i); 256 } 257 258 /* Create UUID alias */ 259 if (!kuuid_is_nil(&part.storage_uuid)) { 260 snprintf_uuid(uuid_buf, 261 sizeof(uuid_buf), 262 &part.storage_uuid); 263 make_dev_alias(ndev, 264 
"part-by-uuid/%s", 265 uuid_buf); 266 udev_dict_set_cstr(ndev, "uuid", uuid_buf); 267 } 268 ndev->si_flags |= SI_REPROBE_TEST; 269 } 270 } 271 } 272 } else if (info->d_dsflags & DSO_COMPATLABEL) { 273 msg = NULL; 274 if (sp->ds_size >= 0x100000000ULL) 275 ops = &disklabel64_ops; 276 else 277 ops = &disklabel32_ops; 278 sp->ds_label = ops->op_clone_label(info, sp); 279 } else { 280 if (sp->ds_type == DOSPTYP_386BSD || /* XXX */ 281 sp->ds_type == DOSPTYP_NETBSD || 282 sp->ds_type == DOSPTYP_OPENBSD) { 283 log(LOG_WARNING, "%s: cannot find label (%s)\n", 284 dev->si_name, msg); 285 } 286 287 if (sp->ds_label.opaque != NULL && sp->ds_ops != NULL) { 288 /* Clear out old label - it's not around anymore */ 289 disk_debug(2, 290 "disk_probe_slice: clear out old diskabel on %s\n", 291 dev->si_name); 292 293 sp->ds_ops->op_freedisklabel(&sp->ds_label); 294 sp->ds_ops = NULL; 295 } 296 } 297 298 if (msg == NULL) { 299 sp->ds_wlabel = FALSE; 300 } 301 302 return (msg ? EINVAL : 0); 303 } 304 305 /* 306 * This routine is only called for newly minted drives or to reprobe 307 * a drive with no open slices. disk_probe_slice() is called directly 308 * when reprobing partition changes within slices. 309 */ 310 static void 311 disk_probe(struct disk *dp, int reprobe) 312 { 313 struct disk_info *info = &dp->d_info; 314 cdev_t dev = dp->d_cdev; 315 cdev_t ndev; 316 int error, i, sno; 317 struct diskslices *osp; 318 struct diskslice *sp; 319 char uuid_buf[128]; 320 321 KKASSERT (info->d_media_blksize != 0); 322 323 osp = dp->d_slice; 324 dp->d_slice = dsmakeslicestruct(BASE_SLICE, info); 325 disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name); 326 327 error = mbrinit(dev, info, &(dp->d_slice)); 328 if (error) { 329 dsgone(&osp); 330 return; 331 } 332 333 for (i = 0; i < dp->d_slice->dss_nslices; i++) { 334 /* 335 * Ignore the whole-disk slice, it has already been created. 
336 */ 337 if (i == WHOLE_DISK_SLICE) 338 continue; 339 340 #if 1 341 /* 342 * Ignore the compatibility slice s0 if it's a device mapper 343 * volume. 344 */ 345 if ((i == COMPATIBILITY_SLICE) && 346 (info->d_dsflags & DSO_DEVICEMAPPER)) 347 continue; 348 #endif 349 350 sp = &dp->d_slice->dss_slices[i]; 351 352 /* 353 * Handle s0. s0 is a compatibility slice if there are no 354 * other slices and it has not otherwise been set up, else 355 * we ignore it. 356 */ 357 if (i == COMPATIBILITY_SLICE) { 358 sno = 0; 359 if (sp->ds_type == 0 && 360 dp->d_slice->dss_nslices == BASE_SLICE) { 361 sp->ds_size = info->d_media_blocks; 362 sp->ds_reserved = 0; 363 } 364 } else { 365 sno = i - 1; 366 sp->ds_reserved = 0; 367 } 368 369 /* 370 * Ignore 0-length slices 371 */ 372 if (sp->ds_size == 0) 373 continue; 374 375 if (reprobe && 376 (ndev = devfs_find_device_by_name("%ss%d", 377 dev->si_name, sno))) { 378 /* 379 * Device already exists and is still valid 380 */ 381 ndev->si_flags |= SI_REPROBE_TEST; 382 383 /* 384 * Destroy old UUID alias 385 */ 386 destroy_dev_alias(ndev, "slice-by-uuid/*"); 387 388 /* Create UUID alias */ 389 if (!kuuid_is_nil(&sp->ds_stor_uuid)) { 390 snprintf_uuid(uuid_buf, sizeof(uuid_buf), 391 &sp->ds_stor_uuid); 392 make_dev_alias(ndev, "slice-by-uuid/%s", 393 uuid_buf); 394 } 395 } else { 396 /* 397 * Else create new device 398 */ 399 ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops, 400 dkmakewholeslice(dkunit(dev), i), 401 UID_ROOT, GID_OPERATOR, 0640, 402 (info->d_dsflags & DSO_DEVICEMAPPER)? 
403 "%s.s%d" : "%ss%d", dev->si_name, sno); 404 ndev->si_parent = dev; 405 ndev->si_iosize_max = dev->si_iosize_max; 406 udev_dict_set_cstr(ndev, "subsystem", "disk"); 407 /* Inherit parent's disk type */ 408 if (dp->d_disktype) { 409 udev_dict_set_cstr(ndev, "disk-type", 410 __DECONST(char *, dp->d_disktype)); 411 } 412 413 /* Create serno alias */ 414 if (dp->d_info.d_serialno) { 415 make_dev_alias(ndev, "serno/%s.s%d", 416 dp->d_info.d_serialno, sno); 417 } 418 419 /* Create UUID alias */ 420 if (!kuuid_is_nil(&sp->ds_stor_uuid)) { 421 snprintf_uuid(uuid_buf, sizeof(uuid_buf), 422 &sp->ds_stor_uuid); 423 make_dev_alias(ndev, "slice-by-uuid/%s", 424 uuid_buf); 425 } 426 427 ndev->si_disk = dp; 428 ndev->si_flags |= SI_REPROBE_TEST; 429 } 430 sp->ds_dev = ndev; 431 432 /* 433 * Probe appropriate slices for a disklabel 434 * 435 * XXX slice type 1 used by our gpt probe code. 436 * XXX slice type 0 used by mbr compat slice. 437 */ 438 if (sp->ds_type == DOSPTYP_386BSD || 439 sp->ds_type == DOSPTYP_NETBSD || 440 sp->ds_type == DOSPTYP_OPENBSD || 441 sp->ds_type == 0 || 442 sp->ds_type == 1) { 443 if (dp->d_slice->dss_first_bsd_slice == 0) 444 dp->d_slice->dss_first_bsd_slice = i; 445 disk_probe_slice(dp, ndev, i, reprobe); 446 } 447 } 448 dsgone(&osp); 449 disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name); 450 } 451 452 453 static void 454 disk_msg_core(void *arg) 455 { 456 struct disk *dp; 457 struct diskslice *sp; 458 disk_msg_t msg; 459 int run; 460 461 lwkt_gettoken(&disklist_token); 462 lwkt_initport_thread(&disk_msg_port, curthread); 463 wakeup(curthread); /* synchronous startup */ 464 lwkt_reltoken(&disklist_token); 465 466 get_mplock(); /* not mpsafe yet? 
*/ 467 run = 1; 468 469 while (run) { 470 msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0); 471 472 switch (msg->hdr.u.ms_result) { 473 case DISK_DISK_PROBE: 474 dp = (struct disk *)msg->load; 475 disk_debug(1, 476 "DISK_DISK_PROBE: %s\n", 477 dp->d_cdev->si_name); 478 disk_probe(dp, 0); 479 break; 480 case DISK_DISK_DESTROY: 481 dp = (struct disk *)msg->load; 482 disk_debug(1, 483 "DISK_DISK_DESTROY: %s\n", 484 dp->d_cdev->si_name); 485 devfs_destroy_related(dp->d_cdev); 486 destroy_dev(dp->d_cdev); 487 destroy_only_dev(dp->d_rawdev); 488 lwkt_gettoken(&disklist_token); 489 LIST_REMOVE(dp, d_list); 490 lwkt_reltoken(&disklist_token); 491 if (dp->d_info.d_serialno) { 492 kfree(dp->d_info.d_serialno, M_TEMP); 493 dp->d_info.d_serialno = NULL; 494 } 495 break; 496 case DISK_UNPROBE: 497 dp = (struct disk *)msg->load; 498 disk_debug(1, 499 "DISK_DISK_UNPROBE: %s\n", 500 dp->d_cdev->si_name); 501 devfs_destroy_related(dp->d_cdev); 502 break; 503 case DISK_SLICE_REPROBE: 504 dp = (struct disk *)msg->load; 505 sp = (struct diskslice *)msg->load2; 506 devfs_clr_related_flag(sp->ds_dev, 507 SI_REPROBE_TEST); 508 disk_debug(1, 509 "DISK_SLICE_REPROBE: %s\n", 510 sp->ds_dev->si_name); 511 disk_probe_slice(dp, sp->ds_dev, 512 dkslice(sp->ds_dev), 1); 513 devfs_destroy_related_without_flag( 514 sp->ds_dev, SI_REPROBE_TEST); 515 break; 516 case DISK_DISK_REPROBE: 517 dp = (struct disk *)msg->load; 518 devfs_clr_related_flag(dp->d_cdev, SI_REPROBE_TEST); 519 disk_debug(1, 520 "DISK_DISK_REPROBE: %s\n", 521 dp->d_cdev->si_name); 522 disk_probe(dp, 1); 523 devfs_destroy_related_without_flag( 524 dp->d_cdev, SI_REPROBE_TEST); 525 break; 526 case DISK_SYNC: 527 disk_debug(1, "DISK_SYNC\n"); 528 break; 529 default: 530 devfs_debug(DEVFS_DEBUG_WARNING, 531 "disk_msg_core: unknown message " 532 "received at core\n"); 533 break; 534 } 535 lwkt_replymsg(&msg->hdr, 0); 536 } 537 lwkt_exit(); 538 } 539 540 541 /* 542 * Acts as a message drain. 
 * Any message that is replied to here gets
 * destroyed and the memory freed.
 */
static void
disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
{
	objcache_put(disk_msg_cache, msg);
}


/*
 * Send an asynchronous command to the disk management thread.
 *
 * cmd is one of the DISK_* message codes, load/load2 are command-specific
 * payload pointers.  The reply is routed to disk_dispose_port, so the
 * message is automatically freed when the thread replies (see
 * disk_msg_autofree_reply above); the caller must not touch it again.
 */
void
disk_msg_send(uint32_t cmd, void *load, void *load2)
{
	disk_msg_t disk_msg;
	lwkt_port_t port = &disk_msg_port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);

	lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;
	KKASSERT(port);
	lwkt_sendmsg(port, &disk_msg->hdr);
}

/*
 * Send a command to the disk management thread and block until it has
 * been processed.  The reply comes back to a temporary thread port and
 * the message is freed here after the wait completes.
 */
void
disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
{
	struct lwkt_port rep_port;
	disk_msg_t disk_msg;
	lwkt_port_t port;

	disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
	port = &disk_msg_port;

	/* XXX could probably use curthread's built-in msgport */
	lwkt_initport_thread(&rep_port, curthread);
	lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);

	disk_msg->hdr.u.ms_result = cmd;
	disk_msg->load = load;
	disk_msg->load2 = load2;

	lwkt_sendmsg(port, &disk_msg->hdr);
	lwkt_waitmsg(&disk_msg->hdr, 0);
	objcache_put(disk_msg_cache, disk_msg);
}

/*
 * Create a raw device for the dev_ops template (which is returned).  Also
 * create a slice and unit managed disk and overload the user visible
 * device space with it.
 *
 * NOTE: The returned raw device is NOT a slice and unit managed device.
 * It is an actual raw device representing the raw disk as specified by
 * the passed dev_ops.  The disk layer not only returns such a raw device,
 * it also uses it internally when passing (modified) commands through.
 */
cdev_t
disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 0);
}

cdev_t
disk_create_clone(int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(NULL, unit, dp, raw_ops, 1);
}

cdev_t
disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 0);
}

cdev_t
disk_create_named_clone(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops)
{
	return _disk_create_named(name, unit, dp, raw_ops, 1);
}

/*
 * Common backend for the four disk_create*() entry points above.
 *
 * Creates the raw device from raw_ops, zeroes and initializes *dp, then
 * creates the managed (covering) whole-disk device using the disk_ops
 * template.  When 'name' is NULL the device is named from the raw_ops
 * name plus unit number; when 'clone' is nonzero the managed device is
 * created with make_only_dev_covering (no devfs auto-clone entry).
 * The new disk is registered on the global disklist.  Returns the raw
 * device, not the managed one (see the NOTE above).
 */
static cdev_t
_disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops, int clone)
{
	cdev_t rawdev;

	/*
	 * NOTE(review): 'name' may be NULL here (disk_create() path); this
	 * relies on the kernel printf rendering a NULL %s harmlessly —
	 * confirm, or prefer raw_ops->head.name as the end-debug does.
	 */
	disk_debug(1, "disk_create (begin): %s%d\n", name, unit);

	if (name) {
		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
		    UID_ROOT, GID_OPERATOR, 0640, "%s", name);
	} else {
		rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
		    UID_ROOT, GID_OPERATOR, 0640,
		    "%s%d", raw_ops->head.name, unit);
	}

	bzero(dp, sizeof(*dp));

	dp->d_rawdev = rawdev;
	dp->d_raw_ops = raw_ops;
	dp->d_dev_ops = &disk_ops;

	if (name) {
		if (clone) {
			dp->d_cdev = make_only_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops,
			    dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640,
			    "%s", name);
		} else {
			dp->d_cdev = make_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops,
			    dkmakewholedisk(unit), UID_ROOT, GID_OPERATOR, 0640,
			    "%s", name);
		}
	} else {
		if (clone) {
			dp->d_cdev = make_only_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops,
			    dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);
		} else {
			dp->d_cdev = make_dev_covering(&disk_ops,
			    dp->d_rawdev->si_ops,
			    dkmakewholedisk(unit),
			    UID_ROOT, GID_OPERATOR, 0640,
			    "%s%d", raw_ops->head.name, unit);
		}
	}

	udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
	dp->d_cdev->si_disk = dp;

	if (name)
		dsched_disk_create_callback(dp, name, unit);
	else
		dsched_disk_create_callback(dp, raw_ops->head.name, unit);

	lwkt_gettoken(&disklist_token);
	LIST_INSERT_HEAD(&disklist, dp, d_list);
	lwkt_reltoken(&disklist_token);

	disk_debug(1, "disk_create (end): %s%d\n",
	    (name != NULL)?(name):(raw_ops->head.name), unit);

	return (dp->d_rawdev);
}

/*
 * Record the disk type string (e.g. for udev consumers) on the managed
 * device's udev dictionary.  The string is NOT copied; the caller must
 * keep it valid for the life of the disk.
 */
int
disk_setdisktype(struct disk *disk, const char *type)
{
	KKASSERT(disk != NULL);

	disk->d_disktype = type;
	return udev_dict_set_cstr(disk->d_cdev, "disk-type", __DECONST(char *, type));
}

/*
 * Return the current open count of the managed disk device.
 */
int
disk_getopencount(struct disk *disk)
{
	return disk->d_opencount;
}

/*
 * Copy the caller's disk_info into the disk structure and normalize it:
 * duplicate/clean the serial number (creating a serno/ alias), derive
 * media size from blocks or vice versa, and propagate si_iosize_max and
 * block size hints from the raw device to the managed device.
 */
static void
_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	char *oldserialno;

	oldserialno = disk->d_info.d_serialno;
	bcopy(info, &disk->d_info, sizeof(disk->d_info));
	info = &disk->d_info;

	disk_debug(1, "_setdiskinfo: %s\n", disk->d_cdev->si_name);

	/*
	 * The serial number is duplicated so the caller can throw
	 * their copy away.
	 */
	if (info->d_serialno && info->d_serialno[0] &&
	    (info->d_serialno[0] != ' ' || strlen(info->d_serialno) > 1)) {
		info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
		disk_cleanserial(info->d_serialno);
		if (disk->d_cdev) {
			make_dev_alias(disk->d_cdev, "serno/%s",
				       info->d_serialno);
		}
	} else {
		info->d_serialno = NULL;
	}
	if (oldserialno)
		kfree(oldserialno, M_TEMP);

	dsched_disk_update_callback(disk, info);

	/*
	 * The caller may set d_media_size or d_media_blocks and we
	 * calculate the other.
	 */
	KKASSERT(info->d_media_size == 0 || info->d_media_blocks == 0);
	if (info->d_media_size == 0 && info->d_media_blocks) {
		info->d_media_size = (u_int64_t)info->d_media_blocks *
				     info->d_media_blksize;
	} else if (info->d_media_size && info->d_media_blocks == 0 &&
		   info->d_media_blksize) {
		info->d_media_blocks = info->d_media_size /
				       info->d_media_blksize;
	}

	/*
	 * The si_* fields for rawdev are not set until after the
	 * disk_create() call, so someone using the cooked version
	 * of the raw device (i.e. da0s0) will not get the right
	 * si_iosize_max unless we fix it up here.
	 */
	if (disk->d_cdev && disk->d_rawdev &&
	    disk->d_cdev->si_iosize_max == 0) {
		disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
		disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
		disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
	}

	/* Add the serial number to the udev_dictionary */
	if (info->d_serialno)
		udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
}

/*
 * Disk drivers must call this routine when media parameters are available
 * or have changed.  The probe is performed asynchronously by the disk
 * management thread.
 */
void
disk_setdiskinfo(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo: sent probe for %s\n",
	    disk->d_cdev->si_name);
}

/*
 * Same as disk_setdiskinfo() but blocks until the probe has completed.
 */
void
disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
{
	_setdiskinfo(disk, info);
	disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
	disk_debug(1, "disk_setdiskinfo_sync: sent probe for %s\n",
	    disk->d_cdev->si_name);
}

/*
 * This routine is called when an adapter detaches.  The higher level
 * managed disk device is destroyed while the lower level raw device is
 * released.
798 */ 799 void 800 disk_destroy(struct disk *disk) 801 { 802 dsched_disk_destroy_callback(disk); 803 disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL); 804 return; 805 } 806 807 int 808 disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno, u_int32_t *secsize) 809 { 810 struct partinfo pinfo; 811 int error; 812 813 bzero(&pinfo, sizeof(pinfo)); 814 error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0, 815 proc0.p_ucred, NULL); 816 if (error) 817 return (error); 818 819 if (pinfo.media_blksize == 0) 820 return (ENXIO); 821 822 if (blkno) /* XXX: make sure this reserved stuff is right */ 823 *blkno = pinfo.reserved_blocks + 824 pinfo.media_offset / pinfo.media_blksize; 825 if (secsize) 826 *secsize = pinfo.media_blksize; 827 if (size) 828 *size = (pinfo.media_blocks - pinfo.reserved_blocks); 829 830 return (0); 831 } 832 833 int 834 disk_dumpconf(cdev_t dev, u_int onoff) 835 { 836 struct dumperinfo di; 837 u_int64_t size, blkno; 838 u_int32_t secsize; 839 int error; 840 841 if (!onoff) 842 return set_dumper(NULL); 843 844 error = disk_dumpcheck(dev, &size, &blkno, &secsize); 845 846 if (error) 847 return ENXIO; 848 849 bzero(&di, sizeof(struct dumperinfo)); 850 di.dumper = diskdump; 851 di.priv = dev; 852 di.blocksize = secsize; 853 di.maxiosize = dev->si_iosize_max; 854 di.mediaoffset = blkno * DEV_BSIZE; 855 di.mediasize = size * DEV_BSIZE; 856 857 return set_dumper(&di); 858 } 859 860 void 861 disk_unprobe(struct disk *disk) 862 { 863 if (disk == NULL) 864 return; 865 866 disk_msg_send_sync(DISK_UNPROBE, disk, NULL); 867 } 868 869 void 870 disk_invalidate (struct disk *disk) 871 { 872 dsgone(&disk->d_slice); 873 } 874 875 struct disk * 876 disk_enumerate(struct disk *disk) 877 { 878 struct disk *dp; 879 880 lwkt_gettoken(&disklist_token); 881 if (!disk) 882 dp = (LIST_FIRST(&disklist)); 883 else 884 dp = (LIST_NEXT(disk, d_list)); 885 lwkt_reltoken(&disklist_token); 886 887 return dp; 888 } 889 890 static 891 int 892 sysctl_disks(SYSCTL_HANDLER_ARGS) 893 { 
894 struct disk *disk; 895 int error, first; 896 897 disk = NULL; 898 first = 1; 899 900 while ((disk = disk_enumerate(disk))) { 901 if (!first) { 902 error = SYSCTL_OUT(req, " ", 1); 903 if (error) 904 return error; 905 } else { 906 first = 0; 907 } 908 error = SYSCTL_OUT(req, disk->d_rawdev->si_name, 909 strlen(disk->d_rawdev->si_name)); 910 if (error) 911 return error; 912 } 913 error = SYSCTL_OUT(req, "", 1); 914 return error; 915 } 916 917 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, 918 sysctl_disks, "A", "names of available disks"); 919 920 /* 921 * Open a disk device or partition. 922 */ 923 static 924 int 925 diskopen(struct dev_open_args *ap) 926 { 927 cdev_t dev = ap->a_head.a_dev; 928 struct disk *dp; 929 int error; 930 931 /* 932 * dp can't be NULL here XXX. 933 * 934 * d_slice will be NULL if setdiskinfo() has not been called yet. 935 * setdiskinfo() is typically called whether the disk is present 936 * or not (e.g. CD), but the base disk device is created first 937 * and there may be a race. 938 */ 939 dp = dev->si_disk; 940 if (dp == NULL || dp->d_slice == NULL) 941 return (ENXIO); 942 error = 0; 943 944 /* 945 * Deal with open races 946 */ 947 get_mplock(); 948 while (dp->d_flags & DISKFLAG_LOCK) { 949 dp->d_flags |= DISKFLAG_WANTED; 950 error = tsleep(dp, PCATCH, "diskopen", hz); 951 if (error) { 952 rel_mplock(); 953 return (error); 954 } 955 } 956 dp->d_flags |= DISKFLAG_LOCK; 957 958 /* 959 * Open the underlying raw device. 
960 */ 961 if (!dsisopen(dp->d_slice)) { 962 #if 0 963 if (!pdev->si_iosize_max) 964 pdev->si_iosize_max = dev->si_iosize_max; 965 #endif 966 error = dev_dopen(dp->d_rawdev, ap->a_oflags, 967 ap->a_devtype, ap->a_cred); 968 } 969 970 if (error) 971 goto out; 972 error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags, 973 &dp->d_slice, &dp->d_info); 974 if (!dsisopen(dp->d_slice)) { 975 dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype); 976 } 977 out: 978 dp->d_flags &= ~DISKFLAG_LOCK; 979 if (dp->d_flags & DISKFLAG_WANTED) { 980 dp->d_flags &= ~DISKFLAG_WANTED; 981 wakeup(dp); 982 } 983 rel_mplock(); 984 985 KKASSERT(dp->d_opencount >= 0); 986 /* If the open was successful, bump open count */ 987 if (error == 0) 988 atomic_add_int(&dp->d_opencount, 1); 989 990 return(error); 991 } 992 993 /* 994 * Close a disk device or partition 995 */ 996 static 997 int 998 diskclose(struct dev_close_args *ap) 999 { 1000 cdev_t dev = ap->a_head.a_dev; 1001 struct disk *dp; 1002 int error; 1003 int lcount; 1004 1005 error = 0; 1006 dp = dev->si_disk; 1007 1008 /* 1009 * The cdev_t represents the disk/slice/part. The shared 1010 * dp structure governs all cdevs associated with the disk. 1011 * 1012 * As a safety only close the underlying raw device on the last 1013 * close the disk device if our tracking of the slices/partitions 1014 * also indicates nothing is open. 1015 */ 1016 KKASSERT(dp->d_opencount >= 1); 1017 lcount = atomic_fetchadd_int(&dp->d_opencount, -1); 1018 1019 get_mplock(); 1020 dsclose(dev, ap->a_devtype, dp->d_slice); 1021 if (lcount <= 1 && !dsisopen(dp->d_slice)) { 1022 error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype); 1023 } 1024 rel_mplock(); 1025 return (error); 1026 } 1027 1028 /* 1029 * First execute the ioctl on the disk device, and if it isn't supported 1030 * try running it on the backing device. 
1031 */ 1032 static 1033 int 1034 diskioctl(struct dev_ioctl_args *ap) 1035 { 1036 cdev_t dev = ap->a_head.a_dev; 1037 struct disk *dp; 1038 int error; 1039 u_int u; 1040 1041 dp = dev->si_disk; 1042 if (dp == NULL) 1043 return (ENXIO); 1044 1045 devfs_debug(DEVFS_DEBUG_DEBUG, 1046 "diskioctl: cmd is: %lx (name: %s)\n", 1047 ap->a_cmd, dev->si_name); 1048 devfs_debug(DEVFS_DEBUG_DEBUG, 1049 "diskioctl: &dp->d_slice is: %p, %p\n", 1050 &dp->d_slice, dp->d_slice); 1051 1052 if (ap->a_cmd == DIOCGKERNELDUMP) { 1053 u = *(u_int *)ap->a_data; 1054 return disk_dumpconf(dev, u); 1055 } 1056 1057 if (&dp->d_slice == NULL || dp->d_slice == NULL || 1058 ((dp->d_info.d_dsflags & DSO_DEVICEMAPPER) && 1059 dkslice(dev) == WHOLE_DISK_SLICE)) { 1060 error = ENOIOCTL; 1061 } else { 1062 get_mplock(); 1063 error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag, 1064 &dp->d_slice, &dp->d_info); 1065 rel_mplock(); 1066 } 1067 1068 if (error == ENOIOCTL) { 1069 error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data, 1070 ap->a_fflag, ap->a_cred, NULL); 1071 } 1072 return (error); 1073 } 1074 1075 /* 1076 * Execute strategy routine 1077 */ 1078 static 1079 int 1080 diskstrategy(struct dev_strategy_args *ap) 1081 { 1082 cdev_t dev = ap->a_head.a_dev; 1083 struct bio *bio = ap->a_bio; 1084 struct bio *nbio; 1085 struct disk *dp; 1086 1087 dp = dev->si_disk; 1088 1089 if (dp == NULL) { 1090 bio->bio_buf->b_error = ENXIO; 1091 bio->bio_buf->b_flags |= B_ERROR; 1092 biodone(bio); 1093 return(0); 1094 } 1095 KKASSERT(dev->si_disk == dp); 1096 1097 /* 1098 * The dscheck() function will also transform the slice relative 1099 * block number i.e. bio->bio_offset into a block number that can be 1100 * passed directly to the underlying raw device. If dscheck() 1101 * returns NULL it will have handled the bio for us (e.g. EOF 1102 * or error due to being beyond the device size). 
1103 */ 1104 if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) { 1105 dsched_queue(dp, nbio); 1106 } else { 1107 biodone(bio); 1108 } 1109 return(0); 1110 } 1111 1112 /* 1113 * Return the partition size in ?blocks? 1114 */ 1115 static 1116 int 1117 diskpsize(struct dev_psize_args *ap) 1118 { 1119 cdev_t dev = ap->a_head.a_dev; 1120 struct disk *dp; 1121 1122 dp = dev->si_disk; 1123 if (dp == NULL) 1124 return(ENODEV); 1125 1126 ap->a_result = dssize(dev, &dp->d_slice); 1127 1128 if ((ap->a_result == -1) && 1129 (dp->d_info.d_dsflags & DSO_RAWPSIZE)) { 1130 ap->a_head.a_dev = dp->d_rawdev; 1131 return dev_doperate(&ap->a_head); 1132 } 1133 return(0); 1134 } 1135 1136 int 1137 diskdump(struct dev_dump_args *ap) 1138 { 1139 cdev_t dev = ap->a_head.a_dev; 1140 struct disk *dp = dev->si_disk; 1141 u_int64_t size, offset; 1142 int error; 1143 1144 error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize); 1145 /* XXX: this should probably go in disk_dumpcheck somehow */ 1146 if (ap->a_length != 0) { 1147 size *= DEV_BSIZE; 1148 offset = ap->a_blkno * DEV_BSIZE; 1149 if ((ap->a_offset < offset) || 1150 (ap->a_offset + ap->a_length - offset > size)) { 1151 kprintf("Attempt to write outside dump device boundaries.\n"); 1152 error = ENOSPC; 1153 } 1154 } 1155 1156 if (error == 0) { 1157 ap->a_head.a_dev = dp->d_rawdev; 1158 error = dev_doperate(&ap->a_head); 1159 } 1160 1161 return(error); 1162 } 1163 1164 1165 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD, 1166 0, sizeof(struct diskslices), "sizeof(struct diskslices)"); 1167 1168 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD, 1169 0, sizeof(struct disk), "sizeof(struct disk)"); 1170 1171 /* 1172 * Reorder interval for burst write allowance and minor write 1173 * allowance. 1174 * 1175 * We always want to trickle some writes in to make use of the 1176 * disk's zone cache. Bursting occurs on a longer interval and only 1177 * runningbufspace is well over the hirunningspace limit. 
 */
int bioq_reorder_burst_interval = 60;	/* should be multiple of minor */
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
	   CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
int bioq_reorder_minor_interval = 5;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
	   CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");

int bioq_reorder_burst_bytes = 3000000;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
	   CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
int bioq_reorder_minor_bytes = 262144;
SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
	   CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");


/*
 * Order I/Os.  Generally speaking this code is designed to make better
 * use of drive zone caches.  A drive zone cache can typically track linear
 * reads or writes for around 16 zones simultaneously.
 *
 * Read prioritization issues:  It is possible for hundreds of megabytes worth
 * of writes to be queued asynchronously.  This creates a huge bottleneck
 * for reads which reduce read bandwidth to a trickle.
 *
 * To solve this problem we generally reorder reads before writes.
 *
 * However, a large number of random reads can also starve writes and
 * make poor use of the drive zone cache so we allow writes to trickle
 * in every N reads.
 */
void
bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * The BIO wants to be ordered.  Adding to the tail also
	 * causes transition to be set to NULL, forcing the ordering
	 * of all prior I/O's.
	 */
	if (bio->bio_buf->b_flags & B_ORDERED) {
		bioq_insert_tail(bioq, bio);
		return;
	}

	switch(bio->bio_buf->b_cmd) {
	case BUF_CMD_READ:
		if (bioq->transition) {
			/*
			 * Insert before the first write.  Bleedover writes
			 * based on reorder intervals to prevent starvation:
			 * every bioq_reorder_minor_interval reads we let
			 * bioqwritereorder() move the read/write transition
			 * point past some writes.
			 */
			TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
			++bioq->reorder;
			if (bioq->reorder % bioq_reorder_minor_interval == 0) {
				bioqwritereorder(bioq);
				/* reset the counter once a burst completes */
				if (bioq->reorder >=
				    bioq_reorder_burst_interval) {
					bioq->reorder = 0;
				}
			}
		} else {
			/*
			 * No writes queued (or ordering was forced),
			 * insert at tail.
			 */
			TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		}
		break;
	case BUF_CMD_WRITE:
		/*
		 * Writes are always appended.  If no writes were previously
		 * queued or an ordered tail insertion occurred the transition
		 * field will be NULL, so this write becomes the new
		 * read/write transition point.
		 */
		TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
		if (bioq->transition == NULL)
			bioq->transition = bio;
		break;
	default:
		/*
		 * All other request types are forced to be ordered.
		 */
		bioq_insert_tail(bioq, bio);
		break;
	}
}

/*
 * Move the read-write transition point to prevent reads from
 * completely starving our writes.  This brings a number of writes into
 * the fold every N reads.
 *
 * We bring a few linear writes into the fold on a minor interval
 * and we bring a non-linear burst of writes into the fold on a major
 * interval.  Bursting only occurs if runningbufspace is really high
 * (typically from syncs, fsyncs, or HAMMER flushes).
 */
static
void
bioqwritereorder(struct bio_queue_head *bioq)
{
	struct bio *bio;
	off_t next_offset;
	size_t left;		/* byte budget of writes to bring forward */
	size_t n;
	int check_off;		/* 1 = only advance over linear writes */

	/*
	 * Choose the allowance: a small, linear-only allowance on the
	 * minor interval, or a larger unconditional burst when the reorder
	 * counter has reached the burst interval AND runningbufspace is
	 * severe.
	 */
	if (bioq->reorder < bioq_reorder_burst_interval ||
	    !buf_runningbufspace_severe()) {
		left = (size_t)bioq_reorder_minor_bytes;
		check_off = 1;
	} else {
		left = (size_t)bioq_reorder_burst_bytes;
		check_off = 0;
	}

	/*
	 * Walk the transition pointer forward over writes.  In linear
	 * mode (check_off) we stop as soon as a write is not contiguous
	 * with the previous one.  Note that the transition pointer is
	 * advanced BEFORE the budget test, so one write that overflows
	 * the remaining budget is still brought into the fold.
	 */
	next_offset = bioq->transition->bio_offset;
	while ((bio = bioq->transition) != NULL &&
	       (check_off == 0 || next_offset == bio->bio_offset)
	) {
		n = bio->bio_buf->b_bcount;
		next_offset = bio->bio_offset + n;
		bioq->transition = TAILQ_NEXT(bio, bio_act);
		if (left < n)
			break;
		left -= n;
	}
}

/*
 * Bounds checking against the media size, used for the raw partition.
 * secsize, mediasize and b_blkno must all be the same units.
 * Possibly this has to be DEV_BSIZE (512).
 *
 * Returns 1 when the (possibly truncated) transfer may proceed and 0
 * when it must not (EOF with b_resid set, or b_error = EINVAL when the
 * request starts past the end of the media).
 * NOTE(review): B_ERROR is not set here in the EINVAL case — callers
 * appear to be expected to act on the 0 return; confirm at call sites.
 */
int
bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
{
	struct buf *bp = bio->bio_buf;
	int64_t sz;

	/* number of sectors spanned by the request, rounded up */
	sz = howmany(bp->b_bcount, secsize);

	if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
		sz = mediasize - bio->bio_offset/DEV_BSIZE;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			return 0;
		}
		if (sz < 0) {
			/* If past end of disk, return EINVAL. */
			bp->b_error = EINVAL;
			return 0;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz * secsize;
	}

	return 1;
}

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.
blkdone should be -1 if the position of the error 1347 * is unknown; the disklabel pointer may be null from drivers that have not 1348 * been converted to use them. The message is printed with kprintf 1349 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority. 1350 * The message should be completed (with at least a newline) with kprintf 1351 * or log(-1, ...), respectively. There is no trailing space. 1352 */ 1353 void 1354 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt) 1355 { 1356 struct buf *bp = bio->bio_buf; 1357 const char *term; 1358 1359 switch(bp->b_cmd) { 1360 case BUF_CMD_READ: 1361 term = "read"; 1362 break; 1363 case BUF_CMD_WRITE: 1364 term = "write"; 1365 break; 1366 default: 1367 term = "access"; 1368 break; 1369 } 1370 kprintf("%s: %s %sing ", dev->si_name, what, term); 1371 kprintf("offset %012llx for %d", 1372 (long long)bio->bio_offset, 1373 bp->b_bcount); 1374 1375 if (donecnt) 1376 kprintf(" (%d bytes completed)", donecnt); 1377 } 1378 1379 /* 1380 * Locate a disk device 1381 */ 1382 cdev_t 1383 disk_locate(const char *devname) 1384 { 1385 return devfs_find_device_by_name("%s", devname); 1386 } 1387 1388 void 1389 disk_config(void *arg) 1390 { 1391 disk_msg_send_sync(DISK_SYNC, NULL, NULL); 1392 } 1393 1394 static void 1395 disk_init(void) 1396 { 1397 struct thread* td_core; 1398 1399 disk_msg_cache = objcache_create("disk-msg-cache", 0, 0, 1400 NULL, NULL, NULL, 1401 objcache_malloc_alloc, 1402 objcache_malloc_free, 1403 &disk_msg_malloc_args); 1404 1405 lwkt_token_init(&disklist_token, "disks"); 1406 1407 /* 1408 * Initialize the reply-only port which acts as a message drain 1409 */ 1410 lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply); 1411 1412 lwkt_gettoken(&disklist_token); 1413 lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL, 1414 0, -1, "disk_msg_core"); 1415 tsleep(td_core, 0, "diskcore", 0); 1416 lwkt_reltoken(&disklist_token); 1417 } 1418 1419 static void 
1420 disk_uninit(void) 1421 { 1422 objcache_destroy(disk_msg_cache); 1423 } 1424 1425 /* 1426 * Clean out illegal characters in serial numbers. 1427 */ 1428 static void 1429 disk_cleanserial(char *serno) 1430 { 1431 char c; 1432 1433 while ((c = *serno) != 0) { 1434 if (c >= 'a' && c <= 'z') 1435 ; 1436 else if (c >= 'A' && c <= 'Z') 1437 ; 1438 else if (c >= '0' && c <= '9') 1439 ; 1440 else if (c == '-' || c == '@' || c == '+' || c == '.') 1441 ; 1442 else 1443 c = '_'; 1444 *serno++= c; 1445 } 1446 } 1447 1448 TUNABLE_INT("kern.disk_debug", &disk_debug_enable); 1449 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable, 1450 0, "Enable subr_disk debugging"); 1451 1452 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL); 1453 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL); 1454