/*	$OpenBSD: sd.c,v 1.330 2021/10/24 16:57:30 mpi Exp $	*/
/*	$NetBSD: sd.c,v 1.111 1997/04/02 02:29:41 mycroft Exp $	*/

/*-
 * Copyright (c) 1998, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Originally written by Julian Elischer (julian@dialix.oz.au)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 *
 * Ported to run under 386BSD by Julian Elischer (julian@dialix.oz.au) Sept 1992
 */

#include <sys/stdint.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/conf.h>
#include <sys/scsiio.h>
#include <sys/dkio.h>
#include <sys/reboot.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_debug.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>
#include <scsi/sdvar.h>

#include <ufs/ffs/fs.h>		/* for BBSIZE and SBSIZE */

#include <sys/vnode.h>

int     sdmatch(struct device *, void *, void *);
void    sdattach(struct device *, struct device *, void *);
int     sdactivate(struct device *, int);
int     sddetach(struct device *, int);

void    sdminphys(struct buf *);
int     sdgetdisklabel(dev_t, struct sd_softc *, struct disklabel *, int);
void    sdstart(struct scsi_xfer *);
int     sd_interpret_sense(struct scsi_xfer *);
int     sd_read_cap_10(struct sd_softc *, int);
int     sd_read_cap_16(struct sd_softc *, int);
int     sd_read_cap(struct sd_softc *, int);
int     sd_thin_pages(struct sd_softc *, int);
int     sd_vpd_block_limits(struct sd_softc *, int);
int     sd_vpd_thin(struct sd_softc *, int);
int     sd_thin_params(struct sd_softc *, int);
int     sd_get_parms(struct sd_softc *, int);
int     sd_flush(struct sd_softc *, int);

void    viscpy(u_char *, u_char *, int);

int     sd_ioctl_inquiry(struct sd_softc *, struct dk_inquiry *);
int     sd_ioctl_cache(struct sd_softc *, long, struct dk_cache *);

int     sd_cmd_rw6(struct scsi_generic *, int, u_int64_t, u_int32_t);
int     sd_cmd_rw10(struct scsi_generic *, int, u_int64_t, u_int32_t);
int     sd_cmd_rw12(struct scsi_generic *, int, u_int64_t, u_int32_t);
int     sd_cmd_rw16(struct scsi_generic *, int, u_int64_t, u_int32_t);

void    sd_buf_done(struct scsi_xfer *);

const struct cfattach sd_ca = {
        sizeof(struct sd_softc), sdmatch, sdattach,
        sddetach, sdactivate
};

struct cfdriver sd_cd = {
        NULL, "sd", DV_DISK
};

const struct scsi_inquiry_pattern sd_patterns[] = {
        {T_DIRECT, T_FIXED, "", "", ""},
        {T_DIRECT, T_REMOV, "", "", ""},
        {T_RDIRECT, T_FIXED, "", "", ""},
        {T_RDIRECT, T_REMOV, "", "", ""},
        {T_OPTICAL, T_FIXED, "", "", ""},
        {T_OPTICAL, T_REMOV, "", "", ""},
};

#define sdlookup(unit) (struct sd_softc *)disk_lookup(&sd_cd, (unit))
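
/*
 * sd accepts any direct-access, reduced direct-access or optical device,
 * fixed or removable; the empty vendor/product/revision strings in
 * sd_patterns match everything of those types. scsi_inqmatch() returns,
 * via `priority', how well the inquiry data matches, and autoconf attaches
 * the best-matching driver.
 */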
int
sdmatch(struct device *parent, void *match, void *aux)
{
        struct scsi_attach_args *sa = aux;
        struct scsi_inquiry_data *inq = &sa->sa_sc_link->inqdata;
        int priority;

        (void)scsi_inqmatch(inq, sd_patterns, nitems(sd_patterns),
            sizeof(sd_patterns[0]), &priority);

        return priority;
}

/*
 * The routine called by the low level scsi routine when it discovers
 * a device suitable for this driver.
 */
void
sdattach(struct device *parent, struct device *self, void *aux)
{
        struct dk_cache dkc;
        struct sd_softc *sc = (struct sd_softc *)self;
        struct scsi_attach_args *sa = aux;
        struct disk_parms *dp = &sc->params;
        struct scsi_link *link = sa->sa_sc_link;
        int error, sd_autoconf;
        int sortby = BUFQ_DEFAULT;

        SC_DEBUG(link, SDEV_DB2, ("sdattach:\n"));

        sd_autoconf = scsi_autoconf | SCSI_SILENT |
            SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE;

        /*
         * Store information needed to contact our base driver.
         */
        sc->sc_link = link;
        link->interpret_sense = sd_interpret_sense;
        link->device_softc = sc;

        if (ISSET(link->flags, SDEV_ATAPI) && ISSET(link->flags,
            SDEV_REMOVABLE))
                SET(link->quirks, SDEV_NOSYNCCACHE);

        /*
         * Use the subdriver to request information regarding the drive. We
         * cannot use interrupts yet, so the request must specify this.
         */
        printf("\n");

        scsi_xsh_set(&sc->sc_xsh, link, sdstart);

        /* Spin up non-UMASS devices ready or not. */
        if (!ISSET(link->flags, SDEV_UMASS))
                scsi_start(link, SSS_START, sd_autoconf);

        /*
         * Some devices (e.g. BlackBerry Pearl) won't admit they have
         * media loaded unless it's been locked in.
         */
        if (ISSET(link->flags, SDEV_REMOVABLE))
                scsi_prevent(link, PR_PREVENT, sd_autoconf);

        /* Check that it is still responding and ok. */
        error = scsi_test_unit_ready(sc->sc_link, TEST_READY_RETRIES * 3,
            sd_autoconf);
        if (error == 0)
                error = sd_get_parms(sc, sd_autoconf);

        if (ISSET(link->flags, SDEV_REMOVABLE))
                scsi_prevent(link, PR_ALLOW, sd_autoconf);

        if (error == 0) {
                printf("%s: %lluMB, %u bytes/sector, %llu sectors",
                    sc->sc_dev.dv_xname,
                    dp->disksize / (1048576 / dp->secsize), dp->secsize,
                    dp->disksize);
                if (ISSET(sc->flags, SDF_THIN)) {
                        sortby = BUFQ_FIFO;
                        printf(", thin");
                }
                if (ISSET(link->flags, SDEV_READONLY))
                        printf(", readonly");
                printf("\n");
        }

        /*
         * Initialize disk structures.
         */
        sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
        bufq_init(&sc->sc_bufq, sortby);

        /*
         * Enable write cache by default.
         */
        memset(&dkc, 0, sizeof(dkc));
        if (sd_ioctl_cache(sc, DIOCGCACHE, &dkc) == 0 && dkc.wrcache == 0) {
                dkc.wrcache = 1;
                sd_ioctl_cache(sc, DIOCSCACHE, &dkc);
        }

        /* Attach disk. */
        disk_attach(&sc->sc_dev, &sc->sc_dk);
}

int
sdactivate(struct device *self, int act)
{
        struct scsi_link *link;
        struct sd_softc *sc = (struct sd_softc *)self;

        if (ISSET(sc->flags, SDF_DYING))
                return ENXIO;
        link = sc->sc_link;

        switch (act) {
        case DVACT_SUSPEND:
                /*
                 * We flush the cache, since our next step before
                 * DVACT_POWERDOWN might be a hibernate operation.
                 */
                if (ISSET(sc->flags, SDF_DIRTY))
                        sd_flush(sc, SCSI_AUTOCONF);
                break;
        case DVACT_POWERDOWN:
                /*
                 * Stop the disk. Stopping the disk should flush the
                 * cache, but we are paranoid so we flush the cache
                 * first. We're cold at this point, so we poll for
                 * completion.
                 */
                if (ISSET(sc->flags, SDF_DIRTY))
                        sd_flush(sc, SCSI_AUTOCONF);
                if (ISSET(boothowto, RB_POWERDOWN))
                        scsi_start(link, SSS_STOP,
                            SCSI_IGNORE_ILLEGAL_REQUEST |
                            SCSI_IGNORE_NOT_READY | SCSI_AUTOCONF);
                break;
        case DVACT_RESUME:
                scsi_start(link, SSS_START,
                    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_AUTOCONF);
                break;
        case DVACT_DEACTIVATE:
                SET(sc->flags, SDF_DYING);
                scsi_xsh_del(&sc->sc_xsh);
                break;
        }
        return 0;
}

int
sddetach(struct device *self, int flags)
{
        struct sd_softc *sc = (struct sd_softc *)self;

        bufq_drain(&sc->sc_bufq);

        disk_gone(sdopen, self->dv_unit);

        /* Detach disk. */
        bufq_destroy(&sc->sc_bufq);
        disk_detach(&sc->sc_dk);

        return 0;
}

/*
 * Open the device. Make sure the partition info is as up-to-date as can be.
 */
int
sdopen(dev_t dev, int flag, int fmt, struct proc *p)
{
        struct scsi_link *link;
        struct sd_softc *sc;
        int error = 0, part, rawopen, unit;

        unit = DISKUNIT(dev);
        part = DISKPART(dev);

        rawopen = (part == RAW_PART) && (fmt == S_IFCHR);

        sc = sdlookup(unit);
        if (sc == NULL)
                return ENXIO;
        if (ISSET(sc->flags, SDF_DYING)) {
                device_unref(&sc->sc_dev);
                return ENXIO;
        }
        link = sc->sc_link;

        SC_DEBUG(link, SDEV_DB1,
            ("sdopen: dev=0x%x (unit %d (of %d), partition %d)\n", dev, unit,
            sd_cd.cd_ndevs, part));

        if (ISSET(flag, FWRITE) && ISSET(link->flags, SDEV_READONLY)) {
                device_unref(&sc->sc_dev);
                return EACCES;
        }
        if ((error = disk_lock(&sc->sc_dk)) != 0) {
                device_unref(&sc->sc_dev);
                return error;
        }
        if (ISSET(sc->flags, SDF_DYING)) {
                error = ENXIO;
                goto die;
        }

        if (sc->sc_dk.dk_openmask != 0) {
                /*
                 * If any partition is open, but the disk has been invalidated,
                 * disallow further opens of non-raw partition.
                 */
                if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
                        if (rawopen)
                                goto out;
                        error = EIO;
                        goto bad;
                }
        } else {
                /* Spin up non-UMASS devices ready or not. */
                if (!ISSET(link->flags, SDEV_UMASS))
                        scsi_start(link, SSS_START, (rawopen ? SCSI_SILENT :
                            0) | SCSI_IGNORE_ILLEGAL_REQUEST |
                            SCSI_IGNORE_MEDIA_CHANGE);

                /*
                 * Use sd_interpret_sense() for sense errors.
                 *
                 * But only after spinning the disk up! Just in case a broken
                 * device returns "Initialization command required." and causes
                 * a loop of scsi_start() calls.
                 */
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                SET(link->flags, SDEV_OPEN);

                /*
                 * Try to prevent the unloading of a removable device while
                 * it's open. But allow the open to proceed if the device can't
                 * be locked in.
                 */
                if (ISSET(link->flags, SDEV_REMOVABLE)) {
                        scsi_prevent(link, PR_PREVENT, SCSI_SILENT |
                            SCSI_IGNORE_ILLEGAL_REQUEST |
                            SCSI_IGNORE_MEDIA_CHANGE);
                }

                /* Check that it is still responding and ok. */
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                error = scsi_test_unit_ready(link,
                    TEST_READY_RETRIES, SCSI_SILENT |
                    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_IGNORE_MEDIA_CHANGE);
                if (error) {
                        if (rawopen) {
                                error = 0;
                                goto out;
                        } else
                                goto bad;
                }

                /* Load the physical device parameters. */
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                SET(link->flags, SDEV_MEDIA_LOADED);
                if (sd_get_parms(sc, (rawopen ? SCSI_SILENT : 0)) == -1) {
                        if (ISSET(sc->flags, SDF_DYING)) {
                                error = ENXIO;
                                goto die;
                        }
                        CLR(link->flags, SDEV_MEDIA_LOADED);
                        error = ENXIO;
                        goto bad;
                }
                SC_DEBUG(link, SDEV_DB3, ("Params loaded\n"));

                /* Load the partition info if not already loaded. */
                error = sdgetdisklabel(dev, sc, sc->sc_dk.dk_label, 0);
                if (error == EIO || error == ENXIO)
                        goto bad;
                SC_DEBUG(link, SDEV_DB3, ("Disklabel loaded\n"));
        }

out:
        if ((error = disk_openpart(&sc->sc_dk, part, fmt, 1)) != 0)
                goto bad;

        SC_DEBUG(link, SDEV_DB3, ("open complete\n"));

        /* It's OK to fall through because dk_openmask is now non-zero. */
bad:
        if (sc->sc_dk.dk_openmask == 0) {
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                if (ISSET(link->flags, SDEV_REMOVABLE))
                        scsi_prevent(link, PR_ALLOW, SCSI_SILENT |
                            SCSI_IGNORE_ILLEGAL_REQUEST |
                            SCSI_IGNORE_MEDIA_CHANGE);
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);
        }

die:
        disk_unlock(&sc->sc_dk);
        device_unref(&sc->sc_dev);
        return error;
}

/*
 * Close the device. Only called if we are the last occurrence of an open
 * device. Convenient now but usually a pain.
 */
int
sdclose(dev_t dev, int flag, int fmt, struct proc *p)
{
        struct scsi_link *link;
        struct sd_softc *sc;
        int part = DISKPART(dev);
        int error = 0;

        sc = sdlookup(DISKUNIT(dev));
        if (sc == NULL)
                return ENXIO;
        if (ISSET(sc->flags, SDF_DYING)) {
                device_unref(&sc->sc_dev);
                return ENXIO;
        }
        link = sc->sc_link;

        disk_lock_nointr(&sc->sc_dk);

        disk_closepart(&sc->sc_dk, part, fmt);

        if ((ISSET(flag, FWRITE) || sc->sc_dk.dk_openmask == 0) &&
            ISSET(sc->flags, SDF_DIRTY))
                sd_flush(sc, 0);

        if (sc->sc_dk.dk_openmask == 0) {
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                if (ISSET(link->flags, SDEV_REMOVABLE))
                        scsi_prevent(link, PR_ALLOW,
                            SCSI_IGNORE_ILLEGAL_REQUEST |
                            SCSI_IGNORE_NOT_READY | SCSI_SILENT);
                if (ISSET(sc->flags, SDF_DYING)) {
                        error = ENXIO;
                        goto die;
                }
                CLR(link->flags, SDEV_OPEN | SDEV_MEDIA_LOADED);

                if (ISSET(link->flags, SDEV_EJECTING)) {
                        scsi_start(link, SSS_STOP|SSS_LOEJ, 0);
                        if (ISSET(sc->flags, SDF_DYING)) {
                                error = ENXIO;
                                goto die;
                        }
                        CLR(link->flags, SDEV_EJECTING);
                }

                scsi_xsh_del(&sc->sc_xsh);
        }

die:
        disk_unlock(&sc->sc_dk);
        device_unref(&sc->sc_dev);
        return error;
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand. The transfer is described by a buf and will include
 * only one physical transfer.
 */
void
sdstrategy(struct buf *bp)
{
        struct scsi_link *link;
        struct sd_softc *sc;
        int s;

        sc = sdlookup(DISKUNIT(bp->b_dev));
        if (sc == NULL) {
                bp->b_error = ENXIO;
                goto bad;
        }
        if (ISSET(sc->flags, SDF_DYING)) {
                bp->b_error = ENXIO;
                goto bad;
        }
        link = sc->sc_link;

        SC_DEBUG(link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
            bp->b_bcount, (long long)bp->b_blkno));
        /*
         * If the device has been made invalid, error out.
         */
        if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
                if (ISSET(link->flags, SDEV_OPEN))
                        bp->b_error = EIO;
                else
                        bp->b_error = ENODEV;
                goto bad;
        }

        /* Validate the request. */
        if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
                goto done;

        /* Place it in the queue of disk activities for this disk. */
        bufq_queue(&sc->sc_bufq, bp);

        /*
         * Tell the device to get going on the transfer if it's
         * not doing anything, otherwise just wait for completion
         */
        scsi_xsh_add(&sc->sc_xsh);

        device_unref(&sc->sc_dev);
        return;

bad:
        SET(bp->b_flags, B_ERROR);
        bp->b_resid = bp->b_bcount;
done:
        s = splbio();
        biodone(bp);
        splx(s);
        if (sc != NULL)
                device_unref(&sc->sc_dev);
}

int
sd_cmd_rw6(struct scsi_generic *generic, int read, u_int64_t secno,
    u_int32_t nsecs)
{
        struct scsi_rw *cmd = (struct scsi_rw *)generic;

        cmd->opcode = read ? READ_COMMAND : WRITE_COMMAND;
        _lto3b(secno, cmd->addr);
        cmd->length = nsecs;

        return sizeof(*cmd);
}

int
sd_cmd_rw10(struct scsi_generic *generic, int read, u_int64_t secno,
    u_int32_t nsecs)
{
        struct scsi_rw_10 *cmd = (struct scsi_rw_10 *)generic;

        cmd->opcode = read ? READ_10 : WRITE_10;
        _lto4b(secno, cmd->addr);
        _lto2b(nsecs, cmd->length);

        return sizeof(*cmd);
}

int
sd_cmd_rw12(struct scsi_generic *generic, int read, u_int64_t secno,
    u_int32_t nsecs)
{
        struct scsi_rw_12 *cmd = (struct scsi_rw_12 *)generic;

        cmd->opcode = read ? READ_12 : WRITE_12;
        _lto4b(secno, cmd->addr);
        _lto4b(nsecs, cmd->length);

        return sizeof(*cmd);
}

int
sd_cmd_rw16(struct scsi_generic *generic, int read, u_int64_t secno,
    u_int32_t nsecs)
{
        struct scsi_rw_16 *cmd = (struct scsi_rw_16 *)generic;

        cmd->opcode = read ? READ_16 : WRITE_16;
        _lto8b(secno, cmd->addr);
        _lto4b(nsecs, cmd->length);

        return sizeof(*cmd);
}
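
/*
 * sdstart() below picks the smallest CDB variant that can express the
 * transfer: READ/WRITE(6) carries a 21-bit LBA and an 8-bit count,
 * READ/WRITE(10) a 32-bit LBA and a 16-bit count, READ/WRITE(12) a 32-bit
 * LBA and a 32-bit count, and READ/WRITE(16) a 64-bit LBA for disks with
 * more than 2^32 sectors. For example, a 64KB transfer on a 512-byte-sector
 * disk is 128 sectors, which fits the 6-byte CDB only if the starting LBA
 * also fits in 21 bits.
 */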

/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy).
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
        struct scsi_link *link = xs->sc_link;
        struct sd_softc *sc = link->device_softc;
        struct buf *bp;
        struct partition *p;
        u_int64_t secno;
        u_int32_t nsecs;
        int read;

        if (ISSET(sc->flags, SDF_DYING)) {
                scsi_xs_put(xs);
                return;
        }
        if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
                bufq_drain(&sc->sc_bufq);
                scsi_xs_put(xs);
                return;
        }

        bp = bufq_dequeue(&sc->sc_bufq);
        if (bp == NULL) {
                scsi_xs_put(xs);
                return;
        }
        read = ISSET(bp->b_flags, B_READ);

        SET(xs->flags, (read ? SCSI_DATA_IN : SCSI_DATA_OUT));
        xs->timeout = 60000;
        xs->data = bp->b_data;
        xs->datalen = bp->b_bcount;
        xs->done = sd_buf_done;
        xs->cookie = bp;
        xs->bp = bp;

        p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
        secno = DL_GETPOFFSET(p) + DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);
        nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);

        if (!ISSET(link->flags, SDEV_ATAPI | SDEV_UMASS) &&
            (SID_ANSII_REV(&link->inqdata) < SCSI_REV_2) &&
            ((secno & 0x1fffff) == secno) &&
            ((nsecs & 0xff) == nsecs))
                xs->cmdlen = sd_cmd_rw6(&xs->cmd, read, secno, nsecs);

        else if (sc->params.disksize > UINT32_MAX)
                xs->cmdlen = sd_cmd_rw16(&xs->cmd, read, secno, nsecs);

        else if (nsecs <= UINT16_MAX)
                xs->cmdlen = sd_cmd_rw10(&xs->cmd, read, secno, nsecs);

        else
                xs->cmdlen = sd_cmd_rw12(&xs->cmd, read, secno, nsecs);

        disk_busy(&sc->sc_dk);
        if (!read)
                SET(sc->flags, SDF_DIRTY);
        scsi_xs_exec(xs);

        /* Move onto the next io. */
        if (bufq_peek(&sc->sc_bufq))
                scsi_xsh_add(&sc->sc_xsh);
}

void
sd_buf_done(struct scsi_xfer *xs)
{
        struct sd_softc *sc = xs->sc_link->device_softc;
        struct buf *bp = xs->cookie;
        int error, s;

        switch (xs->error) {
        case XS_NOERROR:
                bp->b_error = 0;
                CLR(bp->b_flags, B_ERROR);
                bp->b_resid = xs->resid;
                break;

        case XS_SENSE:
        case XS_SHORTSENSE:
                SC_DEBUG_SENSE(xs);
                error = sd_interpret_sense(xs);
                if (error == 0) {
                        bp->b_error = 0;
                        CLR(bp->b_flags, B_ERROR);
                        bp->b_resid = xs->resid;
                        break;
                }
                if (error != ERESTART) {
                        bp->b_error = error;
                        SET(bp->b_flags, B_ERROR);
                        xs->retries = 0;
                }
                goto retry;

        case XS_BUSY:
                if (xs->retries) {
                        if (scsi_delay(xs, 1) != ERESTART)
                                xs->retries = 0;
                }
                goto retry;

        case XS_TIMEOUT:
retry:
                if (xs->retries--) {
                        scsi_xs_exec(xs);
                        return;
                }
                /* FALLTHROUGH */

        default:
                if (bp->b_error == 0)
                        bp->b_error = EIO;
                SET(bp->b_flags, B_ERROR);
                bp->b_resid = bp->b_bcount;
                break;
        }

        disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid, bp->b_blkno,
            bp->b_flags & B_READ);

        s = splbio();
        biodone(bp);
        splx(s);
        scsi_xs_put(xs);
}
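
/*
 * sd_buf_done() runs when a transfer issued by sdstart() completes: clean
 * completions just finish the buf, sense data is referred to
 * sd_interpret_sense(), and busy (after a delay) or timed-out transfers are
 * re-issued until xs->retries is exhausted, at which point the buf fails
 * with EIO.
 */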

void
sdminphys(struct buf *bp)
{
        struct scsi_link *link;
        struct sd_softc *sc;
        long max;

        sc = sdlookup(DISKUNIT(bp->b_dev));
        if (sc == NULL)
                return; /* XXX - right way to fail this? */
        if (ISSET(sc->flags, SDF_DYING)) {
                device_unref(&sc->sc_dev);
                return;
        }
        link = sc->sc_link;

        /*
         * If the device is ancient, we want to make sure that
         * the transfer fits into a 6-byte cdb.
         *
         * XXX Note that the SCSI-I spec says that 256-block transfers
         * are allowed in a 6-byte read/write, and are specified
         * by setting the "length" to 0. However, we're conservative
         * here, allowing only 255-block transfers in case an
         * ancient device gets confused by length == 0. A length of 0
         * in a 10-byte read/write actually means 0 blocks.
         */
        if (!ISSET(link->flags, SDEV_ATAPI | SDEV_UMASS) &&
            SID_ANSII_REV(&link->inqdata) < SCSI_REV_2) {
                max = sc->sc_dk.dk_label->d_secsize * 0xff;

                if (bp->b_bcount > max)
                        bp->b_bcount = max;
        }

        if (link->bus->sb_adapter->dev_minphys != NULL)
                (*link->bus->sb_adapter->dev_minphys)(bp, link);
        else
                minphys(bp);

        device_unref(&sc->sc_dev);
}

int
sdread(dev_t dev, struct uio *uio, int ioflag)
{
        return physio(sdstrategy, dev, B_READ, sdminphys, uio);
}

int
sdwrite(dev_t dev, struct uio *uio, int ioflag)
{
        return physio(sdstrategy, dev, B_WRITE, sdminphys, uio);
}
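
/*
 * sdread() and sdwrite() implement the raw (character) device: physio(9)
 * splits the user request into chunks no larger than sdminphys() allows and
 * hands each chunk to sdstrategy(). A hypothetical userland sketch of the
 * raw read path (device name is illustrative):
 *
 *	int fd = open("/dev/rsd0c", O_RDONLY);
 *	char buf[65536];
 *	read(fd, buf, sizeof(buf));	-> sdread() -> physio(9)
 */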

/*
 * Perform special action on behalf of the user. Knows about the internals of
 * this device
 */
int
sdioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
        struct scsi_link *link;
        struct sd_softc *sc;
        struct disklabel *lp;
        int error = 0;
        int part = DISKPART(dev);

        sc = sdlookup(DISKUNIT(dev));
        if (sc == NULL)
                return ENXIO;
        if (ISSET(sc->flags, SDF_DYING)) {
                device_unref(&sc->sc_dev);
                return ENXIO;
        }
        link = sc->sc_link;

        SC_DEBUG(link, SDEV_DB2, ("sdioctl 0x%lx\n", cmd));

        /*
         * If the device is not valid, abandon ship.
         */
        if (!ISSET(link->flags, SDEV_MEDIA_LOADED)) {
                switch (cmd) {
                case DIOCLOCK:
                case DIOCEJECT:
                case SCIOCIDENTIFY:
                case SCIOCCOMMAND:
                case SCIOCDEBUG:
                        if (part == RAW_PART)
                                break;
                        /* FALLTHROUGH */
                default:
                        if (!ISSET(link->flags, SDEV_OPEN)) {
                                error = ENODEV;
                                goto exit;
                        } else {
                                error = EIO;
                                goto exit;
                        }
                }
        }

        switch (cmd) {
        case DIOCRLDINFO:
                lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
                sdgetdisklabel(dev, sc, lp, 0);
                memcpy(sc->sc_dk.dk_label, lp, sizeof(*lp));
                free(lp, M_TEMP, sizeof(*lp));
                goto exit;

        case DIOCGPDINFO:
                sdgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
                goto exit;

        case DIOCGDINFO:
                *(struct disklabel *)addr = *(sc->sc_dk.dk_label);
                goto exit;

        case DIOCGPART:
                ((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
                ((struct partinfo *)addr)->part =
                    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
                goto exit;

        case DIOCWDINFO:
        case DIOCSDINFO:
                if (!ISSET(flag, FWRITE)) {
                        error = EBADF;
                        goto exit;
                }

                if ((error = disk_lock(&sc->sc_dk)) != 0)
                        goto exit;

                error = setdisklabel(sc->sc_dk.dk_label,
                    (struct disklabel *)addr, sc->sc_dk.dk_openmask);
                if (error == 0) {
                        if (cmd == DIOCWDINFO)
                                error = writedisklabel(DISKLABELDEV(dev),
                                    sdstrategy, sc->sc_dk.dk_label);
                }

                disk_unlock(&sc->sc_dk);
                goto exit;

        case DIOCLOCK:
                error = scsi_prevent(link,
                    (*(int *)addr) ? PR_PREVENT : PR_ALLOW, 0);
                goto exit;

        case MTIOCTOP:
                if (((struct mtop *)addr)->mt_op != MTOFFL) {
                        error = EIO;
                        goto exit;
                }
                /* FALLTHROUGH */
        case DIOCEJECT:
                if (!ISSET(link->flags, SDEV_REMOVABLE)) {
                        error = ENOTTY;
                        goto exit;
                }
                SET(link->flags, SDEV_EJECTING);
                goto exit;

        case DIOCINQ:
                error = scsi_do_ioctl(link, cmd, addr, flag);
                if (error == ENOTTY)
                        error = sd_ioctl_inquiry(sc,
                            (struct dk_inquiry *)addr);
                goto exit;

        case DIOCSCACHE:
                if (!ISSET(flag, FWRITE)) {
                        error = EBADF;
                        goto exit;
                }
                /* FALLTHROUGH */
        case DIOCGCACHE:
                error = sd_ioctl_cache(sc, cmd, (struct dk_cache *)addr);
                goto exit;

        case DIOCCACHESYNC:
                if (!ISSET(flag, FWRITE)) {
                        error = EBADF;
                        goto exit;
                }
                if (ISSET(sc->flags, SDF_DIRTY) || *(int *)addr != 0)
                        error = sd_flush(sc, 0);
                goto exit;

        default:
                if (part != RAW_PART) {
                        error = ENOTTY;
                        goto exit;
                }
                error = scsi_do_ioctl(link, cmd, addr, flag);
        }

exit:
        device_unref(&sc->sc_dev);
        return error;
}

int
sd_ioctl_inquiry(struct sd_softc *sc, struct dk_inquiry *di)
{
        struct scsi_link *link;
        struct scsi_vpd_serial *vpd;

        vpd = dma_alloc(sizeof(*vpd), PR_WAITOK | PR_ZERO);

        if (ISSET(sc->flags, SDF_DYING)) {
                dma_free(vpd, sizeof(*vpd));
                return ENXIO;
        }
        link = sc->sc_link;

        bzero(di, sizeof(struct dk_inquiry));
        scsi_strvis(di->vendor, link->inqdata.vendor,
            sizeof(link->inqdata.vendor));
        scsi_strvis(di->product, link->inqdata.product,
            sizeof(link->inqdata.product));
        scsi_strvis(di->revision, link->inqdata.revision,
            sizeof(link->inqdata.revision));

        /* the serial vpd page is optional */
        if (scsi_inquire_vpd(link, vpd, sizeof(*vpd), SI_PG_SERIAL, 0) == 0)
                scsi_strvis(di->serial, vpd->serial, sizeof(vpd->serial));
        else
                strlcpy(di->serial, "(unknown)", sizeof(vpd->serial));

        dma_free(vpd, sizeof(*vpd));
        return 0;
}

int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
        struct scsi_link *link;
        union scsi_mode_sense_buf *buf;
        struct page_caching_mode *mode = NULL;
        u_int wrcache, rdcache;
        int big, rv;

        if (ISSET(sc->flags, SDF_DYING))
                return ENXIO;
        link = sc->sc_link;

        if (ISSET(link->flags, SDEV_UMASS))
                return EOPNOTSUPP;

        /* See if the adapter has special handling. */
        rv = scsi_do_ioctl(link, cmd, (caddr_t)dkc, 0);
        if (rv != ENOTTY)
                return rv;

        buf = dma_alloc(sizeof(*buf), PR_WAITOK);
        if (buf == NULL)
                return ENOMEM;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = ENXIO;
                goto done;
        }
        rv = scsi_do_mode_sense(link, PAGE_CACHING_MODE, buf, (void **)&mode,
            sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
        if (rv == 0 && mode == NULL)
                rv = EIO;
        if (rv != 0)
                goto done;

        wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
        rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);
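
        /*
         * Note the inverted sense of RCD above: in the caching mode page,
         * WCE set means the write cache is enabled, while RCD set means
         * the read cache is disabled.
         */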

        switch (cmd) {
        case DIOCGCACHE:
                dkc->wrcache = wrcache;
                dkc->rdcache = rdcache;
                break;

        case DIOCSCACHE:
                if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
                        break;

                if (dkc->wrcache)
                        SET(mode->flags, PG_CACHE_FL_WCE);
                else
                        CLR(mode->flags, PG_CACHE_FL_WCE);

                if (dkc->rdcache)
                        CLR(mode->flags, PG_CACHE_FL_RCD);
                else
                        SET(mode->flags, PG_CACHE_FL_RCD);

                if (ISSET(sc->flags, SDF_DYING)) {
                        rv = ENXIO;
                        goto done;
                }
                if (big) {
                        rv = scsi_mode_select_big(link, SMS_PF,
                            &buf->hdr_big, scsi_autoconf | SCSI_SILENT, 20000);
                } else {
                        rv = scsi_mode_select(link, SMS_PF,
                            &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
                }
                break;
        }

done:
        dma_free(buf, sizeof(*buf));
        return rv;
}

/*
 * Load the label information on the named device.
 */
int
sdgetdisklabel(dev_t dev, struct sd_softc *sc, struct disklabel *lp,
    int spoofonly)
{
        char packname[sizeof(lp->d_packname) + 1];
        char product[17], vendor[9];
        struct scsi_link *link;
        size_t len;

        if (ISSET(sc->flags, SDF_DYING))
                return ENXIO;
        link = sc->sc_link;

        bzero(lp, sizeof(struct disklabel));

        lp->d_secsize = sc->params.secsize;
        lp->d_ntracks = sc->params.heads;
        lp->d_nsectors = sc->params.sectors;
        lp->d_ncylinders = sc->params.cyls;
        lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
        if (lp->d_secpercyl == 0) {
                lp->d_secpercyl = 100;
                /* As long as it's not 0 - readdisklabel divides by it. */
        }

        lp->d_type = DTYPE_SCSI;
        if ((link->inqdata.device & SID_TYPE) == T_OPTICAL)
                strncpy(lp->d_typename, "SCSI optical",
                    sizeof(lp->d_typename));
        else
                strncpy(lp->d_typename, "SCSI disk",
                    sizeof(lp->d_typename));

        /*
         * Try to fit '<vendor> <product>' into d_packname. If that doesn't fit
         * then leave out '<vendor> ' and use only as much of '<product>' as
         * does fit.
         */
        viscpy(vendor, link->inqdata.vendor, 8);
        viscpy(product, link->inqdata.product, 16);
        len = snprintf(packname, sizeof(packname), "%s %s", vendor, product);
        if (len > sizeof(lp->d_packname)) {
                strlcpy(packname, product, sizeof(packname));
                len = strlen(packname);
        }
        /*
         * It is safe to use len as the count of characters to copy because
         * packname is sizeof(lp->d_packname)+1, the string in packname is
         * always null terminated and len does not count the terminating null.
         * d_packname is not a null terminated string.
         */
        memcpy(lp->d_packname, packname, len);

        DL_SETDSIZE(lp, sc->params.disksize);
        lp->d_version = 1;
        lp->d_flags = 0;

        /* XXX - These values for BBSIZE and SBSIZE assume ffs. */
        lp->d_bbsize = BBSIZE;
        lp->d_sbsize = SBSIZE;

        lp->d_magic = DISKMAGIC;
        lp->d_magic2 = DISKMAGIC;
        lp->d_checksum = dkcksum(lp);

        /*
         * Call the generic disklabel extraction routine.
         */
        return readdisklabel(DISKLABELDEV(dev), sdstrategy, lp, spoofonly);
}

/*
 * Check Errors.
 */
int
sd_interpret_sense(struct scsi_xfer *xs)
{
        struct scsi_sense_data *sense = &xs->sense;
        struct scsi_link *link = xs->sc_link;
        int retval;
        u_int8_t serr = sense->error_code & SSD_ERRCODE;

        /*
         * Let the generic code handle everything except a few categories of
         * LUN not ready errors on open devices.
         */
        if ((!ISSET(link->flags, SDEV_OPEN)) ||
            (serr != SSD_ERRCODE_CURRENT && serr != SSD_ERRCODE_DEFERRED) ||
            ((sense->flags & SSD_KEY) != SKEY_NOT_READY) ||
            (sense->extra_len < 6))
                return scsi_interpret_sense(xs);

        if (ISSET(xs->flags, SCSI_IGNORE_NOT_READY))
                return 0;

        switch (ASC_ASCQ(sense)) {
        case SENSE_NOT_READY_BECOMING_READY:
                SC_DEBUG(link, SDEV_DB1, ("becoming ready.\n"));
                retval = scsi_delay(xs, 5);
                break;

        case SENSE_NOT_READY_INIT_REQUIRED:
                SC_DEBUG(link, SDEV_DB1, ("spinning up\n"));
                retval = scsi_start(link, SSS_START,
                    SCSI_IGNORE_ILLEGAL_REQUEST | SCSI_NOSLEEP);
                if (retval == 0)
                        retval = ERESTART;
                else if (retval == ENOMEM)
                        /* Can't issue the command. Fall back on a delay. */
                        retval = scsi_delay(xs, 5);
                else
                        SC_DEBUG(link, SDEV_DB1, ("spin up failed (%#x)\n",
                            retval));
                break;

        default:
                retval = scsi_interpret_sense(xs);
                break;
        }

        return retval;
}

daddr_t
sdsize(dev_t dev)
{
        struct disklabel *lp;
        struct sd_softc *sc;
        daddr_t size;
        int part, omask;

        sc = sdlookup(DISKUNIT(dev));
        if (sc == NULL)
                return -1;
        if (ISSET(sc->flags, SDF_DYING)) {
                size = -1;
                goto exit;
        }

        part = DISKPART(dev);
        omask = sc->sc_dk.dk_openmask & (1 << part);

        if (omask == 0 && sdopen(dev, 0, S_IFBLK, NULL) != 0) {
                size = -1;
                goto exit;
        }

        lp = sc->sc_dk.dk_label;
        if (ISSET(sc->flags, SDF_DYING)) {
                size = -1;
                goto exit;
        }
        if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
                size = -1;
        else if (lp->d_partitions[part].p_fstype != FS_SWAP)
                size = -1;
        else
                size = DL_SECTOBLK(lp, DL_GETPSIZE(&lp->d_partitions[part]));
        if (omask == 0 && sdclose(dev, 0, S_IFBLK, NULL) != 0)
                size = -1;

exit:
        device_unref(&sc->sc_dev);
        return size;
}
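
/*
 * sdsize() only reports sizes for FS_SWAP partitions (hence the p_fstype
 * check above): the kernel uses it when configuring swap and dump devices,
 * and the result is expressed in DEV_BSIZE blocks.
 */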

/* #define SD_DUMP_NOT_TRUSTED if you just want to watch. */
static int sddoingadump;

/*
 * Dump all of physical memory into the partition specified, starting
 * at offset 'dumplo' into the partition.
 */
int
sddump(dev_t dev, daddr_t blkno, caddr_t va, size_t size)
{
        struct sd_softc *sc;
        struct disklabel *lp;
        struct scsi_xfer *xs;
        u_int64_t nsects;	/* partition sectors */
        u_int64_t sectoff;	/* partition offset */
        u_int64_t totwrt;	/* sectors left */
        int part, rv, unit;
        u_int32_t sectorsize;
        u_int32_t nwrt;		/* sectors to write */

        /* Check if recursive dump; if so, punt. */
        if (sddoingadump)
                return EFAULT;
        if (blkno < 0)
                return EINVAL;

        /* Mark as active early. */
        sddoingadump = 1;

        unit = DISKUNIT(dev);	/* Decompose unit & partition. */
        part = DISKPART(dev);

        /* Check for acceptable drive number. */
        if (unit >= sd_cd.cd_ndevs || (sc = sd_cd.cd_devs[unit]) == NULL)
                return ENXIO;

        /*
         * XXX Can't do this check, since the media might have been
         * XXX marked `invalid' by successful unmounting of all
         * XXX filesystems.
         */
#if 0
        /* Make sure it was initialized. */
        if (!ISSET(sc->sc_link->flags, SDEV_MEDIA_LOADED))
                return ENXIO;
#endif /* 0 */

        /* Convert to disk sectors. Request must be a multiple of size. */
        lp = sc->sc_dk.dk_label;
        sectorsize = lp->d_secsize;
        if ((size % sectorsize) != 0)
                return EFAULT;
        if ((blkno % DL_BLKSPERSEC(lp)) != 0)
                return EFAULT;
        totwrt = size / sectorsize;
        blkno = DL_BLKTOSEC(lp, blkno);

        nsects = DL_GETPSIZE(&lp->d_partitions[part]);
        sectoff = DL_GETPOFFSET(&lp->d_partitions[part]);

        /* Check transfer bounds against partition size. */
        if ((blkno + totwrt) > nsects)
                return EINVAL;

        /* Offset block number to start of partition. */
        blkno += sectoff;

        while (totwrt > 0) {
                if (totwrt > UINT32_MAX)
                        nwrt = UINT32_MAX;
                else
                        nwrt = totwrt;

#ifndef SD_DUMP_NOT_TRUSTED
                xs = scsi_xs_get(sc->sc_link, SCSI_NOSLEEP);
                if (xs == NULL)
                        return ENOMEM;

                xs->timeout = 10000;
                SET(xs->flags, SCSI_DATA_OUT);
                xs->data = va;
                xs->datalen = nwrt * sectorsize;

                xs->cmdlen = sd_cmd_rw10(&xs->cmd, 0, blkno, nwrt); /* XXX */

                rv = scsi_xs_sync(xs);
                scsi_xs_put(xs);
                if (rv != 0)
                        return ENXIO;
#else /* SD_DUMP_NOT_TRUSTED */
                /* Let's just talk about this first. */
                printf("sd%d: dump addr %p, blk %lld\n", unit, va,
                    (long long)blkno);
                delay(500 * 1000);	/* 1/2 a second */
#endif /* ~SD_DUMP_NOT_TRUSTED */

                /* Update block count. */
                totwrt -= nwrt;
                blkno += nwrt;
                va += sectorsize * nwrt;
        }

        sddoingadump = 0;

        return 0;
}

/*
 * Copy up to len chars from src to dst, ignoring non-printables.
 * Must be room for len+1 chars in dst so we can write the NUL.
 * Does not assume src is NUL-terminated.
 */
void
viscpy(u_char *dst, u_char *src, int len)
{
        while (len > 0 && *src != '\0') {
                if (*src < 0x20 || *src >= 0x80) {
                        src++;
                        continue;
                }
                *dst++ = *src++;
                len--;
        }
        *dst = '\0';
}

int
sd_read_cap_10(struct sd_softc *sc, int flags)
{
        struct scsi_read_cap_data *rdcap;
        int rv;

        rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
            PR_NOWAIT : PR_WAITOK) | PR_ZERO);
        if (rdcap == NULL)
                return -1;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = -1;
                goto done;
        }

        rv = scsi_read_cap_10(sc->sc_link, rdcap, flags);
        if (rv == 0) {
                if (_4btol(rdcap->addr) == 0) {
                        rv = -1;
                        goto done;
                }
                sc->params.disksize = _4btol(rdcap->addr) + 1ll;
                sc->params.secsize = _4btol(rdcap->length);
                CLR(sc->flags, SDF_THIN);
        }

done:
        dma_free(rdcap, sizeof(*rdcap));
        return rv;
}
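
/*
 * If READ CAPACITY(10) reports a last LBA of 0xffffffff, the capacity does
 * not fit in 32 bits and disksize above computes to exactly 0x100000000;
 * sd_read_cap() below takes that value as its cue to retry with
 * READ CAPACITY(16).
 */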

int
sd_read_cap_16(struct sd_softc *sc, int flags)
{
        struct scsi_read_cap_data_16 *rdcap;
        int rv;

        rdcap = dma_alloc(sizeof(*rdcap), (ISSET(flags, SCSI_NOSLEEP) ?
            PR_NOWAIT : PR_WAITOK) | PR_ZERO);
        if (rdcap == NULL)
                return -1;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = -1;
                goto done;
        }

        rv = scsi_read_cap_16(sc->sc_link, rdcap, flags);
        if (rv == 0) {
                if (_8btol(rdcap->addr) == 0) {
                        rv = -1;
                        goto done;
                }
                sc->params.disksize = _8btol(rdcap->addr) + 1ll;
                sc->params.secsize = _4btol(rdcap->length);
                if (ISSET(_2btol(rdcap->lowest_aligned), READ_CAP_16_TPE))
                        SET(sc->flags, SDF_THIN);
                else
                        CLR(sc->flags, SDF_THIN);
        }

done:
        dma_free(rdcap, sizeof(*rdcap));
        return rv;
}

int
sd_read_cap(struct sd_softc *sc, int flags)
{
        int rv;

        CLR(flags, SCSI_IGNORE_ILLEGAL_REQUEST);

        /*
         * post-SPC2 (i.e. post-SCSI-3) devices can start with 16 byte
         * read capacity commands. Older devices start with the 10 byte
         * version and move up to the 16 byte version if the device
         * says it has more sectors than can be reported via the 10 byte
         * read capacity.
         */
        if (SID_ANSII_REV(&sc->sc_link->inqdata) > SCSI_REV_SPC2) {
                rv = sd_read_cap_16(sc, flags);
                if (rv != 0)
                        rv = sd_read_cap_10(sc, flags);
        } else {
                rv = sd_read_cap_10(sc, flags);
                if (rv == 0 && sc->params.disksize == 0x100000000ll)
                        rv = sd_read_cap_16(sc, flags);
        }

        return rv;
}

int
sd_thin_pages(struct sd_softc *sc, int flags)
{
        struct scsi_vpd_hdr *pg;
        u_int8_t *pages;
        size_t len = 0;
        int i, rv, score = 0;

        pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
            PR_NOWAIT : PR_WAITOK) | PR_ZERO);
        if (pg == NULL)
                return ENOMEM;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = ENXIO;
                goto done;
        }
        rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
            SI_PG_SUPPORTED, flags);
        if (rv != 0)
                goto done;

        len = _2btol(pg->page_length);

        dma_free(pg, sizeof(*pg));
        pg = dma_alloc(sizeof(*pg) + len, (ISSET(flags, SCSI_NOSLEEP) ?
            PR_NOWAIT : PR_WAITOK) | PR_ZERO);
        if (pg == NULL)
                return ENOMEM;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = ENXIO;
                goto done;
        }
        rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg) + len,
            SI_PG_SUPPORTED, flags);
        if (rv != 0)
                goto done;

        pages = (u_int8_t *)(pg + 1);
        if (pages[0] != SI_PG_SUPPORTED) {
                rv = EIO;
                goto done;
        }

        for (i = 1; i < len; i++) {
                switch (pages[i]) {
                case SI_PG_DISK_LIMITS:
                case SI_PG_DISK_THIN:
                        score++;
                        break;
                }
        }

        if (score < 2)
                rv = EOPNOTSUPP;

done:
        dma_free(pg, sizeof(*pg) + len);
        return rv;
}
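
/*
 * Thin provisioning requires both the Block Limits (SI_PG_DISK_LIMITS) and
 * the Logical Block Provisioning (SI_PG_DISK_THIN) VPD pages, which is why
 * sd_thin_pages() insists on a score of 2 before reporting support.
 */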

int
sd_vpd_block_limits(struct sd_softc *sc, int flags)
{
        struct scsi_vpd_disk_limits *pg;
        int rv;

        pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
            PR_NOWAIT : PR_WAITOK) | PR_ZERO);
        if (pg == NULL)
                return ENOMEM;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = ENXIO;
                goto done;
        }
        rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
            SI_PG_DISK_LIMITS, flags);
        if (rv != 0)
                goto done;

        if (_2btol(pg->hdr.page_length) == SI_PG_DISK_LIMITS_LEN_THIN) {
                sc->params.unmap_sectors = _4btol(pg->max_unmap_lba_count);
                sc->params.unmap_descs = _4btol(pg->max_unmap_desc_count);
        } else
                rv = EOPNOTSUPP;

done:
        dma_free(pg, sizeof(*pg));
        return rv;
}

int
sd_vpd_thin(struct sd_softc *sc, int flags)
{
        struct scsi_vpd_disk_thin *pg;
        int rv;

        pg = dma_alloc(sizeof(*pg), (ISSET(flags, SCSI_NOSLEEP) ?
            PR_NOWAIT : PR_WAITOK) | PR_ZERO);
        if (pg == NULL)
                return ENOMEM;

        if (ISSET(sc->flags, SDF_DYING)) {
                rv = ENXIO;
                goto done;
        }
        rv = scsi_inquire_vpd(sc->sc_link, pg, sizeof(*pg),
            SI_PG_DISK_THIN, flags);
        if (rv != 0)
                goto done;

#ifdef notyet
        if (ISSET(pg->flags, VPD_DISK_THIN_TPU))
                sc->sc_delete = sd_unmap;
        else if (ISSET(pg->flags, VPD_DISK_THIN_TPWS)) {
                sc->sc_delete = sd_write_same_16;
                sc->params.unmap_descs = 1; /* WRITE SAME 16 only does one */
        } else
                rv = EOPNOTSUPP;
#endif /* notyet */

done:
        dma_free(pg, sizeof(*pg));
        return rv;
}

int
sd_thin_params(struct sd_softc *sc, int flags)
{
        int rv;

        rv = sd_thin_pages(sc, flags);
        if (rv != 0)
                return rv;

        rv = sd_vpd_block_limits(sc, flags);
        if (rv != 0)
                return rv;

        rv = sd_vpd_thin(sc, flags);
        if (rv != 0)
                return rv;

        return 0;
}

/*
 * Fill out the disk parameter structure. Return 0 if the structure is
 * correctly filled in, otherwise return -1.
 *
 * The caller is responsible for clearing the SDEV_MEDIA_LOADED flag if the
 * structure cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, int flags)
{
        struct disk_parms dp;
        struct scsi_link *link = sc->sc_link;
        union scsi_mode_sense_buf *buf = NULL;
        struct page_rigid_geometry *rigid = NULL;
        struct page_flex_geometry *flex = NULL;
        struct page_reduced_geometry *reduced = NULL;
        u_char *page0 = NULL;
        int big, err = 0;

        if (sd_read_cap(sc, flags) != 0)
                return -1;

        if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
                /* We don't know the unmap limits, so no thin provisioning. */
                CLR(sc->flags, SDF_THIN);
        }

        /*
         * Work on a copy of the values initialized by sd_read_cap() and
         * sd_thin_params().
         */
        dp = sc->params;

        buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
        if (buf == NULL)
                goto validate;

        if (ISSET(sc->flags, SDF_DYING))
                goto die;

        /*
         * Ask for page 0 (vendor specific) mode sense data to find
         * READONLY info. The only thing USB devices will ask for.
         *
         * page0 == NULL is a valid situation.
         */
        err = scsi_do_mode_sense(link, 0, buf, (void **)&page0, 1,
            flags | SCSI_SILENT, &big);
        if (ISSET(sc->flags, SDF_DYING))
                goto die;
        if (err == 0) {
                if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
                        SET(link->flags, SDEV_READONLY);
                else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
                        SET(link->flags, SDEV_READONLY);
                else
                        CLR(link->flags, SDEV_READONLY);
        }

        /*
         * Many UMASS devices choke when asked about their geometry. Most
         * don't have a meaningful geometry anyway, so just fake it if
         * sd_read_cap() worked.
         */
        if (ISSET(link->flags, SDEV_UMASS) && dp.disksize > 0)
                goto validate;

        switch (link->inqdata.device & SID_TYPE) {
        case T_OPTICAL:
                /* No more information needed or available. */
                break;

        case T_RDIRECT:
                /* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
                err = scsi_do_mode_sense(link, PAGE_REDUCED_GEOMETRY, buf,
                    (void **)&reduced, sizeof(*reduced), flags | SCSI_SILENT,
                    &big);
                if (err == 0) {
                        scsi_parse_blkdesc(link, buf, big, NULL, NULL,
                            &dp.secsize);
                        if (reduced != NULL) {
                                if (dp.disksize == 0)
                                        dp.disksize = _5btol(reduced->sectors);
                                if (dp.secsize == 0)
                                        dp.secsize = _2btol(reduced->bytes_s);
                        }
                }
                break;

        default:
                /*
                 * NOTE: Some devices leave off the last four bytes of
                 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
                 * The only information in those four bytes is RPM information
                 * so accept the page. The extra bytes will be zero and RPM
                 * will end up with the default value of 3600.
                 */
                err = 0;
                if (!ISSET(link->flags, SDEV_ATAPI) ||
                    !ISSET(link->flags, SDEV_REMOVABLE))
                        err = scsi_do_mode_sense(link, PAGE_RIGID_GEOMETRY,
                            buf, (void **)&rigid, sizeof(*rigid) - 4,
                            flags | SCSI_SILENT, &big);
                if (err == 0) {
                        scsi_parse_blkdesc(link, buf, big, NULL, NULL,
                            &dp.secsize);
                        if (rigid != NULL) {
                                dp.heads = rigid->nheads;
                                dp.cyls = _3btol(rigid->ncyl);
                                if (dp.heads * dp.cyls > 0)
                                        dp.sectors = dp.disksize / (dp.heads *
                                            dp.cyls);
                        }
                } else {
                        if (ISSET(sc->flags, SDF_DYING))
                                goto die;
                        err = scsi_do_mode_sense(link, PAGE_FLEX_GEOMETRY,
                            buf, (void **)&flex, sizeof(*flex) - 4,
                            flags | SCSI_SILENT, &big);
                        if (err == 0) {
                                scsi_parse_blkdesc(link, buf, big, NULL, NULL,
                                    &dp.secsize);
                                if (flex != NULL) {
                                        dp.sectors = flex->ph_sec_tr;
                                        dp.heads = flex->nheads;
                                        dp.cyls = _2btol(flex->ncyl);
                                        if (dp.secsize == 0)
                                                dp.secsize =
                                                    _2btol(flex->bytes_s);
                                        if (dp.disksize == 0)
                                                dp.disksize =
                                                    (u_int64_t)dp.cyls *
                                                    dp.heads * dp.sectors;
                                }
                        }
                }
                break;
        }

validate:
        if (buf) {
                dma_free(buf, sizeof(*buf));
                buf = NULL;
        }

        if (dp.disksize == 0)
                goto die;

        /*
         * Restrict secsize values to powers of two between 512 and 64k.
         */
        switch (dp.secsize) {
        case 0:
                dp.secsize = DEV_BSIZE;
                break;
        case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
        case 0x400:
        case 0x800:
        case 0x1000:
        case 0x2000:
        case 0x4000:
        case 0x8000:
        case 0x10000:
                break;
        default:
                SC_DEBUG(sc->sc_link, SDEV_DB1,
                    ("sd_get_parms: bad secsize: %#x\n", dp.secsize));
                return -1;
        }

        /*
         * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
         * cyls is <= disk_size can lead to wasted space. We need a more
         * careful calculation/validation to make everything work out
         * optimally.
         */
        if (dp.disksize > 0xffffffff && (dp.heads * dp.sectors) < 0xffff) {
                dp.heads = 511;
                dp.sectors = 255;
                dp.cyls = 0;
        }

        /*
         * Use standard geometry values for anything we still don't
         * know.
         */
        if (dp.heads == 0)
                dp.heads = 255;
        if (dp.sectors == 0)
                dp.sectors = 63;
        if (dp.cyls == 0) {
                dp.cyls = dp.disksize / (dp.heads * dp.sectors);
                if (dp.cyls == 0) {
                        /* Put everything into one cylinder. */
                        dp.heads = dp.cyls = 1;
                        dp.sectors = dp.disksize;
                }
        }

#ifdef SCSIDEBUG
        if (dp.disksize != (u_int64_t)dp.cyls * dp.heads * dp.sectors) {
                sc_print_addr(sc->sc_link);
                printf("disksize (%llu) != cyls (%u) * heads (%u) * "
                    "sectors/track (%u) (%llu)\n", dp.disksize, dp.cyls,
                    dp.heads, dp.sectors,
                    (u_int64_t)dp.cyls * dp.heads * dp.sectors);
        }
#endif /* SCSIDEBUG */

        sc->params = dp;
        return 0;

die:
        dma_free(buf, sizeof(*buf));
        return -1;
}

int
sd_flush(struct sd_softc *sc, int flags)
{
        struct scsi_link *link;
        struct scsi_xfer *xs;
        struct scsi_synchronize_cache *cmd;
        int error;

        if (ISSET(sc->flags, SDF_DYING))
                return ENXIO;
        link = sc->sc_link;

        if (ISSET(link->quirks, SDEV_NOSYNCCACHE))
                return 0;

        /*
         * Issue a SYNCHRONIZE CACHE. Address 0, length 0 means "all remaining
         * blocks starting at address 0". Ignore ILLEGAL REQUEST in the event
         * that the command is not supported by the device.
         */

        xs = scsi_xs_get(link, flags);
        if (xs == NULL) {
                SC_DEBUG(link, SDEV_DB1, ("cache sync failed to get xs\n"));
                return EIO;
        }

        cmd = (struct scsi_synchronize_cache *)&xs->cmd;
        cmd->opcode = SYNCHRONIZE_CACHE;

        xs->cmdlen = sizeof(*cmd);
        xs->timeout = 100000;
        SET(xs->flags, SCSI_IGNORE_ILLEGAL_REQUEST);

        error = scsi_xs_sync(xs);

        scsi_xs_put(xs);

        if (error)
                SC_DEBUG(link, SDEV_DB1, ("cache sync failed\n"));
        else
                CLR(sc->flags, SDF_DIRTY);

        return error;
}