/*	$OpenBSD: atascsi.c,v 1.116 2011/08/03 00:27:20 dlg Exp $ */

/*
 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
 * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/pool.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/ata/atascsi.h>
#include <dev/ata/pmreg.h>

#include <sys/ataio.h>

struct atascsi_port;

struct atascsi {
	struct device		*as_dev;
	void			*as_cookie;

	struct atascsi_host_port **as_host_ports;

	struct atascsi_methods	*as_methods;
	struct scsi_adapter	as_switch;
	struct scsi_link	as_link;
	struct scsibus_softc	*as_scsibus;

	int			as_capability;
	int			as_ncqdepth;
};

/*
 * atascsi_host_port is a port attached to the host controller, and
 * only holds the details relevant to the host controller.
 * atascsi_port is any port, including ports on port multipliers, and
 * it holds details of the device attached to the port.
 *
 * When there is a port multiplier attached to a port, the ahp_ports
 * array in the atascsi_host_port struct contains one atascsi_port for
 * each port, and one for the control port (port 15).  The index into
 * the array is the LUN used to address the port.  For the control port,
 * the LUN is 0, and for the port multiplier ports, the LUN is the
 * port number plus one.
 *
 * When there is no port multiplier attached to a port, the ahp_ports
 * array contains a single entry for the device.  The LUN and port number
 * for this entry are both 0.
 */

struct atascsi_host_port {
	struct scsi_iopool	ahp_iopool;
	struct atascsi		*ahp_as;
	int			ahp_port;
	int			ahp_nports;

	struct atascsi_port	**ahp_ports;
};

struct atascsi_port {
	struct ata_identify	ap_identify;
	struct atascsi_host_port *ap_host_port;
	struct atascsi		*ap_as;
	int			ap_pmp_port;
	int			ap_type;
	int			ap_ncqdepth;
	int			ap_features;
#define ATA_PORT_F_NCQ		0x1
#define ATA_PORT_F_TRIM		0x2
};

void		atascsi_cmd(struct scsi_xfer *);
int		atascsi_probe(struct scsi_link *);
void		atascsi_free(struct scsi_link *);

/* template */
struct scsi_adapter atascsi_switch = {
	atascsi_cmd,		/* scsi_cmd */
	scsi_minphys,		/* scsi_minphys */
	atascsi_probe,		/* dev_probe */
	atascsi_free,		/* dev_free */
	NULL,			/* ioctl */
};

void		ata_swapcopy(void *, void *, size_t);

void		atascsi_disk_cmd(struct scsi_xfer *);
void		atascsi_disk_cmd_done(struct ata_xfer *);
void		atascsi_disk_inq(struct scsi_xfer *);
void		atascsi_disk_inquiry(struct scsi_xfer *);
void		atascsi_disk_vpd_supported(struct scsi_xfer *);
void		atascsi_disk_vpd_serial(struct scsi_xfer *);
void		atascsi_disk_vpd_ident(struct scsi_xfer *);
void		atascsi_disk_vpd_ata(struct scsi_xfer *);
void		atascsi_disk_vpd_limits(struct scsi_xfer *);
void		atascsi_disk_vpd_info(struct scsi_xfer *);
void		atascsi_disk_vpd_thin(struct scsi_xfer *);
void		atascsi_disk_write_same_16(struct scsi_xfer *);
void		atascsi_disk_write_same_16_done(struct ata_xfer *);
void		atascsi_disk_unmap(struct scsi_xfer *);
void		atascsi_disk_unmap_task(void *, void *);
void		atascsi_disk_unmap_done(struct ata_xfer *);
void		atascsi_disk_capacity(struct scsi_xfer *);
void		atascsi_disk_capacity16(struct scsi_xfer *);
void		atascsi_disk_sync(struct scsi_xfer *);
void		atascsi_disk_sync_done(struct ata_xfer *);
void		atascsi_disk_sense(struct scsi_xfer *);
void		atascsi_disk_start_stop(struct scsi_xfer *);
void		atascsi_disk_start_stop_done(struct ata_xfer *);

void		atascsi_atapi_cmd(struct scsi_xfer *);
void		atascsi_atapi_cmd_done(struct ata_xfer *);

void		atascsi_pmp_cmd(struct scsi_xfer *);
void		atascsi_pmp_cmd_done(struct ata_xfer *);
void		atascsi_pmp_sense(struct scsi_xfer *xs);
void		atascsi_pmp_inq(struct scsi_xfer *xs);

void		atascsi_passthru_12(struct scsi_xfer *);
void		atascsi_passthru_16(struct scsi_xfer *);
int		atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
void		atascsi_passthru_done(struct ata_xfer *);

void		atascsi_done(struct scsi_xfer *, int);

void		ata_exec(struct atascsi *, struct ata_xfer *);

void		ata_polled_complete(struct ata_xfer *);
int		ata_polled(struct ata_xfer *);

u_int64_t	ata_identify_blocks(struct ata_identify *);
u_int		ata_identify_blocksize(struct ata_identify *);
u_int		ata_identify_block_l2p_exp(struct ata_identify *);
u_int		ata_identify_block_logical_align(struct ata_identify *);

void		*atascsi_io_get(void *);
void		atascsi_io_put(void *, void *);
struct atascsi_port *atascsi_lookup_port(struct scsi_link *);

int		atascsi_port_identify(struct atascsi_port *,
		    struct ata_identify *);
int		atascsi_port_set_features(struct atascsi_port *, int, int);

struct atascsi *
atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
{
	struct scsibus_attach_args saa;
	struct atascsi *as;

	as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);

	as->as_dev = self;
	as->as_cookie = aaa->aaa_cookie;
	as->as_methods = aaa->aaa_methods;
	as->as_capability = aaa->aaa_capability;
	as->as_ncqdepth = aaa->aaa_ncmds;

	/* copy from template and modify for ourselves */
	as->as_switch = atascsi_switch;
	if (aaa->aaa_minphys != NULL)
		as->as_switch.scsi_minphys = aaa->aaa_minphys;

	/* fill in our scsi_link */
	as->as_link.adapter = &as->as_switch;
	as->as_link.adapter_softc = as;
	as->as_link.adapter_buswidth = aaa->aaa_nports;
	as->as_link.luns = SATA_PMP_MAX_PORTS;
	as->as_link.adapter_target = aaa->aaa_nports;
	as->as_link.openings = 1;

	as->as_host_ports = malloc(sizeof(struct atascsi_host_port *) *
	    aaa->aaa_nports, M_DEVBUF, M_WAITOK | M_ZERO);

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &as->as_link;

	/* stash the scsibus so we can do hotplug on it */
	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
	    scsiprint);

	return (as);
}

int
atascsi_detach(struct atascsi *as, int flags)
{
	int rv;

	rv = config_detach((struct device *)as->as_scsibus, flags);
	if (rv != 0)
		return (rv);

	free(as->as_host_ports, M_DEVBUF);
	free(as, M_DEVBUF);

	return (0);
}

int
atascsi_probe_dev(struct atascsi *as, int port, int lun)
{
	if (lun == 0) {
		return (scsi_probe_target(as->as_scsibus, port));
	} else {
		return (scsi_probe_lun(as->as_scsibus, port, lun));
	}
}

int
atascsi_detach_dev(struct atascsi *as, int port, int lun, int flags)
{
	if (lun == 0) {
		return (scsi_detach_target(as->as_scsibus, port, flags));
	} else {
		return (scsi_detach_lun(as->as_scsibus, port, lun, flags));
	}
}

struct atascsi_port *
atascsi_lookup_port(struct scsi_link *link)
{
	struct atascsi *as = link->adapter_softc;
	struct atascsi_host_port *ahp;

	if (link->target >= as->as_link.adapter_buswidth)
		return (NULL);

	ahp = as->as_host_ports[link->target];
	if (link->lun >= ahp->ahp_nports)
		return (NULL);

	return (ahp->ahp_ports[link->lun]);
}

int
atascsi_probe(struct scsi_link *link)
{
	struct atascsi *as = link->adapter_softc;
	struct atascsi_host_port *ahp;
	struct atascsi_port *ap;
	struct ata_xfer *xa;
	struct ata_identify *identify;
	int port, type, qdepth;
	int rv;
	u_int16_t cmdset;

	port = link->target;
	if (port >= as->as_link.adapter_buswidth)
		return (ENXIO);

	/* if this is a PMP port, check it's valid */
	if (link->lun > 0) {
		if (link->lun >= as->as_host_ports[port]->ahp_nports)
			return (ENXIO);
	}

	type = as->as_methods->probe(as->as_cookie, port, link->lun);
	switch (type) {
	case ATA_PORT_T_DISK:
		break;
	case ATA_PORT_T_ATAPI:
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
		break;
	case ATA_PORT_T_PM:
		if (link->lun != 0) {
			printf("%s.%d.%d: Port multipliers cannot be nested\n",
			    as->as_dev->dv_xname, port, link->lun);
			rv = ENODEV;
			goto unsupported;
		}
		break;
	default:
		rv = ENODEV;
		goto unsupported;
	}

	ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
	ap->ap_as = as;

	if (link->lun == 0) {
		ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
		ahp->ahp_as = as;
		ahp->ahp_port = port;

		scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
		    atascsi_io_put);

		as->as_host_ports[port] = ahp;

		if (type == ATA_PORT_T_PM) {
			ahp->ahp_nports = SATA_PMP_MAX_PORTS;
			ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
		} else {
			ahp->ahp_nports = 1;
			ap->ap_pmp_port = 0;
		}
		ahp->ahp_ports = malloc(sizeof(struct atascsi_port *) *
		    ahp->ahp_nports, M_DEVBUF, M_WAITOK | M_ZERO);
	} else {
		ahp = as->as_host_ports[port];
		ap->ap_pmp_port = link->lun - 1;
	}

	ap->ap_host_port = ahp;
	ap->ap_type = type;

	link->pool = &ahp->ahp_iopool;

	/* fetch the device info, except for port multipliers */
	if (type != ATA_PORT_T_PM) {

		/* devices attached to port multipliers tend not to be
		 * spun up at this point, and sometimes this prevents
		 * identification from working, so we retry a few times
		 * with a fairly long delay.
		 */

		identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);

		int count = (link->lun > 0) ? 6 : 2;
		while (count--) {
			rv = atascsi_port_identify(ap, identify);
			if (rv == 0) {
				ap->ap_identify = *identify;
				break;
			}
			if (count > 0)
				delay(5000000);
		}

		dma_free(identify, sizeof(*identify));

		if (rv != 0) {
			goto error;
		}
	}

	ahp->ahp_ports[link->lun] = ap;

	if (type != ATA_PORT_T_DISK)
		return (0);

	if (as->as_capability & ASAA_CAP_NCQ &&
	    ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
	    (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
		ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
		qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
		if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
			qdepth--;

		if (qdepth > 1) {
			SET(ap->ap_features, ATA_PORT_F_NCQ);

			/* Raise the number of openings */
			link->openings = qdepth;

			/*
			 * XXX for directly attached devices, throw away any
			 * xfers that have tag numbers higher than what the
			 * device supports.
			 */
			if (link->lun == 0) {
				while (qdepth--) {
					xa = scsi_io_get(&ahp->ahp_iopool,
					    SCSI_NOSLEEP);
					if (xa->tag < link->openings) {
						xa->state = ATA_S_COMPLETE;
						scsi_io_put(&ahp->ahp_iopool,
						    xa);
					}
				}
			}
		}
	}

	if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
	    ATA_ID_DATA_SET_MGMT_TRIM))
		SET(ap->ap_features, ATA_PORT_F_TRIM);

	cmdset = letoh16(ap->ap_identify.cmdset82);

	/* Enable write cache if supported */
	if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
		/* We don't care if it fails. */
		(void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
	}

	/* Enable read lookahead if supported */
	if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
		/* We don't care if it fails. */
		(void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
	}

	/*
	 * FREEZE LOCK the device so malicious users can't lock it on us.
	 * As there is no harm in issuing this to devices that don't
	 * support the security feature set we just send it, and don't bother
	 * checking if the device sends a command abort to tell us it doesn't
	 * support it.
	 */
	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->flags = ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	ata_polled(xa); /* we don't care if it doesn't work */

	return (0);
error:
	free(ap, M_DEVBUF);
unsupported:

	as->as_methods->free(as->as_cookie, port, link->lun);
	return (rv);
}

void
atascsi_free(struct scsi_link *link)
{
	struct atascsi *as = link->adapter_softc;
	struct atascsi_host_port *ahp;
	struct atascsi_port *ap;
	int port;

	port = link->target;
	if (port >= as->as_link.adapter_buswidth)
		return;

	ahp = as->as_host_ports[port];
	if (ahp == NULL)
		return;

	if (link->lun >= ahp->ahp_nports)
		return;

	ap = ahp->ahp_ports[link->lun];
	free(ap, M_DEVBUF);
	ahp->ahp_ports[link->lun] = NULL;

	as->as_methods->free(as->as_cookie, port, link->lun);

	if (link->lun == ahp->ahp_nports - 1) {
		/* we've already freed all of ahp->ahp_ports, now
		 * free ahp itself.  this relies on the order luns are
		 * detached in scsi_detach_target().
		 */
		free(ahp, M_DEVBUF);
		as->as_host_ports[port] = NULL;
	}
}

void
atascsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;

	ap = atascsi_lookup_port(link);
	if (ap == NULL) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	switch (ap->ap_type) {
	case ATA_PORT_T_DISK:
		atascsi_disk_cmd(xs);
		break;
	case ATA_PORT_T_ATAPI:
		atascsi_atapi_cmd(xs);
		break;
	case ATA_PORT_T_PM:
		atascsi_pmp_cmd(xs);
		break;

	case ATA_PORT_T_NONE:
	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		break;
	}
}

void
atascsi_disk_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	int flags = 0;
	struct ata_fis_h2d *fis;
	u_int64_t lba;
	u_int32_t sector_count;

	ap = atascsi_lookup_port(link);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
		flags = ATA_F_READ;
		break;
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		flags = ATA_F_WRITE;
		/* deal with io outside the switch */
		break;

	case WRITE_SAME_16:
		atascsi_disk_write_same_16(xs);
		return;
	case UNMAP:
		atascsi_disk_unmap(xs);
		return;

	case SYNCHRONIZE_CACHE:
		atascsi_disk_sync(xs);
		return;
	case REQUEST_SENSE:
		atascsi_disk_sense(xs);
		return;
	case INQUIRY:
		atascsi_disk_inq(xs);
		return;
	case READ_CAPACITY:
		atascsi_disk_capacity(xs);
		return;
	case READ_CAPACITY_16:
		atascsi_disk_capacity16(xs);
		return;

	case ATA_PASSTHRU_12:
		atascsi_passthru_12(xs);
		return;
	case ATA_PASSTHRU_16:
		atascsi_passthru_16(xs);
		return;

	case START_STOP:
		atascsi_disk_start_stop(xs);
		return;

	case TEST_UNIT_READY:
	case PREVENT_ALLOW:
		atascsi_done(xs, XS_NOERROR);
		return;

	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	xa->flags = flags;
	scsi_cmd_rw_decode(xs->cmd, &lba, &sector_count);
	if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	fis = xa->fis;

	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->lba_low = lba & 0xff;
	fis->lba_mid = (lba >> 8) & 0xff;
	fis->lba_high = (lba >> 16) & 0xff;

	if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
	    (xa->tag < ap->ap_ncqdepth) &&
	    !(xs->flags & SCSI_POLL)) {
		/* Use NCQ */
		xa->flags |= ATA_F_NCQ;
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = xa->tag << 3;
		fis->features = sector_count & 0xff;
		fis->features_exp = (sector_count >> 8) & 0xff;
	} else if (sector_count > 0x100 || lba > 0xfffffff) {
		/* Use LBA48 */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = sector_count & 0xff;
		fis->sector_count_exp = (sector_count >> 8) & 0xff;
	} else {
		/* Use LBA */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA : ATA_C_READDMA;
		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
		fis->sector_count = sector_count & 0xff;
	}

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_disk_cmd_done;
	xa->timeout = xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	ata_exec(as, xa);
}

void
atascsi_disk_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* fake sense? */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_disk_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;

	if (xs->cmdlen != sizeof(*inq)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ISSET(inq->flags, SI_EVPD)) {
		switch (inq->pagecode) {
		case SI_PG_SUPPORTED:
			atascsi_disk_vpd_supported(xs);
			break;
		case SI_PG_SERIAL:
			atascsi_disk_vpd_serial(xs);
			break;
		case SI_PG_DEVID:
			atascsi_disk_vpd_ident(xs);
			break;
		case SI_PG_ATA:
			atascsi_disk_vpd_ata(xs);
			break;
		case SI_PG_DISK_LIMITS:
			atascsi_disk_vpd_limits(xs);
			break;
		case SI_PG_DISK_INFO:
			atascsi_disk_vpd_info(xs);
			break;
		case SI_PG_DISK_THIN:
			atascsi_disk_vpd_thin(xs);
			break;
		default:
			atascsi_done(xs, XS_DRIVER_STUFFUP);
			break;
		}
	} else
		atascsi_disk_inquiry(xs);
}

void
atascsi_disk_inquiry(struct scsi_xfer *xs)
{
	struct scsi_inquiry_data inq;
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;

	ap = atascsi_lookup_port(link);

	bzero(&inq, sizeof(inq));

	inq.device = T_DIRECT;
	inq.version = 0x05; /* SPC-3 */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
	ata_swapcopy(ap->ap_identify.model, inq.product,
	    sizeof(inq.product));
	ata_swapcopy(ap->ap_identify.firmware, inq.revision,
	    sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_supported(struct scsi_xfer *xs)
{
	struct {
		struct scsi_vpd_hdr	hdr;
		u_int8_t		list[7];
	} pg;
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	int fat;

	ap = atascsi_lookup_port(link);
	fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;

	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_SUPPORTED;
	_lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
	pg.list[0] = SI_PG_SUPPORTED;
	pg.list[1] = SI_PG_SERIAL;
	pg.list[2] = SI_PG_DEVID;
	pg.list[3] = SI_PG_ATA;
	pg.list[4] = SI_PG_DISK_LIMITS;
	pg.list[5] = SI_PG_DISK_INFO;
	pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat.  get it? tehe. */

	bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_serial(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_serial pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_SERIAL;
	_lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
	ata_swapcopy(ap->ap_identify.serial, pg.serial,
	    sizeof(ap->ap_identify.serial));

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_ident(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct {
		struct scsi_vpd_hdr		hdr;
		struct scsi_vpd_devid_hdr	devid_hdr;
		u_int8_t			devid[68];
	} pg;
	u_int8_t *p;
	size_t pg_len;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
		pg_len = 8;

		pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;

		ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
	} else {
		pg_len = 68;

		pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;

		p = pg.devid;
		bcopy("ATA     ", p, 8);
		p += 8;
		ata_swapcopy(ap->ap_identify.model, p,
		    sizeof(ap->ap_identify.model));
		p += sizeof(ap->ap_identify.model);
		ata_swapcopy(ap->ap_identify.serial, p,
		    sizeof(ap->ap_identify.serial));
	}

	pg.devid_hdr.len = pg_len;
	pg_len += sizeof(pg.devid_hdr);

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DEVID;
	_lto2b(pg_len, pg.hdr.page_length);
	pg_len += sizeof(pg.hdr);

	bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_ata(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_ata pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));

	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_ATA;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
	memcpy(pg.sat_vendor, "OpenBSD",
	    MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
	memset(pg.sat_product, ' ', sizeof(pg.sat_product));
	memcpy(pg.sat_product, "atascsi",
	    MIN(strlen("atascsi"), sizeof(pg.sat_product)));
	memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
	memcpy(pg.sat_revision, osrelease,
	    MIN(strlen(osrelease), sizeof(pg.sat_revision)));

	/* XXX device signature */

	switch (ap->ap_type) {
	case ATA_PORT_T_DISK:
		pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
		break;
	case ATA_PORT_T_ATAPI:
		pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
		break;
	}

	memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_limits(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_disk_limits pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_LIMITS;
	_lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);

	_lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
	    pg.optimal_xfer_granularity);

	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		/*
		 * ATA only supports 65535 blocks per TRIM descriptor, so
		 * avoid having to split UNMAP descriptors and overflow the
		 * page limit by using that as a max.
		 */
		_lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
		_lto4b(512 / 8, pg.max_unmap_desc_count);
	}

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_info(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_disk_info pg;

	ap = atascsi_lookup_port(link);
	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_INFO;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	_lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
	pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_vpd_thin(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_vpd_disk_thin pg;

	ap = atascsi_lookup_port(link);
	if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&pg, sizeof(pg));
	pg.hdr.device = T_DIRECT;
	pg.hdr.page_code = SI_PG_DISK_THIN;
	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);

	pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;

	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_write_same_16(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct scsi_write_same_16 *cdb;
	struct ata_xfer *xa = xs->io;
	struct ata_fis_h2d *fis;
	u_int64_t lba;
	u_int32_t length;
	u_int64_t desc;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	cdb = (struct scsi_write_same_16 *)xs->cmd;

	if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
	    !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		/* generate sense data */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (xs->datalen < 512) {
		/* generate sense data */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	lba = _8btol(cdb->lba);
	length = _4btol(cdb->length);

	if (length > ATA_DSM_TRIM_MAX_LEN) {
		/* XXX we don't support requests over 65535 blocks */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	xa->data = xs->data;
	xa->datalen = 512;
	xa->flags = ATA_F_WRITE;
	xa->pmp_port = ap->ap_pmp_port;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;
	xa->complete = atascsi_disk_write_same_16_done;
	xa->atascsi_private = xs;
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;

	/* TRIM sends a list of blocks to discard in the databuf. */
	memset(xa->data, 0, xa->datalen);
	desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
	memcpy(xa->data, &desc, sizeof(desc));

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_DSM;
	fis->features = ATA_DSM_TRIM;
	fis->sector_count = 1;

	ata_exec(as, xa);
}

void
atascsi_disk_write_same_16_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;

	default:
		panic("atascsi_disk_write_same_16_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	scsi_done(xs);
}

void
atascsi_disk_unmap(struct scsi_xfer *xs)
{
	struct scsi_unmap *cdb;
	struct scsi_unmap_data *unmap;
	u_int len;

	if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_unmap *)xs->cmd;
	len = _2btol(cdb->list_len);
	if (xs->datalen != len || len < sizeof(*unmap)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	unmap = (struct scsi_unmap_data *)xs->data;
	if (_2btol(unmap->data_length) != len) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	len = _2btol(unmap->desc_length);
	if (len != xs->datalen - sizeof(*unmap)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (len < sizeof(struct scsi_unmap_desc)) {
		/* no work, no error according to sbc3 */
		atascsi_done(xs, XS_NOERROR);
		return;
	}

	if (len > sizeof(struct scsi_unmap_desc) * 64) {
		/* more work than we advertised */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/* let's go */
	if (!ISSET(xs->flags, SCSI_NOSLEEP))
		atascsi_disk_unmap_task(xs, NULL);
	else if (workq_add_task(NULL, 0, atascsi_disk_unmap_task,
	    xs, NULL) != 0)
		atascsi_done(xs, XS_DRIVER_STUFFUP);
}

void
atascsi_disk_unmap_task(void *xxs, void *a)
{
	struct scsi_xfer *xs = xxs;
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct ata_fis_h2d *fis;
	struct scsi_unmap_data *unmap;
	struct scsi_unmap_desc *descs, *d;
	u_int64_t *trims;
	u_int len, i;

	trims = dma_alloc(512, PR_WAITOK | PR_ZERO);

	ap = atascsi_lookup_port(link);
	unmap = (struct scsi_unmap_data *)xs->data;
	descs = (struct scsi_unmap_desc *)(unmap + 1);

	len = _2btol(unmap->desc_length) / sizeof(*d);
	for (i = 0; i < len; i++) {
		d = &descs[i];
		if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
			goto fail;

		trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
		    _4btol(d->logical_blocks)));
	}

	xa->data = trims;
	xa->datalen = 512;
	xa->flags = ATA_F_WRITE;
	xa->pmp_port = ap->ap_pmp_port;
	xa->complete = atascsi_disk_unmap_done;
	xa->atascsi_private = xs;
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_DSM;
	fis->features = ATA_DSM_TRIM;
	fis->sector_count = 1;

	ata_exec(as, xa);
	return;

fail:
	dma_free(trims, 512);
	atascsi_done(xs, XS_DRIVER_STUFFUP);
}

void
atascsi_disk_unmap_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	dma_free(xa->data, 512);

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;

	default:
		panic("atascsi_disk_unmap_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	scsi_done(xs);
}

void
atascsi_disk_sync(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;

	if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_sync_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->atascsi_private = xs;
	xa->pmp_port = ap->ap_pmp_port;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_disk_sync_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;

	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		printf("atascsi_disk_sync_done: %s\n",
		    xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
		xs->error = (xa->state == ATA_S_TIMEOUT ?
		    XS_TIMEOUT : XS_DRIVER_STUFFUP);
		break;

	default:
		panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	scsi_done(xs);
}

u_int64_t
ata_identify_blocks(struct ata_identify *id)
{
	u_int64_t blocks = 0;
	int i;

	if (letoh16(id->cmdset83) & 0x0400) {
		/* LBA48 feature set supported */
		for (i = 3; i >= 0; --i) {
			blocks <<= 16;
			blocks += letoh16(id->addrsecxt[i]);
		}
	} else {
		blocks = letoh16(id->addrsec[1]);
		blocks <<= 16;
		blocks += letoh16(id->addrsec[0]);
	}

	return (blocks - 1);
}

u_int
ata_identify_blocksize(struct ata_identify *id)
{
	u_int blocksize = 512;
	u_int16_t p2l_sect = letoh16(id->p2l_sect);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
		blocksize = letoh16(id->words_lsec[1]);
		blocksize <<= 16;
		blocksize += letoh16(id->words_lsec[0]);
		blocksize <<= 1;
	}

	return (blocksize);
}

u_int
ata_identify_block_l2p_exp(struct ata_identify *id)
{
	u_int exponent = 0;
	u_int16_t p2l_sect = letoh16(id->p2l_sect);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
		exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
	}

	return (exponent);
}

u_int
ata_identify_block_logical_align(struct ata_identify *id)
{
	u_int align = 0;
	u_int16_t p2l_sect = letoh16(id->p2l_sect);
	u_int16_t logical_align = letoh16(id->logical_align);

	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
	    (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
		align = logical_align & ATA_ID_LALIGN;

	return (align);
}

void
atascsi_disk_capacity(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_read_cap_data rcd;
	u_int64_t capacity;

	ap = atascsi_lookup_port(link);
	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&rcd, sizeof(rcd));
	capacity = ata_identify_blocks(&ap->ap_identify);
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_capacity16(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi_port *ap;
	struct scsi_read_cap_data_16 rcd;
	u_int align;
	u_int16_t lowest_aligned = 0;

	ap = atascsi_lookup_port(link);
	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&rcd, sizeof(rcd));

	_lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
	rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
	align = ata_identify_block_logical_align(&ap->ap_identify);
	if (align > 0)
		lowest_aligned = (1 << rcd.logical_per_phys) - align;

	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
		SET(lowest_aligned, READ_CAP_16_TPE);

		if (ISSET(letoh16(ap->ap_identify.add_support),
		    ATA_ID_ADD_SUPPORT_DRT))
			SET(lowest_aligned, READ_CAP_16_TPRZ);
	}
	_lto2b(lowest_aligned, rcd.lowest_aligned);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));

	atascsi_done(xs, XS_NOERROR);
}

int
atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
{
	struct ata_xfer *xa = xs->io;

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->timeout = xs->timeout;
	xa->flags = 0;
	if (xs->flags & SCSI_DATA_IN)
		xa->flags |= ATA_F_READ;
	if (xs->flags & SCSI_DATA_OUT)
		xa->flags |= ATA_F_WRITE;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
	case ATA_PASSTHRU_PROTO_NON_DATA:
	case ATA_PASSTHRU_PROTO_PIO_DATAIN:
	case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
		xa->flags |= ATA_F_PIO;
		break;
	default:
		/* we don't support this yet */
		return (1);
	}

	xa->atascsi_private = xs;
	xa->complete = atascsi_passthru_done;

	return (0);
}

void
atascsi_passthru_12(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct scsi_ata_passthru_12 *cdb;
	struct ata_fis_h2d *fis;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_ata_passthru_12 *)xs->cmd;
	/* validate cdb */

	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = cdb->command;
	fis->features = cdb->features;
	fis->lba_low = cdb->lba_low;
	fis->lba_mid = cdb->lba_mid;
	fis->lba_high = cdb->lba_high;
	fis->device = cdb->device;
	fis->sector_count = cdb->sector_count;
	xa->pmp_port = ap->ap_pmp_port;

	ata_exec(as, xa);
}

void
atascsi_passthru_16(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct scsi_ata_passthru_16 *cdb;
	struct ata_fis_h2d *fis;

	if (xs->cmdlen != sizeof(*cdb)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	cdb = (struct scsi_ata_passthru_16 *)xs->cmd;
	/* validate cdb */

	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	ap = atascsi_lookup_port(link);
	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = cdb->command;
	fis->features = cdb->features[1];
	fis->lba_low = cdb->lba_low[1];
	fis->lba_mid = cdb->lba_mid[1];
	fis->lba_high = cdb->lba_high[1];
	fis->device = cdb->device;
	fis->lba_low_exp = cdb->lba_low[0];
	fis->lba_mid_exp = cdb->lba_mid[0];
	fis->lba_high_exp = cdb->lba_high[0];
	fis->features_exp = cdb->features[0];
	fis->sector_count = cdb->sector_count[1];
	fis->sector_count_exp = cdb->sector_count[0];
	xa->pmp_port = ap->ap_pmp_port;

	ata_exec(as, xa);
}

void
atascsi_passthru_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;

	/*
	 * XXX need to generate sense if cdb wants it
	 */

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_passthru_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_disk_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;

	bzero(xs->data, xs->datalen);
	/* check datalen > sizeof(struct scsi_sense_data)? */
	sd->error_code = SSD_ERRCODE_CURRENT;
	sd->flags = SKEY_NO_SENSE;

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_disk_start_stop(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct scsi_start_stop *ss = (struct scsi_start_stop *)xs->cmd;

	if (xs->cmdlen != sizeof(*ss)) {
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	if (ss->how != SSS_STOP) {
		atascsi_done(xs, XS_NOERROR);
		return;
	}

	/*
	 * A SCSI START STOP UNIT command with the START bit set to
	 * zero gets translated into an ATA FLUSH CACHE command
	 * followed by an ATA STANDBY IMMEDIATE command.
	 */
	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_start_stop_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_disk_start_stop_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		break;

	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
		    XS_DRIVER_STUFFUP);
		xs->resid = xa->resid;
		scsi_done(xs);
		return;

	default:
		panic("atascsi_disk_start_stop_done: "
		    "unexpected ata_xfer state (%d)", xa->state);
	}

	/*
	 * The FLUSH CACHE command completed successfully; now issue
	 * the STANDBY IMMEDIATE command.
	 */
	ap = atascsi_lookup_port(link);
	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->state = ATA_S_SETUP;
	xa->complete = atascsi_disk_cmd_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = ATA_C_STANDBY_IMMED;
	xa->fis->device = 0;

	ata_exec(as, xa);
}

void
atascsi_atapi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct atascsi *as = link->adapter_softc;
	struct atascsi_port *ap;
	struct ata_xfer *xa = xs->io;
	struct ata_fis_h2d *fis;

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xa->flags = ATA_F_PACKET | ATA_F_READ;
		break;
	case SCSI_DATA_OUT:
		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
		break;
	default:
		xa->flags = ATA_F_PACKET;
	}
	xa->flags |= ATA_F_GET_RFIS;

	ap = atascsi_lookup_port(link);
	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_atapi_cmd_done;
	xa->timeout = xs->timeout;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	fis->command = ATA_C_PACKET;
	fis->device = 0;
	fis->sector_count = xa->tag << 3;
	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
	fis->lba_mid = 0x00;
	fis->lba_high = 0x20;

	/* Copy SCSI command into ATAPI packet. */
	memcpy(xa->packetcmd, xs->cmd, xs->cmdlen);

	ata_exec(as, xa);
}

void
atascsi_atapi_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer *xs = xa->atascsi_private;
	struct scsi_sense_data *sd = &xs->sense;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* Return PACKET sense data */
		sd->error_code = SSD_ERRCODE_CURRENT;
		sd->flags = (xa->rfis.error & 0xf0) >> 4;
		if (xa->rfis.error & 0x04)
			sd->flags = SKEY_ILLEGAL_REQUEST;
		if (xa->rfis.error & 0x02)
			sd->flags |= SSD_EOM;
		if (xa->rfis.error & 0x01)
			sd->flags |= SSD_ILI;
		xs->error = XS_SENSE;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_atapi_cmd_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;

	scsi_done(xs);
}

void
atascsi_pmp_cmd(struct scsi_xfer *xs)
{
	switch (xs->cmd->opcode) {
	case REQUEST_SENSE:
		atascsi_pmp_sense(xs);
		return;
	case INQUIRY:
		atascsi_pmp_inq(xs);
		return;

	case TEST_UNIT_READY:
	case PREVENT_ALLOW:
		atascsi_done(xs, XS_NOERROR);
		return;

	default:
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}
}

void
atascsi_pmp_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;

	bzero(xs->data, xs->datalen);
	sd->error_code = SSD_ERRCODE_CURRENT;
	sd->flags = SKEY_NO_SENSE;

	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_pmp_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry_data inq;
	struct scsi_inquiry *in_inq = (struct scsi_inquiry *)xs->cmd;

	if (ISSET(in_inq->flags, SI_EVPD)) {
		/* any evpd pages we need to support here? */
		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inq, sizeof(inq));
	inq.device = 0x1E; /* "well known logical unit" seems reasonable */
	inq.version = 0x05; /* SPC-3? */
	inq.response_format = 2;
	inq.additional_length = 32;
	inq.flags |= SID_CmdQue;
	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));

	/* should use the data from atascsi_pmp_identify here?
	 * not sure how useful the chip id is, but maybe it'd be
	 * nice to include the number of ports.
	 */
	bcopy("Port Multiplier", inq.product, sizeof(inq.product));
	bcopy("    ", inq.revision, sizeof(inq.revision));

	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
	atascsi_done(xs, XS_NOERROR);
}

void
atascsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

void
ata_exec(struct atascsi *as, struct ata_xfer *xa)
{
	as->as_methods->ata_cmd(xa);
}

void *
atascsi_io_get(void *cookie)
{
	struct atascsi_host_port *ahp = cookie;
	struct atascsi *as = ahp->ahp_as;
	struct ata_xfer *xa;

	xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
	if (xa != NULL)
		xa->fis->type = ATA_FIS_TYPE_H2D;

	return (xa);
}

void
atascsi_io_put(void *cookie, void *io)
{
	struct atascsi_host_port *ahp = cookie;
	struct atascsi *as = ahp->ahp_as;
	struct ata_xfer *xa = io;

	xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
	as->as_methods->ata_put_xfer(xa);
}

void
ata_polled_complete(struct ata_xfer *xa)
{
	/* do nothing */
}

int
ata_polled(struct ata_xfer *xa)
{
	int rv;

	if (!ISSET(xa->flags, ATA_F_DONE))
		panic("ata_polled: xa isn't complete");

	switch (xa->state) {
	case ATA_S_COMPLETE:
		rv = 0;
		break;
	case ATA_S_ERROR:
	case ATA_S_TIMEOUT:
		rv = EIO;
		break;
	default:
		panic("ata_polled: xa state (%d)",
		    xa->state);
	}

	scsi_io_put(xa->atascsi_private, xa);

	return (rv);
}

void
ata_complete(struct ata_xfer *xa)
{
	SET(xa->flags, ATA_F_DONE);
	xa->complete(xa);
}

void
ata_swapcopy(void *src, void *dst, size_t len)
{
	u_int16_t *s = src, *d = dst;
	int i;

	len /= 2;

	for (i = 0; i < len; i++)
		d[i] = swap16(s[i]);
}

int
atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
{
	struct atascsi *as = ap->ap_as;
	struct atascsi_host_port *ahp = ap->ap_host_port;
	struct ata_xfer *xa;

	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->pmp_port = ap->ap_pmp_port;
	xa->data = identify;
	xa->datalen = sizeof(*identify);
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
	    ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
	xa->fis->device = 0;
	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	return (ata_polled(xa));
}

int
atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
{
	struct atascsi *as = ap->ap_as;
	struct atascsi_host_port *ahp = ap->ap_host_port;
	struct ata_xfer *xa;

	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
	if (xa == NULL)
		panic("no free xfers on a new port");
	xa->fis->command = ATA_C_SET_FEATURES;
	xa->fis->features = subcommand;
	xa->fis->sector_count = arg;
	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
	xa->flags = ATA_F_POLL;
	xa->timeout = 1000;
	xa->complete = ata_polled_complete;
	xa->pmp_port = ap->ap_pmp_port;
	xa->atascsi_private = &ahp->ahp_iopool;
	ata_exec(as, xa);
	return (ata_polled(xa));
}