/*-
 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 26 * $FreeBSD: src/sys/dev/ata/ata-queue.c,v 1.67 2007/01/27 21:15:58 remko Exp $ 27 * $DragonFly: src/sys/dev/disk/nata/ata-queue.c,v 1.11 2008/09/23 17:43:41 dillon Exp $ 28 */ 29 30 #include "opt_ata.h" 31 32 #include <sys/param.h> 33 #include <sys/bus.h> 34 #include <sys/callout.h> 35 #include <sys/nata.h> 36 #include <sys/queue.h> 37 #include <sys/spinlock2.h> 38 #include <sys/buf.h> 39 #include <sys/systm.h> 40 #include <sys/taskqueue.h> 41 42 #include "ata-all.h" 43 #include "ata_if.h" 44 45 /* prototypes */ 46 static void ata_completed(void *, int); 47 static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request); 48 static void atawritereorder(struct ata_channel *ch); 49 static char *ata_skey2str(u_int8_t); 50 51 void 52 ata_queue_init(struct ata_channel *ch) 53 { 54 TAILQ_INIT(&ch->ata_queue); 55 ch->reorder = 0; 56 ch->transition = NULL; 57 } 58 59 /* 60 * Rudely drop all requests queued to the channel of specified device. 61 * XXX: The requests are leaked, use only in fatal case. 
62 */ 63 void 64 ata_drop_requests(device_t dev) 65 { 66 struct ata_channel *ch = device_get_softc(device_get_parent(dev)); 67 struct ata_request *request, *tmp; 68 69 spin_lock(&ch->queue_mtx); 70 TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) { 71 TAILQ_REMOVE(&ch->ata_queue, request, chain); 72 request->result = ENXIO; 73 } 74 spin_unlock(&ch->queue_mtx); 75 } 76 77 void 78 ata_queue_request(struct ata_request *request) 79 { 80 struct ata_channel *ch; 81 82 /* treat request as virgin (this might be an ATA_R_REQUEUE) */ 83 request->result = request->status = request->error = 0; 84 85 /* check that that the device is still valid */ 86 if (!(request->parent = device_get_parent(request->dev))) { 87 request->result = ENXIO; 88 if (request->callback) 89 (request->callback)(request); 90 return; 91 } 92 ch = device_get_softc(request->parent); 93 callout_init_mp(&request->callout); /* serialization done via state_mtx */ 94 if (!request->callback && !(request->flags & ATA_R_REQUEUE)) 95 spin_init(&request->done); 96 97 /* in ATA_STALL_QUEUE state we call HW directly */ 98 if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) { 99 spin_lock(&ch->state_mtx); 100 ch->running = request; 101 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 102 ch->running = NULL; 103 if (!request->callback) 104 spin_uninit(&request->done); 105 spin_unlock(&ch->state_mtx); 106 return; 107 } 108 /* interlock against interrupt */ 109 request->flags |= ATA_R_HWCMDQUEUED; 110 spin_unlock(&ch->state_mtx); 111 } 112 /* otherwise put request on the locked queue at the specified location */ 113 else { 114 spin_lock(&ch->queue_mtx); 115 if (request->flags & ATA_R_AT_HEAD) { 116 TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain); 117 } else if (request->flags & ATA_R_ORDERED) { 118 ata_sort_queue(ch, request); 119 } else { 120 TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain); 121 ch->transition = NULL; 122 } 123 spin_unlock(&ch->queue_mtx); 124 
ATA_DEBUG_RQ(request, "queued"); 125 ata_start(ch->dev); 126 } 127 128 /* if this is a requeued request callback/sleep we're done */ 129 if (request->flags & ATA_R_REQUEUE) 130 return; 131 132 /* if this is not a callback wait until request is completed */ 133 if (!request->callback) { 134 ATA_DEBUG_RQ(request, "wait for completion"); 135 if (!dumping) { 136 /* interlock against wakeup */ 137 spin_lock(&request->done); 138 /* check if the request was completed already */ 139 if (!(request->flags & ATA_R_COMPLETED)) 140 ssleep(request, &request->done, 0, "ATA request completion " 141 "wait", request->timeout * hz * 4); 142 spin_unlock(&request->done); 143 /* check if the request was completed while sleeping */ 144 if (!(request->flags & ATA_R_COMPLETED)) { 145 /* apparently not */ 146 device_printf(request->dev, "WARNING - %s taskqueue timeout - " 147 "completing request directly\n", 148 ata_cmd2str(request)); 149 request->flags |= ATA_R_DANGER1; 150 ata_completed(request, 0); 151 } 152 } 153 spin_uninit(&request->done); 154 } 155 } 156 157 int 158 ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature, 159 u_int64_t lba, u_int16_t count) 160 { 161 struct ata_request *request = ata_alloc_request(); 162 int error = ENOMEM; 163 164 if (request) { 165 request->dev = dev; 166 request->u.ata.command = command; 167 request->u.ata.lba = lba; 168 request->u.ata.count = count; 169 request->u.ata.feature = feature; 170 request->flags = ATA_R_CONTROL; 171 request->timeout = 1; 172 request->retries = 0; 173 ata_queue_request(request); 174 error = request->result; 175 ata_free_request(request); 176 } 177 return error; 178 } 179 180 int 181 ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data, 182 int count, int flags, int timeout) 183 { 184 struct ata_request *request = ata_alloc_request(); 185 struct ata_device *atadev = device_get_softc(dev); 186 int error = ENOMEM; 187 188 if (request) { 189 request->dev = dev; 190 if ((atadev->param.config & ATA_PROTO_MASK) == 
ATA_PROTO_ATAPI_12) 191 bcopy(ccb, request->u.atapi.ccb, 12); 192 else 193 bcopy(ccb, request->u.atapi.ccb, 16); 194 request->data = data; 195 request->bytecount = count; 196 request->transfersize = min(request->bytecount, 65534); 197 request->flags = flags | ATA_R_ATAPI; 198 request->timeout = timeout; 199 request->retries = 0; 200 ata_queue_request(request); 201 error = request->result; 202 ata_free_request(request); 203 } 204 return error; 205 } 206 207 void 208 ata_start(device_t dev) 209 { 210 struct ata_channel *ch = device_get_softc(dev); 211 struct ata_request *request; 212 struct ata_composite *cptr; 213 int dependencies = 0; 214 215 /* if we have a request on the queue try to get it running */ 216 spin_lock(&ch->queue_mtx); 217 if ((request = TAILQ_FIRST(&ch->ata_queue))) { 218 219 /* we need the locking function to get the lock for this channel */ 220 if (ATA_LOCKING(dev, ATA_LF_LOCK) == ch->unit) { 221 222 /* check for composite dependencies */ 223 if ((cptr = request->composite)) { 224 spin_lock(&cptr->lock); 225 if ((request->flags & ATA_R_WRITE) && 226 (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) { 227 dependencies = 1; 228 } 229 spin_unlock(&cptr->lock); 230 } 231 232 /* check we are in the right state and has no dependencies */ 233 spin_lock(&ch->state_mtx); 234 if (ch->state == ATA_IDLE && !dependencies) { 235 ATA_DEBUG_RQ(request, "starting"); 236 237 if (ch->transition == request) 238 ch->transition = TAILQ_NEXT(request, chain); 239 TAILQ_REMOVE(&ch->ata_queue, request, chain); 240 ch->running = request; 241 ch->state = ATA_ACTIVE; 242 243 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 244 ch->running = NULL; 245 ch->state = ATA_IDLE; 246 spin_unlock(&ch->state_mtx); 247 spin_unlock(&ch->queue_mtx); 248 ATA_LOCKING(dev, ATA_LF_UNLOCK); 249 ata_finish(request); 250 return; 251 } 252 253 /* interlock against interrupt */ 254 request->flags |= ATA_R_HWCMDQUEUED; 255 256 if (dumping) { 257 spin_unlock(&ch->state_mtx); 258 
spin_unlock(&ch->queue_mtx); 259 while (!ata_interrupt(ch)) 260 DELAY(10); 261 return; 262 } 263 } 264 spin_unlock(&ch->state_mtx); 265 } 266 } 267 spin_unlock(&ch->queue_mtx); 268 } 269 270 void 271 ata_finish(struct ata_request *request) 272 { 273 struct ata_channel *ch = device_get_softc(request->parent); 274 275 /* 276 * if in ATA_STALL_QUEUE state or request has ATA_R_DIRECT flags set 277 * we need to call ata_complete() directly here (no taskqueue involvement) 278 */ 279 if (dumping || 280 (ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT)) { 281 ATA_DEBUG_RQ(request, "finish directly"); 282 ata_completed(request, 0); 283 } 284 else { 285 /* put request on the proper taskqueue for completion */ 286 /* XXX FreeBSD has some sort of bio_taskqueue code here */ 287 TASK_INIT(&request->task, 0, ata_completed, request); 288 ATA_DEBUG_RQ(request, "finish taskqueue_swi"); 289 taskqueue_enqueue(taskqueue_swi, &request->task); 290 } 291 } 292 293 static void 294 ata_completed(void *context, int dummy) 295 { 296 struct ata_request *request = (struct ata_request *)context; 297 struct ata_channel *ch = device_get_softc(request->parent); 298 struct ata_device *atadev = device_get_softc(request->dev); 299 struct ata_composite *composite; 300 301 if (request->flags & ATA_R_DANGER2) { 302 device_printf(request->dev, 303 "WARNING - %s freeing taskqueue zombie request\n", 304 ata_cmd2str(request)); 305 request->flags &= ~(ATA_R_DANGER1 | ATA_R_DANGER2); 306 ata_free_request(request); 307 return; 308 } 309 if (request->flags & ATA_R_DANGER1) 310 request->flags |= ATA_R_DANGER2; 311 312 ATA_DEBUG_RQ(request, "completed entered"); 313 314 /* if we had a timeout, reinit channel and deal with the falldown */ 315 if (request->flags & ATA_R_TIMEOUT) { 316 /* 317 * if the channel is still present and 318 * reinit succeeds and 319 * the device doesn't get detached and 320 * there are retries left we reinject this request 321 */ 322 if (ch && !ata_reinit(ch->dev) && 
!request->result && 323 (request->retries-- > 0)) { 324 if (!(request->flags & ATA_R_QUIET)) { 325 device_printf(request->dev, 326 "TIMEOUT - %s retrying (%d retr%s left)", 327 ata_cmd2str(request), request->retries, 328 request->retries == 1 ? "y" : "ies"); 329 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 330 kprintf(" LBA=%ju", request->u.ata.lba); 331 kprintf("\n"); 332 } 333 request->flags &= ~(ATA_R_TIMEOUT | ATA_R_DEBUG); 334 request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE); 335 ATA_DEBUG_RQ(request, "completed reinject"); 336 ata_queue_request(request); 337 return; 338 } 339 340 /* ran out of good intentions so finish with error */ 341 if (!request->result) { 342 if (!(request->flags & ATA_R_QUIET)) { 343 if (request->dev) { 344 device_printf(request->dev, "FAILURE - %s timed out", 345 ata_cmd2str(request)); 346 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 347 kprintf(" LBA=%ju", request->u.ata.lba); 348 kprintf("\n"); 349 } 350 } 351 request->result = EIO; 352 } 353 } 354 else if (!(request->flags & ATA_R_ATAPI) ){ 355 /* if this is a soft ECC error warn about it */ 356 /* XXX SOS we could do WARF here */ 357 if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) { 358 device_printf(request->dev, 359 "WARNING - %s soft error (ECC corrected)", 360 ata_cmd2str(request)); 361 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 362 kprintf(" LBA=%ju", request->u.ata.lba); 363 kprintf("\n"); 364 } 365 366 /* if this is a UDMA CRC error we reinject if there are retries left */ 367 if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) { 368 if (request->retries-- > 0) { 369 device_printf(request->dev, 370 "WARNING - %s UDMA ICRC error (retrying request)", 371 ata_cmd2str(request)); 372 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 373 kprintf(" LBA=%ju", request->u.ata.lba); 374 kprintf("\n"); 375 request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE); 376 ata_queue_request(request); 377 return; 378 } 379 } 380 } 381 
382 switch (request->flags & ATA_R_ATAPI) { 383 384 /* ATA errors */ 385 default: 386 if (!request->result && request->status & ATA_S_ERROR) { 387 if (!(request->flags & ATA_R_QUIET)) { 388 device_printf(request->dev, 389 "FAILURE - %s status=%b error=%b", 390 ata_cmd2str(request), 391 request->status, "\20\10BUSY\7READY\6DMA_READY" 392 "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR", 393 request->error, "\20\10ICRC\7UNCORRECTABLE" 394 "\6MEDIA_CHANGED\5NID_NOT_FOUND" 395 "\4MEDIA_CHANGE_REQEST" 396 "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH"); 397 if ((request->flags & ATA_R_DMA) && 398 (request->dmastat & ATA_BMSTAT_ERROR)) 399 kprintf(" dma=0x%02x", request->dmastat); 400 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 401 kprintf(" LBA=%ju", request->u.ata.lba); 402 kprintf("\n"); 403 } 404 request->result = EIO; 405 } 406 break; 407 408 /* ATAPI errors */ 409 case ATA_R_ATAPI: 410 /* skip if result already set */ 411 if (request->result) 412 break; 413 414 /* if we have a sensekey -> request sense from device */ 415 if ((request->error & ATA_E_ATAPI_SENSE_MASK) && 416 (request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE)) { 417 static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0, 418 sizeof(struct atapi_sense), 419 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 420 421 request->u.atapi.saved_cmd = request->u.atapi.ccb[0]; 422 bcopy(ccb, request->u.atapi.ccb, 16); 423 request->data = (caddr_t)&request->u.atapi.sense; 424 request->bytecount = sizeof(struct atapi_sense); 425 request->donecount = 0; 426 request->transfersize = sizeof(struct atapi_sense); 427 request->timeout = ATA_DEFAULT_TIMEOUT; 428 request->flags &= (ATA_R_ATAPI | ATA_R_QUIET); 429 request->flags |= (ATA_R_READ | ATA_R_AT_HEAD | ATA_R_REQUEUE); 430 ATA_DEBUG_RQ(request, "autoissue request sense"); 431 ata_queue_request(request); 432 return; 433 } 434 435 switch (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK) { 436 case ATA_SENSE_RECOVERED_ERROR: 437 device_printf(request->dev, "WARNING - %s recovered error\n", 
438 ata_cmd2str(request)); 439 /* FALLTHROUGH */ 440 441 case ATA_SENSE_NO_SENSE: 442 request->result = 0; 443 break; 444 445 case ATA_SENSE_NOT_READY: 446 request->result = EBUSY; 447 break; 448 449 case ATA_SENSE_UNIT_ATTENTION: 450 atadev->flags |= ATA_D_MEDIA_CHANGED; 451 request->result = EIO; 452 break; 453 454 default: 455 request->result = EIO; 456 if (request->flags & ATA_R_QUIET) 457 break; 458 459 device_printf(request->dev, 460 "FAILURE - %s %s asc=0x%02x ascq=0x%02x ", 461 ata_cmd2str(request), ata_skey2str( 462 (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK)), 463 request->u.atapi.sense.asc, 464 request->u.atapi.sense.ascq); 465 if (request->u.atapi.sense.specific & ATA_SENSE_SPEC_VALID) 466 kprintf("sks=0x%02x 0x%02x 0x%02x\n", 467 request->u.atapi.sense.specific & ATA_SENSE_SPEC_MASK, 468 request->u.atapi.sense.specific1, 469 request->u.atapi.sense.specific2); 470 else 471 kprintf("\n"); 472 } 473 474 if ((request->u.atapi.sense.key & ATA_SENSE_KEY_MASK ? 475 request->u.atapi.sense.key & ATA_SENSE_KEY_MASK : 476 request->error)) 477 request->result = EIO; 478 } 479 480 ATA_DEBUG_RQ(request, "completed callback/wakeup"); 481 482 /* if we are part of a composite operation we need to maintain progress */ 483 if ((composite = request->composite)) { 484 int index = 0; 485 486 spin_lock(&composite->lock); 487 488 /* update whats done */ 489 if (request->flags & ATA_R_READ) 490 composite->rd_done |= (1 << request->this); 491 if (request->flags & ATA_R_WRITE) 492 composite->wr_done |= (1 << request->this); 493 494 /* find ready to go dependencies */ 495 if (composite->wr_depend && 496 (composite->rd_done & composite->wr_depend)==composite->wr_depend && 497 (composite->wr_needed & (~composite->wr_done))) { 498 index = composite->wr_needed & ~composite->wr_done; 499 } 500 501 spin_unlock(&composite->lock); 502 503 /* if we have any ready candidates kick them off */ 504 if (index) { 505 int bit; 506 507 for (bit = 0; bit < MAX_COMPOSITES; bit++) { 508 if 
(index & (1 << bit)) 509 ata_start(device_get_parent(composite->request[bit]->dev)); 510 } 511 } 512 } 513 514 /* get results back to the initiator for this request */ 515 if (request->callback) 516 (request->callback)(request); 517 else { 518 spin_lock(&request->done); 519 request->flags |= ATA_R_COMPLETED; 520 spin_unlock(&request->done); 521 wakeup_one(request); 522 } 523 524 /* only call ata_start if channel is present */ 525 if (ch) 526 ata_start(ch->dev); 527 } 528 529 void 530 ata_timeout(struct ata_request *request) 531 { 532 struct ata_channel *ch = device_get_softc(request->parent); 533 534 /* acquire state_mtx, softclock_handler() doesn't do this for us */ 535 spin_lock(&ch->state_mtx); 536 537 /*request->flags |= ATA_R_DEBUG;*/ 538 ATA_DEBUG_RQ(request, "timeout"); 539 540 /* 541 * if we have an ATA_ACTIVE request running, we flag the request 542 * ATA_R_TIMEOUT so ata_finish will handle it correctly 543 * also NULL out the running request so we wont loose 544 * the race with an eventual interrupt arriving late 545 */ 546 if (ch->state == ATA_ACTIVE) { 547 request->flags |= ATA_R_TIMEOUT; 548 spin_unlock(&ch->state_mtx); 549 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK); 550 ata_finish(request); 551 } 552 else { 553 spin_unlock(&ch->state_mtx); 554 } 555 } 556 557 void 558 ata_fail_requests(device_t dev) 559 { 560 struct ata_channel *ch = device_get_softc(device_get_parent(dev)); 561 struct ata_request *request, *tmp; 562 TAILQ_HEAD(, ata_request) fail_requests; 563 TAILQ_INIT(&fail_requests); 564 565 /* grap all channel locks to avoid races */ 566 spin_lock(&ch->queue_mtx); 567 spin_lock(&ch->state_mtx); 568 569 /* do we have any running request to care about ? 
*/ 570 if ((request = ch->running) && (!dev || request->dev == dev)) { 571 callout_stop(&request->callout); 572 ch->running = NULL; 573 request->result = ENXIO; 574 TAILQ_INSERT_TAIL(&fail_requests, request, chain); 575 } 576 577 /* fail all requests queued on this channel for device dev if !NULL */ 578 TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) { 579 if (!dev || request->dev == dev) { 580 if (ch->transition == request) 581 ch->transition = TAILQ_NEXT(request, chain); 582 TAILQ_REMOVE(&ch->ata_queue, request, chain); 583 request->result = ENXIO; 584 TAILQ_INSERT_TAIL(&fail_requests, request, chain); 585 } 586 } 587 588 spin_unlock(&ch->state_mtx); 589 spin_unlock(&ch->queue_mtx); 590 591 /* finish up all requests collected above */ 592 TAILQ_FOREACH_MUTABLE(request, &fail_requests, chain, tmp) { 593 TAILQ_REMOVE(&fail_requests, request, chain); 594 ata_finish(request); 595 } 596 } 597 598 static u_int64_t 599 ata_get_lba(struct ata_request *request) 600 { 601 if (request->flags & ATA_R_ATAPI) { 602 switch (request->u.atapi.ccb[0]) { 603 case ATAPI_READ_BIG: 604 case ATAPI_WRITE_BIG: 605 case ATAPI_READ_CD: 606 return (request->u.atapi.ccb[5]) | (request->u.atapi.ccb[4]<<8) | 607 (request->u.atapi.ccb[3]<<16)|(request->u.atapi.ccb[2]<<24); 608 case ATAPI_READ: 609 case ATAPI_WRITE: 610 return (request->u.atapi.ccb[4]) | (request->u.atapi.ccb[3]<<8) | 611 (request->u.atapi.ccb[2]<<16); 612 default: 613 return 0; 614 } 615 } 616 else 617 return request->u.ata.lba; 618 } 619 620 /* 621 * This implements exactly bioqdisksort() in the DragonFly kernel. 622 * The short description is: Because megabytes and megabytes worth of 623 * writes can be queued there needs to be a read-prioritization mechanism 624 * or reads get completely starved out. 
625 */ 626 static void 627 ata_sort_queue(struct ata_channel *ch, struct ata_request *request) 628 { 629 if ((request->flags & ATA_R_WRITE) == 0) { 630 if (ch->transition) { 631 /* 632 * Insert before the first write 633 */ 634 TAILQ_INSERT_BEFORE(ch->transition, request, chain); 635 if (++ch->reorder >= bioq_reorder_minor_interval) { 636 ch->reorder = 0; 637 atawritereorder(ch); 638 } 639 } else { 640 /* 641 * No writes queued (or ordering was forced), 642 * insert at tail. 643 */ 644 TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain); 645 } 646 } else { 647 /* 648 * Writes are always appended. If no writes were previously 649 * queued or an ordered tail insertion occured the transition 650 * field will be NULL. 651 */ 652 TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain); 653 if (ch->transition == NULL) 654 ch->transition = request; 655 } 656 if (request->composite) { 657 ch->transition = NULL; 658 ch->reorder = 0; 659 } 660 } 661 662 /* 663 * Move the transition point to prevent reads from completely 664 * starving our writes. This brings a number of writes into 665 * the fold every N reads. 666 */ 667 static void 668 atawritereorder(struct ata_channel *ch) 669 { 670 struct ata_request *req; 671 u_int64_t next_offset; 672 size_t left = (size_t)bioq_reorder_minor_bytes; 673 size_t n; 674 675 next_offset = ata_get_lba(ch->transition); 676 while ((req = ch->transition) != NULL && 677 next_offset == ata_get_lba(req)) { 678 n = req->u.ata.count; 679 next_offset = ata_get_lba(req); 680 ch->transition = TAILQ_NEXT(req, chain); 681 if (left < n) 682 break; 683 left -= n; 684 } 685 } 686 687 char * 688 ata_cmd2str(struct ata_request *request) 689 { 690 static char buffer[20]; 691 692 if (request->flags & ATA_R_ATAPI) { 693 switch (request->u.atapi.sense.key ? 
694 request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) { 695 case 0x00: return ("TEST_UNIT_READY"); 696 case 0x01: return ("REZERO"); 697 case 0x03: return ("REQUEST_SENSE"); 698 case 0x04: return ("FORMAT"); 699 case 0x08: return ("READ"); 700 case 0x0a: return ("WRITE"); 701 case 0x10: return ("WEOF"); 702 case 0x11: return ("SPACE"); 703 case 0x12: return ("INQUIRY"); 704 case 0x15: return ("MODE_SELECT"); 705 case 0x19: return ("ERASE"); 706 case 0x1a: return ("MODE_SENSE"); 707 case 0x1b: return ("START_STOP"); 708 case 0x1e: return ("PREVENT_ALLOW"); 709 case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES"); 710 case 0x25: return ("READ_CAPACITY"); 711 case 0x28: return ("READ_BIG"); 712 case 0x2a: return ("WRITE_BIG"); 713 case 0x2b: return ("LOCATE"); 714 case 0x34: return ("READ_POSITION"); 715 case 0x35: return ("SYNCHRONIZE_CACHE"); 716 case 0x3b: return ("WRITE_BUFFER"); 717 case 0x3c: return ("READ_BUFFER"); 718 case 0x42: return ("READ_SUBCHANNEL"); 719 case 0x43: return ("READ_TOC"); 720 case 0x45: return ("PLAY_10"); 721 case 0x47: return ("PLAY_MSF"); 722 case 0x48: return ("PLAY_TRACK"); 723 case 0x4b: return ("PAUSE"); 724 case 0x51: return ("READ_DISK_INFO"); 725 case 0x52: return ("READ_TRACK_INFO"); 726 case 0x53: return ("RESERVE_TRACK"); 727 case 0x54: return ("SEND_OPC_INFO"); 728 case 0x55: return ("MODE_SELECT_BIG"); 729 case 0x58: return ("REPAIR_TRACK"); 730 case 0x59: return ("READ_MASTER_CUE"); 731 case 0x5a: return ("MODE_SENSE_BIG"); 732 case 0x5b: return ("CLOSE_TRACK/SESSION"); 733 case 0x5c: return ("READ_BUFFER_CAPACITY"); 734 case 0x5d: return ("SEND_CUE_SHEET"); 735 case 0x96: return ("READ_CAPACITY_16"); 736 case 0xa1: return ("BLANK_CMD"); 737 case 0xa3: return ("SEND_KEY"); 738 case 0xa4: return ("REPORT_KEY"); 739 case 0xa5: return ("PLAY_12"); 740 case 0xa6: return ("LOAD_UNLOAD"); 741 case 0xad: return ("READ_DVD_STRUCTURE"); 742 case 0xb4: return ("PLAY_CD"); 743 case 0xbb: return ("SET_SPEED"); 744 case 0xbd: return 
("MECH_STATUS"); 745 case 0xbe: return ("READ_CD"); 746 case 0xff: return ("POLL_DSC"); 747 } 748 } 749 else { 750 switch (request->u.ata.command) { 751 case 0x00: return ("NOP"); 752 case 0x08: return ("DEVICE_RESET"); 753 case 0x20: return ("READ"); 754 case 0x24: return ("READ48"); 755 case 0x25: return ("READ_DMA48"); 756 case 0x26: return ("READ_DMA_QUEUED48"); 757 case 0x29: return ("READ_MUL48"); 758 case 0x30: return ("WRITE"); 759 case 0x34: return ("WRITE48"); 760 case 0x35: return ("WRITE_DMA48"); 761 case 0x36: return ("WRITE_DMA_QUEUED48"); 762 case 0x39: return ("WRITE_MUL48"); 763 case 0x70: return ("SEEK"); 764 case 0xa0: return ("PACKET_CMD"); 765 case 0xa1: return ("ATAPI_IDENTIFY"); 766 case 0xa2: return ("SERVICE"); 767 case 0xb0: return ("SMART"); 768 case 0xc0: return ("CFA ERASE"); 769 case 0xc4: return ("READ_MUL"); 770 case 0xc5: return ("WRITE_MUL"); 771 case 0xc6: return ("SET_MULTI"); 772 case 0xc7: return ("READ_DMA_QUEUED"); 773 case 0xc8: return ("READ_DMA"); 774 case 0xca: return ("WRITE_DMA"); 775 case 0xcc: return ("WRITE_DMA_QUEUED"); 776 case 0xe6: return ("SLEEP"); 777 case 0xe7: return ("FLUSHCACHE"); 778 case 0xea: return ("FLUSHCACHE48"); 779 case 0xec: return ("ATA_IDENTIFY"); 780 case 0xef: 781 switch (request->u.ata.feature) { 782 case 0x03: return ("SETFEATURES SET TRANSFER MODE"); 783 case 0x02: return ("SETFEATURES ENABLE WCACHE"); 784 case 0x82: return ("SETFEATURES DISABLE WCACHE"); 785 case 0xaa: return ("SETFEATURES ENABLE RCACHE"); 786 case 0x55: return ("SETFEATURES DISABLE RCACHE"); 787 } 788 ksprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature); 789 return buffer; 790 } 791 } 792 ksprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command); 793 return buffer; 794 } 795 796 static char * 797 ata_skey2str(u_int8_t skey) 798 { 799 switch (skey) { 800 case 0x00: return ("NO SENSE"); 801 case 0x01: return ("RECOVERED ERROR"); 802 case 0x02: return ("NOT READY"); 803 case 0x03: return ("MEDIUM ERROR"); 
804 case 0x04: return ("HARDWARE ERROR"); 805 case 0x05: return ("ILLEGAL REQUEST"); 806 case 0x06: return ("UNIT ATTENTION"); 807 case 0x07: return ("DATA PROTECT"); 808 case 0x08: return ("BLANK CHECK"); 809 case 0x09: return ("VENDOR SPECIFIC"); 810 case 0x0a: return ("COPY ABORTED"); 811 case 0x0b: return ("ABORTED COMMAND"); 812 case 0x0c: return ("EQUAL"); 813 case 0x0d: return ("VOLUME OVERFLOW"); 814 case 0x0e: return ("MISCOMPARE"); 815 case 0x0f: return ("RESERVED"); 816 default: return("UNKNOWN"); 817 } 818 } 819