/*-
 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ata/ata-queue.c,v 1.67 2007/01/27 21:15:58 remko Exp $
 */

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/nata.h>
#include <sys/queue.h>
#include <sys/spinlock2.h>
#include <sys/buf.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include "ata-all.h"
#include "ata_if.h"

/* prototypes */
static void ata_completed(void *, int);
static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request);
static void atawritereorder(struct ata_channel *ch);
static char *ata_skey2str(u_int8_t);

void
ata_queue_init(struct ata_channel *ch)
{
    TAILQ_INIT(&ch->ata_queue);
    ch->reorder = 0;
    ch->transition = NULL;
}
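
/*
 * Note: ch->transition marks the first queued write and is maintained by
 * ata_sort_queue()/atawritereorder() below to keep reads from starving
 * writes; ch->reorder counts reads inserted since the last reorder pass.
 */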

/*
 * Rudely drop all requests queued to the channel of the specified device.
 * XXX: The requests are leaked, use only in fatal cases.
 */
void
ata_drop_requests(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_request *request, *tmp;

    spin_lock(&ch->queue_mtx);
    TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
	TAILQ_REMOVE(&ch->ata_queue, request, chain);
	request->result = ENXIO;
    }
    spin_unlock(&ch->queue_mtx);
}

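/*
 * Queue (or directly start) a request.  Control requests bypass the queue
 * while the channel is stalled (ATA_STALL_QUEUE); everything else is
 * inserted according to its ATA_R_AT_HEAD/ATA_R_ORDERED flags.  Callers
 * without a completion callback sleep here until the request finishes,
 * with a 4x safety margin on the request timeout before giving up and
 * completing the request directly.
 */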
void
ata_queue_request(struct ata_request *request)
{
    struct ata_channel *ch;

    /* treat request as virgin (this might be an ATA_R_REQUEUE) */
    request->result = request->status = request->error = 0;

    /* check that the device is still valid */
    if (!(request->parent = device_get_parent(request->dev))) {
	request->result = ENXIO;
	if (request->callback)
	    (request->callback)(request);
	return;
    }
    ch = device_get_softc(request->parent);
    callout_init_mp(&request->callout);	/* serialization done via state_mtx */
    if (!request->callback && !(request->flags & ATA_R_REQUEUE))
	spin_init(&request->done, "ataqueuerqdone");

    /* in ATA_STALL_QUEUE state we call HW directly */
    if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
	spin_lock(&ch->state_mtx);
	ch->running = request;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
	    ch->running = NULL;
	    if (!request->callback)
		spin_uninit(&request->done);
	    spin_unlock(&ch->state_mtx);
	    return;
	}
	/* interlock against interrupt */
	request->flags |= ATA_R_HWCMDQUEUED;
	spin_unlock(&ch->state_mtx);
    }
    /* otherwise put request on the locked queue at the specified location */
    else {
	spin_lock(&ch->queue_mtx);
	if (request->flags & ATA_R_AT_HEAD) {
	    TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain);
	} else if (request->flags & ATA_R_ORDERED) {
	    ata_sort_queue(ch, request);
	} else {
	    TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
	    ch->transition = NULL;
	}
	spin_unlock(&ch->queue_mtx);
	ATA_DEBUG_RQ(request, "queued");
	ata_start(ch->dev);
    }

    /* if this is a requeued request the original callback/sleep applies,
       so we're done */
    if (request->flags & ATA_R_REQUEUE)
	return;

    /* if this is not a callback, wait until the request is completed */
    if (!request->callback) {
	ATA_DEBUG_RQ(request, "wait for completion");
	if (!dumping) {
	    /* interlock against wakeup */
	    spin_lock(&request->done);
	    /* check if the request was completed already */
	    if (!(request->flags & ATA_R_COMPLETED))
		ssleep(request, &request->done, 0, "ATA request completion "
		       "wait", request->timeout * hz * 4);
	    spin_unlock(&request->done);
	    /* check if the request was completed while sleeping */
	    if (!(request->flags & ATA_R_COMPLETED)) {
		/* apparently not */
		device_printf(request->dev, "WARNING - %s taskqueue timeout - "
			      "completing request directly\n",
			      ata_cmd2str(request));
		request->flags |= ATA_R_DANGER1;
		ata_completed(request, 0);
	    }
	}
	spin_uninit(&request->done);
    }
}

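/*
 * Illustrative example (not part of the driver): issue a synchronous
 * SETFEATURES to enable the drive's write cache.  ATA_SETFEATURES and
 * ATA_SF_ENAB_WCACHE are assumed to be the usual 0xef/0x02 opcode
 * definitions from sys/nata.h:
 *
 *	int error = ata_controlcmd(dev, ATA_SETFEATURES,
 *				   ATA_SF_ENAB_WCACHE, 0, 0);
 *	if (error)
 *		device_printf(dev, "enabling WCACHE failed: %d\n", error);
 */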
int
ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature,
	       u_int64_t lba, u_int16_t count)
{
    struct ata_request *request = ata_alloc_request();
    int error = ENOMEM;

    if (request) {
	request->dev = dev;
	request->u.ata.command = command;
	request->u.ata.lba = lba;
	request->u.ata.count = count;
	request->u.ata.feature = feature;
	request->flags = ATA_R_CONTROL;
	request->timeout = ATA_DEFAULT_TIMEOUT;
	request->retries = 0;
	ata_queue_request(request);
	error = request->result;
	ata_free_request(request);
    }
    return error;
}

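/*
 * Illustrative example (not part of the driver): a zero-filled CCB is an
 * ATAPI TEST_UNIT_READY (opcode 0x00), so a simple media poll could be:
 *
 *	u_int8_t ccb[16] = { 0 };
 *	int error = ata_atapicmd(dev, ccb, NULL, 0, 0, ATA_DEFAULT_TIMEOUT);
 *
 * A nonzero result typically means not ready / no media; see the sense
 * key handling in ata_completed() below.
 */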
int
ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data,
	     int count, int flags, int timeout)
{
    struct ata_request *request = ata_alloc_request();
    struct ata_device *atadev = device_get_softc(dev);
    int error = ENOMEM;

    if (request) {
	request->dev = dev;
	if ((atadev->param.config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12)
	    bcopy(ccb, request->u.atapi.ccb, 12);
	else
	    bcopy(ccb, request->u.atapi.ccb, 16);
	request->data = data;
	request->bytecount = count;
	request->transfersize = min(request->bytecount, 65534);
	request->flags = flags | ATA_R_ATAPI;
	request->timeout = timeout;
	request->retries = 0;
	ata_queue_request(request);
	error = request->result;
	ata_free_request(request);
    }
    return error;
}

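/*
 * Dispatch the next runnable request on the channel.  The lock order here
 * is queue_mtx -> state_mtx (ata_fail_requests() below takes them in the
 * same order).  A write that is part of a composite with unmet read
 * dependencies is left on the queue for a later pass.
 */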
void
ata_start(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
    struct ata_composite *cptr;
    int dependencies = 0;

    /* if we have a request on the queue try to get it running */
    spin_lock(&ch->queue_mtx);
    if ((request = TAILQ_FIRST(&ch->ata_queue))) {

	/* we need the locking function to get the lock for this channel */
	if (ATA_LOCKING(dev, ATA_LF_LOCK) == ch->unit) {

	    /* check for composite dependencies */
	    if ((cptr = request->composite)) {
		spin_lock(&cptr->lock);
		if ((request->flags & ATA_R_WRITE) &&
		    (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) {
		    dependencies = 1;
		}
		spin_unlock(&cptr->lock);
	    }

	    /* check that we are in the right state and have no dependencies */
	    spin_lock(&ch->state_mtx);
	    if (ch->state == ATA_IDLE && !dependencies) {
		ATA_DEBUG_RQ(request, "starting");

		if (ch->transition == request)
		    ch->transition = TAILQ_NEXT(request, chain);
		TAILQ_REMOVE(&ch->ata_queue, request, chain);
		ch->running = request;
		ch->state = ATA_ACTIVE;

		if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		    ch->running = NULL;
		    ch->state = ATA_IDLE;
		    spin_unlock(&ch->state_mtx);
		    spin_unlock(&ch->queue_mtx);
		    ATA_LOCKING(dev, ATA_LF_UNLOCK);
		    ata_finish(request);
		    return;
		}

		/* interlock against interrupt */
		request->flags |= ATA_R_HWCMDQUEUED;

		if (dumping) {
		    spin_unlock(&ch->state_mtx);
		    spin_unlock(&ch->queue_mtx);
		    while (!ata_interrupt(ch))
			DELAY(10);
		    return;
		}
	    }
	    spin_unlock(&ch->state_mtx);
	}
    }
    spin_unlock(&ch->queue_mtx);
}

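/*
 * Hand a completed (or failed) request to ata_completed().  While dumping,
 * while the queue is stalled, or for ATA_R_DIRECT requests this happens
 * synchronously; otherwise completion is deferred to taskqueue_swi_mp.
 */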
void
ata_finish(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /*
     * if we are dumping, in ATA_STALL_QUEUE state or the request has the
     * ATA_R_DIRECT flag set we need to call ata_completed() directly here
     * (no taskqueue involvement)
     */
    if (dumping ||
	(ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT)) {
	ATA_DEBUG_RQ(request, "finish directly");
	ata_completed(request, 0);
    }
    else {
	/* put request on the proper taskqueue for completion */
	/* XXX FreeBSD has some sort of bio_taskqueue code here */
	TASK_INIT(&request->task, 0, ata_completed, request);
	ATA_DEBUG_RQ(request, "finish taskqueue_swi_mp");
	taskqueue_enqueue(taskqueue_swi_mp, &request->task);
    }
}

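/*
 * Final completion handling, normally invoked from taskqueue_swi_mp (see
 * ata_finish() above).  The ATA_R_DANGER1/DANGER2 flags implement the
 * zombie handshake with the taskqueue-timeout path in ata_queue_request():
 * if a timeout-forced direct completion races the taskqueue, the second
 * invocation only frees the request.
 *
 * Composite progress is tracked with per-slot bitmasks.  For example
 * (illustrative), with wr_depend = 0x03 a dependent write is kicked off
 * only once rd_done covers bits 0 and 1, i.e. both prerequisite reads
 * have finished.
 */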
static void
ata_completed(void *context, int dummy)
{
    struct ata_request *request = (struct ata_request *)context;
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_device *atadev = device_get_softc(request->dev);
    struct ata_composite *composite;

    if (request->flags & ATA_R_DANGER2) {
	device_printf(request->dev,
		      "WARNING - %s freeing taskqueue zombie request\n",
		      ata_cmd2str(request));
	request->flags &= ~(ATA_R_DANGER1 | ATA_R_DANGER2);
	ata_free_request(request);
	return;
    }
    if (request->flags & ATA_R_DANGER1)
	request->flags |= ATA_R_DANGER2;

    ATA_DEBUG_RQ(request, "completed entered");

    /* if we had a timeout, reinit channel and deal with the fallout */
    if (request->flags & ATA_R_TIMEOUT) {
	/*
	 * if the channel is still present and
	 * reinit succeeds and
	 * the device doesn't get detached and
	 * there are retries left we reinject this request
	 */
	if (ch && !ata_reinit(ch->dev) && !request->result &&
	    (request->retries-- > 0)) {
	    if (!(request->flags & ATA_R_QUIET)) {
		device_printf(request->dev,
			      "TIMEOUT - %s retrying (%d retr%s left)",
			      ata_cmd2str(request), request->retries,
			      request->retries == 1 ? "y" : "ies");
		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		    kprintf(" LBA=%ju", request->u.ata.lba);
		kprintf("\n");
	    }
	    request->flags &= ~(ATA_R_TIMEOUT | ATA_R_DEBUG);
	    request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
	    ATA_DEBUG_RQ(request, "completed reinject");
	    ata_queue_request(request);
	    return;
	}

	/* ran out of good intentions so finish with error */
	if (!request->result) {
	    if (!(request->flags & ATA_R_QUIET)) {
		if (request->dev) {
		    device_printf(request->dev, "FAILURE - %s timed out",
				  ata_cmd2str(request));
		    if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
			kprintf(" LBA=%ju", request->u.ata.lba);
		    kprintf("\n");
		}
	    }
	    request->result = EIO;
	}
    }
    else if (!(request->flags & ATA_R_ATAPI)) {
	/* if this is a soft ECC error warn about it */
	/* XXX SOS we could do WARF here */
	if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) {
	    device_printf(request->dev,
			  "WARNING - %s soft error (ECC corrected)",
			  ata_cmd2str(request));
	    if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		kprintf(" LBA=%ju", request->u.ata.lba);
	    kprintf("\n");
	}

	/* if this is a UDMA CRC error we reinject if there are retries left */
	if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) {
	    if (request->retries-- > 0) {
		device_printf(request->dev,
			      "WARNING - %s UDMA ICRC error (retrying request)",
			      ata_cmd2str(request));
		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		    kprintf(" LBA=%ju", request->u.ata.lba);
		kprintf("\n");
		request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
		ata_queue_request(request);
		return;
	    }
	}
    }

    switch (request->flags & ATA_R_ATAPI) {

    /* ATA errors */
    default:
	if (!request->result && request->status & ATA_S_ERROR) {
	    if (!(request->flags & ATA_R_QUIET)) {
		device_printf(request->dev,
			      "FAILURE - %s status=%b error=%b",
			      ata_cmd2str(request),
			      request->status, "\20\10BUSY\7READY\6DMA_READY"
			      "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR",
			      request->error, "\20\10ICRC\7UNCORRECTABLE"
			      "\6MEDIA_CHANGED\5NID_NOT_FOUND"
			      "\4MEDIA_CHANGE_REQUEST"
			      "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH");
		if ((request->flags & ATA_R_DMA) &&
		    (request->dmastat & ATA_BMSTAT_ERROR))
		    kprintf(" dma=0x%02x", request->dmastat);
		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		    kprintf(" LBA=%ju", request->u.ata.lba);
		kprintf("\n");
	    }
	    request->result = EIO;
	}
	break;

    /* ATAPI errors */
    case ATA_R_ATAPI:
	/* skip if result already set */
	if (request->result)
	    break;

	/* if we have a sense key, auto-issue a REQUEST SENSE to the device */
	if ((request->error & ATA_E_ATAPI_SENSE_MASK) &&
	    (request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE)) {
	    static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0,
					sizeof(struct atapi_sense),
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	    request->u.atapi.saved_cmd = request->u.atapi.ccb[0];
	    bcopy(ccb, request->u.atapi.ccb, 16);
	    request->data = (caddr_t)&request->u.atapi.sense;
	    request->bytecount = sizeof(struct atapi_sense);
	    request->donecount = 0;
	    request->transfersize = sizeof(struct atapi_sense);
	    request->timeout = ATA_DEFAULT_TIMEOUT;
	    request->flags &= (ATA_R_ATAPI | ATA_R_QUIET);
	    request->flags |= (ATA_R_READ | ATA_R_AT_HEAD | ATA_R_REQUEUE);
	    ATA_DEBUG_RQ(request, "autoissue request sense");
	    ata_queue_request(request);
	    return;
	}

	switch (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK) {
	case ATA_SENSE_RECOVERED_ERROR:
	    device_printf(request->dev, "WARNING - %s recovered error\n",
			  ata_cmd2str(request));
	    /* FALLTHROUGH */

	case ATA_SENSE_NO_SENSE:
	    request->result = 0;
	    break;

	case ATA_SENSE_NOT_READY:
	    request->result = EBUSY;
	    break;

	case ATA_SENSE_UNIT_ATTENTION:
	    atadev->flags |= ATA_D_MEDIA_CHANGED;
	    request->result = EIO;
	    break;

	default:
	    request->result = EIO;
	    if (request->flags & ATA_R_QUIET)
		break;

	    device_printf(request->dev,
			  "FAILURE - %s %s asc=0x%02x ascq=0x%02x ",
			  ata_cmd2str(request), ata_skey2str(
			  (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK)),
			  request->u.atapi.sense.asc,
			  request->u.atapi.sense.ascq);
	    if (request->u.atapi.sense.specific & ATA_SENSE_SPEC_VALID)
		kprintf("sks=0x%02x 0x%02x 0x%02x\n",
		       request->u.atapi.sense.specific & ATA_SENSE_SPEC_MASK,
		       request->u.atapi.sense.specific1,
		       request->u.atapi.sense.specific2);
	    else
		kprintf("\n");
	}

	if ((request->u.atapi.sense.key & ATA_SENSE_KEY_MASK ?
	     request->u.atapi.sense.key & ATA_SENSE_KEY_MASK :
	     request->error))
	    request->result = EIO;
    }

    ATA_DEBUG_RQ(request, "completed callback/wakeup");

    /* if we are part of a composite operation we need to maintain progress */
    if ((composite = request->composite)) {
	int index = 0;

	spin_lock(&composite->lock);

	/* update what's done */
	if (request->flags & ATA_R_READ)
	    composite->rd_done |= (1 << request->this);
	if (request->flags & ATA_R_WRITE)
	    composite->wr_done |= (1 << request->this);

	/* find the dependencies that are ready to go */
	if (composite->wr_depend &&
	    (composite->rd_done & composite->wr_depend)==composite->wr_depend &&
	    (composite->wr_needed & (~composite->wr_done))) {
	    index = composite->wr_needed & ~composite->wr_done;
	}

	spin_unlock(&composite->lock);

	/* if we have any ready candidates kick them off */
	if (index) {
	    int bit;

	    for (bit = 0; bit < MAX_COMPOSITES; bit++) {
		if (index & (1 << bit))
		    ata_start(device_get_parent(composite->request[bit]->dev));
	    }
	}
    }

    /* get results back to the initiator for this request */
    if (request->callback)
	(request->callback)(request);
    else {
	spin_lock(&request->done);
	request->flags |= ATA_R_COMPLETED;
	spin_unlock(&request->done);
	wakeup_one(request);
    }

    /* only call ata_start if the channel is present */
    if (ch)
	ata_start(ch->dev);
}

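/*
 * Callout handler invoked when a running request exceeds its timeout.
 * Only an ATA_ACTIVE channel is handled: the request is flagged
 * ATA_R_TIMEOUT and pushed through ata_finish(); in any other state a
 * racing interrupt is assumed to have taken over the request already.
 */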
void
ata_timeout(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /* acquire state_mtx, softclock_handler() doesn't do this for us */
    spin_lock(&ch->state_mtx);

    /*request->flags |= ATA_R_DEBUG;*/
    ATA_DEBUG_RQ(request, "timeout");

    /*
     * if we have an ATA_ACTIVE request running, flag the request
     * ATA_R_TIMEOUT so ata_finish() will handle it correctly and we
     * won't lose the race with an eventual interrupt arriving late
     */
    if (ch->state == ATA_ACTIVE) {
	request->flags |= ATA_R_TIMEOUT;
	spin_unlock(&ch->state_mtx);
	ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
	ata_finish(request);
    }
    else {
	spin_unlock(&ch->state_mtx);
    }
}

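/*
 * Fail the running request (if it belongs to dev, or if dev is NULL) and
 * every queued request for dev with ENXIO.  Requests are collected on a
 * local list first so ata_finish() runs without the channel locks held.
 */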
void
ata_fail_requests(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_request *request, *tmp;
    TAILQ_HEAD(, ata_request) fail_requests;
    TAILQ_INIT(&fail_requests);

    /* grab all channel locks to avoid races */
    spin_lock(&ch->queue_mtx);
    spin_lock(&ch->state_mtx);

    /* do we have any running request to care about? */
    if ((request = ch->running) && (!dev || request->dev == dev)) {
	callout_stop(&request->callout);
	ch->running = NULL;
	request->result = ENXIO;
	TAILQ_INSERT_TAIL(&fail_requests, request, chain);
    }

    /* fail all requests queued on this channel (for device dev if not NULL) */
    TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
	if (!dev || request->dev == dev) {
	    if (ch->transition == request)
		ch->transition = TAILQ_NEXT(request, chain);
	    TAILQ_REMOVE(&ch->ata_queue, request, chain);
	    request->result = ENXIO;
	    TAILQ_INSERT_TAIL(&fail_requests, request, chain);
	}
    }

    spin_unlock(&ch->state_mtx);
    spin_unlock(&ch->queue_mtx);

    /* finish up all requests collected above */
    TAILQ_FOREACH_MUTABLE(request, &fail_requests, chain, tmp) {
	TAILQ_REMOVE(&fail_requests, request, chain);
	ata_finish(request);
    }
}

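/*
 * Extract the starting LBA from a request.  ATAPI CCBs carry the LBA
 * big-endian; e.g. (illustrative) a READ_BIG of LBA 0x12345678 has
 * ccb[2..5] = 0x12 0x34 0x56 0x78, which the shifts below reassemble.
 */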
static u_int64_t
ata_get_lba(struct ata_request *request)
{
    if (request->flags & ATA_R_ATAPI) {
	switch (request->u.atapi.ccb[0]) {
	case ATAPI_READ_BIG:
	case ATAPI_WRITE_BIG:
	case ATAPI_READ_CD:
	    return (request->u.atapi.ccb[5]) | (request->u.atapi.ccb[4]<<8) |
		   (request->u.atapi.ccb[3]<<16)|(request->u.atapi.ccb[2]<<24);
	case ATAPI_READ:
	case ATAPI_WRITE:
	    return (request->u.atapi.ccb[4]) | (request->u.atapi.ccb[3]<<8) |
		   (request->u.atapi.ccb[2]<<16);
	default:
	    return 0;
	}
    }
    else
	return request->u.ata.lba;
}

/*
 * This implements exactly what bioqdisksort() does in the DragonFly
 * kernel.  The short description is: because megabytes and megabytes
 * worth of writes can be queued, there needs to be a read-prioritization
 * mechanism or reads get completely starved out.
 */
static void
ata_sort_queue(struct ata_channel *ch, struct ata_request *request)
{
    if ((request->flags & ATA_R_WRITE) == 0) {
	if (ch->transition) {
	    /*
	     * Insert before the first write
	     */
	    TAILQ_INSERT_BEFORE(ch->transition, request, chain);
	    if (++ch->reorder >= bioq_reorder_minor_interval) {
		ch->reorder = 0;
		atawritereorder(ch);
	    }
	} else {
	    /*
	     * No writes queued (or ordering was forced),
	     * insert at tail.
	     */
	    TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
	}
    } else {
	/*
	 * Writes are always appended.  If no writes were previously
	 * queued or an ordered tail insertion occurred the transition
	 * field will be NULL.
	 */
	TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
	if (ch->transition == NULL)
		ch->transition = request;
    }
    if (request->composite) {
	ch->transition = NULL;
	ch->reorder = 0;
    }
}

/*
 * Move the transition point to prevent reads from completely
 * starving our writes.  This brings a number of writes into
 * the fold every N reads.
 */
static void
atawritereorder(struct ata_channel *ch)
{
    struct ata_request *req;
    u_int64_t next_offset;
    size_t left = (size_t)bioq_reorder_minor_bytes;
    size_t n;

    next_offset = ata_get_lba(ch->transition);
    while ((req = ch->transition) != NULL &&
	   next_offset == ata_get_lba(req)) {
	n = req->u.ata.count;
	next_offset = ata_get_lba(req);
	ch->transition = TAILQ_NEXT(req, chain);
	if (left < n)
	    break;
	left -= n;
    }
}

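/*
 * Note: ata_cmd2str() formats unknown commands into a single static
 * buffer, so the returned string is not stable across concurrent calls;
 * callers in this file only use it immediately in kprintf()-style output.
 */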
char *
ata_cmd2str(struct ata_request *request)
{
    static char buffer[20];

    if (request->flags & ATA_R_ATAPI) {
	switch (request->u.atapi.sense.key ?
		request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
	case 0x00: return ("TEST_UNIT_READY");
	case 0x01: return ("REZERO");
	case 0x03: return ("REQUEST_SENSE");
	case 0x04: return ("FORMAT");
	case 0x08: return ("READ");
	case 0x0a: return ("WRITE");
	case 0x10: return ("WEOF");
	case 0x11: return ("SPACE");
	case 0x12: return ("INQUIRY");
	case 0x15: return ("MODE_SELECT");
	case 0x19: return ("ERASE");
	case 0x1a: return ("MODE_SENSE");
	case 0x1b: return ("START_STOP");
	case 0x1e: return ("PREVENT_ALLOW");
	case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
	case 0x25: return ("READ_CAPACITY");
	case 0x28: return ("READ_BIG");
	case 0x2a: return ("WRITE_BIG");
	case 0x2b: return ("LOCATE");
	case 0x34: return ("READ_POSITION");
	case 0x35: return ("SYNCHRONIZE_CACHE");
	case 0x3b: return ("WRITE_BUFFER");
	case 0x3c: return ("READ_BUFFER");
	case 0x42: return ("READ_SUBCHANNEL");
	case 0x43: return ("READ_TOC");
	case 0x45: return ("PLAY_10");
	case 0x47: return ("PLAY_MSF");
	case 0x48: return ("PLAY_TRACK");
	case 0x4b: return ("PAUSE");
	case 0x51: return ("READ_DISK_INFO");
	case 0x52: return ("READ_TRACK_INFO");
	case 0x53: return ("RESERVE_TRACK");
	case 0x54: return ("SEND_OPC_INFO");
	case 0x55: return ("MODE_SELECT_BIG");
	case 0x58: return ("REPAIR_TRACK");
	case 0x59: return ("READ_MASTER_CUE");
	case 0x5a: return ("MODE_SENSE_BIG");
	case 0x5b: return ("CLOSE_TRACK/SESSION");
	case 0x5c: return ("READ_BUFFER_CAPACITY");
	case 0x5d: return ("SEND_CUE_SHEET");
	case 0x96: return ("READ_CAPACITY_16");
	case 0xa1: return ("BLANK_CMD");
	case 0xa3: return ("SEND_KEY");
	case 0xa4: return ("REPORT_KEY");
	case 0xa5: return ("PLAY_12");
	case 0xa6: return ("LOAD_UNLOAD");
	case 0xad: return ("READ_DVD_STRUCTURE");
	case 0xb4: return ("PLAY_CD");
	case 0xbb: return ("SET_SPEED");
	case 0xbd: return ("MECH_STATUS");
	case 0xbe: return ("READ_CD");
	case 0xff: return ("POLL_DSC");
	}
    }
    else {
	switch (request->u.ata.command) {
	case 0x00: return ("NOP");
	case 0x08: return ("DEVICE_RESET");
	case 0x20: return ("READ");
	case 0x24: return ("READ48");
	case 0x25: return ("READ_DMA48");
	case 0x26: return ("READ_DMA_QUEUED48");
	case 0x29: return ("READ_MUL48");
	case 0x30: return ("WRITE");
	case 0x34: return ("WRITE48");
	case 0x35: return ("WRITE_DMA48");
	case 0x36: return ("WRITE_DMA_QUEUED48");
	case 0x39: return ("WRITE_MUL48");
	case 0x70: return ("SEEK");
	case 0xa0: return ("PACKET_CMD");
	case 0xa1: return ("ATAPI_IDENTIFY");
	case 0xa2: return ("SERVICE");
	case 0xb0: return ("SMART");
	case 0xc0: return ("CFA_ERASE");
	case 0xc4: return ("READ_MUL");
	case 0xc5: return ("WRITE_MUL");
	case 0xc6: return ("SET_MULTI");
	case 0xc7: return ("READ_DMA_QUEUED");
	case 0xc8: return ("READ_DMA");
	case 0xca: return ("WRITE_DMA");
	case 0xcc: return ("WRITE_DMA_QUEUED");
	case 0xe6: return ("SLEEP");
	case 0xe7: return ("FLUSHCACHE");
	case 0xea: return ("FLUSHCACHE48");
	case 0xec: return ("ATA_IDENTIFY");
	case 0xef:
	    switch (request->u.ata.feature) {
	    case 0x03: return ("SETFEATURES SET TRANSFER MODE");
	    case 0x02: return ("SETFEATURES ENABLE WCACHE");
	    case 0x82: return ("SETFEATURES DISABLE WCACHE");
	    case 0xaa: return ("SETFEATURES ENABLE RCACHE");
	    case 0x55: return ("SETFEATURES DISABLE RCACHE");
	    }
	    ksprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature);
	    return buffer;
	}
    }
    ksprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
    return buffer;
}

static char *
ata_skey2str(u_int8_t skey)
{
    switch (skey) {
    case 0x00: return ("NO SENSE");
    case 0x01: return ("RECOVERED ERROR");
    case 0x02: return ("NOT READY");
    case 0x03: return ("MEDIUM ERROR");
    case 0x04: return ("HARDWARE ERROR");
    case 0x05: return ("ILLEGAL REQUEST");
    case 0x06: return ("UNIT ATTENTION");
    case 0x07: return ("DATA PROTECT");
    case 0x08: return ("BLANK CHECK");
    case 0x09: return ("VENDOR SPECIFIC");
    case 0x0a: return ("COPY ABORTED");
    case 0x0b: return ("ABORTED COMMAND");
    case 0x0c: return ("EQUAL");
    case 0x0d: return ("VOLUME OVERFLOW");
    case 0x0e: return ("MISCOMPARE");
    case 0x0f: return ("RESERVED");
    default: return ("UNKNOWN");
    }
}