xref: /dragonfly/sys/dev/disk/nata/ata-queue.c (revision 3f5e28f4)
1 /*-
2  * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/ata/ata-queue.c,v 1.65 2006/07/21 19:13:05 imp Exp $
27  * $DragonFly: src/sys/dev/disk/nata/ata-queue.c,v 1.4 2006/12/22 23:26:16 swildner Exp $
28  */
29 
30 #include "opt_ata.h"
31 
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/callout.h>
35 #include <sys/nata.h>
36 #include <sys/queue.h>
37 #include <sys/spinlock2.h>
38 #include <sys/systm.h>
39 #include <sys/taskqueue.h>
40 
41 #include "ata-all.h"
42 #include "ata_if.h"
43 
44 /* prototypes */
45 static void ata_completed(void *, int);
46 static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request);
47 static char *ata_skey2str(u_int8_t);
48 
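/*
 * ata_queue_request:
 *	Submit a request to its channel.  Control requests bypass the queue
 *	while the channel is in ATA_STALL_QUEUE state; everything else is
 *	inserted into the channel queue (at the head, sorted, or at the tail
 *	depending on the request flags) and ata_start() is kicked.  For
 *	non-callback, non-requeued requests the caller sleeps here until the
 *	request has completed.
 */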
49 void
50 ata_queue_request(struct ata_request *request)
51 {
52     struct ata_channel *ch;
53 
54     /* treat request as virgin (this might be an ATA_R_REQUEUE) */
55     request->result = request->status = request->error = 0;
56 
57     /* check that the device is still valid */
58     if (!(request->parent = device_get_parent(request->dev))) {
59 	request->result = ENXIO;
60 	if (request->callback)
61 	    (request->callback)(request);
62 	return;
63     }
64     ch = device_get_softc(request->parent);
65     callout_init_mp(&request->callout);	/* serialization done via state_mtx */
66     if (!request->callback && !(request->flags & ATA_R_REQUEUE))
67 	spin_init(&request->done);
68 
69     /* in ATA_STALL_QUEUE state we call HW directly */
70     if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
71 	spin_lock_wr(&ch->state_mtx);
72 	ch->running = request;
73 	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
74 	    ch->running = NULL;
75 	    if (!request->callback)
76 		spin_uninit(&request->done);
77 	    spin_unlock_wr(&ch->state_mtx);
78 	    return;
79 	}
80 	spin_unlock_wr(&ch->state_mtx);
81     }
82     /* otherwise put request on the locked queue at the specified location */
83     else  {
84 	spin_lock_wr(&ch->queue_mtx);
85 	if (request->flags & ATA_R_AT_HEAD)
86 	    TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain);
87 	else if (request->flags & ATA_R_ORDERED)
88 	    ata_sort_queue(ch, request);
89 	else
90 	    TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
91 	spin_unlock_wr(&ch->queue_mtx);
92 	ATA_DEBUG_RQ(request, "queued");
93 	ata_start(ch->dev);
94     }
95 
96     /* if this is a requeued request the callback/sleep is already set up, we're done */
97     if (request->flags & ATA_R_REQUEUE)
98 	return;
99 
100     /* if this is not a callback request, wait until it is completed */
101     if (!request->callback) {
102 	ATA_DEBUG_RQ(request, "wait for completion");
103 	if (!dumping) {
104 	    /* interlock against wakeup */
105 	    spin_lock_wr(&request->done);
106 	    /* check if the request was completed already */
107 	    if (!(request->flags & ATA_R_COMPLETED))
108 		msleep(request, &request->done, 0, "ATA request completion "
109 		       "wait", request->timeout * hz * 4);
110 	    spin_unlock_wr(&request->done);
111 	    /* check if the request was completed while sleeping */
112 	    if (!(request->flags & ATA_R_COMPLETED)) {
113 		/* apparently not */
114 		device_printf(request->dev, "WARNING - %s taskqueue timeout - "
115 			      "completing request directly\n",
116 			      ata_cmd2str(request));
117 		request->flags |= ATA_R_DANGER1;
118 		ata_completed(request, 0);
119 	    }
120 	}
121 	spin_uninit(&request->done);
122     }
123 }
124 
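/*
 * ata_controlcmd:
 *	Build and synchronously execute a single ATA (non-ATAPI) control
 *	command with a timeout of 1 second and no retries, returning the
 *	request result (ENOMEM if no request could be allocated).
 *	Illustrative use (constants assumed from sys/nata.h):
 *	    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER,
 *				   0, mode);
 */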
125 int
126 ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature,
127 	       u_int64_t lba, u_int16_t count)
128 {
129     struct ata_request *request = ata_alloc_request();
130     int error = ENOMEM;
131 
132     if (request) {
133 	request->dev = dev;
134 	request->u.ata.command = command;
135 	request->u.ata.lba = lba;
136 	request->u.ata.count = count;
137 	request->u.ata.feature = feature;
138 	request->flags = ATA_R_CONTROL;
139 	request->timeout = 1;
140 	request->retries = 0;
141 	ata_queue_request(request);
142 	error = request->result;
143 	ata_free_request(request);
144     }
145     return error;
146 }
147 
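/*
 * ata_atapicmd:
 *	Build and synchronously execute an ATAPI packet command.  The ccb is
 *	copied as a 12 or 16 byte packet depending on what the device
 *	advertises, transfers are chunked at 65534 bytes, and the request
 *	result is returned (ENOMEM if allocation failed).
 */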
148 int
149 ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data,
150 	     int count, int flags, int timeout)
151 {
152     struct ata_request *request = ata_alloc_request();
153     struct ata_device *atadev = device_get_softc(dev);
154     int error = ENOMEM;
155 
156     if (request) {
157 	request->dev = dev;
158 	if ((atadev->param.config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12)
159 	    bcopy(ccb, request->u.atapi.ccb, 12);
160 	else
161 	    bcopy(ccb, request->u.atapi.ccb, 16);
162 	request->data = data;
163 	request->bytecount = count;
164 	request->transfersize = min(request->bytecount, 65534);
165 	request->flags = flags | ATA_R_ATAPI;
166 	request->timeout = timeout;
167 	request->retries = 0;
168 	ata_queue_request(request);
169 	error = request->result;
170 	ata_free_request(request);
171     }
172     return error;
173 }
174 
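/*
 * ata_start:
 *	Try to dispatch the first request queued on this channel.  Requires
 *	the channel lock from ATA_LOCKING(), an idle channel state and, for
 *	composite writes, that all read dependencies have completed.  When
 *	dumping, the transaction is polled to completion right here.
 */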
175 void
176 ata_start(device_t dev)
177 {
178     struct ata_channel *ch = device_get_softc(dev);
179     struct ata_request *request;
180     struct ata_composite *cptr;
181     int dependencies = 0;
182 
183     /* if we have a request on the queue try to get it running */
184     spin_lock_wr(&ch->queue_mtx);
185     if ((request = TAILQ_FIRST(&ch->ata_queue))) {
186 
187 	/* we need the locking function to get the lock for this channel */
188 	if (ATA_LOCKING(dev, ATA_LF_LOCK) == ch->unit) {
189 
190 	    /* check for composite dependencies */
191 	    if ((cptr = request->composite)) {
192 		spin_lock_wr(&cptr->lock);
193 		if ((request->flags & ATA_R_WRITE) &&
194 		    (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) {
195 		    dependencies = 1;
196 		}
197 		spin_unlock_wr(&cptr->lock);
198 	    }
199 
200 	    /* check that we are in the right state and have no dependencies */
201 	    spin_lock_wr(&ch->state_mtx);
202 	    if (ch->state == ATA_IDLE && !dependencies) {
203 		ATA_DEBUG_RQ(request, "starting");
204 		TAILQ_REMOVE(&ch->ata_queue, request, chain);
205 		ch->running = request;
206 		ch->state = ATA_ACTIVE;
207 
208 		/* if we are the freezepoint release it */
209 		if (ch->freezepoint == request)
210 		    ch->freezepoint = NULL;
211 
212 		if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
213 		    ch->running = NULL;
214 		    ch->state = ATA_IDLE;
215 		    spin_unlock_wr(&ch->state_mtx);
216 		    spin_unlock_wr(&ch->queue_mtx);
217 		    ATA_LOCKING(dev, ATA_LF_UNLOCK);
218 		    ata_finish(request);
219 		    return;
220 		}
221 		if (dumping) {
222 		    spin_unlock_wr(&ch->state_mtx);
223 		    spin_unlock_wr(&ch->queue_mtx);
224 		    while (!ata_interrupt(ch))
225 			DELAY(10);
226 		    return;
227 		}
228 	    }
229 	    spin_unlock_wr(&ch->state_mtx);
230 	}
231     }
232     spin_unlock_wr(&ch->queue_mtx);
233 }
234 
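/*
 * ata_finish:
 *	Hand a finished request over to completion processing, either
 *	directly (when dumping, stalled or ATA_R_DIRECT) or deferred via
 *	taskqueue_swi.
 */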
235 void
236 ata_finish(struct ata_request *request)
237 {
238     struct ata_channel *ch = device_get_softc(request->parent);
239 
240     /*
241      * if dumping, in ATA_STALL_QUEUE state, or request has ATA_R_DIRECT flag set
242      * we need to call ata_completed() directly here (no taskqueue involvement)
243      */
244     if (dumping ||
245 	(ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT)) {
246 	ATA_DEBUG_RQ(request, "finish directly");
247 	ata_completed(request, 0);
248     }
249     else {
250 	/* put request on the proper taskqueue for completion */
251         TASK_INIT(&request->task, 0, ata_completed, request);
252 	ATA_DEBUG_RQ(request, "finish taskqueue_swi");
253 	taskqueue_enqueue(taskqueue_swi, &request->task);
254     }
255 }
256 
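/*
 * ata_completed:
 *	Completion handler, run from taskqueue_swi or called directly: retry
 *	or fail timed out requests, warn about soft ECC and UDMA CRC errors,
 *	decode ATA/ATAPI error status (auto-issuing REQUEST SENSE when
 *	needed), update composite progress and finally notify the initiator
 *	via callback or wakeup.
 */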
257 static void
258 ata_completed(void *context, int dummy)
259 {
260     struct ata_request *request = (struct ata_request *)context;
261     struct ata_channel *ch = device_get_softc(request->parent);
262     struct ata_device *atadev = device_get_softc(request->dev);
263     struct ata_composite *composite;
264 
265     if (request->flags & ATA_R_DANGER2) {
266 	device_printf(request->dev,
267 		      "WARNING - %s freeing taskqueue zombie request\n",
268 		      ata_cmd2str(request));
269 	request->flags &= ~(ATA_R_DANGER1 | ATA_R_DANGER2);
270 	ata_free_request(request);
271 	return;
272     }
273     if (request->flags & ATA_R_DANGER1)
274 	request->flags |= ATA_R_DANGER2;
275 
276     ATA_DEBUG_RQ(request, "completed entered");
277 
278     /* if we had a timeout, reinit the channel and deal with the fallout */
279     if (request->flags & ATA_R_TIMEOUT) {
280 	/*
281 	 * if the channel is still present and
282 	 * reinit succeeds and
283 	 * the device doesn't get detached and
284 	 * there are retries left we reinject this request
285 	 */
286 	if (ch && !ata_reinit(ch->dev) && !request->result &&
287 	    (request->retries-- > 0)) {
288 	    if (!(request->flags & ATA_R_QUIET)) {
289 		device_printf(request->dev,
290 			      "TIMEOUT - %s retrying (%d retr%s left)",
291 			      ata_cmd2str(request), request->retries,
292 			      request->retries == 1 ? "y" : "ies");
293 		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
294 		    kprintf(" LBA=%ju", request->u.ata.lba);
295 		kprintf("\n");
296 	    }
297 	    request->flags &= ~(ATA_R_TIMEOUT | ATA_R_DEBUG);
298 	    request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
299 	    ATA_DEBUG_RQ(request, "completed reinject");
300 	    ata_queue_request(request);
301 	    return;
302 	}
303 
304 	/* ran out of good intentions so finish with error */
305 	if (!request->result) {
306 	    if (!(request->flags & ATA_R_QUIET)) {
307 		if (request->dev) {
308 		    device_printf(request->dev, "FAILURE - %s timed out",
309 				  ata_cmd2str(request));
310 		    if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
311 			kprintf(" LBA=%ju", request->u.ata.lba);
312 		    kprintf("\n");
313 		}
314 	    }
315 	    request->result = EIO;
316 	}
317     }
318     else if (!(request->flags & ATA_R_ATAPI)) {
319 	/* if this is a soft ECC error warn about it */
320 	/* XXX SOS we could do WARF here */
321 	if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) {
322 	    device_printf(request->dev,
323 			  "WARNING - %s soft error (ECC corrected)",
324 			  ata_cmd2str(request));
325 	    if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
326 		kprintf(" LBA=%ju", request->u.ata.lba);
327 	    kprintf("\n");
328 	}
329 
330 	/* if this is a UDMA CRC error we reinject if there are retries left */
331 	if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) {
332 	    if (request->retries-- > 0) {
333 		device_printf(request->dev,
334 			      "WARNING - %s UDMA ICRC error (retrying request)",
335 			      ata_cmd2str(request));
336 		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
337 		    kprintf(" LBA=%ju", request->u.ata.lba);
338 		kprintf("\n");
339 		request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
340 		ata_queue_request(request);
341 		return;
342 	    }
343 	}
344     }
345 
346     switch (request->flags & ATA_R_ATAPI) {
347 
348     /* ATA errors */
349     default:
350 	if (!request->result && request->status & ATA_S_ERROR) {
351 	    if (!(request->flags & ATA_R_QUIET)) {
352 		device_printf(request->dev,
353 			      "FAILURE - %s status=%b error=%b",
354 			      ata_cmd2str(request),
355 			      request->status, "\20\10BUSY\7READY\6DMA_READY"
356 			      "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR",
357 			      request->error, "\20\10ICRC\7UNCORRECTABLE"
358 			      "\6MEDIA_CHANGED\5NID_NOT_FOUND"
359 			      "\4MEDIA_CHANGE_REQUEST"
360 			      "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH");
361 		if ((request->flags & ATA_R_DMA) &&
362 		    (request->dmastat & ATA_BMSTAT_ERROR))
363 		    kprintf(" dma=0x%02x", request->dmastat);
364 		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
365 		    kprintf(" LBA=%ju", request->u.ata.lba);
366 		kprintf("\n");
367 	    }
368 	    request->result = EIO;
369 	}
370 	break;
371 
372     /* ATAPI errors */
373     case ATA_R_ATAPI:
374 	/* skip if result already set */
375 	if (request->result)
376 	    break;
377 
378 	/* if we have a sensekey -> request sense from device */
379 	if ((request->error & ATA_E_ATAPI_SENSE_MASK) &&
380 	    (request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE)) {
381 	    static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0,
382 					sizeof(struct atapi_sense),
383 					0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
384 
385 	    request->u.atapi.saved_cmd = request->u.atapi.ccb[0];
386 	    bcopy(ccb, request->u.atapi.ccb, 16);
387 	    request->data = (caddr_t)&request->u.atapi.sense;
388 	    request->bytecount = sizeof(struct atapi_sense);
389 	    request->donecount = 0;
390 	    request->transfersize = sizeof(struct atapi_sense);
391 	    request->timeout = 5;
392 	    request->flags &= (ATA_R_ATAPI | ATA_R_QUIET);
393 	    request->flags |= (ATA_R_READ | ATA_R_AT_HEAD | ATA_R_REQUEUE);
394 	    ATA_DEBUG_RQ(request, "autoissue request sense");
395 	    ata_queue_request(request);
396 	    return;
397 	}
398 
399 	switch (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK) {
400 	case ATA_SENSE_RECOVERED_ERROR:
401 	    device_printf(request->dev, "WARNING - %s recovered error\n",
402 			  ata_cmd2str(request));
403 	    /* FALLTHROUGH */
404 
405 	case ATA_SENSE_NO_SENSE:
406 	    request->result = 0;
407 	    break;
408 
409 	case ATA_SENSE_NOT_READY:
410 	    request->result = EBUSY;
411 	    break;
412 
413 	case ATA_SENSE_UNIT_ATTENTION:
414 	    atadev->flags |= ATA_D_MEDIA_CHANGED;
415 	    request->result = EIO;
416 	    break;
417 
418 	default:
419 	    request->result = EIO;
420 	    if (request->flags & ATA_R_QUIET)
421 		break;
422 
423 	    device_printf(request->dev,
424 			  "FAILURE - %s %s asc=0x%02x ascq=0x%02x ",
425 			  ata_cmd2str(request), ata_skey2str(
426 			  (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK)),
427 			  request->u.atapi.sense.asc,
428 			  request->u.atapi.sense.ascq);
429 	    if (request->u.atapi.sense.specific & ATA_SENSE_SPEC_VALID)
430 		kprintf("sks=0x%02x 0x%02x 0x%02x\n",
431 		       request->u.atapi.sense.specific & ATA_SENSE_SPEC_MASK,
432 		       request->u.atapi.sense.specific1,
433 		       request->u.atapi.sense.specific2);
434 	    else
435 		kprintf("\n");
436 	}
437 
438 	if ((request->u.atapi.sense.key & ATA_SENSE_KEY_MASK ?
439 	     request->u.atapi.sense.key & ATA_SENSE_KEY_MASK :
440 	     request->error))
441 	    request->result = EIO;
442     }
443 
444     ATA_DEBUG_RQ(request, "completed callback/wakeup");
445 
446     /* if we are part of a composite operation we need to maintain progress */
447     if ((composite = request->composite)) {
448 	int index = 0;
449 
450 	spin_lock_wr(&composite->lock);
451 
452 	/* update what's done */
453 	if (request->flags & ATA_R_READ)
454 	    composite->rd_done |= (1 << request->this);
455 	if (request->flags & ATA_R_WRITE)
456 	    composite->wr_done |= (1 << request->this);
457 
458 	/* find ready to go dependencies */
459 	if (composite->wr_depend &&
460 	    (composite->rd_done & composite->wr_depend)==composite->wr_depend &&
461 	    (composite->wr_needed & (~composite->wr_done))) {
462 	    index = composite->wr_needed & ~composite->wr_done;
463 	}
464 
465 	spin_unlock_wr(&composite->lock);
466 
467 	/* if we have any ready candidates kick them off */
468 	if (index) {
469 	    int bit;
470 
471 	    for (bit = 0; bit < MAX_COMPOSITES; bit++) {
472 		if (index & (1 << bit))
473 		    ata_start(device_get_parent(composite->request[bit]->dev));
474 	    }
475 	}
476     }
477 
478     /* get results back to the initiator for this request */
479     if (request->callback)
480 	(request->callback)(request);
481     else {
482 	spin_lock_wr(&request->done);
483 	request->flags |= ATA_R_COMPLETED;
484 	wakeup_one(request);
485 	spin_unlock_wr(&request->done);
486     }
487 
488     /* only call ata_start if channel is present */
489     if (ch)
490 	ata_start(ch->dev);
491 }
492 
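/*
 * ata_timeout:
 *	Callout handler for a request that did not complete in time.  If the
 *	channel is still ATA_ACTIVE the request is flagged ATA_R_TIMEOUT and
 *	pushed through ata_finish(); otherwise an interrupt presumably beat
 *	us to it and nothing needs to be done.
 */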
493 void
494 ata_timeout(struct ata_request *request)
495 {
496     struct ata_channel *ch = device_get_softc(request->parent);
497 
498     /* acquire state_mtx, softclock_handler() doesn't do this for us */
499     spin_lock_wr(&ch->state_mtx);
500 
501     /*request->flags |= ATA_R_DEBUG;*/
502     ATA_DEBUG_RQ(request, "timeout");
503 
504     /*
505      * if we have an ATA_ACTIVE request running, we flag the request
506      * ATA_R_TIMEOUT so ata_finish will handle it correctly
507      * also NULL out the running request so we won't lose
508      * the race with an eventual interrupt arriving late
509      */
510     if (ch->state == ATA_ACTIVE) {
511 	request->flags |= ATA_R_TIMEOUT;
512 	spin_unlock_wr(&ch->state_mtx);
513 	ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
514 	ata_finish(request);
515     }
516     else {
517 	spin_unlock_wr(&ch->state_mtx);
518     }
519 }
520 
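/*
 * ata_fail_requests:
 *	Fail the running request and all queued requests for the given
 *	device (or for the whole channel if dev is NULL) with ENXIO, then
 *	finish them up outside the channel locks.
 */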
521 void
522 ata_fail_requests(device_t dev)
523 {
524     struct ata_channel *ch = device_get_softc(device_get_parent(dev));
525     struct ata_request *request, *tmp;
526     TAILQ_HEAD(, ata_request) fail_requests;
527     TAILQ_INIT(&fail_requests);
528 
529     /* grab all channel locks to avoid races */
530     spin_lock_wr(&ch->queue_mtx);
531     spin_lock_wr(&ch->state_mtx);
532 
533     /* do we have any running request to care about? */
534     if ((request = ch->running) && (!dev || request->dev == dev)) {
535 	callout_stop(&request->callout);
536 	ch->running = NULL;
537 	request->result = ENXIO;
538 	TAILQ_INSERT_TAIL(&fail_requests, request, chain);
539     }
540 
541     /* fail all requests queued on this channel for device dev, or all if dev is NULL */
542     TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
543 	if (!dev || request->dev == dev) {
544 	    TAILQ_REMOVE(&ch->ata_queue, request, chain);
545 	    request->result = ENXIO;
546 	    TAILQ_INSERT_TAIL(&fail_requests, request, chain);
547 	}
548     }
549 
550     spin_unlock_wr(&ch->state_mtx);
551     spin_unlock_wr(&ch->queue_mtx);
552 
553     /* finish up all requests collected above */
554     TAILQ_FOREACH_MUTABLE(request, &fail_requests, chain, tmp) {
555         TAILQ_REMOVE(&fail_requests, request, chain);
556         ata_finish(request);
557     }
558 }
559 
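/*
 * ata_get_lba:
 *	Extract the starting LBA from a request, decoding the CDB for the
 *	common ATAPI read/write opcodes so the queue sorter can compare
 *	positions.  Unrecognized ATAPI commands sort as LBA 0.
 */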
560 static u_int64_t
561 ata_get_lba(struct ata_request *request)
562 {
563     if (request->flags & ATA_R_ATAPI) {
564 	switch (request->u.atapi.ccb[0]) {
565 	case ATAPI_READ_BIG:
566 	case ATAPI_WRITE_BIG:
567 	case ATAPI_READ_CD:
568 	    return (request->u.atapi.ccb[5]) | (request->u.atapi.ccb[4]<<8) |
569 		   (request->u.atapi.ccb[3]<<16)|(request->u.atapi.ccb[2]<<24);
570 	case ATAPI_READ:
571 	case ATAPI_WRITE:
572 	    return (request->u.atapi.ccb[4]) | (request->u.atapi.ccb[3]<<8) |
573 		   (request->u.atapi.ccb[2]<<16);
574 	default:
575 	    return 0;
576 	}
577     }
578     else
579 	return request->u.ata.lba;
580 }
581 
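/*
 * ata_sort_queue:
 *	Insert an ATA_R_ORDERED request into the queue in roughly ascending
 *	LBA order (a simple one-way elevator), never sorting across the
 *	frozen part of the queue marked by ch->freezepoint; composite
 *	requests become the new freezepoint.
 */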
582 static void
583 ata_sort_queue(struct ata_channel *ch, struct ata_request *request)
584 {
585     struct ata_request *this, *next;
586 
587     this = TAILQ_FIRST(&ch->ata_queue);
588 
589     /* if the queue is empty just insert */
590     if (!this) {
591 	if (request->composite)
592 	    ch->freezepoint = request;
593 	TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
594 	return;
595     }
596 
597     /* don't sort frozen parts of the queue */
598     if (ch->freezepoint)
599 	this = ch->freezepoint;
600 
601     /* if the request's LBA is less than the head's we add after the tipping point */
602     if (ata_get_lba(request) < ata_get_lba(this)) {
603 	while ((next = TAILQ_NEXT(this, chain))) {
604 
605 	    /* have we reached the tipping point */
606 	    if (ata_get_lba(next) < ata_get_lba(this)) {
607 
608 		/* sort the insert */
609 		do {
610 		    if (ata_get_lba(request) < ata_get_lba(next))
611 			break;
612 		    this = next;
613 		} while ((next = TAILQ_NEXT(this, chain)));
614 		break;
615 	    }
616 	    this = next;
617 	}
618     }
619 
620     /* we are after the head so sort the insert before the tipping point */
621     else {
622 	while ((next = TAILQ_NEXT(this, chain))) {
623 	    if (ata_get_lba(next) < ata_get_lba(this) ||
624 		ata_get_lba(request) < ata_get_lba(next))
625 		break;
626 	    this = next;
627 	}
628     }
629 
630     if (request->composite)
631 	ch->freezepoint = request;
632     TAILQ_INSERT_AFTER(&ch->ata_queue, this, request, chain);
633 }
634 
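/*
 * ata_cmd2str:
 *	Return a human readable name for the command in a request, for use
 *	in diagnostic printfs.  Unknown commands and SETFEATURES subcodes are
 *	formatted into a static buffer, so the result is not reentrant.
 */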
635 char *
636 ata_cmd2str(struct ata_request *request)
637 {
638     static char buffer[20];
639 
640     if (request->flags & ATA_R_ATAPI) {
641 	switch (request->u.atapi.sense.key ?
642 		request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
643 	case 0x00: return ("TEST_UNIT_READY");
644 	case 0x01: return ("REZERO");
645 	case 0x03: return ("REQUEST_SENSE");
646 	case 0x04: return ("FORMAT");
647 	case 0x08: return ("READ");
648 	case 0x0a: return ("WRITE");
649 	case 0x10: return ("WEOF");
650 	case 0x11: return ("SPACE");
651 	case 0x12: return ("INQUIRY");
652 	case 0x15: return ("MODE_SELECT");
653 	case 0x19: return ("ERASE");
654 	case 0x1a: return ("MODE_SENSE");
655 	case 0x1b: return ("START_STOP");
656 	case 0x1e: return ("PREVENT_ALLOW");
657 	case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
658 	case 0x25: return ("READ_CAPACITY");
659 	case 0x28: return ("READ_BIG");
660 	case 0x2a: return ("WRITE_BIG");
661 	case 0x2b: return ("LOCATE");
662 	case 0x34: return ("READ_POSITION");
663 	case 0x35: return ("SYNCHRONIZE_CACHE");
664 	case 0x3b: return ("WRITE_BUFFER");
665 	case 0x3c: return ("READ_BUFFER");
666 	case 0x42: return ("READ_SUBCHANNEL");
667 	case 0x43: return ("READ_TOC");
668 	case 0x45: return ("PLAY_10");
669 	case 0x47: return ("PLAY_MSF");
670 	case 0x48: return ("PLAY_TRACK");
671 	case 0x4b: return ("PAUSE");
672 	case 0x51: return ("READ_DISK_INFO");
673 	case 0x52: return ("READ_TRACK_INFO");
674 	case 0x53: return ("RESERVE_TRACK");
675 	case 0x54: return ("SEND_OPC_INFO");
676 	case 0x55: return ("MODE_SELECT_BIG");
677 	case 0x58: return ("REPAIR_TRACK");
678 	case 0x59: return ("READ_MASTER_CUE");
679 	case 0x5a: return ("MODE_SENSE_BIG");
680 	case 0x5b: return ("CLOSE_TRACK/SESSION");
681 	case 0x5c: return ("READ_BUFFER_CAPACITY");
682 	case 0x5d: return ("SEND_CUE_SHEET");
683         case 0x96: return ("SERVICE_ACTION_IN");
684 	case 0xa1: return ("BLANK_CMD");
685 	case 0xa3: return ("SEND_KEY");
686 	case 0xa4: return ("REPORT_KEY");
687 	case 0xa5: return ("PLAY_12");
688 	case 0xa6: return ("LOAD_UNLOAD");
689 	case 0xad: return ("READ_DVD_STRUCTURE");
690 	case 0xb4: return ("PLAY_CD");
691 	case 0xbb: return ("SET_SPEED");
692 	case 0xbd: return ("MECH_STATUS");
693 	case 0xbe: return ("READ_CD");
694 	case 0xff: return ("POLL_DSC");
695 	}
696     }
697     else {
698 	switch (request->u.ata.command) {
699 	case 0x00: return ("NOP");
700 	case 0x08: return ("DEVICE_RESET");
701 	case 0x20: return ("READ");
702 	case 0x24: return ("READ48");
703 	case 0x25: return ("READ_DMA48");
704 	case 0x26: return ("READ_DMA_QUEUED48");
705 	case 0x29: return ("READ_MUL48");
706 	case 0x30: return ("WRITE");
707 	case 0x34: return ("WRITE48");
708 	case 0x35: return ("WRITE_DMA48");
709 	case 0x36: return ("WRITE_DMA_QUEUED48");
710 	case 0x39: return ("WRITE_MUL48");
711 	case 0x70: return ("SEEK");
712 	case 0xa0: return ("PACKET_CMD");
713 	case 0xa1: return ("ATAPI_IDENTIFY");
714 	case 0xa2: return ("SERVICE");
715 	case 0xc0: return ("CFA ERASE");
716 	case 0xc4: return ("READ_MUL");
717 	case 0xc5: return ("WRITE_MUL");
718 	case 0xc6: return ("SET_MULTI");
719 	case 0xc7: return ("READ_DMA_QUEUED");
720 	case 0xc8: return ("READ_DMA");
721 	case 0xca: return ("WRITE_DMA");
722 	case 0xcc: return ("WRITE_DMA_QUEUED");
723 	case 0xe6: return ("SLEEP");
724 	case 0xe7: return ("FLUSHCACHE");
725 	case 0xea: return ("FLUSHCACHE48");
726 	case 0xec: return ("ATA_IDENTIFY");
727 	case 0xef:
728 	    switch (request->u.ata.feature) {
729 	    case 0x03: return ("SETFEATURES SET TRANSFER MODE");
730 	    case 0x02: return ("SETFEATURES ENABLE WCACHE");
731 	    case 0x82: return ("SETFEATURES DISABLE WCACHE");
732 	    case 0xaa: return ("SETFEATURES ENABLE RCACHE");
733 	    case 0x55: return ("SETFEATURES DISABLE RCACHE");
734 	    }
735 	    ksprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature);
736 	    return buffer;
737 	}
738     }
739     ksprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
740     return buffer;
741 }
742 
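/*
 * ata_skey2str:
 *	Map an ATAPI/SCSI sense key (0x00 - 0x0f) to its standard name for
 *	error reporting.
 */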
743 static char *
744 ata_skey2str(u_int8_t skey)
745 {
746     switch (skey) {
747     case 0x00: return ("NO SENSE");
748     case 0x01: return ("RECOVERED ERROR");
749     case 0x02: return ("NOT READY");
750     case 0x03: return ("MEDIUM ERROR");
751     case 0x04: return ("HARDWARE ERROR");
752     case 0x05: return ("ILLEGAL REQUEST");
753     case 0x06: return ("UNIT ATTENTION");
754     case 0x07: return ("DATA PROTECT");
755     case 0x08: return ("BLANK CHECK");
756     case 0x09: return ("VENDOR SPECIFIC");
757     case 0x0a: return ("COPY ABORTED");
758     case 0x0b: return ("ABORTED COMMAND");
759     case 0x0c: return ("EQUAL");
760     case 0x0d: return ("VOLUME OVERFLOW");
761     case 0x0e: return ("MISCOMPARE");
762     case 0x0f: return ("RESERVED");
763     default: return("UNKNOWN");
764     }
765 }
766