/*-
 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ata/ata-queue.c,v 1.67 2007/01/27 21:15:58 remko Exp $
 */

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/nata.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/buf.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include "ata-all.h"
#include "ata_if.h"

/* prototypes */
static void ata_completed(void *, int);
static void ata_sort_queue(struct ata_channel *ch, struct ata_request *request);
static void atawritereorder(struct ata_channel *ch);
static char *ata_skey2str(u_int8_t);

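/*
 * Initialize the channel's request queue and the read/write reorder
 * state (reorder counter and transition pointer) used by ata_sort_queue().
 */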
void
ata_queue_init(struct ata_channel *ch)
{
    TAILQ_INIT(&ch->ata_queue);
    ch->reorder = 0;
    ch->transition = NULL;
}

/*
 * Rudely drop all requests queued to the channel of the specified device.
 * XXX: The requests are leaked, use only in fatal cases.
 */
void
ata_drop_requests(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_request *request, *tmp;

    lockmgr(&ch->queue_mtx, LK_EXCLUSIVE);
    TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
	TAILQ_REMOVE(&ch->ata_queue, request, chain);
	request->result = ENXIO;
    }
    lockmgr(&ch->queue_mtx, LK_RELEASE);
}

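/*
 * Submit a request to the channel.  Control requests bypass the queue and
 * go straight to the hardware while the channel is in ATA_STALL_QUEUE
 * state; everything else is inserted into the queue (head, sorted or tail
 * depending on the request flags) and kicked off via ata_start().
 * Requests without a completion callback are waited on synchronously,
 * unless this is a requeue.
 */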
void
ata_queue_request(struct ata_request *request)
{
    struct ata_channel *ch;

    /* treat request as virgin (this might be an ATA_R_REQUEUE) */
    request->result = request->status = request->error = 0;

    /* check that the device is still valid */
    if (!(request->parent = device_get_parent(request->dev))) {
	request->result = ENXIO;
	if (request->callback)
	    (request->callback)(request);
	return;
    }
    ch = device_get_softc(request->parent);
    /* serialization done via state_mtx */
    callout_init_lk(&request->callout, &ch->state_mtx);
    if (!request->callback && !(request->flags & ATA_R_REQUEUE))
	lockinit(&request->done, "ataqueuerqdone", 0, 0);

    /* in ATA_STALL_QUEUE state we call HW directly */
    if ((ch->state & ATA_STALL_QUEUE) && (request->flags & ATA_R_CONTROL)) {
	lockmgr(&ch->state_mtx, LK_EXCLUSIVE);
	ch->running = request;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
	    ch->running = NULL;
	    if (!request->callback)
		lockuninit(&request->done);
	    lockmgr(&ch->state_mtx, LK_RELEASE);
	    return;
	}
	/* interlock against interrupt */
	request->flags |= ATA_R_HWCMDQUEUED;
	lockmgr(&ch->state_mtx, LK_RELEASE);
    }
    /* otherwise put request on the locked queue at the specified location */
    else {
	lockmgr(&ch->queue_mtx, LK_EXCLUSIVE);
	if (request->flags & ATA_R_AT_HEAD) {
	    TAILQ_INSERT_HEAD(&ch->ata_queue, request, chain);
	} else if (request->flags & ATA_R_ORDERED) {
	    ata_sort_queue(ch, request);
	} else {
	    TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
	    ch->transition = NULL;
	}
	lockmgr(&ch->queue_mtx, LK_RELEASE);
	ATA_DEBUG_RQ(request, "queued");
	ata_start(ch->dev);
    }

    /* if this is a requeued request, callback/sleep handling is already set up */
    if (request->flags & ATA_R_REQUEUE)
	return;

    /* if this is not a callback request, wait until it is completed */
    if (!request->callback) {
	ATA_DEBUG_RQ(request, "wait for completion");
	if (!dumping) {
	    /* interlock against wakeup */
	    lockmgr(&request->done, LK_EXCLUSIVE);
	    /* check if the request was completed already */
	    if (!(request->flags & ATA_R_COMPLETED))
		lksleep(request, &request->done, 0, "ATA request completion "
		       "wait", request->timeout * hz * 4);
	    lockmgr(&request->done, LK_RELEASE);
	    /* check if the request was completed while sleeping */
	    if (!(request->flags & ATA_R_COMPLETED)) {
		/* apparently not */
		device_printf(request->dev, "WARNING - %s taskqueue timeout - "
			      "completing request directly\n",
			      ata_cmd2str(request));
		request->flags |= ATA_R_DANGER1;
		ata_completed(request, 0);
	    }
	}
	lockuninit(&request->done);
    }
}

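/*
 * Build and synchronously execute a single ATA control command and
 * return its result (ENOMEM if no request could be allocated).
 */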
int
ata_controlcmd(device_t dev, u_int8_t command, u_int16_t feature,
	       u_int64_t lba, u_int16_t count)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_request *request = ata_alloc_request();
    int error = ENOMEM;

    if (request) {
	request->dev = dev;
	request->u.ata.command = command;
	request->u.ata.lba = lba;
	request->u.ata.count = count;
	request->u.ata.feature = feature;
	request->flags = ATA_R_CONTROL;
	if (atadev->spindown_state) {
	    device_printf(dev, "request while spun down, starting.\n");
	    atadev->spindown_state = 0;
	    request->timeout = MAX(ATA_DEFAULT_TIMEOUT, 31);
	} else {
	    request->timeout = ATA_DEFAULT_TIMEOUT;
	}
	request->retries = 0;
	ata_queue_request(request);
	error = request->result;
	ata_free_request(request);
    }
    return error;
}

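/*
 * Build and synchronously execute an ATAPI command from the given CCB,
 * optionally transferring 'count' bytes of data; returns the request
 * result (ENOMEM if no request could be allocated).
 */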
int
ata_atapicmd(device_t dev, u_int8_t *ccb, caddr_t data,
	     int count, int flags, int timeout)
{
    struct ata_request *request = ata_alloc_request();
    struct ata_device *atadev = device_get_softc(dev);
    int error = ENOMEM;

    if (request) {
	request->dev = dev;
	if ((atadev->param.config & ATA_PROTO_MASK) == ATA_PROTO_ATAPI_12)
	    bcopy(ccb, request->u.atapi.ccb, 12);
	else
	    bcopy(ccb, request->u.atapi.ccb, 16);
	request->data = data;
	request->bytecount = count;
	request->transfersize = min(request->bytecount, 65534);
	request->flags = flags | ATA_R_ATAPI;
	request->timeout = timeout;
	request->retries = 0;
	ata_queue_request(request);
	error = request->result;
	ata_free_request(request);
    }
    return error;
}

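/*
 * Try to start the first queued request on the channel: obtain the channel
 * lock via ATA_LOCKING(), check that composite write dependencies are met
 * and that the channel is idle, then hand the request to the hardware.
 * When dumping, poll for the interrupt instead of returning.
 */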
void
ata_start(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
    struct ata_composite *cptr;
    int dependencies = 0;

    /* if we have a request on the queue try to get it running */
    lockmgr(&ch->queue_mtx, LK_EXCLUSIVE);
    if ((request = TAILQ_FIRST(&ch->ata_queue))) {

	/* we need the locking function to get the lock for this channel */
	if (ATA_LOCKING(dev, ATA_LF_LOCK) == ch->unit) {

	    /* check for composite dependencies */
	    if ((cptr = request->composite)) {
		lockmgr(&cptr->lock, LK_EXCLUSIVE);
		if ((request->flags & ATA_R_WRITE) &&
		    (cptr->wr_depend & cptr->rd_done) != cptr->wr_depend) {
		    dependencies = 1;
		}
		lockmgr(&cptr->lock, LK_RELEASE);
	    }

	    /* check that we are in the right state and have no dependencies */
	    lockmgr(&ch->state_mtx, LK_EXCLUSIVE);
	    if (ch->state == ATA_IDLE && !dependencies) {
		ATA_DEBUG_RQ(request, "starting");

		if (ch->transition == request)
		    ch->transition = TAILQ_NEXT(request, chain);
		TAILQ_REMOVE(&ch->ata_queue, request, chain);
		ch->running = request;
		ch->state = ATA_ACTIVE;

		if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		    ch->running = NULL;
		    ch->state = ATA_IDLE;
		    lockmgr(&ch->state_mtx, LK_RELEASE);
		    lockmgr(&ch->queue_mtx, LK_RELEASE);
		    ATA_LOCKING(dev, ATA_LF_UNLOCK);
		    ata_finish(request);
		    return;
		}

		/* interlock against interrupt */
		request->flags |= ATA_R_HWCMDQUEUED;

		if (dumping) {
		    lockmgr(&ch->state_mtx, LK_RELEASE);
		    lockmgr(&ch->queue_mtx, LK_RELEASE);
		    while (!ata_interrupt(ch))
			DELAY(10);
		    return;
		}
	    }
	    lockmgr(&ch->state_mtx, LK_RELEASE);
	}
    }
    lockmgr(&ch->queue_mtx, LK_RELEASE);
}

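/*
 * Run final completion processing for a request, either directly or
 * deferred to the MP-safe SWI taskqueue.
 */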
void
ata_finish(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /*
     * if in ATA_STALL_QUEUE state or the request has ATA_R_DIRECT set
     * we need to call ata_completed() directly here (no taskqueue involvement)
     */
    if (dumping ||
	(ch->state & ATA_STALL_QUEUE) || (request->flags & ATA_R_DIRECT)) {
	ATA_DEBUG_RQ(request, "finish directly");
	ata_completed(request, 0);
    }
    else {
	/* put request on the proper taskqueue for completion */
	/* XXX FreeBSD has some sort of bio_taskqueue code here */
	TASK_INIT(&request->task, 0, ata_completed, request);
	ATA_DEBUG_RQ(request, "finish taskqueue_swi_mp");
	taskqueue_enqueue(taskqueue_swi_mp, &request->task);
    }
}

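/*
 * Final completion processing: handle timeouts and retries, decode ATA and
 * ATAPI errors (auto-issuing REQUEST SENSE when a sense key is reported),
 * update composite request state, and finally notify the initiator via its
 * callback or a wakeup before restarting the channel.
 */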
static void
ata_completed(void *context, int dummy)
{
    struct ata_request *request = (struct ata_request *)context;
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_device *atadev = device_get_softc(request->dev);
    struct ata_composite *composite;

    if (request->flags & ATA_R_DANGER2) {
	device_printf(request->dev,
		      "WARNING - %s freeing taskqueue zombie request\n",
		      ata_cmd2str(request));
	request->flags &= ~(ATA_R_DANGER1 | ATA_R_DANGER2);
	ata_free_request(request);
	return;
    }
    if (request->flags & ATA_R_DANGER1)
	request->flags |= ATA_R_DANGER2;

    ATA_DEBUG_RQ(request, "completed entered");

    /* if we had a timeout, reinit channel and deal with the fallout */
    if (request->flags & ATA_R_TIMEOUT) {
	/*
	 * if the channel is still present and
	 * reinit succeeds and
	 * the device doesn't get detached and
	 * there are retries left we reinject this request
	 */
	if (ch && !ata_reinit(ch->dev) && !request->result &&
	    (request->retries-- > 0)) {
	    if (!(request->flags & ATA_R_QUIET)) {
		device_printf(request->dev,
			      "TIMEOUT - %s retrying (%d retr%s left)",
			      ata_cmd2str(request), request->retries,
			      request->retries == 1 ? "y" : "ies");
		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		    kprintf(" LBA=%ju", request->u.ata.lba);
		kprintf("\n");
	    }
	    request->flags &= ~(ATA_R_TIMEOUT | ATA_R_DEBUG);
	    request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
	    ATA_DEBUG_RQ(request, "completed reinject");
	    ata_queue_request(request);
	    return;
	}

	/* ran out of good intentions so finish with error */
	if (!request->result) {
	    if (!(request->flags & ATA_R_QUIET)) {
		if (request->dev) {
		    device_printf(request->dev, "FAILURE - %s timed out",
				  ata_cmd2str(request));
		    if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
			kprintf(" LBA=%ju", request->u.ata.lba);
		    kprintf("\n");
		}
	    }
	    request->result = EIO;
	}
    }
    else if (!(request->flags & ATA_R_ATAPI)) {
	/* if this is a soft ECC error warn about it */
	/* XXX SOS we could do WARF here */
	if ((request->status & (ATA_S_CORR | ATA_S_ERROR)) == ATA_S_CORR) {
	    device_printf(request->dev,
			  "WARNING - %s soft error (ECC corrected)",
			  ata_cmd2str(request));
	    if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		kprintf(" LBA=%ju", request->u.ata.lba);
	    kprintf("\n");
	}

	/* if this is a UDMA CRC error we reinject if there are retries left */
	if (request->flags & ATA_R_DMA && request->error & ATA_E_ICRC) {
	    if (request->retries-- > 0) {
		device_printf(request->dev,
			      "WARNING - %s UDMA ICRC error (retrying request)",
			      ata_cmd2str(request));
		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		    kprintf(" LBA=%ju", request->u.ata.lba);
		kprintf("\n");
		request->flags |= (ATA_R_AT_HEAD | ATA_R_REQUEUE);
		ata_queue_request(request);
		return;
	    }
	}
    }

    switch (request->flags & ATA_R_ATAPI) {

    /* ATA errors */
    default:
	if (!request->result && request->status & ATA_S_ERROR) {
	    if (!(request->flags & ATA_R_QUIET)) {
		device_printf(request->dev,
			      "FAILURE - %s status=%pb%i error=%pb%i",
			      ata_cmd2str(request),
			      "\20\10BUSY\7READY\6DMA_READY"
			      "\5DSC\4DRQ\3CORRECTABLE\2INDEX\1ERROR",
			      request->status,
			      "\20\10ICRC\7UNCORRECTABLE"
			      "\6MEDIA_CHANGED\5NID_NOT_FOUND"
			      "\4MEDIA_CHANGE_REQUEST"
			      "\3ABORTED\2NO_MEDIA\1ILLEGAL_LENGTH",
			      request->error);
		if ((request->flags & ATA_R_DMA) &&
		    (request->dmastat & ATA_BMSTAT_ERROR))
		    kprintf(" dma=0x%02x", request->dmastat);
		if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
		    kprintf(" LBA=%ju", request->u.ata.lba);
		kprintf("\n");
	    }
	    request->result = EIO;
	}
	break;

    /* ATAPI errors */
    case ATA_R_ATAPI:
	/* skip if result already set */
	if (request->result)
	    break;

	/* if we have a sense key -> request sense from device */
	if ((request->error & ATA_E_ATAPI_SENSE_MASK) &&
	    (request->u.atapi.ccb[0] != ATAPI_REQUEST_SENSE)) {
	    static u_int8_t ccb[16] = { ATAPI_REQUEST_SENSE, 0, 0, 0,
					sizeof(struct atapi_sense),
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	    request->u.atapi.saved_cmd = request->u.atapi.ccb[0];
	    bcopy(ccb, request->u.atapi.ccb, 16);
	    request->data = (caddr_t)&request->u.atapi.sense;
	    request->bytecount = sizeof(struct atapi_sense);
	    request->donecount = 0;
	    request->transfersize = sizeof(struct atapi_sense);
	    request->timeout = ATA_DEFAULT_TIMEOUT;
	    request->flags &= (ATA_R_ATAPI | ATA_R_QUIET);
	    request->flags |= (ATA_R_READ | ATA_R_AT_HEAD | ATA_R_REQUEUE);
	    ATA_DEBUG_RQ(request, "autoissue request sense");
	    ata_queue_request(request);
	    return;
	}

	switch (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK) {
	case ATA_SENSE_RECOVERED_ERROR:
	    device_printf(request->dev, "WARNING - %s recovered error\n",
			  ata_cmd2str(request));
	    /* FALLTHROUGH */

	case ATA_SENSE_NO_SENSE:
	    request->result = 0;
	    break;

	case ATA_SENSE_NOT_READY:
	    request->result = EBUSY;
	    break;

	case ATA_SENSE_UNIT_ATTENTION:
	    atadev->flags |= ATA_D_MEDIA_CHANGED;
	    request->result = EIO;
	    break;

	default:
	    request->result = EIO;
	    if (request->flags & ATA_R_QUIET)
		break;

	    device_printf(request->dev,
			  "FAILURE - %s %s asc=0x%02x ascq=0x%02x ",
			  ata_cmd2str(request), ata_skey2str(
			  (request->u.atapi.sense.key & ATA_SENSE_KEY_MASK)),
			  request->u.atapi.sense.asc,
			  request->u.atapi.sense.ascq);
	    if (request->u.atapi.sense.specific & ATA_SENSE_SPEC_VALID)
		kprintf("sks=0x%02x 0x%02x 0x%02x\n",
		       request->u.atapi.sense.specific & ATA_SENSE_SPEC_MASK,
		       request->u.atapi.sense.specific1,
		       request->u.atapi.sense.specific2);
	    else
		kprintf("\n");
	}

	if ((request->u.atapi.sense.key & ATA_SENSE_KEY_MASK ?
	     request->u.atapi.sense.key & ATA_SENSE_KEY_MASK :
	     request->error))
	    request->result = EIO;
    }

    ATA_DEBUG_RQ(request, "completed callback/wakeup");

    /* if we are part of a composite operation we need to maintain progress */
    if ((composite = request->composite)) {
	int index = 0;

	lockmgr(&composite->lock, LK_EXCLUSIVE);

	/* update what's done */
	if (request->flags & ATA_R_READ)
	    composite->rd_done |= (1 << request->this);
	if (request->flags & ATA_R_WRITE)
	    composite->wr_done |= (1 << request->this);

	/* find ready-to-go dependencies */
	if (composite->wr_depend &&
	    (composite->rd_done & composite->wr_depend)==composite->wr_depend &&
	    (composite->wr_needed & (~composite->wr_done))) {
	    index = composite->wr_needed & ~composite->wr_done;
	}

	lockmgr(&composite->lock, LK_RELEASE);

	/* if we have any ready candidates kick them off */
	if (index) {
	    int bit;

	    for (bit = 0; bit < MAX_COMPOSITES; bit++) {
		if (index & (1 << bit))
		    ata_start(device_get_parent(composite->request[bit]->dev));
	    }
	}
    }

    /* get results back to the initiator for this request */
    if (request->callback)
	(request->callback)(request);
    else {
	lockmgr(&request->done, LK_EXCLUSIVE);
	request->flags |= ATA_R_COMPLETED;
	lockmgr(&request->done, LK_RELEASE);
	wakeup_one(request);
    }

    /* only call ata_start if the channel is present */
    if (ch)
	ata_start(ch->dev);
}

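/*
 * Request timeout handler, called from the request callout with
 * ch->state_mtx held.
 */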
void
ata_timeout(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /*
     * NOTE: callout acquired ch->state_mtx for us.
     */
    /*request->flags |= ATA_R_DEBUG;*/
    ATA_DEBUG_RQ(request, "timeout");

    /*
     * if we have an ATA_ACTIVE request running, we flag the request
     * ATA_R_TIMEOUT so ata_finish will handle it correctly; also NULL
     * out the running request so we won't lose the race with an
     * eventual interrupt arriving late
     */
    if (ch->state == ATA_ACTIVE) {
	request->flags |= ATA_R_TIMEOUT;
	lockmgr(&ch->state_mtx, LK_RELEASE);
	ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
	ata_finish(request);
	lockmgr(&ch->state_mtx, LK_EXCLUSIVE);
    }
}

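/*
 * Fail the running request and all queued requests for the given device
 * (or for the whole channel if dev is NULL) with ENXIO, then run their
 * completion processing outside the channel locks.
 */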
void
ata_fail_requests(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_request *request, *tmp;
    TAILQ_HEAD(, ata_request) fail_requests;
    TAILQ_INIT(&fail_requests);

    /* grab all channel locks to avoid races */
    lockmgr(&ch->queue_mtx, LK_EXCLUSIVE);
    lockmgr(&ch->state_mtx, LK_EXCLUSIVE);

    /* do we have a running request to care about? */
    if ((request = ch->running) && (!dev || request->dev == dev)) {
	callout_cancel(&request->callout);
	ch->running = NULL;
	request->result = ENXIO;
	TAILQ_INSERT_TAIL(&fail_requests, request, chain);
    }

    /* fail all requests queued on this channel for device dev (all if dev is NULL) */
    TAILQ_FOREACH_MUTABLE(request, &ch->ata_queue, chain, tmp) {
	if (!dev || request->dev == dev) {
	    if (ch->transition == request)
		ch->transition = TAILQ_NEXT(request, chain);
	    TAILQ_REMOVE(&ch->ata_queue, request, chain);
	    request->result = ENXIO;
	    TAILQ_INSERT_TAIL(&fail_requests, request, chain);
	}
    }

    lockmgr(&ch->state_mtx, LK_RELEASE);
    lockmgr(&ch->queue_mtx, LK_RELEASE);

    /* finish up all requests collected above */
    TAILQ_FOREACH_MUTABLE(request, &fail_requests, chain, tmp) {
	TAILQ_REMOVE(&fail_requests, request, chain);
	ata_finish(request);
    }
}

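/*
 * Return the starting LBA of a request; for ATAPI requests the LBA is
 * decoded from the CCB of the common read/write commands, otherwise 0.
 */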
static u_int64_t
ata_get_lba(struct ata_request *request)
{
    if (request->flags & ATA_R_ATAPI) {
	switch (request->u.atapi.ccb[0]) {
	case ATAPI_READ_BIG:
	case ATAPI_WRITE_BIG:
	case ATAPI_READ_CD:
	    return (request->u.atapi.ccb[5]) | (request->u.atapi.ccb[4]<<8) |
		   (request->u.atapi.ccb[3]<<16)|(request->u.atapi.ccb[2]<<24);
	case ATAPI_READ:
	case ATAPI_WRITE:
	    return (request->u.atapi.ccb[4]) | (request->u.atapi.ccb[3]<<8) |
		   (request->u.atapi.ccb[2]<<16);
	default:
	    return 0;
	}
    }
    else
	return request->u.ata.lba;
}

/*
 * This implements exactly the bioqdisksort() logic in the DragonFly kernel.
 * The short description is: because megabytes and megabytes worth of
 * writes can be queued, there needs to be a read-prioritization mechanism
 * or reads get completely starved out.
 */
static void
ata_sort_queue(struct ata_channel *ch, struct ata_request *request)
{
    if ((request->flags & ATA_R_WRITE) == 0) {
	if (ch->transition) {
	    /*
	     * Insert before the first write
	     */
	    TAILQ_INSERT_BEFORE(ch->transition, request, chain);
	    if (++ch->reorder >= bioq_reorder_minor_interval) {
		ch->reorder = 0;
		atawritereorder(ch);
	    }
	} else {
	    /*
	     * No writes queued (or ordering was forced),
	     * insert at tail.
	     */
	    TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
	}
    } else {
	/*
	 * Writes are always appended.  If no writes were previously
	 * queued or an ordered tail insertion occurred the transition
	 * field will be NULL.
	 */
	TAILQ_INSERT_TAIL(&ch->ata_queue, request, chain);
	if (ch->transition == NULL)
	    ch->transition = request;
    }
    if (request->composite) {
	ch->transition = NULL;
	ch->reorder = 0;
    }
}

/*
 * Move the transition point to prevent reads from completely
 * starving our writes.  This brings a number of writes into
 * the fold every N reads.
 */
static void
atawritereorder(struct ata_channel *ch)
{
    struct ata_request *req;
    u_int64_t next_offset;
    size_t left = (size_t)bioq_reorder_minor_bytes;
    size_t n;

    next_offset = ata_get_lba(ch->transition);
    while ((req = ch->transition) != NULL &&
	   next_offset == ata_get_lba(req)) {
	n = req->u.ata.count;
	next_offset = ata_get_lba(req);
	ch->transition = TAILQ_NEXT(req, chain);
	if (left < n)
	    break;
	left -= n;
    }
}

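/*
 * Return a printable name for the command carried by a request (ATAPI CCB
 * opcode or ATA command/feature).
 */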
const char *
ata_cmd2str(struct ata_request *request)
{
    static char buffer[20];

    if (request->flags & ATA_R_ATAPI) {
	switch (request->u.atapi.sense.key ?
		request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
	case 0x00: return ("TEST_UNIT_READY");
	case 0x01: return ("REZERO");
	case 0x03: return ("REQUEST_SENSE");
	case 0x04: return ("FORMAT");
	case 0x08: return ("READ");
	case 0x0a: return ("WRITE");
	case 0x10: return ("WEOF");
	case 0x11: return ("SPACE");
	case 0x12: return ("INQUIRY");
	case 0x15: return ("MODE_SELECT");
	case 0x19: return ("ERASE");
	case 0x1a: return ("MODE_SENSE");
	case 0x1b: return ("START_STOP");
	case 0x1e: return ("PREVENT_ALLOW");
	case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
	case 0x25: return ("READ_CAPACITY");
	case 0x28: return ("READ_BIG");
	case 0x2a: return ("WRITE_BIG");
	case 0x2b: return ("LOCATE");
	case 0x34: return ("READ_POSITION");
	case 0x35: return ("SYNCHRONIZE_CACHE");
	case 0x3b: return ("WRITE_BUFFER");
	case 0x3c: return ("READ_BUFFER");
	case 0x42: return ("READ_SUBCHANNEL");
	case 0x43: return ("READ_TOC");
	case 0x45: return ("PLAY_10");
	case 0x47: return ("PLAY_MSF");
	case 0x48: return ("PLAY_TRACK");
	case 0x4b: return ("PAUSE");
	case 0x51: return ("READ_DISK_INFO");
	case 0x52: return ("READ_TRACK_INFO");
	case 0x53: return ("RESERVE_TRACK");
	case 0x54: return ("SEND_OPC_INFO");
	case 0x55: return ("MODE_SELECT_BIG");
	case 0x58: return ("REPAIR_TRACK");
	case 0x59: return ("READ_MASTER_CUE");
	case 0x5a: return ("MODE_SENSE_BIG");
	case 0x5b: return ("CLOSE_TRACK/SESSION");
	case 0x5c: return ("READ_BUFFER_CAPACITY");
	case 0x5d: return ("SEND_CUE_SHEET");
	case 0x96: return ("READ_CAPACITY_16");
	case 0xa1: return ("BLANK_CMD");
	case 0xa3: return ("SEND_KEY");
	case 0xa4: return ("REPORT_KEY");
	case 0xa5: return ("PLAY_12");
	case 0xa6: return ("LOAD_UNLOAD");
	case 0xad: return ("READ_DVD_STRUCTURE");
	case 0xb4: return ("PLAY_CD");
	case 0xbb: return ("SET_SPEED");
	case 0xbd: return ("MECH_STATUS");
	case 0xbe: return ("READ_CD");
	case 0xff: return ("POLL_DSC");
	}
    }
    else {
	switch (request->u.ata.command) {
	case 0x00: return ("NOP");
	case 0x08: return ("DEVICE_RESET");
	case 0x20: return ("READ");
	case 0x24: return ("READ48");
	case 0x25: return ("READ_DMA48");
	case 0x26: return ("READ_DMA_QUEUED48");
	case 0x27: return ("READ_NATIVE_MAX_ADDRESS48");
	case 0x29: return ("READ_MUL48");
	case 0x30: return ("WRITE");
	case 0x34: return ("WRITE48");
	case 0x35: return ("WRITE_DMA48");
	case 0x36: return ("WRITE_DMA_QUEUED48");
	case 0x39: return ("WRITE_MUL48");
	case 0x70: return ("SEEK");
	case 0xa0: return ("PACKET_CMD");
	case 0xa1: return ("ATAPI_IDENTIFY");
	case 0xa2: return ("SERVICE");
	case 0xb0: return ("SMART");
	case 0xc0: return ("CFA ERASE");
	case 0xc4: return ("READ_MUL");
	case 0xc5: return ("WRITE_MUL");
	case 0xc6: return ("SET_MULTI");
	case 0xc7: return ("READ_DMA_QUEUED");
	case 0xc8: return ("READ_DMA");
	case 0xca: return ("WRITE_DMA");
	case 0xcc: return ("WRITE_DMA_QUEUED");
	case 0xe6: return ("SLEEP");
	case 0xe7: return ("FLUSHCACHE");
	case 0xea: return ("FLUSHCACHE48");
	case 0xec: return ("ATA_IDENTIFY");
	case 0xef:
	    switch (request->u.ata.feature) {
	    case 0x03: return ("SETFEATURES SET TRANSFER MODE");
	    case 0x02: return ("SETFEATURES ENABLE WCACHE");
	    case 0x82: return ("SETFEATURES DISABLE WCACHE");
	    case 0xaa: return ("SETFEATURES ENABLE RCACHE");
	    case 0x55: return ("SETFEATURES DISABLE RCACHE");
	    }
	    ksprintf(buffer, "SETFEATURES 0x%02x", request->u.ata.feature);
	    return buffer;
	case 0xf8: return ("READ_NATIVE_MAX_ADDRESS");
	}
    }
    ksprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
    return buffer;
}

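/*
 * Translate an ATAPI sense key value into a human readable string.
 */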
static char *
ata_skey2str(u_int8_t skey)
{
    switch (skey) {
    case 0x00: return ("NO SENSE");
    case 0x01: return ("RECOVERED ERROR");
    case 0x02: return ("NOT READY");
    case 0x03: return ("MEDIUM ERROR");
    case 0x04: return ("HARDWARE ERROR");
    case 0x05: return ("ILLEGAL REQUEST");
    case 0x06: return ("UNIT ATTENTION");
    case 0x07: return ("DATA PROTECT");
    case 0x08: return ("BLANK CHECK");
    case 0x09: return ("VENDOR SPECIFIC");
    case 0x0a: return ("COPY ABORTED");
    case 0x0b: return ("ABORTED COMMAND");
    case 0x0c: return ("EQUAL");
    case 0x0d: return ("VOLUME OVERFLOW");
    case 0x0e: return ("MISCOMPARE");
    case 0x0f: return ("RESERVED");
    default: return("UNKNOWN");
    }
}