xref: /dragonfly/sys/dev/disk/nata/ata-all.c (revision b7367ef6)
1 /*-
2  * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/ata/ata-all.c,v 1.279 2007/02/23 16:25:08 jhb Exp $
27  * $DragonFly: src/sys/dev/disk/nata/ata-all.c,v 1.12 2007/06/05 18:30:40 swildner Exp $
28  */
29 
30 #include "opt_ata.h"
31 
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/callout.h>
35 #include <sys/conf.h>
36 #include <sys/ctype.h>
37 #include <sys/device.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/libkern.h>
41 #include <sys/lock.h>		/* for {get,rel}_mplock() */
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/nata.h>
45 #include <sys/objcache.h>
46 #include <sys/queue.h>
47 #include <sys/spinlock2.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include "ata-all.h"
52 #include "ata_if.h"
53 
54 /* device structure */
55 static  d_ioctl_t       ata_ioctl;
56 static struct dev_ops ata_ops = {
57 	{ "ata", 159, 0 },
58 	.d_open =	nullopen,
59 	.d_close =	nullclose,
60 	.d_ioctl =	ata_ioctl,
61 };
62 
63 /* prototypes */
64 static void ata_boot_attach(void);
65 static device_t ata_add_child(device_t, struct ata_device *, int);
66 static int ata_getparam(struct ata_device *, int);
67 static void bswap(int8_t *, int);
68 static void btrim(int8_t *, int);
69 static void bpack(int8_t *, int8_t *, int);
70 
71 /* global vars */
72 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
73 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
74 devclass_t ata_devclass;
75 struct objcache *ata_request_cache;
76 struct objcache *ata_composite_cache;
77 struct objcache_malloc_args ata_request_malloc_args = {
78 	sizeof(struct ata_request), M_ATA };
79 struct objcache_malloc_args ata_composite_malloc_args = {
80 	sizeof(struct ata_composite), M_ATA };
81 int ata_wc = 1;
82 
83 /* local vars */
84 static int ata_dma = 1;
85 static int atapi_dma = 1;
86 
87 /* sysctl vars */
88 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
89 TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
90 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RW, &ata_dma, 0,
91 	   "ATA disk DMA mode control");
92 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
93 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RW, &atapi_dma, 0,
94 	   "ATAPI device DMA mode control");
95 TUNABLE_INT("hw.ata.wc", &ata_wc);
96 SYSCTL_INT(_hw_ata, OID_AUTO, ata_wc, CTLFLAG_RW, &ata_wc, 0,
97 	   "ATA disk write caching");
98 
99 /*
100  * newbus device interface related functions
101  */
102 int
103 ata_probe(device_t dev)
104 {
105     return 0;
106 }
107 
108 int
109 ata_attach(device_t dev)
110 {
111     struct ata_channel *ch = device_get_softc(dev);
112     int error, rid;
113 
114     /* check that we have a virgin channel to attach */
115     if (ch->r_irq)
116 	return EEXIST;
117 
118     /* initialize the softc basics */
119     ch->dev = dev;
120     ch->state = ATA_IDLE;
121     spin_init(&ch->state_mtx);
122     spin_init(&ch->queue_mtx);
123     TAILQ_INIT(&ch->ata_queue);
124 
125     /* reset the controller HW, the channel and device(s) */
126     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
127 	tsleep(&error, 0, "ataatch", 1);
128     ATA_RESET(dev);
129     ATA_LOCKING(dev, ATA_LF_UNLOCK);
130 
131     /* setup interrupt delivery */
132     rid = ATA_IRQ_RID;
133     ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
134 				       RF_SHAREABLE | RF_ACTIVE);
135     if (!ch->r_irq) {
136 	device_printf(dev, "unable to allocate interrupt\n");
137 	return ENXIO;
138     }
139     if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS,
140 				(driver_intr_t *)ata_interrupt, ch, &ch->ih,
141 				NULL))) {
142 	device_printf(dev, "unable to setup interrupt\n");
143 	return error;
144     }
145 
146     /* probe and attach devices on this channel */
147     ata_identify(dev);
148     return 0;
149 }
150 
151 int
152 ata_detach(device_t dev)
153 {
154     struct ata_channel *ch = device_get_softc(dev);
155     device_t *children;
156     int nchildren, i;
157 
158     /* check that we have a valid channel to detach */
159     if (!ch->r_irq)
160 	return ENXIO;
161 
162     /* grab the channel lock so no new requests get launched */
163     spin_lock_wr(&ch->state_mtx);
164     ch->state |= ATA_STALL_QUEUE;
165     spin_unlock_wr(&ch->state_mtx);
166 
167     /* detach & delete all children */
168     if (!device_get_children(dev, &children, &nchildren)) {
169 	for (i = 0; i < nchildren; i++)
170 	    if (children[i])
171 		device_delete_child(dev, children[i]);
172 	kfree(children, M_TEMP);
173     }
174 
175     /* release resources */
176     bus_teardown_intr(dev, ch->r_irq, ch->ih);
177     bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
178     ch->r_irq = NULL;
179     spin_uninit(&ch->state_mtx);
180     spin_uninit(&ch->queue_mtx);
181     return 0;
182 }
183 
184 int
185 ata_reinit(device_t dev)
186 {
187     struct ata_channel *ch = device_get_softc(dev);
188     struct ata_request *request;
189     device_t *children;
190     int nchildren, i;
191 
192     /* check that we have a valid channel to reinit */
193     if (!ch || !ch->r_irq)
194 	return ENXIO;
195 
196     if (bootverbose)
197 	device_printf(dev, "reiniting channel ..\n");
198 
199     /* poll for locking the channel */
200     while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
201 	tsleep(&dev, 0, "atarini", 1);
202 
203     /* catch any request currently in ch->running */
204     spin_lock_wr(&ch->state_mtx);
205     if ((request = ch->running))
206 	callout_stop(&request->callout);
207     ch->running = NULL;
208 
209     /* unconditionally grab the channel lock */
210     ch->state |= ATA_STALL_QUEUE;
211     spin_unlock_wr(&ch->state_mtx);
212 
213     /* reset the controller HW, the channel and device(s) */
214     ATA_RESET(dev);
215 
216     /* reinit the children and delete any that fail */
217     if (!device_get_children(dev, &children, &nchildren)) {
218 	get_mplock();
219 	for (i = 0; i < nchildren; i++) {
220 	    /* did any children go missing? */
221 	    if (children[i] && device_is_attached(children[i]) &&
222 		ATA_REINIT(children[i])) {
223 		/*
224 		 * if we had a running request and its device matches
225 		 * this child we need to inform the request that the
226 		 * device is gone.
227 		 */
228 		if (request && request->dev == children[i]) {
229 		    request->result = ENXIO;
230 		    device_printf(request->dev, "FAILURE - device detached\n");
231 
232 		    /* if the request did not time out, finish it here */
233 		    if (!(request->flags & ATA_R_TIMEOUT))
234 			    ata_finish(request);
235 		    request = NULL;
236 		}
237 		device_delete_child(dev, children[i]);
238 	    }
239 	}
240 	kfree(children, M_TEMP);
241 	rel_mplock();
242     }
243 
244     /* if we still have a good request put it on the queue again */
245     if (request && !(request->flags & ATA_R_TIMEOUT)) {
246 	device_printf(request->dev,
247 		      "WARNING - %s requeued due to channel reset",
248 		      ata_cmd2str(request));
249 	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
250 	    kprintf(" LBA=%ju", request->u.ata.lba);
251 	kprintf("\n");
252 	request->flags |= ATA_R_REQUEUE;
253 	ata_queue_request(request);
254     }
255 
256     /* we're done, release the channel for new work */
257     spin_lock_wr(&ch->state_mtx);
258     ch->state = ATA_IDLE;
259     spin_unlock_wr(&ch->state_mtx);
260     ATA_LOCKING(dev, ATA_LF_UNLOCK);
261 
262     if (bootverbose)
263 	device_printf(dev, "reinit done ..\n");
264 
265     /* kick off requests on the queue */
266     ata_start(dev);
267     return 0;
268 }
269 
270 int
271 ata_suspend(device_t dev)
272 {
273     struct ata_channel *ch;
274 
275     /* check for valid device */
276     if (!dev || !(ch = device_get_softc(dev)))
277 	return ENXIO;
278 
279     /* wait for the channel to be IDLE or detached before suspending */
280     while (ch->r_irq) {
281 	spin_lock_wr(&ch->state_mtx);
282 	if (ch->state == ATA_IDLE) {
283 	    ch->state = ATA_ACTIVE;
284 	    spin_unlock_wr(&ch->state_mtx);
285 	    break;
286 	}
287 	spin_unlock_wr(&ch->state_mtx);
288 	tsleep(ch, 0, "atasusp", hz/10);
289     }
290     ATA_LOCKING(dev, ATA_LF_UNLOCK);
291     return 0;
292 }
293 
294 int
295 ata_resume(device_t dev)
296 {
297     struct ata_channel *ch;
298     int error;
299 
300     /* check for valid device */
301     if (!dev || !(ch = device_get_softc(dev)))
302 	return ENXIO;
303 
304     /* reinit the devices, we don't know what mode/state they are in */
305     error = ata_reinit(dev);
306 
307     /* kick off requests on the queue */
308     ata_start(dev);
309     return error;
310 }
311 
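/*
 * Channel interrupt handler. Returns 1 when the running request was
 * completed, 0 when the interrupt was ignored or more interrupts are needed.
 */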
312 int
313 ata_interrupt(void *data)
314 {
315     struct ata_channel *ch = (struct ata_channel *)data;
316     struct ata_request *request;
317 
318     spin_lock_wr(&ch->state_mtx);
319     do {
320 	/*
321 	 * Ignore interrupt if it's not for us.  This may also have the
322 	 * side effect of processing events unrelated to I/O requests.
323 	 */
324 	if (ch->hw.status && !ch->hw.status(ch->dev))
325 	    break;
326 
327 	/*
328 	 * Check if we have a running request, and make sure it has been
329 	 * completely queued.  Otherwise the channel status may indicate
330 	 * not-busy when, in fact, the command had not yet been issued.
331 	 */
332 	if ((request = ch->running) == NULL)
333 	    break;
334 	if ((request->flags & ATA_R_HWCMDQUEUED) == 0) {
335 	    kprintf("ata_interrupt: early interrupt\n");
336 	    break;
337 	}
338 
339 	/* XXX TGEN Ignore weird ATAPI+DMA interrupts on SMP */
340 	if (ch->dma && (request->flags & ATA_R_ATAPI)) {
341             int status = ATA_IDX_INB(ch, ATA_STATUS);
342 	    int error = ATA_IDX_INB(ch, ATA_ERROR);
343 	    int bmstat = ATA_IDX_INB(ch, ATA_BMSTAT_PORT) & ATA_BMSTAT_MASK;
344 	    if (((status & (ATA_S_DWF|ATA_S_DRQ)) == (ATA_S_DWF|ATA_S_DRQ)) &&
345 		((error & ATA_E_ILI) == ATA_E_ILI) &&
346 		!(bmstat & ATA_BMSTAT_ERROR)) {
347                 if (bootverbose)
348                     device_printf(request->dev, "ignoring weird interrupt\n");
349 		break;
350 	    }
351 	}
352 
353 	ATA_DEBUG_RQ(request, "interrupt");
354 
355 	/* safety check for the right state */
356 	if (ch->state == ATA_IDLE) {
357 	    device_printf(request->dev, "interrupt on idle channel ignored\n");
358 	    break;
359 	}
360 
361 	/*
362 	 * we have the HW locks, so end the transaction for this request
363 	 * if it finishes immediately, otherwise wait for the next interrupt
364 	 */
365 	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
366 	    ch->running = NULL;
367 	    if (ch->state == ATA_ACTIVE)
368 		ch->state = ATA_IDLE;
369 	    spin_unlock_wr(&ch->state_mtx);
370 	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
371 	    ata_finish(request);
372 	    return 1;
373 	}
374     } while (0);
375     spin_unlock_wr(&ch->state_mtx);
376     return 0;
377 }
378 
379 /*
380  * device related interfaces
381  */
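/*
 * ioctls on the control device: query/reinit/attach/detach channels and
 * enumerate devices; anything else goes to the ATA RAID handler if present.
 */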
382 static int
383 ata_ioctl(struct dev_ioctl_args *ap)
384 {
385     device_t device, *children;
386     struct ata_ioc_devices *devices = (struct ata_ioc_devices *)ap->a_data;
387     int *value = (int *)ap->a_data;
388     int i, nchildren, error = ENOTTY;
389 
390     switch (ap->a_cmd) {
391     case IOCATAGMAXCHANNEL:
392 	*value = devclass_get_maxunit(ata_devclass);
393 	error = 0;
394 	break;
395 
396     case IOCATAREINIT:
397 	if (*value > devclass_get_maxunit(ata_devclass) ||
398 	    !(device = devclass_get_device(ata_devclass, *value)))
399 	    return ENXIO;
400 	error = ata_reinit(device);
401 	ata_start(device);
402 	break;
403 
404     case IOCATAATTACH:
405 	if (*value > devclass_get_maxunit(ata_devclass) ||
406 	    !(device = devclass_get_device(ata_devclass, *value)))
407 	    return ENXIO;
408 	/* XXX SOS should enable channel HW on controller */
409 	error = ata_attach(device);
410 	break;
411 
412     case IOCATADETACH:
413 	if (*value > devclass_get_maxunit(ata_devclass) ||
414 	    !(device = devclass_get_device(ata_devclass, *value)))
415 	    return ENXIO;
416 	error = ata_detach(device);
417 	/* XXX SOS should disable channel HW on controller */
418 	break;
419 
420     case IOCATADEVICES:
421 	if (devices->channel > devclass_get_maxunit(ata_devclass) ||
422 	    !(device = devclass_get_device(ata_devclass, devices->channel)))
423 	    return ENXIO;
424 	bzero(devices->name[0], 32);
425 	bzero(&devices->params[0], sizeof(struct ata_params));
426 	bzero(devices->name[1], 32);
427 	bzero(&devices->params[1], sizeof(struct ata_params));
428 	if (!device_get_children(device, &children, &nchildren)) {
429 	    for (i = 0; i < nchildren; i++) {
430 		if (children[i] && device_is_attached(children[i])) {
431 		    struct ata_device *atadev = device_get_softc(children[i]);
432 
433 		    if (atadev->unit == ATA_MASTER) {
434 			strncpy(devices->name[0],
435 				device_get_nameunit(children[i]), 32);
436 			bcopy(&atadev->param, &devices->params[0],
437 			      sizeof(struct ata_params));
438 		    }
439 		    if (atadev->unit == ATA_SLAVE) {
440 			strncpy(devices->name[1],
441 				device_get_nameunit(children[i]), 32);
442 			bcopy(&atadev->param, &devices->params[1],
443 			      sizeof(struct ata_params));
444 		    }
445 		}
446 	    }
447 	    kfree(children, M_TEMP);
448 	    error = 0;
449 	}
450 	else
451 	    error = ENODEV;
452 	break;
453 
454     default:
455 	if (ata_raid_ioctl_func)
456 	    error = ata_raid_ioctl_func(ap->a_cmd, ap->a_data);
457     }
458     return error;
459 }
460 
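/*
 * Per-device ioctls: user command pass-through (IOCATAREQUEST), fetching
 * the IDENTIFY data and getting/setting the transfer mode.
 */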
461 int
462 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
463 {
464     struct ata_device *atadev = device_get_softc(dev);
465     struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
466     struct ata_params *params = (struct ata_params *)data;
467     int *mode = (int *)data;
468     struct ata_request *request;
469     caddr_t buf;
470     int error;
471 
472     switch (cmd) {
473     case IOCATAREQUEST:
474 	if (!(buf = kmalloc(ioc_request->count, M_ATA, M_WAITOK | M_NULLOK))) {
475 	    return ENOMEM;
476 	}
477 	if (!(request = ata_alloc_request())) {
478 	    kfree(buf, M_ATA);
479 	    return  ENOMEM;
480 	}
481 	if (ioc_request->flags & ATA_CMD_WRITE) {
482 	    error = copyin(ioc_request->data, buf, ioc_request->count);
483 	    if (error) {
484 		kfree(buf, M_ATA);
485 		ata_free_request(request);
486 		return error;
487 	    }
488 	}
489 	request->dev = dev;
490 	if (ioc_request->flags & ATA_CMD_ATAPI) {
491 	    request->flags = ATA_R_ATAPI;
492 	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
493 	}
494 	else {
495 	    request->u.ata.command = ioc_request->u.ata.command;
496 	    request->u.ata.feature = ioc_request->u.ata.feature;
497 	    request->u.ata.lba = ioc_request->u.ata.lba;
498 	    request->u.ata.count = ioc_request->u.ata.count;
499 	}
500 	request->timeout = ioc_request->timeout;
501 	request->data = buf;
502 	request->bytecount = ioc_request->count;
503 	request->transfersize = request->bytecount;
504 	if (ioc_request->flags & ATA_CMD_CONTROL)
505 	    request->flags |= ATA_R_CONTROL;
506 	if (ioc_request->flags & ATA_CMD_READ)
507 	    request->flags |= ATA_R_READ;
508 	if (ioc_request->flags & ATA_CMD_WRITE)
509 	    request->flags |= ATA_R_WRITE;
510 	ata_queue_request(request);
511 	if (request->flags & ATA_R_ATAPI) {
512 	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
513 		  sizeof(struct atapi_sense));
514 	}
515 	else {
516 	    ioc_request->u.ata.command = request->u.ata.command;
517 	    ioc_request->u.ata.feature = request->u.ata.feature;
518 	    ioc_request->u.ata.lba = request->u.ata.lba;
519 	    ioc_request->u.ata.count = request->u.ata.count;
520 	}
521 	ioc_request->error = request->result;
522 	if (ioc_request->flags & ATA_CMD_READ)
523 	    error = copyout(buf, ioc_request->data, ioc_request->count);
524 	else
525 	    error = 0;
526 	kfree(buf, M_ATA);
527 	ata_free_request(request);
528 	return error;
529 
530     case IOCATAGPARM:
531 	ata_getparam(atadev, 0);
532 	bcopy(&atadev->param, params, sizeof(struct ata_params));
533 	return 0;
534 
535     case IOCATASMODE:
536 	atadev->mode = *mode;
537 	ATA_SETMODE(device_get_parent(dev), dev);
538 	return 0;
539 
540     case IOCATAGMODE:
541 	*mode = atadev->mode;
542 	return 0;
543     default:
544 	return ENOTTY;
545     }
546 }
547 
548 static void
549 ata_boot_attach(void)
550 {
551     struct ata_channel *ch;
552     int ctlr;
553 
554     get_mplock();
555 
556     /* kick off probe and attach on all channels */
557     for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
558 	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
559 	    ata_identify(ch->dev);
560 	}
561     }
562 
563     rel_mplock();
564 }
565 
566 
567 /*
568  * misc support functions
569  */
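/* add a child device for the given unit and set conservative defaults */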
570 static device_t
571 ata_add_child(device_t parent, struct ata_device *atadev, int unit)
572 {
573     device_t child;
574 
575     if ((child = device_add_child(parent, NULL, unit))) {
576 	device_set_softc(child, atadev);
577 	device_quiet(child);
578 	atadev->dev = child;
579 	atadev->max_iosize = DEV_BSIZE;
580 	atadev->mode = ATA_PIO_MAX;
581     }
582     return child;
583 }
584 
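/*
 * Issue an (ATAPI) IDENTIFY to the device and post-process the result
 * (endianness, string cleanup); if 'init' is set, also set the device
 * description and pick an initial transfer mode.
 */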
585 static int
586 ata_getparam(struct ata_device *atadev, int init)
587 {
588     struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
589     struct ata_request *request;
590     u_int8_t command = 0;
591     int error = ENOMEM, retries = 2;
592 
593     if (ch->devices &
594 	(atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE))
595 	command = ATA_ATA_IDENTIFY;
596     if (ch->devices &
597 	(atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE))
598 	command = ATA_ATAPI_IDENTIFY;
599     if (!command)
600 	return ENXIO;
601 
602     while (retries-- > 0 && error) {
603 	if (!(request = ata_alloc_request()))
604 	    break;
605 	request->dev = atadev->dev;
606 	request->timeout = 1;
607 	request->retries = 0;
608 	request->u.ata.command = command;
609 	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET);
610 	request->data = (void *)&atadev->param;
611 	request->bytecount = sizeof(struct ata_params);
612 	request->donecount = 0;
613 	request->transfersize = DEV_BSIZE;
614 	ata_queue_request(request);
615 	error = request->result;
616 	ata_free_request(request);
617     }
618 
619     if (!error && (isprint(atadev->param.model[0]) ||
620 		   isprint(atadev->param.model[1]))) {
621 	struct ata_params *atacap = &atadev->param;
622 	char buffer[64];
623 	int16_t *ptr;
624 
625 	for (ptr = (int16_t *)atacap;
626 	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
627 	    *ptr = le16toh(*ptr);
628 	}
629 	if (!(!strncmp(atacap->model, "FX", 2) ||
630 	      !strncmp(atacap->model, "NEC", 3) ||
631 	      !strncmp(atacap->model, "Pioneer", 7) ||
632 	      !strncmp(atacap->model, "SHARP", 5))) {
633 	    bswap(atacap->model, sizeof(atacap->model));
634 	    bswap(atacap->revision, sizeof(atacap->revision));
635 	    bswap(atacap->serial, sizeof(atacap->serial));
636 	}
637 	btrim(atacap->model, sizeof(atacap->model));
638 	bpack(atacap->model, atacap->model, sizeof(atacap->model));
639 	btrim(atacap->revision, sizeof(atacap->revision));
640 	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
641 	btrim(atacap->serial, sizeof(atacap->serial));
642 	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));
643 
644 	if (bootverbose)
645 	    kprintf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
646 		   device_get_unit(ch->dev),
647 		   atadev->unit == ATA_MASTER ? "master" : "slave",
648 		   ata_mode2str(ata_pmode(atacap)),
649 		   ata_mode2str(ata_wmode(atacap)),
650 		   ata_mode2str(ata_umode(atacap)),
651 		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");
652 
653 	if (init) {
654 	    ksprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
655 	    device_set_desc_copy(atadev->dev, buffer);
656 	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
657 		(atadev->param.config != ATA_CFA_MAGIC1) &&
658 		(atadev->param.config != ATA_CFA_MAGIC2)) {
659 		if (atapi_dma && ch->dma &&
660 		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
661 		    ata_umode(&atadev->param) >= ATA_UDMA2)
662 		    atadev->mode = ATA_DMA_MAX;
663 	    }
664 	    else {
665 		if (ata_dma && ch->dma &&
666 		    (ata_umode(&atadev->param) > 0 ||
667 		     ata_wmode(&atadev->param) > 0))
668 		    atadev->mode = ATA_DMA_MAX;
669 	    }
670 	}
671     }
672     else {
673 	if (!error)
674 	    error = ENXIO;
675     }
676     return error;
677 }
678 
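/*
 * Allocate device structures and children for the units present on this
 * channel, fetch their IDENTIFY data and run the generic bus probe/attach.
 */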
679 int
680 ata_identify(device_t dev)
681 {
682     struct ata_channel *ch = device_get_softc(dev);
683     struct ata_device *master = NULL, *slave = NULL;
684     device_t master_child = NULL, slave_child = NULL;
685     int master_unit = -1, slave_unit = -1;
686 
687     if (ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) {
688 	if (!(master = kmalloc(sizeof(struct ata_device),
689 			      M_ATA, M_INTWAIT | M_ZERO))) {
690 	    device_printf(dev, "out of memory\n");
691 	    return ENOMEM;
692 	}
693 	master->unit = ATA_MASTER;
694     }
695     if (ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) {
696 	if (!(slave = kmalloc(sizeof(struct ata_device),
697 			     M_ATA, M_INTWAIT | M_ZERO))) {
698 	    kfree(master, M_ATA);
699 	    device_printf(dev, "out of memory\n");
700 	    return ENOMEM;
701 	}
702 	slave->unit = ATA_SLAVE;
703     }
704 
705 #ifdef ATA_STATIC_ID
706     if (ch->devices & ATA_ATA_MASTER)
707 	master_unit = (device_get_unit(dev) << 1);
708 #endif
709     if (master && !(master_child = ata_add_child(dev, master, master_unit))) {
710 	kfree(master, M_ATA);
711 	master = NULL;
712     }
713 #ifdef ATA_STATIC_ID
714     if (ch->devices & ATA_ATA_SLAVE)
715 	slave_unit = (device_get_unit(dev) << 1) + 1;
716 #endif
717     if (slave && !(slave_child = ata_add_child(dev, slave, slave_unit))) {
718 	kfree(slave, M_ATA);
719 	slave = NULL;
720     }
721 
722     if (slave && ata_getparam(slave, 1)) {
723 	device_delete_child(dev, slave_child);
724 	kfree(slave, M_ATA);
725     }
726     if (master && ata_getparam(master, 1)) {
727 	device_delete_child(dev, master_child);
728 	kfree(master, M_ATA);
729     }
730 
731     bus_generic_probe(dev);
732     bus_generic_attach(dev);
733     return 0;
734 }
735 
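/*
 * Alias the read-side registers (error, ireason, status, altstat) to the
 * resources already set up for their write-side counterparts.
 */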
736 void
737 ata_default_registers(device_t dev)
738 {
739     struct ata_channel *ch = device_get_softc(dev);
740 
741     /* fill in the defaults from what's set up already */
742     ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
743     ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
744     ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
745     ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
746     ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
747     ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
748     ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
749     ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
750 }
751 
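/*
 * Switch a request to the 48-bit command variants when its LBA or sector
 * count exceeds the 28-bit limits and the device supports 48-bit addressing;
 * on controllers flagged ATA_NO_48BIT_DMA the request is demoted to PIO.
 */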
752 void
753 ata_modify_if_48bit(struct ata_request *request)
754 {
755     struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
756     struct ata_device *atadev = device_get_softc(request->dev);
757 
758     atadev->flags &= ~ATA_D_48BIT_ACTIVE;
759 
760     if ((request->u.ata.lba >= ATA_MAX_28BIT_LBA ||
761 	 request->u.ata.count > 256) &&
762 	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {
763 
764 	/* translate command into 48bit version */
765 	switch (request->u.ata.command) {
766 	case ATA_READ:
767 	    request->u.ata.command = ATA_READ48;
768 	    break;
769 	case ATA_READ_MUL:
770 	    request->u.ata.command = ATA_READ_MUL48;
771 	    break;
772 	case ATA_READ_DMA:
773 	    if (ch->flags & ATA_NO_48BIT_DMA) {
774 		if (request->transfersize > DEV_BSIZE)
775 		    request->u.ata.command = ATA_READ_MUL48;
776 		else
777 		    request->u.ata.command = ATA_READ48;
778 		request->flags &= ~ATA_R_DMA;
779 	    }
780 	    else
781 		request->u.ata.command = ATA_READ_DMA48;
782 	    break;
783 	case ATA_READ_DMA_QUEUED:
784 	    if (ch->flags & ATA_NO_48BIT_DMA) {
785 		if (request->transfersize > DEV_BSIZE)
786 		    request->u.ata.command = ATA_READ_MUL48;
787 		else
788 		    request->u.ata.command = ATA_READ48;
789 		request->flags &= ~ATA_R_DMA;
790 	    }
791 	    else
792 		request->u.ata.command = ATA_READ_DMA_QUEUED48;
793 	    break;
794 	case ATA_WRITE:
795 	    request->u.ata.command = ATA_WRITE48;
796 	    break;
797 	case ATA_WRITE_MUL:
798 	    request->u.ata.command = ATA_WRITE_MUL48;
799 	    break;
800 	case ATA_WRITE_DMA:
801 	    if (ch->flags & ATA_NO_48BIT_DMA) {
802 		if (request->transfersize > DEV_BSIZE)
803 		    request->u.ata.command = ATA_WRITE_MUL48;
804 		else
805 		    request->u.ata.command = ATA_WRITE48;
806 		request->flags &= ~ATA_R_DMA;
807 	    }
808 	    else
809 		request->u.ata.command = ATA_WRITE_DMA48;
810 	    break;
811 	case ATA_WRITE_DMA_QUEUED:
812 	    if (ch->flags & ATA_NO_48BIT_DMA) {
813 		if (request->transfersize > DEV_BSIZE)
814 		    request->u.ata.command = ATA_WRITE_MUL48;
815 		else
816 		    request->u.ata.command = ATA_WRITE48;
818 		request->flags &= ~ATA_R_DMA;
819 	    }
820 	    else
821 		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
822 	    break;
823 	case ATA_FLUSHCACHE:
824 	    request->u.ata.command = ATA_FLUSHCACHE48;
825 	    break;
826 	case ATA_READ_NATIVE_MAX_ADDDRESS:
827 	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48;
828 	    break;
829 	case ATA_SET_MAX_ADDRESS:
830 	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
831 	    break;
832 	default:
833 	    return;
834 	}
835 	atadev->flags |= ATA_D_48BIT_ACTIVE;
836     }
837 }
838 
839 void
840 ata_udelay(int interval)
841 {
842     /* for now just use DELAY, the timer/sleep subsystems are not there yet */
843     /* XXX use DRIVERSLEEP if possible */
844     if (1 || interval < (1000000/hz))
845 	DELAY(interval);
846     else
847 	tsleep(&interval, 0, "ataslp", interval/(1000000/hz));
848 }
849 
850 char *
851 ata_mode2str(int mode)
852 {
853     switch (mode) {
854     case -1: return "UNSUPPORTED";
855     case ATA_PIO0: return "PIO0";
856     case ATA_PIO1: return "PIO1";
857     case ATA_PIO2: return "PIO2";
858     case ATA_PIO3: return "PIO3";
859     case ATA_PIO4: return "PIO4";
860     case ATA_WDMA0: return "WDMA0";
861     case ATA_WDMA1: return "WDMA1";
862     case ATA_WDMA2: return "WDMA2";
863     case ATA_UDMA0: return "UDMA16";
864     case ATA_UDMA1: return "UDMA25";
865     case ATA_UDMA2: return "UDMA33";
866     case ATA_UDMA3: return "UDMA40";
867     case ATA_UDMA4: return "UDMA66";
868     case ATA_UDMA5: return "UDMA100";
869     case ATA_UDMA6: return "UDMA133";
870     case ATA_SA150: return "SATA150";
871     case ATA_SA300: return "SATA300";
872     case ATA_USB: return "USB";
873     case ATA_USB1: return "USB1";
874     case ATA_USB2: return "USB2";
875     default:
876 	if (mode & ATA_DMA_MASK)
877 	    return "BIOSDMA";
878 	else
879 	    return "BIOSPIO";
880     }
881 }
882 
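/*
 * Derive the best supported PIO mode from the IDENTIFY data, falling back
 * to the MWDMA and retired PIO fields when the advanced PIO word is invalid.
 */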
883 int
884 ata_pmode(struct ata_params *ap)
885 {
886     if (ap->atavalid & ATA_FLAG_64_70) {
887 	if (ap->apiomodes & 0x02)
888 	    return ATA_PIO4;
889 	if (ap->apiomodes & 0x01)
890 	    return ATA_PIO3;
891     }
892     if (ap->mwdmamodes & 0x04)
893 	return ATA_PIO4;
894     if (ap->mwdmamodes & 0x02)
895 	return ATA_PIO3;
896     if (ap->mwdmamodes & 0x01)
897 	return ATA_PIO2;
898     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
899 	return ATA_PIO2;
900     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
901 	return ATA_PIO1;
902     if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
903 	return ATA_PIO0;
904     return ATA_PIO0;
905 }
906 
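/* derive the best supported multiword DMA mode, or -1 if none */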
907 int
908 ata_wmode(struct ata_params *ap)
909 {
910     if (ap->mwdmamodes & 0x04)
911 	return ATA_WDMA2;
912     if (ap->mwdmamodes & 0x02)
913 	return ATA_WDMA1;
914     if (ap->mwdmamodes & 0x01)
915 	return ATA_WDMA0;
916     return -1;
917 }
918 
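/* derive the best supported Ultra DMA mode, or -1 if none */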
919 int
920 ata_umode(struct ata_params *ap)
921 {
922     if (ap->atavalid & ATA_FLAG_88) {
923 	if (ap->udmamodes & 0x40)
924 	    return ATA_UDMA6;
925 	if (ap->udmamodes & 0x20)
926 	    return ATA_UDMA5;
927 	if (ap->udmamodes & 0x10)
928 	    return ATA_UDMA4;
929 	if (ap->udmamodes & 0x08)
930 	    return ATA_UDMA3;
931 	if (ap->udmamodes & 0x04)
932 	    return ATA_UDMA2;
933 	if (ap->udmamodes & 0x02)
934 	    return ATA_UDMA1;
935 	if (ap->udmamodes & 0x01)
936 	    return ATA_UDMA0;
937     }
938     return -1;
939 }
940 
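/* clamp the requested mode to maxmode and to what the device supports */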
941 int
942 ata_limit_mode(device_t dev, int mode, int maxmode)
943 {
944     struct ata_device *atadev = device_get_softc(dev);
945 
946     if (maxmode && mode > maxmode)
947 	mode = maxmode;
948 
949     if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
950 	return min(mode, ata_umode(&atadev->param));
951 
952     if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
953 	return min(mode, ata_wmode(&atadev->param));
954 
955     if (mode > ata_pmode(&atadev->param))
956 	return min(mode, ata_pmode(&atadev->param));
957 
958     return mode;
959 }
960 
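/* convert each 16-bit word in the buffer from big-endian to host order */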
961 static void
962 bswap(int8_t *buf, int len)
963 {
964     u_int16_t *ptr = (u_int16_t*)(buf + len);
965 
966     while (--ptr >= (u_int16_t*)buf)
967 	*ptr = ntohs(*ptr);
968 }
969 
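/* turn NULs and underscores into spaces, then zero out trailing blanks */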
970 static void
971 btrim(int8_t *buf, int len)
972 {
973     int8_t *ptr;
974 
975     for (ptr = buf; ptr < buf+len; ++ptr)
976 	if (!*ptr || *ptr == '_')
977 	    *ptr = ' ';
978     for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
979 	*ptr = 0;
980 }
981 
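/* copy src to dst, collapsing runs of spaces and dropping a leading blank */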
982 static void
983 bpack(int8_t *src, int8_t *dst, int len)
984 {
985     int i, j, blank;
986 
987     for (i = j = blank = 0 ; i < len; i++) {
988 	if (blank && src[i] == ' ') continue;
989 	if (blank && src[i] != ' ') {
990 	    dst[j++] = src[i];
991 	    blank = 0;
992 	    continue;
993 	}
994 	if (src[i] == ' ') {
995 	    blank = 1;
996 	    if (i == 0)
997 		continue;
998 	}
999 	dst[j++] = src[i];
1000     }
1001     if (j < len)
1002 	dst[j] = 0x00;
1003 }
1004 
1005 
1006 /*
1007  * module handling
1008  */
1009 static int
1010 ata_module_event_handler(module_t mod, int what, void *arg)
1011 {
1012     /* static because we need the reference at destruction time */
1013     static cdev_t atacdev;
1014 
1015     switch (what) {
1016     case MOD_LOAD:
1017 	/* register controlling device */
1018 	dev_ops_add(&ata_ops, 0, 0);
1019 	atacdev = make_dev(&ata_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
1020 	reference_dev(atacdev);
1021 	return 0;
1022 
1023     case MOD_UNLOAD:
1024 	/* deregister controlling device */
1025 	destroy_dev(atacdev);
1026 	dev_ops_remove(&ata_ops, 0, 0);
1027 	return 0;
1028 
1029     default:
1030 	return EOPNOTSUPP;
1031     }
1032 }
1033 
1034 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
1035 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
1036 MODULE_VERSION(ata, 1);
1037 
1038 /*
1039  * Construct a completely zero'ed ata_request. On objcache_put(), an
1040  * ata_request object is also zero'ed, so objcache_get() is guaranteed to give
1041  * completely zero'ed objects without spending too much time.
1042  */
1043 static boolean_t
1044 ata_request_cache_ctor(void *obj, void *private, int ocflags)
1045 {
1046     struct ata_request *arp = obj;
1047 
1048     bzero(arp, sizeof(struct ata_request));
1049     return(TRUE);
1050 }
1051 
1052 /*
1053  * Construct a completely zero'ed ata_composite. On objcache_put(), an
1054  * ata_composite object is also zero'ed, so objcache_get() is guaranteed to give
1055  * completely zero'ed objects without spending too much time.
1056  */
1057 static boolean_t
1058 ata_composite_cache_ctor(void *obj, void *private, int ocflags)
1059 {
1060     struct ata_composite *acp = obj;
1061 
1062     bzero(acp, sizeof(struct ata_composite));
1063     return(TRUE);
1064 }
1065 
1066 static void
1067 ata_init(void)
1068 {
1069     ata_request_cache = objcache_create("ata_request", 0, 0,
1070 					ata_request_cache_ctor, NULL, NULL,
1071 					objcache_malloc_alloc,
1072 					objcache_malloc_free,
1073 					&ata_request_malloc_args);
1074     ata_composite_cache = objcache_create("ata_composite", 0, 0,
1075 					  ata_composite_cache_ctor, NULL, NULL,
1076 					  objcache_malloc_alloc,
1077 					  objcache_malloc_free,
1078 					  &ata_composite_malloc_args);
1079 }
1080 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);
1081 
1082 static void
1083 ata_uninit(void)
1084 {
1085     objcache_destroy(ata_composite_cache);
1086     objcache_destroy(ata_request_cache);
1087 }
1088 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);
1089