xref: /openbsd/sys/dev/ata/atascsi.c (revision 4cfece93)
1 /*	$OpenBSD: atascsi.c,v 1.138 2020/07/11 13:34:06 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
6  * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/device.h>
27 #include <sys/queue.h>
28 #include <sys/pool.h>
29 
30 #include <scsi/scsi_all.h>
31 #include <scsi/scsi_disk.h>
32 #include <scsi/scsiconf.h>
33 
34 #include <dev/ata/atascsi.h>
35 #include <dev/ata/pmreg.h>
36 
37 struct atascsi_port;
38 
39 struct atascsi {
40 	struct device		*as_dev;
41 	void			*as_cookie;
42 
43 	struct atascsi_host_port **as_host_ports;
44 
45 	struct atascsi_methods	*as_methods;
46 	struct scsi_adapter	as_switch;
47 	struct scsi_link	as_link;
48 	struct scsibus_softc	*as_scsibus;
49 
50 	int			as_capability;
51 	int			as_ncqdepth;
52 };
53 
54 /*
55  * atascsi_host_port is a port attached to the host controller, and
56  * only holds the details relevant to the host controller.
57  * atascsi_port is any port, including ports on port multipliers, and
58  * it holds details of the device attached to the port.
59  *
60  * When there is a port multiplier attached to a port, the ahp_ports
61  * array in the atascsi_host_port struct contains one atascsi_port for
62  * each port, and one for the control port (port 15).  The index into
63  * the array is the LUN used to address the port.  For the control port,
64  * the LUN is 0, and for the port multiplier ports, the LUN is the
65  * port number plus one.
66  *
67  * When there is no port multiplier attached to a port, the ahp_ports
68  * array contains a single entry for the device.  The LUN and port number
69  * for this entry are both 0.
70  */
71 
72 struct atascsi_host_port {
73 	struct scsi_iopool	ahp_iopool;
74 	struct atascsi		*ahp_as;
75 	int			ahp_port;
76 	int			ahp_nports;
77 
78 	struct atascsi_port	**ahp_ports;
79 };
80 
81 struct atascsi_port {
82 	struct ata_identify	ap_identify;
83 	struct atascsi_host_port *ap_host_port;
84 	struct atascsi		*ap_as;
85 	int			ap_pmp_port;
86 	int			ap_type;
87 	int			ap_ncqdepth;
88 	int			ap_features;
89 #define ATA_PORT_F_NCQ			0x1
90 #define ATA_PORT_F_TRIM			0x2
91 };
92 
93 void		atascsi_cmd(struct scsi_xfer *);
94 int		atascsi_probe(struct scsi_link *);
95 void		atascsi_free(struct scsi_link *);
96 
97 /* template */
98 struct scsi_adapter atascsi_switch = {
99 	atascsi_cmd, NULL, atascsi_probe, atascsi_free, NULL
100 };
101 
102 void		ata_swapcopy(void *, void *, size_t);
103 
104 void		atascsi_disk_cmd(struct scsi_xfer *);
105 void		atascsi_disk_cmd_done(struct ata_xfer *);
106 void		atascsi_disk_inq(struct scsi_xfer *);
107 void		atascsi_disk_inquiry(struct scsi_xfer *);
108 void		atascsi_disk_vpd_supported(struct scsi_xfer *);
109 void		atascsi_disk_vpd_serial(struct scsi_xfer *);
110 void		atascsi_disk_vpd_ident(struct scsi_xfer *);
111 void		atascsi_disk_vpd_ata(struct scsi_xfer *);
112 void		atascsi_disk_vpd_limits(struct scsi_xfer *);
113 void		atascsi_disk_vpd_info(struct scsi_xfer *);
114 void		atascsi_disk_vpd_thin(struct scsi_xfer *);
115 void		atascsi_disk_write_same_16(struct scsi_xfer *);
116 void		atascsi_disk_write_same_16_done(struct ata_xfer *);
117 void		atascsi_disk_unmap(struct scsi_xfer *);
118 void		atascsi_disk_unmap_task(void *);
119 void		atascsi_disk_unmap_done(struct ata_xfer *);
120 void		atascsi_disk_capacity(struct scsi_xfer *);
121 void		atascsi_disk_capacity16(struct scsi_xfer *);
122 void		atascsi_disk_sync(struct scsi_xfer *);
123 void		atascsi_disk_sync_done(struct ata_xfer *);
124 void		atascsi_disk_sense(struct scsi_xfer *);
125 void		atascsi_disk_start_stop(struct scsi_xfer *);
126 void		atascsi_disk_start_stop_done(struct ata_xfer *);
127 
128 void		atascsi_atapi_cmd(struct scsi_xfer *);
129 void		atascsi_atapi_cmd_done(struct ata_xfer *);
130 
131 void		atascsi_pmp_cmd(struct scsi_xfer *);
132 void		atascsi_pmp_cmd_done(struct ata_xfer *);
133 void		atascsi_pmp_sense(struct scsi_xfer *xs);
134 void		atascsi_pmp_inq(struct scsi_xfer *xs);
135 
136 
137 void		atascsi_passthru_12(struct scsi_xfer *);
138 void		atascsi_passthru_16(struct scsi_xfer *);
139 int		atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
140 void		atascsi_passthru_done(struct ata_xfer *);
141 
142 void		atascsi_done(struct scsi_xfer *, int);
143 
144 void		ata_exec(struct atascsi *, struct ata_xfer *);
145 
146 void		ata_polled_complete(struct ata_xfer *);
147 int		ata_polled(struct ata_xfer *);
148 
149 u_int64_t	ata_identify_blocks(struct ata_identify *);
150 u_int		ata_identify_blocksize(struct ata_identify *);
151 u_int		ata_identify_block_l2p_exp(struct ata_identify *);
152 u_int		ata_identify_block_logical_align(struct ata_identify *);
153 
154 void		*atascsi_io_get(void *);
155 void		atascsi_io_put(void *, void *);
156 struct atascsi_port * atascsi_lookup_port(struct scsi_link *);
157 
158 int		atascsi_port_identify(struct atascsi_port *,
159 		    struct ata_identify *);
160 int		atascsi_port_set_features(struct atascsi_port *, int, int);
161 
162 
163 struct atascsi *
164 atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
165 {
166 	struct scsibus_attach_args	saa;
167 	struct atascsi			*as;
168 
169 	as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);
170 
171 	as->as_dev = self;
172 	as->as_cookie = aaa->aaa_cookie;
173 	as->as_methods = aaa->aaa_methods;
174 	as->as_capability = aaa->aaa_capability;
175 	as->as_ncqdepth = aaa->aaa_ncmds;
176 
177 	/* copy from template and modify for ourselves */
178 	as->as_switch = atascsi_switch;
179 	if (aaa->aaa_minphys != NULL)
180 		as->as_switch.dev_minphys = aaa->aaa_minphys;
181 
182 	as->as_host_ports = mallocarray(aaa->aaa_nports,
183 	    sizeof(struct atascsi_host_port *),	M_DEVBUF, M_WAITOK | M_ZERO);
184 
185 	as->as_link.adapter = &as->as_switch;
186 	as->as_link.adapter_softc = as;
187 	as->as_link.adapter_buswidth = aaa->aaa_nports;
188 	as->as_link.luns = SATA_PMP_MAX_PORTS;
189 	as->as_link.adapter_target = SDEV_NO_ADAPTER_TARGET;
190 	as->as_link.openings = 1;
191 
192 	saa.saa_sc_link = &as->as_link;
193 
194 	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
195 	    scsiprint);
196 
197 	return (as);
198 }
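
/*
 * A controller driver (e.g. ahci) hands its ports to this layer roughly
 * like the sketch below; the aaa_* fields are the ones consumed above,
 * while "sc" and its members stand in for whatever names the caller uses:
 *
 *	struct atascsi_attach_args aaa;
 *
 *	memset(&aaa, 0, sizeof(aaa));
 *	aaa.aaa_cookie = sc;
 *	aaa.aaa_methods = &sc_atascsi_methods;
 *	aaa.aaa_minphys = NULL;
 *	aaa.aaa_nports = nports;
 *	aaa.aaa_ncmds = ncmds;
 *	aaa.aaa_capability = ASAA_CAP_NCQ;
 *	sc->sc_atascsi = atascsi_attach(&sc->sc_dev, &aaa);
 */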
199 
200 int
201 atascsi_detach(struct atascsi *as, int flags)
202 {
203 	int				rv;
204 
205 	rv = config_detach((struct device *)as->as_scsibus, flags);
206 	if (rv != 0)
207 		return (rv);
208 
209 	free(as->as_host_ports, M_DEVBUF, 0);
210 	free(as, M_DEVBUF, sizeof(*as));
211 
212 	return (0);
213 }
214 
215 struct atascsi_port *
216 atascsi_lookup_port(struct scsi_link *link)
217 {
218 	struct atascsi 			*as = link->adapter_softc;
219 	struct atascsi_host_port 	*ahp;
220 
221 	if (link->target >= as->as_link.adapter_buswidth)
222 		return (NULL);
223 
224 	ahp = as->as_host_ports[link->target];
225 	if (link->lun >= ahp->ahp_nports)
226 		return (NULL);
227 
228 	return (ahp->ahp_ports[link->lun]);
229 }
230 
231 int
232 atascsi_probe(struct scsi_link *link)
233 {
234 	struct atascsi			*as = link->adapter_softc;
235 	struct atascsi_host_port 	*ahp;
236 	struct atascsi_port		*ap;
237 	struct ata_xfer			*xa;
238 	struct ata_identify		*identify;
239 	int				port, type, qdepth;
240 	int				rv;
241 	u_int16_t			cmdset;
242 	u_int16_t			validinfo, ultradma;
243 	int				i, xfermode = -1;
244 
245 	port = link->target;
246 	if (port >= as->as_link.adapter_buswidth)
247 		return (ENXIO);
248 
249 	/* if this is a PMP port, check it's valid */
250 	if (link->lun > 0) {
251 		if (link->lun >= as->as_host_ports[port]->ahp_nports)
252 			return (ENXIO);
253 	}
254 
255 	type = as->as_methods->ata_probe(as->as_cookie, port, link->lun);
256 	switch (type) {
257 	case ATA_PORT_T_DISK:
258 		break;
259 	case ATA_PORT_T_ATAPI:
260 		link->flags |= SDEV_ATAPI;
261 		link->quirks |= SDEV_ONLYBIG;
262 		break;
263 	case ATA_PORT_T_PM:
264 		if (link->lun != 0) {
265 			printf("%s.%d.%d: Port multipliers cannot be nested\n",
266 			    as->as_dev->dv_xname, port, link->lun);
267 			rv = ENODEV;
268 			goto unsupported;
269 		}
270 		break;
271 	default:
272 		rv = ENODEV;
273 		goto unsupported;
274 	}
275 
276 	ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
277 	ap->ap_as = as;
278 
279 	if (link->lun == 0) {
280 		ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
281 		ahp->ahp_as = as;
282 		ahp->ahp_port = port;
283 
284 		scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
285 		    atascsi_io_put);
286 
287 		as->as_host_ports[port] = ahp;
288 
289 		if (type == ATA_PORT_T_PM) {
290 			ahp->ahp_nports = SATA_PMP_MAX_PORTS;
291 			ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
292 		} else {
293 			ahp->ahp_nports = 1;
294 			ap->ap_pmp_port = 0;
295 		}
296 		ahp->ahp_ports = mallocarray(ahp->ahp_nports,
297 		    sizeof(struct atascsi_port *), M_DEVBUF, M_WAITOK | M_ZERO);
298 	} else {
299 		ahp = as->as_host_ports[port];
300 		ap->ap_pmp_port = link->lun - 1;
301 	}
302 
303 	ap->ap_host_port = ahp;
304 	ap->ap_type = type;
305 
306 	link->pool = &ahp->ahp_iopool;
307 
308 	/* fetch the device info, except for port multipliers */
309 	if (type != ATA_PORT_T_PM) {
310 
311 		/* devices attached to port multipliers tend not to be
312 		 * spun up at this point, and sometimes this prevents
313 		 * identification from working, so we retry a few times
314 		 * with a fairly long delay.
315 		 */
316 
317 		identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);
318 
319 		int count = (link->lun > 0) ? 6 : 2;
320 		while (count--) {
321 			rv = atascsi_port_identify(ap, identify);
322 			if (rv == 0) {
323 				ap->ap_identify = *identify;
324 				break;
325 			}
326 			if (count > 0)
327 				delay(5000000);
328 		}
329 
330 		dma_free(identify, sizeof(*identify));
331 
332 		if (rv != 0) {
333 			goto error;
334 		}
335 	}
336 
337 	ahp->ahp_ports[link->lun] = ap;
338 
339 	if (type != ATA_PORT_T_DISK)
340 		return (0);
341 
342 	/*
343 	 * Early SATA drives (as well as PATA drives) need to have
344 	 * their transfer mode set properly, otherwise commands that
345 	 * use DMA will time out.
346 	 */
347 	validinfo = letoh16(ap->ap_identify.validinfo);
348 	if (ISSET(validinfo, ATA_ID_VALIDINFO_ULTRADMA)) {
349 		ultradma = letoh16(ap->ap_identify.ultradma);
350 		for (i = 7; i >= 0; i--) {
351 			if (ultradma & (1 << i)) {
352 				xfermode = ATA_SF_XFERMODE_UDMA | i;
353 				break;
354 			}
355 		}
356 	}
357 	if (xfermode != -1)
358 		(void)atascsi_port_set_features(ap, ATA_SF_XFERMODE, xfermode);
359 
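	/*
	 * Negotiate the NCQ depth: take the smaller of what the device
	 * reports in IDENTIFY and what the controller can queue, and give
	 * one slot back if the controller keeps a command reserved for its
	 * own use.
	 */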
360 	if (as->as_capability & ASAA_CAP_NCQ &&
361 	    ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
362 	    (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
363 		ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
364 		qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
365 		if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
366 			qdepth--;
367 
368 		if (qdepth > 1) {
369 			SET(ap->ap_features, ATA_PORT_F_NCQ);
370 
371 			/* Raise the number of openings */
372 			link->openings = qdepth;
373 
374 			/*
375 			 * XXX for directly attached devices, throw away any xfers
376 			 * that have tag numbers higher than what the device supports.
377 			 */
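			/*
			 * Each ata_xfer carries a fixed tag assigned by the
			 * controller driver.  Pulling xfers whose tag is at
			 * or above the advertised openings out of the iopool
			 * and never returning them guarantees the midlayer
			 * only issues tags the device will accept.
			 */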
378 			if (link->lun == 0) {
379 				while (qdepth--) {
380 					xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
381 					if (xa->tag < link->openings) {
382 						xa->state = ATA_S_COMPLETE;
383 						scsi_io_put(&ahp->ahp_iopool, xa);
384 					}
385 				}
386 			}
387 		}
388 	}
389 
390 	if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
391 	    ATA_ID_DATA_SET_MGMT_TRIM))
392 		SET(ap->ap_features, ATA_PORT_F_TRIM);
393 
394 	cmdset = letoh16(ap->ap_identify.cmdset82);
395 
396 	/* Enable write cache if supported */
397 	if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
398 		/* We don't care if it fails. */
399 		(void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
400 	}
401 
402 	/* Enable read lookahead if supported */
403 	if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
404 		/* We don't care if it fails. */
405 		(void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
406 	}
407 
408 	/*
409 	 * FREEZE LOCK the device so malicious users can't lock it on us.
410 	 * As there is no harm in issuing this to devices that don't
411 	 * support the security feature set, we just send it and don't
412 	 * bother checking whether the device returns a command abort to
413 	 * tell us it doesn't support it.
414 	 */
415 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
416 	if (xa == NULL)
417 		panic("no free xfers on a new port");
418 	xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
419 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
420 	xa->flags = ATA_F_POLL;
421 	xa->timeout = 1000;
422 	xa->complete = ata_polled_complete;
423 	xa->pmp_port = ap->ap_pmp_port;
424 	xa->atascsi_private = &ahp->ahp_iopool;
425 	ata_exec(as, xa);
426 	ata_polled(xa); /* we don't care if it doesn't work */
427 
428 	return (0);
429 error:
430 	free(ap, M_DEVBUF, sizeof(*ap));
431 unsupported:
432 
433 	as->as_methods->ata_free(as->as_cookie, port, link->lun);
434 	return (rv);
435 }
436 
437 void
438 atascsi_free(struct scsi_link *link)
439 {
440 	struct atascsi			*as = link->adapter_softc;
441 	struct atascsi_host_port	*ahp;
442 	struct atascsi_port		*ap;
443 	int				port;
444 
445 	port = link->target;
446 	if (port >= as->as_link.adapter_buswidth)
447 		return;
448 
449 	ahp = as->as_host_ports[port];
450 	if (ahp == NULL)
451 		return;
452 
453 	if (link->lun >= ahp->ahp_nports)
454 		return;
455 
456 	ap = ahp->ahp_ports[link->lun];
457 	free(ap, M_DEVBUF, sizeof(*ap));
458 	ahp->ahp_ports[link->lun] = NULL;
459 
460 	as->as_methods->ata_free(as->as_cookie, port, link->lun);
461 
462 	if (link->lun == ahp->ahp_nports - 1) {
463 		/* we've already freed all of ahp->ahp_ports, now
464 		 * free ahp itself.  this relies on the order luns are
465 		 * detached in scsi_detach_target().
466 		 */
467 		free(ahp, M_DEVBUF, sizeof(*ahp));
468 		as->as_host_ports[port] = NULL;
469 	}
470 }
471 
472 void
473 atascsi_cmd(struct scsi_xfer *xs)
474 {
475 	struct scsi_link	*link = xs->sc_link;
476 	struct atascsi_port	*ap;
477 
478 	ap = atascsi_lookup_port(link);
479 	if (ap == NULL) {
480 		atascsi_done(xs, XS_DRIVER_STUFFUP);
481 		return;
482 	}
483 
484 	switch (ap->ap_type) {
485 	case ATA_PORT_T_DISK:
486 		atascsi_disk_cmd(xs);
487 		break;
488 	case ATA_PORT_T_ATAPI:
489 		atascsi_atapi_cmd(xs);
490 		break;
491 	case ATA_PORT_T_PM:
492 		atascsi_pmp_cmd(xs);
493 		break;
494 
495 	case ATA_PORT_T_NONE:
496 	default:
497 		atascsi_done(xs, XS_DRIVER_STUFFUP);
498 		break;
499 	}
500 }
501 
502 void
503 atascsi_disk_cmd(struct scsi_xfer *xs)
504 {
505 	struct scsi_link	*link = xs->sc_link;
506 	struct atascsi		*as = link->adapter_softc;
507 	struct atascsi_port	*ap;
508 	struct ata_xfer		*xa = xs->io;
509 	int			flags = 0;
510 	struct ata_fis_h2d	*fis;
511 	u_int64_t		lba;
512 	u_int32_t		sector_count;
513 
514 	ap = atascsi_lookup_port(link);
515 
516 	switch (xs->cmd->opcode) {
517 	case READ_COMMAND:
518 	case READ_BIG:
519 	case READ_12:
520 	case READ_16:
521 		flags = ATA_F_READ;
522 		break;
523 	case WRITE_COMMAND:
524 	case WRITE_BIG:
525 	case WRITE_12:
526 	case WRITE_16:
527 		flags = ATA_F_WRITE;
528 		/* deal with io outside the switch */
529 		break;
530 
531 	case WRITE_SAME_16:
532 		atascsi_disk_write_same_16(xs);
533 		return;
534 	case UNMAP:
535 		atascsi_disk_unmap(xs);
536 		return;
537 
538 	case SYNCHRONIZE_CACHE:
539 		atascsi_disk_sync(xs);
540 		return;
541 	case REQUEST_SENSE:
542 		atascsi_disk_sense(xs);
543 		return;
544 	case INQUIRY:
545 		atascsi_disk_inq(xs);
546 		return;
547 	case READ_CAPACITY:
548 		atascsi_disk_capacity(xs);
549 		return;
550 	case READ_CAPACITY_16:
551 		atascsi_disk_capacity16(xs);
552 		return;
553 
554 	case ATA_PASSTHRU_12:
555 		atascsi_passthru_12(xs);
556 		return;
557 	case ATA_PASSTHRU_16:
558 		atascsi_passthru_16(xs);
559 		return;
560 
561 	case START_STOP:
562 		atascsi_disk_start_stop(xs);
563 		return;
564 
565 	case TEST_UNIT_READY:
566 	case PREVENT_ALLOW:
567 		atascsi_done(xs, XS_NOERROR);
568 		return;
569 
570 	default:
571 		atascsi_done(xs, XS_DRIVER_STUFFUP);
572 		return;
573 	}
574 
575 	xa->flags = flags;
576 	scsi_cmd_rw_decode(xs->cmd, &lba, &sector_count);
577 	if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
578 		atascsi_done(xs, XS_DRIVER_STUFFUP);
579 		return;
580 	}
581 
582 	fis = xa->fis;
583 
584 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
585 	fis->lba_low = lba & 0xff;
586 	fis->lba_mid = (lba >> 8) & 0xff;
587 	fis->lba_high = (lba >> 16) & 0xff;
588 
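	/*
	 * Pick the ATA command variant: NCQ (FPDMA) when the port and xfer
	 * tag allow it and we are not polling, LBA48 DMA when the request
	 * does not fit the 28-bit LBA / 8-bit sector count limits, and
	 * plain 28-bit LBA DMA otherwise.  Note the NCQ register layout:
	 * the tag rides in sector_count (shifted left by 3) and the
	 * transfer length in the features field.
	 */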
589 	if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
590 	    (xa->tag < ap->ap_ncqdepth) &&
591 	    !(xs->flags & SCSI_POLL)) {
592 		/* Use NCQ */
593 		xa->flags |= ATA_F_NCQ;
594 		fis->command = (xa->flags & ATA_F_WRITE) ?
595 		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
596 		fis->device = ATA_H2D_DEVICE_LBA;
597 		fis->lba_low_exp = (lba >> 24) & 0xff;
598 		fis->lba_mid_exp = (lba >> 32) & 0xff;
599 		fis->lba_high_exp = (lba >> 40) & 0xff;
600 		fis->sector_count = xa->tag << 3;
601 		fis->features = sector_count & 0xff;
602 		fis->features_exp = (sector_count >> 8) & 0xff;
603 	} else if (sector_count > 0x100 || lba > 0xfffffff) {
604 		/* Use LBA48 */
605 		fis->command = (xa->flags & ATA_F_WRITE) ?
606 		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
607 		fis->device = ATA_H2D_DEVICE_LBA;
608 		fis->lba_low_exp = (lba >> 24) & 0xff;
609 		fis->lba_mid_exp = (lba >> 32) & 0xff;
610 		fis->lba_high_exp = (lba >> 40) & 0xff;
611 		fis->sector_count = sector_count & 0xff;
612 		fis->sector_count_exp = (sector_count >> 8) & 0xff;
613 	} else {
614 		/* Use LBA */
615 		fis->command = (xa->flags & ATA_F_WRITE) ?
616 		    ATA_C_WRITEDMA : ATA_C_READDMA;
617 		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
618 		fis->sector_count = sector_count & 0xff;
619 	}
620 
621 	xa->data = xs->data;
622 	xa->datalen = xs->datalen;
623 	xa->complete = atascsi_disk_cmd_done;
624 	xa->timeout = xs->timeout;
625 	xa->pmp_port = ap->ap_pmp_port;
626 	xa->atascsi_private = xs;
627 	if (xs->flags & SCSI_POLL)
628 		xa->flags |= ATA_F_POLL;
629 
630 	ata_exec(as, xa);
631 }
632 
633 void
634 atascsi_disk_cmd_done(struct ata_xfer *xa)
635 {
636 	struct scsi_xfer	*xs = xa->atascsi_private;
637 
638 	switch (xa->state) {
639 	case ATA_S_COMPLETE:
640 		xs->error = XS_NOERROR;
641 		break;
642 	case ATA_S_ERROR:
643 		/* fake sense? */
644 		xs->error = XS_DRIVER_STUFFUP;
645 		break;
646 	case ATA_S_TIMEOUT:
647 		xs->error = XS_TIMEOUT;
648 		break;
649 	default:
650 		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
651 		    xa->state);
652 	}
653 
654 	xs->resid = xa->resid;
655 
656 	scsi_done(xs);
657 }
658 
659 void
660 atascsi_disk_inq(struct scsi_xfer *xs)
661 {
662 	struct scsi_inquiry	*inq = (struct scsi_inquiry *)xs->cmd;
663 
664 	if (xs->cmdlen != sizeof(*inq)) {
665 		atascsi_done(xs, XS_DRIVER_STUFFUP);
666 		return;
667 	}
668 
669 	if (ISSET(inq->flags, SI_EVPD)) {
670 		switch (inq->pagecode) {
671 		case SI_PG_SUPPORTED:
672 			atascsi_disk_vpd_supported(xs);
673 			break;
674 		case SI_PG_SERIAL:
675 			atascsi_disk_vpd_serial(xs);
676 			break;
677 		case SI_PG_DEVID:
678 			atascsi_disk_vpd_ident(xs);
679 			break;
680 		case SI_PG_ATA:
681 			atascsi_disk_vpd_ata(xs);
682 			break;
683 		case SI_PG_DISK_LIMITS:
684 			atascsi_disk_vpd_limits(xs);
685 			break;
686 		case SI_PG_DISK_INFO:
687 			atascsi_disk_vpd_info(xs);
688 			break;
689 		case SI_PG_DISK_THIN:
690 			atascsi_disk_vpd_thin(xs);
691 			break;
692 		default:
693 			atascsi_done(xs, XS_DRIVER_STUFFUP);
694 			break;
695 		}
696 	} else
697 		atascsi_disk_inquiry(xs);
698 }
699 
700 void
701 atascsi_disk_inquiry(struct scsi_xfer *xs)
702 {
703 	struct scsi_inquiry_data inq;
704 	struct scsi_link        *link = xs->sc_link;
705 	struct atascsi_port	*ap;
706 
707 	ap = atascsi_lookup_port(link);
708 
709 	bzero(&inq, sizeof(inq));
710 
711 	inq.device = T_DIRECT;
712 	inq.version = 0x05; /* SPC-3 */
713 	inq.response_format = 2;
714 	inq.additional_length = 32;
715 	inq.flags |= SID_CmdQue;
716 	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
717 	ata_swapcopy(ap->ap_identify.model, inq.product,
718 	    sizeof(inq.product));
719 	ata_swapcopy(ap->ap_identify.firmware, inq.revision,
720 	    sizeof(inq.revision));
721 
722 	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
723 
724 	atascsi_done(xs, XS_NOERROR);
725 }
726 
727 void
728 atascsi_disk_vpd_supported(struct scsi_xfer *xs)
729 {
730 	struct {
731 		struct scsi_vpd_hdr	hdr;
732 		u_int8_t		list[7];
733 	}			pg;
734 	struct scsi_link        *link = xs->sc_link;
735 	struct atascsi_port	*ap;
736 	int			fat;
737 
738 	ap = atascsi_lookup_port(link);
739 	fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;
740 
741 	bzero(&pg, sizeof(pg));
742 
743 	pg.hdr.device = T_DIRECT;
744 	pg.hdr.page_code = SI_PG_SUPPORTED;
745 	_lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
746 	pg.list[0] = SI_PG_SUPPORTED;
747 	pg.list[1] = SI_PG_SERIAL;
748 	pg.list[2] = SI_PG_DEVID;
749 	pg.list[3] = SI_PG_ATA;
750 	pg.list[4] = SI_PG_DISK_LIMITS;
751 	pg.list[5] = SI_PG_DISK_INFO;
752 	pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat. get it? tehe. */
753 
754 	bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));
755 
756 	atascsi_done(xs, XS_NOERROR);
757 }
758 
759 void
760 atascsi_disk_vpd_serial(struct scsi_xfer *xs)
761 {
762 	struct scsi_link        *link = xs->sc_link;
763 	struct atascsi_port	*ap;
764 	struct scsi_vpd_serial	pg;
765 
766 	ap = atascsi_lookup_port(link);
767 	bzero(&pg, sizeof(pg));
768 
769 	pg.hdr.device = T_DIRECT;
770 	pg.hdr.page_code = SI_PG_SERIAL;
771 	_lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
772 	ata_swapcopy(ap->ap_identify.serial, pg.serial,
773 	    sizeof(ap->ap_identify.serial));
774 
775 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
776 
777 	atascsi_done(xs, XS_NOERROR);
778 }
779 
780 void
781 atascsi_disk_vpd_ident(struct scsi_xfer *xs)
782 {
783 	struct scsi_link        *link = xs->sc_link;
784 	struct atascsi_port	*ap;
785 	struct {
786 		struct scsi_vpd_hdr	hdr;
787 		struct scsi_vpd_devid_hdr devid_hdr;
788 		u_int8_t		devid[68];
789 	}			pg;
790 	u_int8_t		*p;
791 	size_t			pg_len;
792 
793 	ap = atascsi_lookup_port(link);
794 	bzero(&pg, sizeof(pg));
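	/*
	 * Prefer a binary NAA designator built from the drive's World Wide
	 * Name when IDENTIFY advertises one; otherwise fall back to a T10
	 * vendor-id ASCII designator made from "ATA", the model string and
	 * the serial number.
	 */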
795 	if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
796 		pg_len = 8;
797 
798 		pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
799 		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;
800 
801 		ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
802 	} else {
803 		pg_len = 68;
804 
805 		pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
806 		pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;
807 
808 		p = pg.devid;
809 		bcopy("ATA     ", p, 8);
810 		p += 8;
811 		ata_swapcopy(ap->ap_identify.model, p,
812 		    sizeof(ap->ap_identify.model));
813 		p += sizeof(ap->ap_identify.model);
814 		ata_swapcopy(ap->ap_identify.serial, p,
815 		    sizeof(ap->ap_identify.serial));
816 	}
817 
818 	pg.devid_hdr.len = pg_len;
819 	pg_len += sizeof(pg.devid_hdr);
820 
821 	pg.hdr.device = T_DIRECT;
822 	pg.hdr.page_code = SI_PG_DEVID;
823 	_lto2b(pg_len, pg.hdr.page_length);
824 	pg_len += sizeof(pg.hdr);
825 
826 	bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));
827 
828 	atascsi_done(xs, XS_NOERROR);
829 }
830 
831 void
832 atascsi_disk_vpd_ata(struct scsi_xfer *xs)
833 {
834 	struct scsi_link        *link = xs->sc_link;
835 	struct atascsi_port	*ap;
836 	struct scsi_vpd_ata	pg;
837 
838 	ap = atascsi_lookup_port(link);
839 	bzero(&pg, sizeof(pg));
840 
841 	pg.hdr.device = T_DIRECT;
842 	pg.hdr.page_code = SI_PG_ATA;
843 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
844 
845 	memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
846 	memcpy(pg.sat_vendor, "OpenBSD",
847 	    MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
848 	memset(pg.sat_product, ' ', sizeof(pg.sat_product));
849 	memcpy(pg.sat_product, "atascsi",
850 	    MIN(strlen("atascsi"), sizeof(pg.sat_product)));
851 	memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
852 	memcpy(pg.sat_revision, osrelease,
853 	    MIN(strlen(osrelease), sizeof(pg.sat_revision)));
854 
855 	/* XXX device signature */
856 
857 	switch (ap->ap_type) {
858 	case ATA_PORT_T_DISK:
859 		pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
860 		break;
861 	case ATA_PORT_T_ATAPI:
862 		pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
863 		break;
864 	}
865 
866 	memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));
867 
868 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
869 
870 	atascsi_done(xs, XS_NOERROR);
871 }
872 
873 void
874 atascsi_disk_vpd_limits(struct scsi_xfer *xs)
875 {
876 	struct scsi_link        *link = xs->sc_link;
877 	struct atascsi_port	*ap;
878 	struct scsi_vpd_disk_limits pg;
879 
880 	ap = atascsi_lookup_port(link);
881 	bzero(&pg, sizeof(pg));
882 	pg.hdr.device = T_DIRECT;
883 	pg.hdr.page_code = SI_PG_DISK_LIMITS;
884 	_lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);
885 
886 	_lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
887 	    pg.optimal_xfer_granularity);
888 
889 	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
890 		/*
891 		 * ATA only supports 65535 blocks per TRIM descriptor, so
892 		 * avoid having to split UNMAP descriptors and overflow the page
893 		 * limit by using that as a max.
894 		 */
895 		_lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
896 		_lto4b(512 / 8, pg.max_unmap_desc_count);
897 	}
898 
899 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
900 
901 	atascsi_done(xs, XS_NOERROR);
902 }
903 
904 void
905 atascsi_disk_vpd_info(struct scsi_xfer *xs)
906 {
907 	struct scsi_link        *link = xs->sc_link;
908 	struct atascsi_port	*ap;
909 	struct scsi_vpd_disk_info pg;
910 
911 	ap = atascsi_lookup_port(link);
912 	bzero(&pg, sizeof(pg));
913 	pg.hdr.device = T_DIRECT;
914 	pg.hdr.page_code = SI_PG_DISK_INFO;
915 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
916 
917 	_lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
918 	pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;
919 
920 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
921 
922 	atascsi_done(xs, XS_NOERROR);
923 }
924 
925 void
926 atascsi_disk_vpd_thin(struct scsi_xfer *xs)
927 {
928 	struct scsi_link        *link = xs->sc_link;
929 	struct atascsi_port	*ap;
930 	struct scsi_vpd_disk_thin pg;
931 
932 	ap = atascsi_lookup_port(link);
933 	if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
934 		atascsi_done(xs, XS_DRIVER_STUFFUP);
935 		return;
936 	}
937 
938 	bzero(&pg, sizeof(pg));
939 	pg.hdr.device = T_DIRECT;
940 	pg.hdr.page_code = SI_PG_DISK_THIN;
941 	_lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
942 
943 	pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;
944 
945 	bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
946 
947 	atascsi_done(xs, XS_NOERROR);
948 }
949 
950 void
951 atascsi_disk_write_same_16(struct scsi_xfer *xs)
952 {
953 	struct scsi_link	*link = xs->sc_link;
954 	struct atascsi		*as = link->adapter_softc;
955 	struct atascsi_port	*ap;
956 	struct scsi_write_same_16 *cdb;
957 	struct ata_xfer		*xa = xs->io;
958 	struct ata_fis_h2d	*fis;
959 	u_int64_t		lba;
960 	u_int32_t		length;
961 	u_int64_t		desc;
962 
963 	if (xs->cmdlen != sizeof(*cdb)) {
964 		atascsi_done(xs, XS_DRIVER_STUFFUP);
965 		return;
966 	}
967 
968 	ap = atascsi_lookup_port(link);
969 	cdb = (struct scsi_write_same_16 *)xs->cmd;
970 
971 	if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
972 	    !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
973 		/* generate sense data */
974 		atascsi_done(xs, XS_DRIVER_STUFFUP);
975 		return;
976 	}
977 
978 	if (xs->datalen < 512) {
979 		/* generate sense data */
980 		atascsi_done(xs, XS_DRIVER_STUFFUP);
981 		return;
982 	}
983 
984 	lba = _8btol(cdb->lba);
985 	length = _4btol(cdb->length);
986 
987 	if (length > ATA_DSM_TRIM_MAX_LEN) {
988 		/* XXX we don't support requests over 65535 blocks */
989 		atascsi_done(xs, XS_DRIVER_STUFFUP);
990 		return;
991 	}
992 
993 	xa->data = xs->data;
994 	xa->datalen = 512;
995 	xa->flags = ATA_F_WRITE;
996 	xa->pmp_port = ap->ap_pmp_port;
997 	if (xs->flags & SCSI_POLL)
998 		xa->flags |= ATA_F_POLL;
999 	xa->complete = atascsi_disk_write_same_16_done;
1000 	xa->atascsi_private = xs;
1001 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1002 
1003 	/* TRIM sends a list of blocks to discard in the databuf. */
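	/*
	 * Each 8-byte range entry packs the starting LBA into the low 48
	 * bits and the block count into the top 16 bits; ATA_DSM_TRIM_DESC()
	 * does the packing and the result goes out little-endian.
	 */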
1004 	memset(xa->data, 0, xa->datalen);
1005 	desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
1006 	memcpy(xa->data, &desc, sizeof(desc));
1007 
1008 	fis = xa->fis;
1009 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1010 	fis->command = ATA_C_DSM;
1011 	fis->features = ATA_DSM_TRIM;
1012 	fis->sector_count = 1;
1013 
1014 	ata_exec(as, xa);
1015 }
1016 
1017 void
1018 atascsi_disk_write_same_16_done(struct ata_xfer *xa)
1019 {
1020 	struct scsi_xfer	*xs = xa->atascsi_private;
1021 
1022 	switch (xa->state) {
1023 	case ATA_S_COMPLETE:
1024 		xs->error = XS_NOERROR;
1025 		break;
1026 	case ATA_S_ERROR:
1027 		xs->error = XS_DRIVER_STUFFUP;
1028 		break;
1029 	case ATA_S_TIMEOUT:
1030 		xs->error = XS_TIMEOUT;
1031 		break;
1032 
1033 	default:
1034 		panic("atascsi_disk_write_same_16_done: "
1035 		    "unexpected ata_xfer state (%d)", xa->state);
1036 	}
1037 
1038 	scsi_done(xs);
1039 }
1040 
1041 void
1042 atascsi_disk_unmap(struct scsi_xfer *xs)
1043 {
1044 	struct ata_xfer		*xa = xs->io;
1045 	struct scsi_unmap	*cdb;
1046 	struct scsi_unmap_data	*unmap;
1047 	u_int			len;
1048 
1049 	if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb)) {
1050 		atascsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}
1051 
1052 	cdb = (struct scsi_unmap *)xs->cmd;
1053 	len = _2btol(cdb->list_len);
1054 	if (xs->datalen != len || len < sizeof(*unmap)) {
1055 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1056 		return;
1057 	}
1058 
1059 	unmap = (struct scsi_unmap_data *)xs->data;
1060 	if (_2btol(unmap->data_length) != len) {
1061 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1062 		return;
1063 	}
1064 
1065 	len = _2btol(unmap->desc_length);
1066 	if (len != xs->datalen - sizeof(*unmap)) {
1067 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1068 		return;
1069 	}
1070 
1071 	if (len < sizeof(struct scsi_unmap_desc)) {
1072 		/* no work, no error according to sbc3 */
1073 		atascsi_done(xs, XS_NOERROR);
		return;
1074 	}
1075 
1076 	if (len > sizeof(struct scsi_unmap_desc) * 64) {
1077 		/* more work than we advertised */
1078 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1079 		return;
1080 	}
1081 
1082 	/* let's go */
1083 	if (ISSET(xs->flags, SCSI_NOSLEEP)) {
1084 		task_set(&xa->task, atascsi_disk_unmap_task, xs);
1085 		task_add(systq, &xa->task);
1086 	} else {
1087 		/* we can already sleep for memory */
1088 		atascsi_disk_unmap_task(xs);
1089 	}
1090 }
1091 
1092 void
1093 atascsi_disk_unmap_task(void *xxs)
1094 {
1095 	struct scsi_xfer	*xs = xxs;
1096 	struct scsi_link	*link = xs->sc_link;
1097 	struct atascsi		*as = link->adapter_softc;
1098 	struct atascsi_port	*ap;
1099 	struct ata_xfer		*xa = xs->io;
1100 	struct ata_fis_h2d	*fis;
1101 	struct scsi_unmap_data	*unmap;
1102 	struct scsi_unmap_desc	*descs, *d;
1103 	u_int64_t		*trims;
1104 	u_int			len, i;
1105 
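	/*
	 * A single 512-byte TRIM payload holds 64 range entries, which is
	 * exactly the max_unmap_desc_count advertised in the DISK LIMITS
	 * VPD page, so the whole UNMAP list always fits in one sector.
	 */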
1106 	trims = dma_alloc(512, PR_WAITOK | PR_ZERO);
1107 
1108 	ap = atascsi_lookup_port(link);
1109 	unmap = (struct scsi_unmap_data *)xs->data;
1110 	descs = (struct scsi_unmap_desc *)(unmap + 1);
1111 
1112 	len = _2btol(unmap->desc_length) / sizeof(*d);
1113 	for (i = 0; i < len; i++) {
1114 		d = &descs[i];
1115 		if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
1116 			goto fail;
1117 
1118 		trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
1119 		    _4btol(d->logical_blocks)));
1120 	}
1121 
1122 	xa->data = trims;
1123 	xa->datalen = 512;
1124 	xa->flags = ATA_F_WRITE;
1125 	xa->pmp_port = ap->ap_pmp_port;
1126 	xa->complete = atascsi_disk_unmap_done;
1127 	xa->atascsi_private = xs;
1128 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1129 
1130 	fis = xa->fis;
1131 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1132 	fis->command = ATA_C_DSM;
1133 	fis->features = ATA_DSM_TRIM;
1134 	fis->sector_count = 1;
1135 
1136 	ata_exec(as, xa);
1137 	return;
1138 
1139  fail:
1140 	dma_free(trims, 512);	/* xa->data is not set up yet on this path */
1141 	atascsi_done(xs, XS_DRIVER_STUFFUP);
1142 }
1143 
1144 void
1145 atascsi_disk_unmap_done(struct ata_xfer *xa)
1146 {
1147 	struct scsi_xfer	*xs = xa->atascsi_private;
1148 
1149 	dma_free(xa->data, 512);
1150 
1151 	switch (xa->state) {
1152 	case ATA_S_COMPLETE:
1153 		xs->error = XS_NOERROR;
1154 		break;
1155 	case ATA_S_ERROR:
1156 		xs->error = XS_DRIVER_STUFFUP;
1157 		break;
1158 	case ATA_S_TIMEOUT:
1159 		xs->error = XS_TIMEOUT;
1160 		break;
1161 
1162 	default:
1163 		panic("atascsi_disk_unmap_done: "
1164 		    "unexpected ata_xfer state (%d)", xa->state);
1165 	}
1166 
1167 	scsi_done(xs);
1168 }
1169 
1170 void
1171 atascsi_disk_sync(struct scsi_xfer *xs)
1172 {
1173 	struct scsi_link	*link = xs->sc_link;
1174 	struct atascsi		*as = link->adapter_softc;
1175 	struct atascsi_port	*ap;
1176 	struct ata_xfer		*xa = xs->io;
1177 
1178 	if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
1179 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1180 		return;
1181 	}
1182 
1183 	ap = atascsi_lookup_port(link);
1184 	xa->datalen = 0;
1185 	xa->flags = ATA_F_READ;
1186 	xa->complete = atascsi_disk_sync_done;
1187 	/* Spec says flush cache can take >30 sec, so give it at least 45. */
1188 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1189 	xa->atascsi_private = xs;
1190 	xa->pmp_port = ap->ap_pmp_port;
1191 	if (xs->flags & SCSI_POLL)
1192 		xa->flags |= ATA_F_POLL;
1193 
1194 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1195 	xa->fis->command = ATA_C_FLUSH_CACHE;
1196 	xa->fis->device = 0;
1197 
1198 	ata_exec(as, xa);
1199 }
1200 
1201 void
1202 atascsi_disk_sync_done(struct ata_xfer *xa)
1203 {
1204 	struct scsi_xfer	*xs = xa->atascsi_private;
1205 
1206 	switch (xa->state) {
1207 	case ATA_S_COMPLETE:
1208 		xs->error = XS_NOERROR;
1209 		break;
1210 
1211 	case ATA_S_ERROR:
1212 	case ATA_S_TIMEOUT:
1213 		printf("atascsi_disk_sync_done: %s\n",
1214 		    xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
1215 		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1216 		    XS_DRIVER_STUFFUP);
1217 		break;
1218 
1219 	default:
1220 		panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
1221 		    xa->state);
1222 	}
1223 
1224 	scsi_done(xs);
1225 }
1226 
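/*
 * IDENTIFY word 83 bit 10 flags LBA48 support; if set, the 48-bit sector
 * count lives in words 100-103 (addrsecxt), otherwise the 28-bit count is
 * in words 60-61 (addrsec).  The value returned is the highest usable LBA,
 * i.e. the sector count minus one, which is what READ CAPACITY reports.
 */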
1227 u_int64_t
1228 ata_identify_blocks(struct ata_identify *id)
1229 {
1230 	u_int64_t		blocks = 0;
1231 	int			i;
1232 
1233 	if (letoh16(id->cmdset83) & 0x0400) {
1234 		/* LBA48 feature set supported */
1235 		for (i = 3; i >= 0; --i) {
1236 			blocks <<= 16;
1237 			blocks += letoh16(id->addrsecxt[i]);
1238 		}
1239 	} else {
1240 		blocks = letoh16(id->addrsec[1]);
1241 		blocks <<= 16;
1242 		blocks += letoh16(id->addrsec[0]);
1243 	}
1244 
1245 	return (blocks - 1);
1246 }
1247 
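/*
 * The logical sector size defaults to 512 bytes.  When IDENTIFY word 106
 * is valid and its "logical sector longer than 256 words" bit is set,
 * words 117-118 (words_lsec) hold the sector size in 16-bit words, hence
 * the final shift left by one to convert to bytes.
 */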
1248 u_int
1249 ata_identify_blocksize(struct ata_identify *id)
1250 {
1251 	u_int			blocksize = 512;
1252 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1253 
1254 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1255 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
1256 		blocksize = letoh16(id->words_lsec[1]);
1257 		blocksize <<= 16;
1258 		blocksize += letoh16(id->words_lsec[0]);
1259 		blocksize <<= 1;
1260 	}
1261 
1262 	return (blocksize);
1263 }
1264 
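/*
 * The low nibble of IDENTIFY word 106 is the log2 of logical sectors per
 * physical sector, valid only when the word's signature bit pattern and
 * the "multiple logical sectors per physical sector" bit are both set.
 */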
1265 u_int
1266 ata_identify_block_l2p_exp(struct ata_identify *id)
1267 {
1268 	u_int			exponent = 0;
1269 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1270 
1271 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1272 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
1273 		exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
1274 	}
1275 
1276 	return (exponent);
1277 }
1278 
1279 u_int
1280 ata_identify_block_logical_align(struct ata_identify *id)
1281 {
1282 	u_int			align = 0;
1283 	u_int16_t		p2l_sect = letoh16(id->p2l_sect);
1284 	u_int16_t		logical_align = letoh16(id->logical_align);
1285 
1286 	if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1287 	    ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
1288 	    (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
1289 		align = logical_align & ATA_ID_LALIGN;
1290 
1291 	return (align);
1292 }
1293 
1294 void
1295 atascsi_disk_capacity(struct scsi_xfer *xs)
1296 {
1297 	struct scsi_link	*link = xs->sc_link;
1298 	struct atascsi_port	*ap;
1299 	struct scsi_read_cap_data rcd;
1300 	u_int64_t		capacity;
1301 
1302 	ap = atascsi_lookup_port(link);
1303 	if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
1304 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1305 		return;
1306 	}
1307 
1308 	bzero(&rcd, sizeof(rcd));
1309 	capacity = ata_identify_blocks(&ap->ap_identify);
1310 	if (capacity > 0xffffffff)
1311 		capacity = 0xffffffff;
1312 
1313 	_lto4b(capacity, rcd.addr);
1314 	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1315 
1316 	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1317 
1318 	atascsi_done(xs, XS_NOERROR);
1319 }
1320 
1321 void
1322 atascsi_disk_capacity16(struct scsi_xfer *xs)
1323 {
1324 	struct scsi_link	*link = xs->sc_link;
1325 	struct atascsi_port	*ap;
1326 	struct scsi_read_cap_data_16 rcd;
1327 	u_int			align;
1328 	u_int16_t		lowest_aligned = 0;
1329 
1330 	ap = atascsi_lookup_port(link);
1331 	if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
1332 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1333 		return;
1334 	}
1335 
1336 	bzero(&rcd, sizeof(rcd));
1337 
1338 	_lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
1339 	_lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1340 	rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
1341 	align = ata_identify_block_logical_align(&ap->ap_identify);
1342 	if (align > 0)
1343 		lowest_aligned = (1 << rcd.logical_per_phys) - align;
1344 
1345 	if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
1346 		SET(lowest_aligned, READ_CAP_16_TPE);
1347 
1348 		if (ISSET(letoh16(ap->ap_identify.add_support),
1349 		    ATA_ID_ADD_SUPPORT_DRT))
1350 			SET(lowest_aligned, READ_CAP_16_TPRZ);
1351 	}
1352 	_lto2b(lowest_aligned, rcd.lowest_aligned);
1353 
1354 	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1355 
1356 	atascsi_done(xs, XS_NOERROR);
1357 }
1358 
1359 int
1360 atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
1361 {
1362 	struct ata_xfer		*xa = xs->io;
1363 
1364 	xa->data = xs->data;
1365 	xa->datalen = xs->datalen;
1366 	xa->timeout = xs->timeout;
1367 	xa->flags = 0;
1368 	if (xs->flags & SCSI_DATA_IN)
1369 		xa->flags |= ATA_F_READ;
1370 	if (xs->flags & SCSI_DATA_OUT)
1371 		xa->flags |= ATA_F_WRITE;
1372 	if (xs->flags & SCSI_POLL)
1373 		xa->flags |= ATA_F_POLL;
1374 
1375 	switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
1376 	case ATA_PASSTHRU_PROTO_NON_DATA:
1377 	case ATA_PASSTHRU_PROTO_PIO_DATAIN:
1378 	case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
1379 		xa->flags |= ATA_F_PIO;
1380 		break;
1381 	default:
1382 		/* we don't support this yet */
1383 		return (1);
1384 	}
1385 
1386 	xa->atascsi_private = xs;
1387 	xa->complete = atascsi_passthru_done;
1388 
1389 	return (0);
1390 }
1391 
1392 void
1393 atascsi_passthru_12(struct scsi_xfer *xs)
1394 {
1395 	struct scsi_link	*link = xs->sc_link;
1396 	struct atascsi		*as = link->adapter_softc;
1397 	struct atascsi_port	*ap;
1398 	struct ata_xfer		*xa = xs->io;
1399 	struct scsi_ata_passthru_12 *cdb;
1400 	struct ata_fis_h2d	*fis;
1401 
1402 	if (xs->cmdlen != sizeof(*cdb)) {
1403 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1404 		return;
1405 	}
1406 
1407 	cdb = (struct scsi_ata_passthru_12 *)xs->cmd;
1408 	/* validate cdb */
1409 
1410 	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1411 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1412 		return;
1413 	}
1414 
1415 	ap = atascsi_lookup_port(link);
1416 	fis = xa->fis;
1417 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1418 	fis->command = cdb->command;
1419 	fis->features = cdb->features;
1420 	fis->lba_low = cdb->lba_low;
1421 	fis->lba_mid = cdb->lba_mid;
1422 	fis->lba_high = cdb->lba_high;
1423 	fis->device = cdb->device;
1424 	fis->sector_count = cdb->sector_count;
1425 	xa->pmp_port = ap->ap_pmp_port;
1426 
1427 	ata_exec(as, xa);
1428 }
1429 
1430 void
1431 atascsi_passthru_16(struct scsi_xfer *xs)
1432 {
1433 	struct scsi_link	*link = xs->sc_link;
1434 	struct atascsi		*as = link->adapter_softc;
1435 	struct atascsi_port	*ap;
1436 	struct ata_xfer		*xa = xs->io;
1437 	struct scsi_ata_passthru_16 *cdb;
1438 	struct ata_fis_h2d	*fis;
1439 
1440 	if (xs->cmdlen != sizeof(*cdb)) {
1441 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1442 		return;
1443 	}
1444 
1445 	cdb = (struct scsi_ata_passthru_16 *)xs->cmd;
1446 	/* validate cdb */
1447 
1448 	if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1449 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1450 		return;
1451 	}
1452 
1453 	ap = atascsi_lookup_port(link);
1454 	fis = xa->fis;
1455 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1456 	fis->command = cdb->command;
1457 	fis->features = cdb->features[1];
1458 	fis->lba_low = cdb->lba_low[1];
1459 	fis->lba_mid = cdb->lba_mid[1];
1460 	fis->lba_high = cdb->lba_high[1];
1461 	fis->device = cdb->device;
1462 	fis->lba_low_exp = cdb->lba_low[0];
1463 	fis->lba_mid_exp = cdb->lba_mid[0];
1464 	fis->lba_high_exp = cdb->lba_high[0];
1465 	fis->features_exp = cdb->features[0];
1466 	fis->sector_count = cdb->sector_count[1];
1467 	fis->sector_count_exp = cdb->sector_count[0];
1468 	xa->pmp_port = ap->ap_pmp_port;
1469 
1470 	ata_exec(as, xa);
1471 }
1472 
1473 void
1474 atascsi_passthru_done(struct ata_xfer *xa)
1475 {
1476 	struct scsi_xfer	*xs = xa->atascsi_private;
1477 
1478 	/*
1479 	 * XXX need to generate sense if cdb wants it
1480 	 */
1481 
1482 	switch (xa->state) {
1483 	case ATA_S_COMPLETE:
1484 		xs->error = XS_NOERROR;
1485 		break;
1486 	case ATA_S_ERROR:
1487 		xs->error = XS_DRIVER_STUFFUP;
1488 		break;
1489 	case ATA_S_TIMEOUT:
1490 		printf("atascsi_passthru_done, timeout\n");
1491 		xs->error = XS_TIMEOUT;
1492 		break;
1493 	default:
1494 		panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
1495 		    xa->state);
1496 	}
1497 
1498 	xs->resid = xa->resid;
1499 
1500 	scsi_done(xs);
1501 }
1502 
1503 void
1504 atascsi_disk_sense(struct scsi_xfer *xs)
1505 {
1506 	struct scsi_sense_data	*sd = (struct scsi_sense_data *)xs->data;
1507 
1508 	bzero(xs->data, xs->datalen);
1509 	/* check datalen > sizeof(struct scsi_sense_data)? */
1510 	sd->error_code = SSD_ERRCODE_CURRENT;
1511 	sd->flags = SKEY_NO_SENSE;
1512 
1513 	atascsi_done(xs, XS_NOERROR);
1514 }
1515 
1516 void
1517 atascsi_disk_start_stop(struct scsi_xfer *xs)
1518 {
1519 	struct scsi_link	*link = xs->sc_link;
1520 	struct atascsi		*as = link->adapter_softc;
1521 	struct atascsi_port	*ap;
1522 	struct ata_xfer		*xa = xs->io;
1523 	struct scsi_start_stop	*ss = (struct scsi_start_stop *)xs->cmd;
1524 
1525 	if (xs->cmdlen != sizeof(*ss)) {
1526 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1527 		return;
1528 	}
1529 
1530 	if (ss->how != SSS_STOP) {
1531 		atascsi_done(xs, XS_NOERROR);
1532 		return;
1533 	}
1534 
1535 	/*
1536 	 * A SCSI START STOP UNIT command with the START bit set to
1537 	 * zero gets translated into an ATA FLUSH CACHE command
1538 	 * followed by an ATA STANDBY IMMEDIATE command.
1539 	 */
1540 	ap = atascsi_lookup_port(link);
1541 	xa->datalen = 0;
1542 	xa->flags = ATA_F_READ;
1543 	xa->complete = atascsi_disk_start_stop_done;
1544 	/* Spec says flush cache can take >30 sec, so give it at least 45. */
1545 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1546 	xa->pmp_port = ap->ap_pmp_port;
1547 	xa->atascsi_private = xs;
1548 	if (xs->flags & SCSI_POLL)
1549 		xa->flags |= ATA_F_POLL;
1550 
1551 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1552 	xa->fis->command = ATA_C_FLUSH_CACHE;
1553 	xa->fis->device = 0;
1554 
1555 	ata_exec(as, xa);
1556 }
1557 
1558 void
1559 atascsi_disk_start_stop_done(struct ata_xfer *xa)
1560 {
1561 	struct scsi_xfer	*xs = xa->atascsi_private;
1562 	struct scsi_link	*link = xs->sc_link;
1563 	struct atascsi		*as = link->adapter_softc;
1564 	struct atascsi_port	*ap;
1565 
1566 	switch (xa->state) {
1567 	case ATA_S_COMPLETE:
1568 		break;
1569 
1570 	case ATA_S_ERROR:
1571 	case ATA_S_TIMEOUT:
1572 		xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1573 		    XS_DRIVER_STUFFUP);
1574 		xs->resid = xa->resid;
1575 		scsi_done(xs);
1576 		return;
1577 
1578 	default:
1579 		panic("atascsi_disk_start_stop_done: unexpected ata_xfer state (%d)",
1580 		    xa->state);
1581 	}
1582 
1583 	/*
1584 	 * The FLUSH CACHE command completed successfully; now issue
1585 	 * the STANDBY IMMEDIATE command.
1586 	 */
1587 	ap = atascsi_lookup_port(link);
1588 	xa->datalen = 0;
1589 	xa->flags = ATA_F_READ;
1590 	xa->state = ATA_S_SETUP;
1591 	xa->complete = atascsi_disk_cmd_done;
1592 	/* Give STANDBY IMMEDIATE the same generous timeout as the flush. */
1593 	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1594 	xa->pmp_port = ap->ap_pmp_port;
1595 	xa->atascsi_private = xs;
1596 	if (xs->flags & SCSI_POLL)
1597 		xa->flags |= ATA_F_POLL;
1598 
1599 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1600 	xa->fis->command = ATA_C_STANDBY_IMMED;
1601 	xa->fis->device = 0;
1602 
1603 	ata_exec(as, xa);
1604 }
1605 
1606 void
1607 atascsi_atapi_cmd(struct scsi_xfer *xs)
1608 {
1609 	struct scsi_link	*link = xs->sc_link;
1610 	struct atascsi		*as = link->adapter_softc;
1611 	struct atascsi_port	*ap;
1612 	struct ata_xfer		*xa = xs->io;
1613 	struct ata_fis_h2d	*fis;
1614 
1615 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1616 	case SCSI_DATA_IN:
1617 		xa->flags = ATA_F_PACKET | ATA_F_READ;
1618 		break;
1619 	case SCSI_DATA_OUT:
1620 		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
1621 		break;
1622 	default:
1623 		xa->flags = ATA_F_PACKET;
1624 	}
1625 	xa->flags |= ATA_F_GET_RFIS;
1626 
1627 	ap = atascsi_lookup_port(link);
1628 	xa->data = xs->data;
1629 	xa->datalen = xs->datalen;
1630 	xa->complete = atascsi_atapi_cmd_done;
1631 	xa->timeout = xs->timeout;
1632 	xa->pmp_port = ap->ap_pmp_port;
1633 	xa->atascsi_private = xs;
1634 	if (xs->flags & SCSI_POLL)
1635 		xa->flags |= ATA_F_POLL;
1636 
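	/*
	 * For PACKET commands the LBA mid/high registers carry the byte
	 * count limit (0x2000 here) and the features register selects DMA
	 * and the transfer direction; the actual SCSI CDB travels in the
	 * ATAPI packet below.
	 */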
1637 	fis = xa->fis;
1638 	fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1639 	fis->command = ATA_C_PACKET;
1640 	fis->device = 0;
1641 	fis->sector_count = xa->tag << 3;
1642 	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
1643 	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
1644 	fis->lba_mid = 0x00;
1645 	fis->lba_high = 0x20;
1646 
1647 	/* Copy SCSI command into ATAPI packet. */
1648 	memcpy(xa->packetcmd, xs->cmd, xs->cmdlen);
1649 
1650 	ata_exec(as, xa);
1651 }
1652 
1653 void
1654 atascsi_atapi_cmd_done(struct ata_xfer *xa)
1655 {
1656 	struct scsi_xfer	*xs = xa->atascsi_private;
1657 	struct scsi_sense_data  *sd = &xs->sense;
1658 
1659 	switch (xa->state) {
1660 	case ATA_S_COMPLETE:
1661 		xs->error = XS_NOERROR;
1662 		break;
1663 	case ATA_S_ERROR:
1664 		/* Return PACKET sense data */
1665 		sd->error_code = SSD_ERRCODE_CURRENT;
1666 		sd->flags = (xa->rfis.error & 0xf0) >> 4;
1667 		if (xa->rfis.error & 0x04)
1668 			sd->flags = SKEY_ILLEGAL_REQUEST;
1669 		if (xa->rfis.error & 0x02)
1670 			sd->flags |= SSD_EOM;
1671 		if (xa->rfis.error & 0x01)
1672 			sd->flags |= SSD_ILI;
1673 		xs->error = XS_SENSE;
1674 		break;
1675 	case ATA_S_TIMEOUT:
1676 		printf("atascsi_atapi_cmd_done, timeout\n");
1677 		xs->error = XS_TIMEOUT;
1678 		break;
1679 	default:
1680 		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
1681 		    xa->state);
1682 	}
1683 
1684 	xs->resid = xa->resid;
1685 
1686 	scsi_done(xs);
1687 }
1688 
1689 void
1690 atascsi_pmp_cmd(struct scsi_xfer *xs)
1691 {
1692 	switch (xs->cmd->opcode) {
1693 	case REQUEST_SENSE:
1694 		atascsi_pmp_sense(xs);
1695 		return;
1696 	case INQUIRY:
1697 		atascsi_pmp_inq(xs);
1698 		return;
1699 
1700 	case TEST_UNIT_READY:
1701 	case PREVENT_ALLOW:
1702 		atascsi_done(xs, XS_NOERROR);
1703 		return;
1704 
1705 	default:
1706 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1707 		return;
1708 	}
1709 }
1710 
1711 void
1712 atascsi_pmp_sense(struct scsi_xfer *xs)
1713 {
1714 	struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;
1715 
1716 	bzero(xs->data, xs->datalen);
1717 	sd->error_code = SSD_ERRCODE_CURRENT;
1718 	sd->flags = SKEY_NO_SENSE;
1719 
1720 	atascsi_done(xs, XS_NOERROR);
1721 }
1722 
1723 void
1724 atascsi_pmp_inq(struct scsi_xfer *xs)
1725 {
1726 	struct scsi_inquiry_data inq;
1727 	struct scsi_inquiry *in_inq = (struct scsi_inquiry *)xs->cmd;
1728 
1729 	if (ISSET(in_inq->flags, SI_EVPD)) {
1730 		/* any evpd pages we need to support here? */
1731 		atascsi_done(xs, XS_DRIVER_STUFFUP);
1732 		return;
1733 	}
1734 
1735 	bzero(&inq, sizeof(inq));
1736 	inq.device = 0x1E;	/* "well known logical unit" seems reasonable */
1737 	inq.version = 0x05;	/* SPC-3? */
1738 	inq.response_format = 2;
1739 	inq.additional_length = 32;
1740 	inq.flags |= SID_CmdQue;
1741 	bcopy("ATA     ", inq.vendor, sizeof(inq.vendor));
1742 
1743 	/* should use the data from atascsi_pmp_identify here?
1744 	 * not sure how useful the chip id is, but maybe it'd be
1745 	 * nice to include the number of ports.
1746 	 */
1747 	bcopy("Port Multiplier", inq.product, sizeof(inq.product));
1748 	bcopy("    ", inq.revision, sizeof(inq.revision));
1749 
1750 	bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
1751 	atascsi_done(xs, XS_NOERROR);
1752 }
1753 
1754 void
1755 atascsi_done(struct scsi_xfer *xs, int error)
1756 {
1757 	xs->error = error;
1758 	scsi_done(xs);
1759 }
1760 
1761 void
1762 ata_exec(struct atascsi *as, struct ata_xfer *xa)
1763 {
1764 	as->as_methods->ata_cmd(xa);
1765 }
1766 
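/*
 * scsi_iopool glue: scsi_xfer io allocations are backed directly by the
 * controller driver's ata_xfer pool, so every opening the midlayer hands
 * out corresponds to a command the controller can actually have in flight.
 */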
1767 void *
1768 atascsi_io_get(void *cookie)
1769 {
1770 	struct atascsi_host_port	*ahp = cookie;
1771 	struct atascsi			*as = ahp->ahp_as;
1772 	struct ata_xfer			*xa;
1773 
1774 	xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
1775 	if (xa != NULL)
1776 		xa->fis->type = ATA_FIS_TYPE_H2D;
1777 
1778 	return (xa);
1779 }
1780 
1781 void
1782 atascsi_io_put(void *cookie, void *io)
1783 {
1784 	struct atascsi_host_port	*ahp = cookie;
1785 	struct atascsi			*as = ahp->ahp_as;
1786 	struct ata_xfer			*xa = io;
1787 
1788 	xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
1789 	as->as_methods->ata_put_xfer(xa);
1790 }
1791 
1792 void
1793 ata_polled_complete(struct ata_xfer *xa)
1794 {
1795 	/* do nothing */
1796 }
1797 
1798 int
1799 ata_polled(struct ata_xfer *xa)
1800 {
1801 	int			rv;
1802 
1803 	if (!ISSET(xa->flags, ATA_F_DONE))
1804 		panic("ata_polled: xa isn't complete");
1805 
1806 	switch (xa->state) {
1807 	case ATA_S_COMPLETE:
1808 		rv = 0;
1809 		break;
1810 	case ATA_S_ERROR:
1811 	case ATA_S_TIMEOUT:
1812 		rv = EIO;
1813 		break;
1814 	default:
1815 		panic("ata_polled: xa state (%d)",
1816 		    xa->state);
1817 	}
1818 
1819 	scsi_io_put(xa->atascsi_private, xa);
1820 
1821 	return (rv);
1822 }
1823 
1824 void
1825 ata_complete(struct ata_xfer *xa)
1826 {
1827 	SET(xa->flags, ATA_F_DONE);
1828 	xa->complete(xa);
1829 }
1830 
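/*
 * ATA IDENTIFY strings (model, serial, firmware) store two ASCII
 * characters per 16-bit word with the bytes swapped, so copy them out a
 * word at a time with a byte swap to recover the readable string.
 */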
1831 void
1832 ata_swapcopy(void *src, void *dst, size_t len)
1833 {
1834 	u_int16_t *s = src, *d = dst;
1835 	int i;
1836 
1837 	len /= 2;
1838 
1839 	for (i = 0; i < len; i++)
1840 		d[i] = swap16(s[i]);
1841 }
1842 
1843 int
1844 atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
1845 {
1846 	struct atascsi			*as = ap->ap_as;
1847 	struct atascsi_host_port	*ahp = ap->ap_host_port;
1848 	struct ata_xfer			*xa;
1849 
1850 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1851 	if (xa == NULL)
1852 		panic("no free xfers on a new port");
1853 	xa->pmp_port = ap->ap_pmp_port;
1854 	xa->data = identify;
1855 	xa->datalen = sizeof(*identify);
1856 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1857 	xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
1858 	    ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
1859 	xa->fis->device = 0;
1860 	xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
1861 	xa->timeout = 1000;
1862 	xa->complete = ata_polled_complete;
1863 	xa->atascsi_private = &ahp->ahp_iopool;
1864 	ata_exec(as, xa);
1865 	return (ata_polled(xa));
1866 }
1867 
1868 int
1869 atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
1870 {
1871 	struct atascsi			*as = ap->ap_as;
1872 	struct atascsi_host_port	*ahp = ap->ap_host_port;
1873 	struct ata_xfer			*xa;
1874 
1875 	xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1876 	if (xa == NULL)
1877 		panic("no free xfers on a new port");
1878 	xa->fis->command = ATA_C_SET_FEATURES;
1879 	xa->fis->features = subcommand;
1880 	xa->fis->sector_count = arg;
1881 	xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1882 	xa->flags = ATA_F_POLL;
1883 	xa->timeout = 1000;
1884 	xa->complete = ata_polled_complete;
1885 	xa->pmp_port = ap->ap_pmp_port;
1886 	xa->atascsi_private = &ahp->ahp_iopool;
1887 	ata_exec(as, xa);
1888 	return (ata_polled(xa));
1889 }
1890