1 /* $OpenBSD: atascsi.c,v 1.156 2024/09/04 07:54:52 mglocker Exp $ */
2
3 /*
4 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5 * Copyright (c) 2010 Conformal Systems LLC <info@conformal.com>
6 * Copyright (c) 2010 Jonathan Matthew <jonathan@d14n.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/pool.h>
26
27 #include <scsi/scsi_all.h>
28 #include <scsi/scsi_disk.h>
29 #include <scsi/scsiconf.h>
30
31 #include <dev/ata/atascsi.h>
32 #include <dev/ata/pmreg.h>
33
34 struct atascsi_port;
35
36 struct atascsi {
37 struct device *as_dev;
38 void *as_cookie;
39
40 struct atascsi_host_port **as_host_ports;
41
42 const struct atascsi_methods *as_methods;
43 struct scsi_adapter as_switch;
44 struct scsibus_softc *as_scsibus;
45
46 int as_capability;
47 int as_ncqdepth;
48 };
49
50 /*
51 * atascsi_host_port is a port attached to the host controller, and
52 * only holds the details relevant to the host controller.
53 * atascsi_port is any port, including ports on port multipliers, and
54 * it holds details of the device attached to the port.
55 *
56 * When there is a port multiplier attached to a port, the ahp_ports
57 * array in the atascsi_host_port struct contains one atascsi_port for
58 * each port, and one for the control port (port 15). The index into
59 * the array is the LUN used to address the port. For the control port,
60 * the LUN is 0, and for the port multiplier ports, the LUN is the
61 * port number plus one.
62 *
63 * When there is no port multiplier attached to a port, the ahp_ports
64 * array contains a single entry for the device. The LUN and port number
65 * for this entry are both 0.
66 */
67
68 struct atascsi_host_port {
69 struct scsi_iopool ahp_iopool;
70 struct atascsi *ahp_as;
71 int ahp_port;
72 int ahp_nports;
73
74 struct atascsi_port **ahp_ports;
75 };
76
77 struct atascsi_port {
78 struct ata_identify ap_identify;
79 struct atascsi_host_port *ap_host_port;
80 struct atascsi *ap_as;
81 int ap_pmp_port;
82 int ap_type;
83 int ap_ncqdepth;
84 int ap_features;
85 #define ATA_PORT_F_NCQ 0x1
86 #define ATA_PORT_F_TRIM 0x2
87 };
88
89 void atascsi_cmd(struct scsi_xfer *);
90 int atascsi_probe(struct scsi_link *);
91 void atascsi_free(struct scsi_link *);
92
93 /* template */
94 const struct scsi_adapter atascsi_switch = {
95 atascsi_cmd, NULL, atascsi_probe, atascsi_free, NULL
96 };
97
98 void ata_swapcopy(void *, void *, size_t);
99
100 void atascsi_disk_cmd(struct scsi_xfer *);
101 void atascsi_disk_cmd_done(struct ata_xfer *);
102 void atascsi_disk_inq(struct scsi_xfer *);
103 void atascsi_disk_inquiry(struct scsi_xfer *);
104 void atascsi_disk_vpd_supported(struct scsi_xfer *);
105 void atascsi_disk_vpd_serial(struct scsi_xfer *);
106 void atascsi_disk_vpd_ident(struct scsi_xfer *);
107 void atascsi_disk_vpd_ata(struct scsi_xfer *);
108 void atascsi_disk_vpd_limits(struct scsi_xfer *);
109 void atascsi_disk_vpd_info(struct scsi_xfer *);
110 void atascsi_disk_vpd_thin(struct scsi_xfer *);
111 void atascsi_disk_write_same_16(struct scsi_xfer *);
112 void atascsi_disk_write_same_16_done(struct ata_xfer *);
113 void atascsi_disk_unmap(struct scsi_xfer *);
114 void atascsi_disk_unmap_task(void *);
115 void atascsi_disk_unmap_done(struct ata_xfer *);
116 void atascsi_disk_capacity(struct scsi_xfer *);
117 void atascsi_disk_capacity16(struct scsi_xfer *);
118 void atascsi_disk_sync(struct scsi_xfer *);
119 void atascsi_disk_sync_done(struct ata_xfer *);
120 void atascsi_disk_sense(struct scsi_xfer *);
121 void atascsi_disk_start_stop(struct scsi_xfer *);
122 void atascsi_disk_start_stop_done(struct ata_xfer *);
123
124 void atascsi_atapi_cmd(struct scsi_xfer *);
125 void atascsi_atapi_cmd_done(struct ata_xfer *);
126
127 void atascsi_pmp_cmd(struct scsi_xfer *);
128 void atascsi_pmp_sense(struct scsi_xfer *xs);
129 void atascsi_pmp_inq(struct scsi_xfer *xs);
130
131
132 void atascsi_passthru_12(struct scsi_xfer *);
133 void atascsi_passthru_16(struct scsi_xfer *);
134 int atascsi_passthru_map(struct scsi_xfer *, u_int8_t, u_int8_t);
135 void atascsi_passthru_done(struct ata_xfer *);
136
137 void atascsi_done(struct scsi_xfer *, int);
138
139 void ata_exec(struct atascsi *, struct ata_xfer *);
140
141 void ata_polled_complete(struct ata_xfer *);
142 int ata_polled(struct ata_xfer *);
143
144 u_int64_t ata_identify_blocks(struct ata_identify *);
145 u_int ata_identify_blocksize(struct ata_identify *);
146 u_int ata_identify_block_l2p_exp(struct ata_identify *);
147 u_int ata_identify_block_logical_align(struct ata_identify *);
148
149 void *atascsi_io_get(void *);
150 void atascsi_io_put(void *, void *);
151 struct atascsi_port * atascsi_lookup_port(struct scsi_link *);
152
153 int atascsi_port_identify(struct atascsi_port *,
154 struct ata_identify *);
155 int atascsi_port_set_features(struct atascsi_port *, int, int);
156
157
158 struct atascsi *
159 atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
160 {
161 struct scsibus_attach_args saa;
162 struct atascsi *as;
163
164 as = malloc(sizeof(*as), M_DEVBUF, M_WAITOK | M_ZERO);
165
166 as->as_dev = self;
167 as->as_cookie = aaa->aaa_cookie;
168 as->as_methods = aaa->aaa_methods;
169 as->as_capability = aaa->aaa_capability;
170 as->as_ncqdepth = aaa->aaa_ncmds;
171
172 /* copy from template and modify for ourselves */
173 as->as_switch = atascsi_switch;
174 if (aaa->aaa_minphys != NULL)
175 as->as_switch.dev_minphys = aaa->aaa_minphys;
176
177 as->as_host_ports = mallocarray(aaa->aaa_nports,
178 sizeof(struct atascsi_host_port *), M_DEVBUF, M_WAITOK | M_ZERO);
179
180 saa.saa_adapter = &as->as_switch;
181 saa.saa_adapter_softc = as;
182 saa.saa_adapter_buswidth = aaa->aaa_nports;
183 saa.saa_luns = SATA_PMP_MAX_PORTS;
184 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
185 saa.saa_openings = 1;
186 saa.saa_pool = NULL;
187 saa.saa_quirks = saa.saa_flags = 0;
188 saa.saa_wwpn = saa.saa_wwnn = 0;
189
190 as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
191 scsiprint);
192
193 return (as);
194 }
195
196 int
197 atascsi_detach(struct atascsi *as, int flags)
198 {
199 int rv;
200
201 rv = config_detach((struct device *)as->as_scsibus, flags);
202 if (rv != 0)
203 return (rv);
204
205 free(as->as_host_ports, M_DEVBUF, 0);
206 free(as, M_DEVBUF, sizeof(*as));
207
208 return (0);
209 }
210
211 struct atascsi_port *
212 atascsi_lookup_port(struct scsi_link *link)
213 {
214 struct atascsi *as = link->bus->sb_adapter_softc;
215 struct atascsi_host_port *ahp;
216
217 if (link->target >= link->bus->sb_adapter_buswidth)
218 return (NULL);
219
220 ahp = as->as_host_ports[link->target];
221 if (link->lun >= ahp->ahp_nports)
222 return (NULL);
223
224 return (ahp->ahp_ports[link->lun]);
225 }
226
227 int
228 atascsi_probe(struct scsi_link *link)
229 {
230 struct atascsi *as = link->bus->sb_adapter_softc;
231 struct atascsi_host_port *ahp;
232 struct atascsi_port *ap;
233 struct ata_xfer *xa;
234 struct ata_identify *identify;
235 int port, type, qdepth;
236 int rv;
237 u_int16_t cmdset;
238 u_int16_t validinfo, ultradma;
239 int i, xfermode = -1;
240
241 port = link->target;
242 if (port >= link->bus->sb_adapter_buswidth)
243 return (ENXIO);
244
245 /* if this is a PMP port, check it's valid */
246 if (link->lun > 0) {
247 if (link->lun >= as->as_host_ports[port]->ahp_nports)
248 return (ENXIO);
249 }
250
251 type = as->as_methods->ata_probe(as->as_cookie, port, link->lun);
252 switch (type) {
253 case ATA_PORT_T_DISK:
254 break;
255 case ATA_PORT_T_ATAPI:
256 link->flags |= SDEV_ATAPI;
257 break;
258 case ATA_PORT_T_PM:
259 if (link->lun != 0) {
260 printf("%s.%d.%d: Port multipliers cannot be nested\n",
261 as->as_dev->dv_xname, port, link->lun);
262 rv = ENODEV;
263 goto unsupported;
264 }
265 break;
266 default:
267 rv = ENODEV;
268 goto unsupported;
269 }
270
271 ap = malloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
272 ap->ap_as = as;
273
274 if (link->lun == 0) {
275 ahp = malloc(sizeof(*ahp), M_DEVBUF, M_WAITOK | M_ZERO);
276 ahp->ahp_as = as;
277 ahp->ahp_port = port;
278
279 scsi_iopool_init(&ahp->ahp_iopool, ahp, atascsi_io_get,
280 atascsi_io_put);
281
282 as->as_host_ports[port] = ahp;
283
284 if (type == ATA_PORT_T_PM) {
285 ahp->ahp_nports = SATA_PMP_MAX_PORTS;
286 ap->ap_pmp_port = SATA_PMP_CONTROL_PORT;
287 } else {
288 ahp->ahp_nports = 1;
289 ap->ap_pmp_port = 0;
290 }
291 ahp->ahp_ports = mallocarray(ahp->ahp_nports,
292 sizeof(struct atascsi_port *), M_DEVBUF, M_WAITOK | M_ZERO);
293 } else {
294 ahp = as->as_host_ports[port];
295 ap->ap_pmp_port = link->lun - 1;
296 }
297
298 ap->ap_host_port = ahp;
299 ap->ap_type = type;
300
301 link->pool = &ahp->ahp_iopool;
302
303 /* fetch the device info, except for port multipliers */
304 if (type != ATA_PORT_T_PM) {
305
306 /* devices attached to port multipliers tend not to be
307 * spun up at this point, and sometimes this prevents
308 * identification from working, so we retry a few times
309 * with a fairly long delay.
310 */
311
312 identify = dma_alloc(sizeof(*identify), PR_WAITOK | PR_ZERO);
313
314 int count = (link->lun > 0) ? 6 : 2;
315 while (count--) {
316 rv = atascsi_port_identify(ap, identify);
317 if (rv == 0) {
318 ap->ap_identify = *identify;
319 break;
320 }
321 if (count > 0)
322 delay(5000000);
323 }
324
325 dma_free(identify, sizeof(*identify));
326
327 if (rv != 0) {
328 goto error;
329 }
330 }
331
332 ahp->ahp_ports[link->lun] = ap;
333
334 if (type != ATA_PORT_T_DISK)
335 return (0);
336
337 /*
338 * Early SATA drives (as well as PATA drives) need to have
339 * their transfer mode set properly, otherwise commands that
340 * use DMA will time out.
341 */
342 validinfo = letoh16(ap->ap_identify.validinfo);
343 if (ISSET(validinfo, ATA_ID_VALIDINFO_ULTRADMA)) {
344 ultradma = letoh16(ap->ap_identify.ultradma);
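/* pick the highest Ultra DMA mode the drive reports as supported */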
345 for (i = 7; i >= 0; i--) {
346 if (ultradma & (1 << i)) {
347 xfermode = ATA_SF_XFERMODE_UDMA | i;
348 break;
349 }
350 }
351 }
352 if (xfermode != -1)
353 (void)atascsi_port_set_features(ap, ATA_SF_XFERMODE, xfermode);
354
355 if (as->as_capability & ASAA_CAP_NCQ &&
356 ISSET(letoh16(ap->ap_identify.satacap), ATA_SATACAP_NCQ) &&
357 (link->lun == 0 || as->as_capability & ASAA_CAP_PMP_NCQ)) {
358 ap->ap_ncqdepth = ATA_QDEPTH(letoh16(ap->ap_identify.qdepth));
359 qdepth = MIN(ap->ap_ncqdepth, as->as_ncqdepth);
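/*
 * Some controllers keep one command slot back for their own use,
 * so reduce the usable queue depth accordingly.
 */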
360 if (ISSET(as->as_capability, ASAA_CAP_NEEDS_RESERVED))
361 qdepth--;
362
363 if (qdepth > 1) {
364 SET(ap->ap_features, ATA_PORT_F_NCQ);
365
366 /* Raise the number of openings */
367 link->openings = qdepth;
368
369 /*
370 * XXX for directly attached devices, throw away any xfers
371 * that have tag numbers higher than what the device supports.
372 */
373 if (link->lun == 0) {
374 while (qdepth--) {
375 xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
376 if (xa->tag < link->openings) {
377 xa->state = ATA_S_COMPLETE;
378 scsi_io_put(&ahp->ahp_iopool, xa);
379 }
380 }
381 }
382 }
383 }
384
385 if (ISSET(letoh16(ap->ap_identify.data_set_mgmt),
386 ATA_ID_DATA_SET_MGMT_TRIM))
387 SET(ap->ap_features, ATA_PORT_F_TRIM);
388
389 cmdset = letoh16(ap->ap_identify.cmdset82);
390
391 /* Enable write cache if supported */
392 if (ISSET(cmdset, ATA_IDENTIFY_WRITECACHE)) {
393 /* We don't care if it fails. */
394 (void)atascsi_port_set_features(ap, ATA_SF_WRITECACHE_EN, 0);
395 }
396
397 /* Enable read lookahead if supported */
398 if (ISSET(cmdset, ATA_IDENTIFY_LOOKAHEAD)) {
399 /* We don't care if it fails. */
400 (void)atascsi_port_set_features(ap, ATA_SF_LOOKAHEAD_EN, 0);
401 }
402
403 /*
404 * FREEZE LOCK the device so malicious users can't lock it on us.
405 * As there is no harm in issuing this to devices that don't
406 * support the security feature set we just send it, and don't bother
407 * checking if the device sends a command abort to tell us it doesn't
408 * support it
409 */
410 xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
411 if (xa == NULL)
412 panic("no free xfers on a new port");
413 xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
414 xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
415 xa->flags = ATA_F_POLL;
416 xa->timeout = 1000;
417 xa->complete = ata_polled_complete;
418 xa->pmp_port = ap->ap_pmp_port;
419 xa->atascsi_private = &ahp->ahp_iopool;
420 ata_exec(as, xa);
421 ata_polled(xa); /* we don't care if it doesn't work */
422
423 return (0);
424 error:
425 free(ap, M_DEVBUF, sizeof(*ap));
426 unsupported:
427
428 as->as_methods->ata_free(as->as_cookie, port, link->lun);
429 return (rv);
430 }
431
432 void
433 atascsi_free(struct scsi_link *link)
434 {
435 struct atascsi *as = link->bus->sb_adapter_softc;
436 struct atascsi_host_port *ahp;
437 struct atascsi_port *ap;
438 int port;
439
440 port = link->target;
441 if (port >= link->bus->sb_adapter_buswidth)
442 return;
443
444 ahp = as->as_host_ports[port];
445 if (ahp == NULL)
446 return;
447
448 if (link->lun >= ahp->ahp_nports)
449 return;
450
451 ap = ahp->ahp_ports[link->lun];
452 free(ap, M_DEVBUF, sizeof(*ap));
453 ahp->ahp_ports[link->lun] = NULL;
454
455 as->as_methods->ata_free(as->as_cookie, port, link->lun);
456
457 if (link->lun == ahp->ahp_nports - 1) {
458 /* we've already freed all of ahp->ahp_ports, now
459 * free ahp itself. this relies on the order luns are
460 * detached in scsi_detach_target().
461 */
462 free(ahp, M_DEVBUF, sizeof(*ahp));
463 as->as_host_ports[port] = NULL;
464 }
465 }
466
467 void
468 atascsi_cmd(struct scsi_xfer *xs)
469 {
470 struct scsi_link *link = xs->sc_link;
471 struct atascsi_port *ap;
472
473 ap = atascsi_lookup_port(link);
474 if (ap == NULL) {
475 atascsi_done(xs, XS_DRIVER_STUFFUP);
476 return;
477 }
478
479 switch (ap->ap_type) {
480 case ATA_PORT_T_DISK:
481 atascsi_disk_cmd(xs);
482 break;
483 case ATA_PORT_T_ATAPI:
484 atascsi_atapi_cmd(xs);
485 break;
486 case ATA_PORT_T_PM:
487 atascsi_pmp_cmd(xs);
488 break;
489
490 case ATA_PORT_T_NONE:
491 default:
492 atascsi_done(xs, XS_DRIVER_STUFFUP);
493 break;
494 }
495 }
496
497 void
498 atascsi_disk_cmd(struct scsi_xfer *xs)
499 {
500 struct scsi_link *link = xs->sc_link;
501 struct atascsi *as = link->bus->sb_adapter_softc;
502 struct atascsi_port *ap;
503 struct ata_xfer *xa = xs->io;
504 int flags = 0;
505 struct ata_fis_h2d *fis;
506 u_int64_t lba;
507 u_int32_t sector_count;
508
509 ap = atascsi_lookup_port(link);
510
511 switch (xs->cmd.opcode) {
512 case READ_COMMAND:
513 case READ_10:
514 case READ_12:
515 case READ_16:
516 flags = ATA_F_READ;
517 break;
518 case WRITE_COMMAND:
519 case WRITE_10:
520 case WRITE_12:
521 case WRITE_16:
522 flags = ATA_F_WRITE;
523 /* deal with io outside the switch */
524 break;
525
526 case WRITE_SAME_16:
527 atascsi_disk_write_same_16(xs);
528 return;
529 case UNMAP:
530 atascsi_disk_unmap(xs);
531 return;
532
533 case SYNCHRONIZE_CACHE:
534 atascsi_disk_sync(xs);
535 return;
536 case REQUEST_SENSE:
537 atascsi_disk_sense(xs);
538 return;
539 case INQUIRY:
540 atascsi_disk_inq(xs);
541 return;
542 case READ_CAPACITY:
543 atascsi_disk_capacity(xs);
544 return;
545 case READ_CAPACITY_16:
546 atascsi_disk_capacity16(xs);
547 return;
548
549 case ATA_PASSTHRU_12:
550 atascsi_passthru_12(xs);
551 return;
552 case ATA_PASSTHRU_16:
553 atascsi_passthru_16(xs);
554 return;
555
556 case START_STOP:
557 atascsi_disk_start_stop(xs);
558 return;
559
560 case TEST_UNIT_READY:
561 case PREVENT_ALLOW:
562 atascsi_done(xs, XS_NOERROR);
563 return;
564
565 default:
566 atascsi_done(xs, XS_DRIVER_STUFFUP);
567 return;
568 }
569
570 xa->flags = flags;
571 scsi_cmd_rw_decode(&xs->cmd, &lba, &sector_count);
572 if ((lba >> 48) != 0 || (sector_count >> 16) != 0) {
573 atascsi_done(xs, XS_DRIVER_STUFFUP);
574 return;
575 }
576
577 fis = xa->fis;
578
579 fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
580 fis->lba_low = lba & 0xff;
581 fis->lba_mid = (lba >> 8) & 0xff;
582 fis->lba_high = (lba >> 16) & 0xff;
583
584 if (ISSET(ap->ap_features, ATA_PORT_F_NCQ) &&
585 (xa->tag < ap->ap_ncqdepth) &&
586 !(xs->flags & SCSI_POLL)) {
587 /* Use NCQ */
588 xa->flags |= ATA_F_NCQ;
589 fis->command = (xa->flags & ATA_F_WRITE) ?
590 ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
591 fis->device = ATA_H2D_DEVICE_LBA;
592 fis->lba_low_exp = (lba >> 24) & 0xff;
593 fis->lba_mid_exp = (lba >> 32) & 0xff;
594 fis->lba_high_exp = (lba >> 40) & 0xff;
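/*
 * FPDMA QUEUED commands carry the NCQ tag in bits 7:3 of the count
 * field and move the sector count into the features fields instead.
 */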
595 fis->sector_count = xa->tag << 3;
596 fis->features = sector_count & 0xff;
597 fis->features_exp = (sector_count >> 8) & 0xff;
598 } else if (sector_count > 0x100 || lba > 0xfffffff) {
599 /* Use LBA48 */
600 fis->command = (xa->flags & ATA_F_WRITE) ?
601 ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
602 fis->device = ATA_H2D_DEVICE_LBA;
603 fis->lba_low_exp = (lba >> 24) & 0xff;
604 fis->lba_mid_exp = (lba >> 32) & 0xff;
605 fis->lba_high_exp = (lba >> 40) & 0xff;
606 fis->sector_count = sector_count & 0xff;
607 fis->sector_count_exp = (sector_count >> 8) & 0xff;
608 } else {
609 /* Use LBA */
610 fis->command = (xa->flags & ATA_F_WRITE) ?
611 ATA_C_WRITEDMA : ATA_C_READDMA;
612 fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
613 fis->sector_count = sector_count & 0xff;
614 }
615
616 xa->data = xs->data;
617 xa->datalen = xs->datalen;
618 xa->complete = atascsi_disk_cmd_done;
619 xa->timeout = xs->timeout;
620 xa->pmp_port = ap->ap_pmp_port;
621 xa->atascsi_private = xs;
622 if (xs->flags & SCSI_POLL)
623 xa->flags |= ATA_F_POLL;
624
625 ata_exec(as, xa);
626 }
627
628 void
629 atascsi_disk_cmd_done(struct ata_xfer *xa)
630 {
631 struct scsi_xfer *xs = xa->atascsi_private;
632
633 switch (xa->state) {
634 case ATA_S_COMPLETE:
635 xs->error = XS_NOERROR;
636 break;
637 case ATA_S_ERROR:
638 /* fake sense? */
639 xs->error = XS_DRIVER_STUFFUP;
640 break;
641 case ATA_S_TIMEOUT:
642 xs->error = XS_TIMEOUT;
643 break;
644 default:
645 panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
646 xa->state);
647 }
648
649 xs->resid = xa->resid;
650
651 scsi_done(xs);
652 }
653
654 void
655 atascsi_disk_inq(struct scsi_xfer *xs)
656 {
657 struct scsi_inquiry *inq = (struct scsi_inquiry *)&xs->cmd;
658
659 if (xs->cmdlen != sizeof(*inq)) {
660 atascsi_done(xs, XS_DRIVER_STUFFUP);
661 return;
662 }
663
664 if (ISSET(inq->flags, SI_EVPD)) {
665 switch (inq->pagecode) {
666 case SI_PG_SUPPORTED:
667 atascsi_disk_vpd_supported(xs);
668 break;
669 case SI_PG_SERIAL:
670 atascsi_disk_vpd_serial(xs);
671 break;
672 case SI_PG_DEVID:
673 atascsi_disk_vpd_ident(xs);
674 break;
675 case SI_PG_ATA:
676 atascsi_disk_vpd_ata(xs);
677 break;
678 case SI_PG_DISK_LIMITS:
679 atascsi_disk_vpd_limits(xs);
680 break;
681 case SI_PG_DISK_INFO:
682 atascsi_disk_vpd_info(xs);
683 break;
684 case SI_PG_DISK_THIN:
685 atascsi_disk_vpd_thin(xs);
686 break;
687 default:
688 atascsi_done(xs, XS_DRIVER_STUFFUP);
689 break;
690 }
691 } else
692 atascsi_disk_inquiry(xs);
693 }
694
695 void
696 atascsi_disk_inquiry(struct scsi_xfer *xs)
697 {
698 struct scsi_inquiry_data inq;
699 struct scsi_link *link = xs->sc_link;
700 struct atascsi_port *ap;
701
702 ap = atascsi_lookup_port(link);
703
704 bzero(&inq, sizeof(inq));
705
706 inq.device = T_DIRECT;
707 inq.version = SCSI_REV_SPC3;
708 inq.response_format = SID_SCSI2_RESPONSE;
709 inq.additional_length = SID_SCSI2_ALEN;
710 inq.flags |= SID_CmdQue;
711 bcopy("ATA ", inq.vendor, sizeof(inq.vendor));
712 ata_swapcopy(ap->ap_identify.model, inq.product,
713 sizeof(inq.product));
714 ata_swapcopy(ap->ap_identify.firmware, inq.revision,
715 sizeof(inq.revision));
716
717 scsi_copy_internal_data(xs, &inq, sizeof(inq));
718
719 atascsi_done(xs, XS_NOERROR);
720 }
721
722 void
723 atascsi_disk_vpd_supported(struct scsi_xfer *xs)
724 {
725 struct {
726 struct scsi_vpd_hdr hdr;
727 u_int8_t list[7];
728 } pg;
729 struct scsi_link *link = xs->sc_link;
730 struct atascsi_port *ap;
731 int fat;
732
733 ap = atascsi_lookup_port(link);
734 fat = ISSET(ap->ap_features, ATA_PORT_F_TRIM) ? 0 : 1;
735
736 bzero(&pg, sizeof(pg));
737
738 pg.hdr.device = T_DIRECT;
739 pg.hdr.page_code = SI_PG_SUPPORTED;
740 _lto2b(sizeof(pg.list) - fat, pg.hdr.page_length);
741 pg.list[0] = SI_PG_SUPPORTED;
742 pg.list[1] = SI_PG_SERIAL;
743 pg.list[2] = SI_PG_DEVID;
744 pg.list[3] = SI_PG_ATA;
745 pg.list[4] = SI_PG_DISK_LIMITS;
746 pg.list[5] = SI_PG_DISK_INFO;
747 pg.list[6] = SI_PG_DISK_THIN; /* "trimmed" if fat. get it? tehe. */
748
749 bcopy(&pg, xs->data, MIN(sizeof(pg) - fat, xs->datalen));
750
751 atascsi_done(xs, XS_NOERROR);
752 }
753
754 void
755 atascsi_disk_vpd_serial(struct scsi_xfer *xs)
756 {
757 struct scsi_link *link = xs->sc_link;
758 struct atascsi_port *ap;
759 struct scsi_vpd_serial pg;
760
761 ap = atascsi_lookup_port(link);
762 bzero(&pg, sizeof(pg));
763
764 pg.hdr.device = T_DIRECT;
765 pg.hdr.page_code = SI_PG_SERIAL;
766 _lto2b(sizeof(ap->ap_identify.serial), pg.hdr.page_length);
767 ata_swapcopy(ap->ap_identify.serial, pg.serial,
768 sizeof(ap->ap_identify.serial));
769
770 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
771
772 atascsi_done(xs, XS_NOERROR);
773 }
774
775 void
776 atascsi_disk_vpd_ident(struct scsi_xfer *xs)
777 {
778 struct scsi_link *link = xs->sc_link;
779 struct atascsi_port *ap;
780 struct {
781 struct scsi_vpd_hdr hdr;
782 struct scsi_vpd_devid_hdr devid_hdr;
783 u_int8_t devid[68];
784 } pg;
785 u_int8_t *p;
786 size_t pg_len;
787
788 ap = atascsi_lookup_port(link);
789 bzero(&pg, sizeof(pg));
790 if (letoh16(ap->ap_identify.features87) & ATA_ID_F87_WWN) {
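/* the drive has a 64-bit IEEE NAA world wide name; use it directly */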
791 pg_len = 8;
792
793 pg.devid_hdr.pi_code = VPD_DEVID_CODE_BINARY;
794 pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_NAA;
795
796 ata_swapcopy(&ap->ap_identify.naa_ieee_oui, pg.devid, pg_len);
797 } else {
798 pg_len = 68;
799
800 pg.devid_hdr.pi_code = VPD_DEVID_CODE_ASCII;
801 pg.devid_hdr.flags = VPD_DEVID_ASSOC_LU | VPD_DEVID_TYPE_T10;
802
803 p = pg.devid;
804 bcopy("ATA ", p, 8);
805 p += 8;
806 ata_swapcopy(ap->ap_identify.model, p,
807 sizeof(ap->ap_identify.model));
808 p += sizeof(ap->ap_identify.model);
809 ata_swapcopy(ap->ap_identify.serial, p,
810 sizeof(ap->ap_identify.serial));
811 }
812
813 pg.devid_hdr.len = pg_len;
814 pg_len += sizeof(pg.devid_hdr);
815
816 pg.hdr.device = T_DIRECT;
817 pg.hdr.page_code = SI_PG_DEVID;
818 _lto2b(pg_len, pg.hdr.page_length);
819 pg_len += sizeof(pg.hdr);
820
821 bcopy(&pg, xs->data, MIN(pg_len, xs->datalen));
822
823 atascsi_done(xs, XS_NOERROR);
824 }
825
826 void
827 atascsi_disk_vpd_ata(struct scsi_xfer *xs)
828 {
829 struct scsi_link *link = xs->sc_link;
830 struct atascsi_port *ap;
831 struct scsi_vpd_ata pg;
832
833 ap = atascsi_lookup_port(link);
834 bzero(&pg, sizeof(pg));
835
836 pg.hdr.device = T_DIRECT;
837 pg.hdr.page_code = SI_PG_ATA;
838 _lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
839
840 memset(pg.sat_vendor, ' ', sizeof(pg.sat_vendor));
841 memcpy(pg.sat_vendor, "OpenBSD",
842 MIN(strlen("OpenBSD"), sizeof(pg.sat_vendor)));
843 memset(pg.sat_product, ' ', sizeof(pg.sat_product));
844 memcpy(pg.sat_product, "atascsi",
845 MIN(strlen("atascsi"), sizeof(pg.sat_product)));
846 memset(pg.sat_revision, ' ', sizeof(pg.sat_revision));
847 memcpy(pg.sat_revision, osrelease,
848 MIN(strlen(osrelease), sizeof(pg.sat_revision)));
849
850 /* XXX device signature */
851
852 switch (ap->ap_type) {
853 case ATA_PORT_T_DISK:
854 pg.command_code = VPD_ATA_COMMAND_CODE_ATA;
855 break;
856 case ATA_PORT_T_ATAPI:
857 pg.command_code = VPD_ATA_COMMAND_CODE_ATAPI;
858 break;
859 }
860
861 memcpy(pg.identify, &ap->ap_identify, sizeof(pg.identify));
862
863 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
864
865 atascsi_done(xs, XS_NOERROR);
866 }
867
868 void
869 atascsi_disk_vpd_limits(struct scsi_xfer *xs)
870 {
871 struct scsi_link *link = xs->sc_link;
872 struct atascsi_port *ap;
873 struct scsi_vpd_disk_limits pg;
874
875 ap = atascsi_lookup_port(link);
876 bzero(&pg, sizeof(pg));
877 pg.hdr.device = T_DIRECT;
878 pg.hdr.page_code = SI_PG_DISK_LIMITS;
879 _lto2b(SI_PG_DISK_LIMITS_LEN_THIN, pg.hdr.page_length);
880
881 _lto2b(1 << ata_identify_block_l2p_exp(&ap->ap_identify),
882 pg.optimal_xfer_granularity);
883
884 if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
885 /*
886 * ATA only supports 65535 blocks per TRIM descriptor, so
887 * avoid having to split UNMAP descriptors and overflow the page
888 * limit by using that as a max.
889 */
890 _lto4b(ATA_DSM_TRIM_MAX_LEN, pg.max_unmap_lba_count);
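/* a single 512-byte DSM payload holds 64 eight-byte TRIM descriptors */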
891 _lto4b(512 / 8, pg.max_unmap_desc_count);
892 }
893
894 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
895
896 atascsi_done(xs, XS_NOERROR);
897 }
898
899 void
900 atascsi_disk_vpd_info(struct scsi_xfer *xs)
901 {
902 struct scsi_link *link = xs->sc_link;
903 struct atascsi_port *ap;
904 struct scsi_vpd_disk_info pg;
905
906 ap = atascsi_lookup_port(link);
907 bzero(&pg, sizeof(pg));
908 pg.hdr.device = T_DIRECT;
909 pg.hdr.page_code = SI_PG_DISK_INFO;
910 _lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
911
912 _lto2b(letoh16(ap->ap_identify.rpm), pg.rpm);
913 pg.form_factor = letoh16(ap->ap_identify.form) & ATA_ID_FORM_MASK;
914
915 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
916
917 atascsi_done(xs, XS_NOERROR);
918 }
919
920 void
921 atascsi_disk_vpd_thin(struct scsi_xfer *xs)
922 {
923 struct scsi_link *link = xs->sc_link;
924 struct atascsi_port *ap;
925 struct scsi_vpd_disk_thin pg;
926
927 ap = atascsi_lookup_port(link);
928 if (!ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
929 atascsi_done(xs, XS_DRIVER_STUFFUP);
930 return;
931 }
932
933 bzero(&pg, sizeof(pg));
934 pg.hdr.device = T_DIRECT;
935 pg.hdr.page_code = SI_PG_DISK_THIN;
936 _lto2b(sizeof(pg) - sizeof(pg.hdr), pg.hdr.page_length);
937
938 pg.flags = VPD_DISK_THIN_TPU | VPD_DISK_THIN_TPWS;
939
940 bcopy(&pg, xs->data, MIN(sizeof(pg), xs->datalen));
941
942 atascsi_done(xs, XS_NOERROR);
943 }
944
945 void
946 atascsi_disk_write_same_16(struct scsi_xfer *xs)
947 {
948 struct scsi_link *link = xs->sc_link;
949 struct atascsi *as = link->bus->sb_adapter_softc;
950 struct atascsi_port *ap;
951 struct scsi_write_same_16 *cdb;
952 struct ata_xfer *xa = xs->io;
953 struct ata_fis_h2d *fis;
954 u_int64_t lba;
955 u_int32_t length;
956 u_int64_t desc;
957
958 if (xs->cmdlen != sizeof(*cdb)) {
959 atascsi_done(xs, XS_DRIVER_STUFFUP);
960 return;
961 }
962
963 ap = atascsi_lookup_port(link);
964 cdb = (struct scsi_write_same_16 *)&xs->cmd;
965
966 if (!ISSET(cdb->flags, WRITE_SAME_F_UNMAP) ||
967 !ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
968 /* generate sense data */
969 atascsi_done(xs, XS_DRIVER_STUFFUP);
970 return;
971 }
972
973 if (xs->datalen < 512) {
974 /* generate sense data */
975 atascsi_done(xs, XS_DRIVER_STUFFUP);
976 return;
977 }
978
979 lba = _8btol(cdb->lba);
980 length = _4btol(cdb->length);
981
982 if (length > ATA_DSM_TRIM_MAX_LEN) {
983 /* XXX we don't support requests over 65535 blocks */
984 atascsi_done(xs, XS_DRIVER_STUFFUP);
985 return;
986 }
987
988 xa->data = xs->data;
989 xa->datalen = 512;
990 xa->flags = ATA_F_WRITE;
991 xa->pmp_port = ap->ap_pmp_port;
992 if (xs->flags & SCSI_POLL)
993 xa->flags |= ATA_F_POLL;
994 xa->complete = atascsi_disk_write_same_16_done;
995 xa->atascsi_private = xs;
996 xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
997
998 /* TRIM sends a list of blocks to discard in the databuf. */
999 memset(xa->data, 0, xa->datalen);
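/* each descriptor packs a 48-bit starting LBA with a 16-bit range length */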
1000 desc = htole64(ATA_DSM_TRIM_DESC(lba, length));
1001 memcpy(xa->data, &desc, sizeof(desc));
1002
1003 fis = xa->fis;
1004 fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1005 fis->command = ATA_C_DSM;
1006 fis->features = ATA_DSM_TRIM;
1007 fis->sector_count = 1;
1008
1009 ata_exec(as, xa);
1010 }
1011
1012 void
1013 atascsi_disk_write_same_16_done(struct ata_xfer *xa)
1014 {
1015 struct scsi_xfer *xs = xa->atascsi_private;
1016
1017 switch (xa->state) {
1018 case ATA_S_COMPLETE:
1019 xs->error = XS_NOERROR;
1020 break;
1021 case ATA_S_ERROR:
1022 xs->error = XS_DRIVER_STUFFUP;
1023 break;
1024 case ATA_S_TIMEOUT:
1025 xs->error = XS_TIMEOUT;
1026 break;
1027
1028 default:
1029 panic("atascsi_disk_write_same_16_done: "
1030 "unexpected ata_xfer state (%d)", xa->state);
1031 }
1032
1033 scsi_done(xs);
1034 }
1035
1036 void
1037 atascsi_disk_unmap(struct scsi_xfer *xs)
1038 {
1039 struct ata_xfer *xa = xs->io;
1040 struct scsi_unmap *cdb;
1041 struct scsi_unmap_data *unmap;
1042 u_int len;
1043
1044 if (ISSET(xs->flags, SCSI_POLL) || xs->cmdlen != sizeof(*cdb))
1045 atascsi_done(xs, XS_DRIVER_STUFFUP);
1046
1047 cdb = (struct scsi_unmap *)&xs->cmd;
1048 len = _2btol(cdb->list_len);
1049 if (xs->datalen != len || len < sizeof(*unmap)) {
1050 atascsi_done(xs, XS_DRIVER_STUFFUP);
1051 return;
1052 }
1053
1054 unmap = (struct scsi_unmap_data *)xs->data;
1055 if (_2btol(unmap->data_length) != len) {
1056 atascsi_done(xs, XS_DRIVER_STUFFUP);
1057 return;
1058 }
1059
1060 len = _2btol(unmap->desc_length);
1061 if (len != xs->datalen - sizeof(*unmap)) {
1062 atascsi_done(xs, XS_DRIVER_STUFFUP);
1063 return;
1064 }
1065
1066 if (len < sizeof(struct scsi_unmap_desc)) {
1067 /* no work, no error according to sbc3 */
1068 atascsi_done(xs, XS_NOERROR);
1069 }
1070
1071 if (len > sizeof(struct scsi_unmap_desc) * 64) {
1072 /* more work than we advertised */
1073 atascsi_done(xs, XS_DRIVER_STUFFUP);
1074 return;
1075 }
1076
1077 /* let's go */
1078 if (ISSET(xs->flags, SCSI_NOSLEEP)) {
1079 task_set(&xa->task, atascsi_disk_unmap_task, xs);
1080 task_add(systq, &xa->task);
1081 } else {
1082 /* we can already sleep for memory */
1083 atascsi_disk_unmap_task(xs);
1084 }
1085 }
1086
1087 void
1088 atascsi_disk_unmap_task(void *xxs)
1089 {
1090 struct scsi_xfer *xs = xxs;
1091 struct scsi_link *link = xs->sc_link;
1092 struct atascsi *as = link->bus->sb_adapter_softc;
1093 struct atascsi_port *ap;
1094 struct ata_xfer *xa = xs->io;
1095 struct ata_fis_h2d *fis;
1096 struct scsi_unmap_data *unmap;
1097 struct scsi_unmap_desc *descs, *d;
1098 u_int64_t *trims;
1099 u_int len, i;
1100
1101 trims = dma_alloc(512, PR_WAITOK | PR_ZERO);
1102
1103 ap = atascsi_lookup_port(link);
1104 unmap = (struct scsi_unmap_data *)xs->data;
1105 descs = (struct scsi_unmap_desc *)(unmap + 1);
1106
1107 len = _2btol(unmap->desc_length) / sizeof(*d);
1108 for (i = 0; i < len; i++) {
1109 d = &descs[i];
1110 if (_4btol(d->logical_blocks) > ATA_DSM_TRIM_MAX_LEN)
1111 goto fail;
1112
1113 trims[i] = htole64(ATA_DSM_TRIM_DESC(_8btol(d->logical_addr),
1114 _4btol(d->logical_blocks)));
1115 }
1116
1117 xa->data = trims;
1118 xa->datalen = 512;
1119 xa->flags = ATA_F_WRITE;
1120 xa->pmp_port = ap->ap_pmp_port;
1121 xa->complete = atascsi_disk_unmap_done;
1122 xa->atascsi_private = xs;
1123 xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1124
1125 fis = xa->fis;
1126 fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1127 fis->command = ATA_C_DSM;
1128 fis->features = ATA_DSM_TRIM;
1129 fis->sector_count = 1;
1130
1131 ata_exec(as, xa);
1132 return;
1133
1134 fail:
1135 dma_free(xa->data, 512);
1136 atascsi_done(xs, XS_DRIVER_STUFFUP);
1137 }
1138
1139 void
1140 atascsi_disk_unmap_done(struct ata_xfer *xa)
1141 {
1142 struct scsi_xfer *xs = xa->atascsi_private;
1143
1144 dma_free(xa->data, 512);
1145
1146 switch (xa->state) {
1147 case ATA_S_COMPLETE:
1148 xs->error = XS_NOERROR;
1149 break;
1150 case ATA_S_ERROR:
1151 xs->error = XS_DRIVER_STUFFUP;
1152 break;
1153 case ATA_S_TIMEOUT:
1154 xs->error = XS_TIMEOUT;
1155 break;
1156
1157 default:
1158 panic("atascsi_disk_unmap_done: "
1159 "unexpected ata_xfer state (%d)", xa->state);
1160 }
1161
1162 scsi_done(xs);
1163 }
1164
1165 void
1166 atascsi_disk_sync(struct scsi_xfer *xs)
1167 {
1168 struct scsi_link *link = xs->sc_link;
1169 struct atascsi *as = link->bus->sb_adapter_softc;
1170 struct atascsi_port *ap;
1171 struct ata_xfer *xa = xs->io;
1172
1173 if (xs->cmdlen != sizeof(struct scsi_synchronize_cache)) {
1174 atascsi_done(xs, XS_DRIVER_STUFFUP);
1175 return;
1176 }
1177
1178 ap = atascsi_lookup_port(link);
1179 xa->datalen = 0;
1180 xa->flags = ATA_F_READ;
1181 xa->complete = atascsi_disk_sync_done;
1182 /* Spec says flush cache can take >30 sec, so give it at least 45. */
1183 xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1184 xa->atascsi_private = xs;
1185 xa->pmp_port = ap->ap_pmp_port;
1186 if (xs->flags & SCSI_POLL)
1187 xa->flags |= ATA_F_POLL;
1188
1189 xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1190 xa->fis->command = ATA_C_FLUSH_CACHE;
1191 xa->fis->device = 0;
1192
1193 ata_exec(as, xa);
1194 }
1195
1196 void
1197 atascsi_disk_sync_done(struct ata_xfer *xa)
1198 {
1199 struct scsi_xfer *xs = xa->atascsi_private;
1200
1201 switch (xa->state) {
1202 case ATA_S_COMPLETE:
1203 xs->error = XS_NOERROR;
1204 break;
1205
1206 case ATA_S_ERROR:
1207 case ATA_S_TIMEOUT:
1208 printf("atascsi_disk_sync_done: %s\n",
1209 xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
1210 xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1211 XS_DRIVER_STUFFUP);
1212 break;
1213
1214 default:
1215 panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
1216 xa->state);
1217 }
1218
1219 scsi_done(xs);
1220 }
1221
1222 u_int64_t
1223 ata_identify_blocks(struct ata_identify *id)
1224 {
1225 u_int64_t blocks = 0;
1226 int i;
1227
1228 if (letoh16(id->cmdset83) & 0x0400) {
1229 /* LBA48 feature set supported */
1230 for (i = 3; i >= 0; --i) {
1231 blocks <<= 16;
1232 blocks += letoh16(id->addrsecxt[i]);
1233 }
1234 } else {
1235 blocks = letoh16(id->addrsec[1]);
1236 blocks <<= 16;
1237 blocks += letoh16(id->addrsec[0]);
1238 }
1239
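/*
 * IDENTIFY reports the total number of sectors; return the address
 * of the last LBA, which is what READ CAPACITY expects.
 */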
1240 return (blocks - 1);
1241 }
1242
1243 u_int
1244 ata_identify_blocksize(struct ata_identify *id)
1245 {
1246 u_int blocksize = 512;
1247 u_int16_t p2l_sect = letoh16(id->p2l_sect);
1248
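/*
 * words_lsec gives the logical sector size counted in 16-bit words,
 * hence the final shift left by one to convert it to bytes; drives
 * that don't set it use traditional 512 byte sectors.
 */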
1249 if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1250 ISSET(p2l_sect, ATA_ID_P2L_SECT_SIZESET)) {
1251 blocksize = letoh16(id->words_lsec[1]);
1252 blocksize <<= 16;
1253 blocksize += letoh16(id->words_lsec[0]);
1254 blocksize <<= 1;
1255 }
1256
1257 return (blocksize);
1258 }
1259
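/*
 * Logical sectors per physical sector, expressed as a power-of-two
 * exponent taken from the low bits of the IDENTIFY physical/logical
 * sector size word.
 */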
1260 u_int
1261 ata_identify_block_l2p_exp(struct ata_identify *id)
1262 {
1263 u_int exponent = 0;
1264 u_int16_t p2l_sect = letoh16(id->p2l_sect);
1265
1266 if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1267 ISSET(p2l_sect, ATA_ID_P2L_SECT_SET)) {
1268 exponent = (p2l_sect & ATA_ID_P2L_SECT_SIZE);
1269 }
1270
1271 return (exponent);
1272 }
1273
1274 u_int
1275 ata_identify_block_logical_align(struct ata_identify *id)
1276 {
1277 u_int align = 0;
1278 u_int16_t p2l_sect = letoh16(id->p2l_sect);
1279 u_int16_t logical_align = letoh16(id->logical_align);
1280
1281 if ((p2l_sect & ATA_ID_P2L_SECT_MASK) == ATA_ID_P2L_SECT_VALID &&
1282 ISSET(p2l_sect, ATA_ID_P2L_SECT_SET) &&
1283 (logical_align & ATA_ID_LALIGN_MASK) == ATA_ID_LALIGN_VALID)
1284 align = logical_align & ATA_ID_LALIGN;
1285
1286 return (align);
1287 }
1288
1289 void
1290 atascsi_disk_capacity(struct scsi_xfer *xs)
1291 {
1292 struct scsi_link *link = xs->sc_link;
1293 struct atascsi_port *ap;
1294 struct scsi_read_cap_data rcd;
1295 u_int64_t capacity;
1296
1297 ap = atascsi_lookup_port(link);
1298 if (xs->cmdlen != sizeof(struct scsi_read_capacity)) {
1299 atascsi_done(xs, XS_DRIVER_STUFFUP);
1300 return;
1301 }
1302
1303 bzero(&rcd, sizeof(rcd));
1304 capacity = ata_identify_blocks(&ap->ap_identify);
1305 if (capacity > 0xffffffff)
1306 capacity = 0xffffffff;
1307
1308 _lto4b(capacity, rcd.addr);
1309 _lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1310
1311 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1312
1313 atascsi_done(xs, XS_NOERROR);
1314 }
1315
1316 void
1317 atascsi_disk_capacity16(struct scsi_xfer *xs)
1318 {
1319 struct scsi_link *link = xs->sc_link;
1320 struct atascsi_port *ap;
1321 struct scsi_read_cap_data_16 rcd;
1322 u_int align;
1323 u_int16_t lowest_aligned = 0;
1324
1325 ap = atascsi_lookup_port(link);
1326 if (xs->cmdlen != sizeof(struct scsi_read_capacity_16)) {
1327 atascsi_done(xs, XS_DRIVER_STUFFUP);
1328 return;
1329 }
1330
1331 bzero(&rcd, sizeof(rcd));
1332
1333 _lto8b(ata_identify_blocks(&ap->ap_identify), rcd.addr);
1334 _lto4b(ata_identify_blocksize(&ap->ap_identify), rcd.length);
1335 rcd.logical_per_phys = ata_identify_block_l2p_exp(&ap->ap_identify);
1336 align = ata_identify_block_logical_align(&ap->ap_identify);
1337 if (align > 0)
1338 lowest_aligned = (1 << rcd.logical_per_phys) - align;
1339
1340 if (ISSET(ap->ap_features, ATA_PORT_F_TRIM)) {
1341 SET(lowest_aligned, READ_CAP_16_TPE);
1342
1343 if (ISSET(letoh16(ap->ap_identify.add_support),
1344 ATA_ID_ADD_SUPPORT_DRT))
1345 SET(lowest_aligned, READ_CAP_16_TPRZ);
1346 }
1347 _lto2b(lowest_aligned, rcd.lowest_aligned);
1348
1349 bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
1350
1351 atascsi_done(xs, XS_NOERROR);
1352 }
1353
1354 int
1355 atascsi_passthru_map(struct scsi_xfer *xs, u_int8_t count_proto, u_int8_t flags)
1356 {
1357 struct ata_xfer *xa = xs->io;
1358
1359 xa->data = xs->data;
1360 xa->datalen = xs->datalen;
1361 xa->timeout = xs->timeout;
1362 xa->flags = 0;
1363 if (xs->flags & SCSI_DATA_IN)
1364 xa->flags |= ATA_F_READ;
1365 if (xs->flags & SCSI_DATA_OUT)
1366 xa->flags |= ATA_F_WRITE;
1367 if (xs->flags & SCSI_POLL)
1368 xa->flags |= ATA_F_POLL;
1369
1370 switch (count_proto & ATA_PASSTHRU_PROTO_MASK) {
1371 case ATA_PASSTHRU_PROTO_NON_DATA:
1372 case ATA_PASSTHRU_PROTO_PIO_DATAIN:
1373 case ATA_PASSTHRU_PROTO_PIO_DATAOUT:
1374 xa->flags |= ATA_F_PIO;
1375 break;
1376 default:
1377 /* we don't support this yet */
1378 return (1);
1379 }
1380
1381 xa->atascsi_private = xs;
1382 xa->complete = atascsi_passthru_done;
1383
1384 return (0);
1385 }
1386
1387 void
1388 atascsi_passthru_12(struct scsi_xfer *xs)
1389 {
1390 struct scsi_link *link = xs->sc_link;
1391 struct atascsi *as = link->bus->sb_adapter_softc;
1392 struct atascsi_port *ap;
1393 struct ata_xfer *xa = xs->io;
1394 struct scsi_ata_passthru_12 *cdb;
1395 struct ata_fis_h2d *fis;
1396
1397 if (xs->cmdlen != sizeof(*cdb)) {
1398 atascsi_done(xs, XS_DRIVER_STUFFUP);
1399 return;
1400 }
1401
1402 cdb = (struct scsi_ata_passthru_12 *)&xs->cmd;
1403 /* validate cdb */
1404
1405 if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1406 atascsi_done(xs, XS_DRIVER_STUFFUP);
1407 return;
1408 }
1409
1410 ap = atascsi_lookup_port(link);
1411 fis = xa->fis;
1412 fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1413 fis->command = cdb->command;
1414 fis->features = cdb->features;
1415 fis->lba_low = cdb->lba_low;
1416 fis->lba_mid = cdb->lba_mid;
1417 fis->lba_high = cdb->lba_high;
1418 fis->device = cdb->device;
1419 fis->sector_count = cdb->sector_count;
1420 xa->pmp_port = ap->ap_pmp_port;
1421
1422 ata_exec(as, xa);
1423 }
1424
1425 void
1426 atascsi_passthru_16(struct scsi_xfer *xs)
1427 {
1428 struct scsi_link *link = xs->sc_link;
1429 struct atascsi *as = link->bus->sb_adapter_softc;
1430 struct atascsi_port *ap;
1431 struct ata_xfer *xa = xs->io;
1432 struct scsi_ata_passthru_16 *cdb;
1433 struct ata_fis_h2d *fis;
1434
1435 if (xs->cmdlen != sizeof(*cdb)) {
1436 atascsi_done(xs, XS_DRIVER_STUFFUP);
1437 return;
1438 }
1439
1440 cdb = (struct scsi_ata_passthru_16 *)&xs->cmd;
1441 /* validate cdb */
1442
1443 if (atascsi_passthru_map(xs, cdb->count_proto, cdb->flags) != 0) {
1444 atascsi_done(xs, XS_DRIVER_STUFFUP);
1445 return;
1446 }
1447
1448 ap = atascsi_lookup_port(link);
1449 fis = xa->fis;
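/*
 * Each taskfile register in the 16-byte pass-through CDB is a two-byte
 * field: byte 0 holds the "previous" (extended) value and byte 1 the
 * current one, mapping onto the _exp and base FIS registers.
 */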
1450 fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1451 fis->command = cdb->command;
1452 fis->features = cdb->features[1];
1453 fis->lba_low = cdb->lba_low[1];
1454 fis->lba_mid = cdb->lba_mid[1];
1455 fis->lba_high = cdb->lba_high[1];
1456 fis->device = cdb->device;
1457 fis->lba_low_exp = cdb->lba_low[0];
1458 fis->lba_mid_exp = cdb->lba_mid[0];
1459 fis->lba_high_exp = cdb->lba_high[0];
1460 fis->features_exp = cdb->features[0];
1461 fis->sector_count = cdb->sector_count[1];
1462 fis->sector_count_exp = cdb->sector_count[0];
1463 xa->pmp_port = ap->ap_pmp_port;
1464
1465 ata_exec(as, xa);
1466 }
1467
1468 void
1469 atascsi_passthru_done(struct ata_xfer *xa)
1470 {
1471 struct scsi_xfer *xs = xa->atascsi_private;
1472
1473 /*
1474 * XXX need to generate sense if cdb wants it
1475 */
1476
1477 switch (xa->state) {
1478 case ATA_S_COMPLETE:
1479 xs->error = XS_NOERROR;
1480 break;
1481 case ATA_S_ERROR:
1482 xs->error = XS_DRIVER_STUFFUP;
1483 break;
1484 case ATA_S_TIMEOUT:
1485 printf("atascsi_passthru_done, timeout\n");
1486 xs->error = XS_TIMEOUT;
1487 break;
1488 default:
1489 panic("atascsi_passthru_done: unexpected ata_xfer state (%d)",
1490 xa->state);
1491 }
1492
1493 xs->resid = xa->resid;
1494
1495 scsi_done(xs);
1496 }
1497
1498 void
1499 atascsi_disk_sense(struct scsi_xfer *xs)
1500 {
1501 struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;
1502
1503 bzero(xs->data, xs->datalen);
1504 /* check datalen > sizeof(struct scsi_sense_data)? */
1505 sd->error_code = SSD_ERRCODE_CURRENT;
1506 sd->flags = SKEY_NO_SENSE;
1507
1508 atascsi_done(xs, XS_NOERROR);
1509 }
1510
1511 void
1512 atascsi_disk_start_stop(struct scsi_xfer *xs)
1513 {
1514 struct scsi_link *link = xs->sc_link;
1515 struct atascsi *as = link->bus->sb_adapter_softc;
1516 struct atascsi_port *ap;
1517 struct ata_xfer *xa = xs->io;
1518 struct scsi_start_stop *ss = (struct scsi_start_stop *)&xs->cmd;
1519
1520 if (xs->cmdlen != sizeof(*ss)) {
1521 atascsi_done(xs, XS_DRIVER_STUFFUP);
1522 return;
1523 }
1524
1525 if (ss->how != SSS_STOP) {
1526 atascsi_done(xs, XS_NOERROR);
1527 return;
1528 }
1529
1530 /*
1531 * A SCSI START STOP UNIT command with the START bit set to
1532 * zero gets translated into an ATA FLUSH CACHE command
1533 * followed by an ATA STANDBY IMMEDIATE command.
1534 */
1535 ap = atascsi_lookup_port(link);
1536 xa->datalen = 0;
1537 xa->flags = ATA_F_READ;
1538 xa->complete = atascsi_disk_start_stop_done;
1539 /* Spec says flush cache can take >30 sec, so give it at least 45. */
1540 xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1541 xa->pmp_port = ap->ap_pmp_port;
1542 xa->atascsi_private = xs;
1543 if (xs->flags & SCSI_POLL)
1544 xa->flags |= ATA_F_POLL;
1545
1546 xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1547 xa->fis->command = ATA_C_FLUSH_CACHE;
1548 xa->fis->device = 0;
1549
1550 ata_exec(as, xa);
1551 }
1552
1553 void
1554 atascsi_disk_start_stop_done(struct ata_xfer *xa)
1555 {
1556 struct scsi_xfer *xs = xa->atascsi_private;
1557 struct scsi_link *link = xs->sc_link;
1558 struct atascsi *as = link->bus->sb_adapter_softc;
1559 struct atascsi_port *ap;
1560
1561 switch (xa->state) {
1562 case ATA_S_COMPLETE:
1563 break;
1564
1565 case ATA_S_ERROR:
1566 case ATA_S_TIMEOUT:
1567 xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
1568 XS_DRIVER_STUFFUP);
1569 xs->resid = xa->resid;
1570 scsi_done(xs);
1571 return;
1572
1573 default:
1574 panic("atascsi_disk_start_stop_done: unexpected ata_xfer state (%d)",
1575 xa->state);
1576 }
1577
1578 /*
1579 * The FLUSH CACHE command completed successfully; now issue
1580 * the STANDBY IMMEDIATE command.
1581 */
1582 ap = atascsi_lookup_port(link);
1583 xa->datalen = 0;
1584 xa->flags = ATA_F_READ;
1585 xa->state = ATA_S_SETUP;
1586 xa->complete = atascsi_disk_cmd_done;
1587 /* Spec says flush cache can take >30 sec, so give it at least 45. */
1588 xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
1589 xa->pmp_port = ap->ap_pmp_port;
1590 xa->atascsi_private = xs;
1591 if (xs->flags & SCSI_POLL)
1592 xa->flags |= ATA_F_POLL;
1593
1594 xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1595 xa->fis->command = ATA_C_STANDBY_IMMED;
1596 xa->fis->device = 0;
1597
1598 ata_exec(as, xa);
1599 }
1600
1601 void
1602 atascsi_atapi_cmd(struct scsi_xfer *xs)
1603 {
1604 struct scsi_link *link = xs->sc_link;
1605 struct atascsi *as = link->bus->sb_adapter_softc;
1606 struct atascsi_port *ap;
1607 struct ata_xfer *xa = xs->io;
1608 struct ata_fis_h2d *fis;
1609
1610 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1611 case SCSI_DATA_IN:
1612 xa->flags = ATA_F_PACKET | ATA_F_READ;
1613 break;
1614 case SCSI_DATA_OUT:
1615 xa->flags = ATA_F_PACKET | ATA_F_WRITE;
1616 break;
1617 default:
1618 xa->flags = ATA_F_PACKET;
1619 }
1620 xa->flags |= ATA_F_GET_RFIS;
1621
1622 ap = atascsi_lookup_port(link);
1623 xa->data = xs->data;
1624 xa->datalen = xs->datalen;
1625 xa->complete = atascsi_atapi_cmd_done;
1626 xa->timeout = xs->timeout;
1627 xa->pmp_port = ap->ap_pmp_port;
1628 xa->atascsi_private = xs;
1629 if (xs->flags & SCSI_POLL)
1630 xa->flags |= ATA_F_POLL;
1631
1632 fis = xa->fis;
1633 fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1634 fis->command = ATA_C_PACKET;
1635 fis->device = 0;
1636 fis->sector_count = xa->tag << 3;
1637 fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
1638 ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
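/* the ATAPI byte count limit (0x2000) lives in the lba mid/high bytes */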
1639 fis->lba_mid = 0x00;
1640 fis->lba_high = 0x20;
1641
1642 /* Copy SCSI command into ATAPI packet. */
1643 memcpy(xa->packetcmd, &xs->cmd, xs->cmdlen);
1644
1645 ata_exec(as, xa);
1646 }
1647
1648 void
1649 atascsi_atapi_cmd_done(struct ata_xfer *xa)
1650 {
1651 struct scsi_xfer *xs = xa->atascsi_private;
1652 struct scsi_sense_data *sd = &xs->sense;
1653
1654 switch (xa->state) {
1655 case ATA_S_COMPLETE:
1656 xs->error = XS_NOERROR;
1657 break;
1658 case ATA_S_ERROR:
1659 /* Return PACKET sense data */
1660 sd->error_code = SSD_ERRCODE_CURRENT;
1661 sd->flags = (xa->rfis.error & 0xf0) >> 4;
1662 if (xa->rfis.error & 0x04)
1663 sd->flags = SKEY_ILLEGAL_REQUEST;
1664 if (xa->rfis.error & 0x02)
1665 sd->flags |= SSD_EOM;
1666 if (xa->rfis.error & 0x01)
1667 sd->flags |= SSD_ILI;
1668 xs->error = XS_SENSE;
1669 break;
1670 case ATA_S_TIMEOUT:
1671 printf("atascsi_atapi_cmd_done, timeout\n");
1672 xs->error = XS_TIMEOUT;
1673 break;
1674 default:
1675 panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
1676 xa->state);
1677 }
1678
1679 xs->resid = xa->resid;
1680
1681 scsi_done(xs);
1682 }
1683
1684 void
1685 atascsi_pmp_cmd(struct scsi_xfer *xs)
1686 {
1687 switch (xs->cmd.opcode) {
1688 case REQUEST_SENSE:
1689 atascsi_pmp_sense(xs);
1690 return;
1691 case INQUIRY:
1692 atascsi_pmp_inq(xs);
1693 return;
1694
1695 case TEST_UNIT_READY:
1696 case PREVENT_ALLOW:
1697 atascsi_done(xs, XS_NOERROR);
1698 return;
1699
1700 default:
1701 atascsi_done(xs, XS_DRIVER_STUFFUP);
1702 return;
1703 }
1704 }
1705
1706 void
1707 atascsi_pmp_sense(struct scsi_xfer *xs)
1708 {
1709 struct scsi_sense_data *sd = (struct scsi_sense_data *)xs->data;
1710
1711 bzero(xs->data, xs->datalen);
1712 sd->error_code = SSD_ERRCODE_CURRENT;
1713 sd->flags = SKEY_NO_SENSE;
1714
1715 atascsi_done(xs, XS_NOERROR);
1716 }
1717
1718 void
1719 atascsi_pmp_inq(struct scsi_xfer *xs)
1720 {
1721 struct scsi_inquiry_data inq;
1722 struct scsi_inquiry *in_inq = (struct scsi_inquiry *)&xs->cmd;
1723
1724 if (ISSET(in_inq->flags, SI_EVPD)) {
1725 /* any evpd pages we need to support here? */
1726 atascsi_done(xs, XS_DRIVER_STUFFUP);
1727 return;
1728 }
1729
1730 bzero(&inq, sizeof(inq));
1731 inq.device = 0x1E; /* "well known logical unit" seems reasonable */
1732 inq.version = SCSI_REV_SPC3;
1733 inq.response_format = SID_SCSI2_RESPONSE;
1734 inq.additional_length = SID_SCSI2_ALEN;
1735 inq.flags |= SID_CmdQue;
1736 bcopy("ATA ", inq.vendor, sizeof(inq.vendor));
1737
1738 /* should use the data from atascsi_pmp_identify here?
1739 * not sure how useful the chip id is, but maybe it'd be
1740 * nice to include the number of ports.
1741 */
1742 bcopy("Port Multiplier", inq.product, sizeof(inq.product));
1743 bcopy(" ", inq.revision, sizeof(inq.revision));
1744
1745 scsi_copy_internal_data(xs, &inq, sizeof(inq));
1746
1747 atascsi_done(xs, XS_NOERROR);
1748 }
1749
1750 void
1751 atascsi_done(struct scsi_xfer *xs, int error)
1752 {
1753 xs->error = error;
1754 scsi_done(xs);
1755 }
1756
1757 void
1758 ata_exec(struct atascsi *as, struct ata_xfer *xa)
1759 {
1760 as->as_methods->ata_cmd(xa);
1761 }
1762
1763 void *
1764 atascsi_io_get(void *cookie)
1765 {
1766 struct atascsi_host_port *ahp = cookie;
1767 struct atascsi *as = ahp->ahp_as;
1768 struct ata_xfer *xa;
1769
1770 xa = as->as_methods->ata_get_xfer(as->as_cookie, ahp->ahp_port);
1771 if (xa != NULL)
1772 xa->fis->type = ATA_FIS_TYPE_H2D;
1773
1774 return (xa);
1775 }
1776
1777 void
1778 atascsi_io_put(void *cookie, void *io)
1779 {
1780 struct atascsi_host_port *ahp = cookie;
1781 struct atascsi *as = ahp->ahp_as;
1782 struct ata_xfer *xa = io;
1783
1784 xa->state = ATA_S_COMPLETE; /* XXX this state machine is dumb */
1785 as->as_methods->ata_put_xfer(xa);
1786 }
1787
1788 void
1789 ata_polled_complete(struct ata_xfer *xa)
1790 {
1791 /* do nothing */
1792 }
1793
1794 int
1795 ata_polled(struct ata_xfer *xa)
1796 {
1797 int rv;
1798
1799 if (!ISSET(xa->flags, ATA_F_DONE))
1800 panic("ata_polled: xa isn't complete");
1801
1802 switch (xa->state) {
1803 case ATA_S_COMPLETE:
1804 rv = 0;
1805 break;
1806 case ATA_S_ERROR:
1807 case ATA_S_TIMEOUT:
1808 rv = EIO;
1809 break;
1810 default:
1811 panic("ata_polled: xa state (%d)",
1812 xa->state);
1813 }
1814
1815 scsi_io_put(xa->atascsi_private, xa);
1816
1817 return (rv);
1818 }
1819
1820 void
1821 ata_complete(struct ata_xfer *xa)
1822 {
1823 SET(xa->flags, ATA_F_DONE);
1824 xa->complete(xa);
1825 }
1826
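/*
 * IDENTIFY strings are stored as little-endian 16-bit words with the
 * two characters of each word swapped; byte-swap each word while
 * copying so the text reads correctly.
 */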
1827 void
1828 ata_swapcopy(void *src, void *dst, size_t len)
1829 {
1830 u_int16_t *s = src, *d = dst;
1831 int i;
1832
1833 len /= 2;
1834
1835 for (i = 0; i < len; i++)
1836 d[i] = swap16(s[i]);
1837 }
1838
1839 int
1840 atascsi_port_identify(struct atascsi_port *ap, struct ata_identify *identify)
1841 {
1842 struct atascsi *as = ap->ap_as;
1843 struct atascsi_host_port *ahp = ap->ap_host_port;
1844 struct ata_xfer *xa;
1845
1846 xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1847 if (xa == NULL)
1848 panic("no free xfers on a new port");
1849 xa->pmp_port = ap->ap_pmp_port;
1850 xa->data = identify;
1851 xa->datalen = sizeof(*identify);
1852 xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1853 xa->fis->command = (ap->ap_type == ATA_PORT_T_DISK) ?
1854 ATA_C_IDENTIFY : ATA_C_IDENTIFY_PACKET;
1855 xa->fis->device = 0;
1856 xa->flags = ATA_F_READ | ATA_F_PIO | ATA_F_POLL;
1857 xa->timeout = 1000;
1858 xa->complete = ata_polled_complete;
1859 xa->atascsi_private = &ahp->ahp_iopool;
1860 ata_exec(as, xa);
1861 return (ata_polled(xa));
1862 }
1863
1864 int
1865 atascsi_port_set_features(struct atascsi_port *ap, int subcommand, int arg)
1866 {
1867 struct atascsi *as = ap->ap_as;
1868 struct atascsi_host_port *ahp = ap->ap_host_port;
1869 struct ata_xfer *xa;
1870
1871 xa = scsi_io_get(&ahp->ahp_iopool, SCSI_NOSLEEP);
1872 if (xa == NULL)
1873 panic("no free xfers on a new port");
1874 xa->fis->command = ATA_C_SET_FEATURES;
1875 xa->fis->features = subcommand;
1876 xa->fis->sector_count = arg;
1877 xa->fis->flags = ATA_H2D_FLAGS_CMD | ap->ap_pmp_port;
1878 xa->flags = ATA_F_POLL;
1879 xa->timeout = 1000;
1880 xa->complete = ata_polled_complete;
1881 xa->pmp_port = ap->ap_pmp_port;
1882 xa->atascsi_private = &ahp->ahp_iopool;
1883 ata_exec(as, xa);
1884 return (ata_polled(xa));
1885 }
1886