/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>

#include "ata_common.h"
#include "atapi.h"

/* SCSA entry points */

static int atapi_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void));
static void atapi_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int atapi_tran_abort(struct scsi_address *ap, struct scsi_pkt *spktp);
static int atapi_tran_reset(struct scsi_address *ap, int level);
static int atapi_tran_getcap(struct scsi_address *ap, char *capstr, int whom);
static int atapi_tran_setcap(struct scsi_address *ap, char *capstr,
    int value, int whom);
static struct scsi_pkt	*atapi_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *spktp, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(caddr_t), caddr_t arg);
static void atapi_tran_destroy_pkt(struct scsi_address *ap,
    struct scsi_pkt *spktp);
static void atapi_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *spktp);
static void atapi_tran_sync_pkt(struct scsi_address *ap,
    struct scsi_pkt *spktp);
static int atapi_tran_start(struct scsi_address *ap, struct scsi_pkt *spktp);

/*
 * packet callbacks
 */
static void atapi_complete(ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
    int do_callback);
static int atapi_id_update(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
    ata_pkt_t *ata_pktp);


/* external dependencies */

char _depends_on[] = "misc/scsi";

/*
 * Local static data
 */

#if 0
static ddi_dma_lim_t atapi_dma_limits = {
	0,		/* address low				*/
	0xffffffffU,	/* address high				*/
	0,		/* counter max				*/
	1,		/* burstsize				*/
	DMA_UNIT_8,	/* minimum xfer				*/
	0,		/* dma speed				*/
	(uint_t)DMALIM_VER0,	/* version			*/
	0xffffffffU,	/* address register			*/
	0xffffffffU,	/* counter register			*/
	1,		/* granular				*/
	1,		/* scatter/gather list length		*/
	0xffffffffU	/* request size				*/
};
#endif

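/*
 * Tunables: atapi_use_static_geometry selects the canned ATAPI drive
 * geometry when answering SCSI_CAP_GEOMETRY rather than re-reading
 * the IDENTIFY PACKET DEVICE data; atapi_arq_enable gates Auto
 * Request Sense (ARQ) handling for all ATAPI targets.
 */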
static	int	atapi_use_static_geometry = TRUE;
static	int	atapi_arq_enable = TRUE;


/*
 *
 * Call SCSA init to initialize the ATAPI half of the driver
 *
 */

int
atapi_attach(ata_ctl_t *ata_ctlp)
{
	dev_info_t	*dip = ata_ctlp->ac_dip;
	scsi_hba_tran_t *tran;

	ADBG_TRACE(("atapi_attach entered\n"));

	/* allocate transport structure */

	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	if (tran == NULL) {
		ADBG_WARN(("atapi_attach: scsi_hba_tran_alloc failed\n"));
		goto errout;
	}

	ata_ctlp->ac_atapi_tran = tran;
	ata_ctlp->ac_flags |= AC_SCSI_HBA_TRAN_ALLOC;

	/* initialize transport structure */

	tran->tran_hba_private = ata_ctlp;
	tran->tran_tgt_private = NULL;

	tran->tran_tgt_init = atapi_tran_tgt_init;
	tran->tran_tgt_probe = atapi_tran_tgt_probe;
	tran->tran_tgt_free = atapi_tran_tgt_free;
	tran->tran_start = atapi_tran_start;
	tran->tran_reset = atapi_tran_reset;
	tran->tran_abort = atapi_tran_abort;
	tran->tran_getcap = atapi_tran_getcap;
	tran->tran_setcap = atapi_tran_setcap;
	tran->tran_init_pkt = atapi_tran_init_pkt;
	tran->tran_destroy_pkt = atapi_tran_destroy_pkt;
	tran->tran_dmafree = atapi_tran_dmafree;
	tran->tran_sync_pkt = atapi_tran_sync_pkt;

	if (scsi_hba_attach_setup(ata_ctlp->ac_dip, &ata_pciide_dma_attr, tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		ADBG_WARN(("atapi_attach: scsi_hba_attach_setup failed\n"));
		goto errout;
	}

	ata_ctlp->ac_flags |= AC_SCSI_HBA_ATTACH;

	return (TRUE);

errout:
	atapi_detach(ata_ctlp);
	return (FALSE);
}


/*
 *
 * destroy the atapi sub-system
 *
 */

void
atapi_detach(
	ata_ctl_t *ata_ctlp)
{
	ADBG_TRACE(("atapi_detach entered\n"));

	if (ata_ctlp->ac_flags & AC_SCSI_HBA_ATTACH)
		(void) scsi_hba_detach(ata_ctlp->ac_dip);

	if (ata_ctlp->ac_flags & AC_SCSI_HBA_TRAN_ALLOC)
		scsi_hba_tran_free(ata_ctlp->ac_atapi_tran);
}



/*
 *
 * initialize the ATAPI drive's soft-state based on the
 * response to IDENTIFY PACKET DEVICE command
 *
 */

int
atapi_init_drive(
	ata_drv_t *ata_drvp)
{
	ADBG_TRACE(("atapi_init_drive entered\n"));

	/* Determine ATAPI CDB size */

	switch (ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_PKT_SZ) {

	case ATAPI_ID_CFG_PKT_12B:
		ata_drvp->ad_cdb_len = 12;
		break;
	case ATAPI_ID_CFG_PKT_16B:
		ata_drvp->ad_cdb_len = 16;
		break;
	default:
		ADBG_WARN(("atapi_init_drive: bad pkt size support\n"));
		return (FALSE);
	}

	/* determine if drive gives an intr when it wants the CDB */

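	/*
	 * If the DRQ type is not "interrupt" the drive will not
	 * interrupt when it is ready to accept the CDB, so the packet
	 * FSM has to wait for DRQ itself before sending the CDB;
	 * AD_NO_CDB_INTR records that requirement.
	 */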
	if ((ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_DRQ_TYPE) !=
	    ATAPI_ID_CFG_DRQ_INTR)
		ata_drvp->ad_flags |= AD_NO_CDB_INTR;

	return (TRUE);
}


/*
 *
 * destroy an atapi drive
 *
 */

/* ARGSUSED */
void
atapi_uninit_drive(
	ata_drv_t *ata_drvp)
{
	ADBG_TRACE(("atapi_uninit_drive entered\n"));
}

/*
 *
 * Issue an IDENTIFY PACKET (ATAPI) DEVICE command
 *
 */

int
atapi_id(
	ddi_acc_handle_t io_hdl1,
	caddr_t		 ioaddr1,
	ddi_acc_handle_t io_hdl2,
	caddr_t		 ioaddr2,
	struct ata_id	*ata_idp)
{
	int	rc;

	ADBG_TRACE(("atapi_id entered\n"));

	rc = ata_id_common(ATC_ID_PACKET_DEVICE, FALSE, io_hdl1, ioaddr1,
	    io_hdl2, ioaddr2, ata_idp);

	if (!rc)
		return (FALSE);

	if ((ata_idp->ai_config & ATAC_ATAPI_TYPE_MASK) != ATAC_ATAPI_TYPE)
		return (FALSE);

	return (TRUE);
}


/*
 *
 * Check the device's register block for the ATAPI signature.
 *
 * Although the spec says the sector count, sector number and device/head
 * registers are also part of the signature, for some unknown reason, this
 * routine only checks the cyl hi and cyl low registers. I'm just
 * guessing, but it might be because ATA and ATAPI devices return
 * identical values in those registers and we actually rely on the
 * IDENTIFY DEVICE and IDENTIFY PACKET DEVICE commands to recognize the
 * device type.
 *
 */

int
atapi_signature(
	ddi_acc_handle_t io_hdl,
	caddr_t ioaddr)
{
	int	rc = FALSE;
	ADBG_TRACE(("atapi_signature entered\n"));

	if (ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_HCYL) == ATAPI_SIG_HI &&
	    ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_LCYL) == ATAPI_SIG_LO)
		rc = TRUE;

	/*
	 * The following is a little bit of bullet proofing.
	 *
	 * When some drives are configured on a master-only bus they
	 * "shadow" their registers for the not-present slave drive.
	 * This is bogus and if you're not careful it may cause a
	 * master-only drive to be mistakenly recognized as both
	 * master and slave. By clearing the signature registers here
	 * I can make certain that when ata_drive_type() switches from
	 * the master to slave drive that I'll read back non-signature
	 * values regardless of whether the master-only drive does
	 * the "shadow" register trick. This prevents a bogus
	 * IDENTIFY PACKET DEVICE command from being issued which
	 * a really bogus master-only drive will return "shadow"
	 * data for.
	 */
	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_HCYL, 0);
	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_LCYL, 0);

	return (rc);
}


/*
 *
 * SCSA tran_tgt_init entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_tgt_init(
	dev_info_t	*hba_dip,
	dev_info_t	*tgt_dip,
	scsi_hba_tran_t *hba_tran,
	struct scsi_device *sd)
{
	gtgt_t	  *gtgtp;	/* GHD's per-target-instance structure */
	ata_ctl_t *ata_ctlp;
	ata_tgt_t *ata_tgtp;
	ata_drv_t *ata_drvp;
	struct scsi_address *ap;
	int	rc = DDI_SUCCESS;

	ADBG_TRACE(("atapi_tran_tgt_init entered\n"));

	/*
	 * Qualification of targ, lun, and ATAPI device presence
	 *  have already been taken care of by ata_bus_ctl
	 */

	/* store pointer to drive struct in cloned tran struct */

	ata_ctlp = TRAN2CTL(hba_tran);
	ap = &sd->sd_address;

	ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);

	/*
	 * Create the "atapi" property so the target driver knows
	 * to use the correct set of SCSI commands
	 */
	if (!ata_prop_create(tgt_dip, ata_drvp, "atapi")) {
		return (DDI_FAILURE);
	}

	gtgtp = ghd_target_init(hba_dip, tgt_dip, &ata_ctlp->ac_ccc,
	    sizeof (ata_tgt_t), ata_ctlp,
	    ap->a_target, ap->a_lun);

	/* tran_tgt_private points to gtgt_t */
	hba_tran->tran_tgt_private = gtgtp;

	/* gt_tgt_private points to ata_tgt_t */
	ata_tgtp = GTGTP2ATATGTP(gtgtp);

	/* initialize the per-target-instance data */
	ata_tgtp->at_drvp = ata_drvp;
	ata_tgtp->at_dma_attr = ata_pciide_dma_attr;
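	/*
	 * ac_max_transfer is expressed in sectors; shifting by
	 * SCTRSHFT converts it to the byte count that
	 * dma_attr_maxxfer expects.
	 */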
	ata_tgtp->at_dma_attr.dma_attr_maxxfer =
	    ata_ctlp->ac_max_transfer << SCTRSHFT;

	return (rc);
}


/*
 *
 * SCSA tran_tgt_probe entry point
 *
 */

static int
atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void))
{
	ADBG_TRACE(("atapi_tran_tgt_probe entered\n"));

	return (scsi_hba_probe(sd, callback));
}


/*
 *
 * SCSA tran_tgt_free entry point
 *
 */

/* ARGSUSED */
static void
atapi_tran_tgt_free(
	dev_info_t	*hba_dip,
	dev_info_t	*tgt_dip,
	scsi_hba_tran_t	*hba_tran,
	struct scsi_device *sd)
{
	ADBG_TRACE(("atapi_tran_tgt_free entered\n"));

	ghd_target_free(hba_dip, tgt_dip, &TRAN2ATAP(hba_tran)->ac_ccc,
	    TRAN2GTGTP(hba_tran));
	hba_tran->tran_tgt_private = NULL;
}



/*
 *
 * SCSA tran_abort entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_abort(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	ADBG_TRACE(("atapi_tran_abort entered\n"));

	if (spktp) {
		return (ghd_tran_abort(&ADDR2CTL(ap)->ac_ccc, PKTP2GCMDP(spktp),
		    ADDR2GTGTP(ap), NULL));
	}

	return (ghd_tran_abort_lun(&ADDR2CTL(ap)->ac_ccc, ADDR2GTGTP(ap),
	    NULL));
}


/*
 *
 * SCSA tran_reset entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_reset(
	struct scsi_address *ap,
	int level)
{
	ADBG_TRACE(("atapi_tran_reset entered\n"));

	if (level == RESET_TARGET)
		return (ghd_tran_reset_target(&ADDR2CTL(ap)->ac_ccc,
		    ADDR2GTGTP(ap), NULL));
	if (level == RESET_ALL)
		return (ghd_tran_reset_bus(&ADDR2CTL(ap)->ac_ccc,
		    ADDR2GTGTP(ap), NULL));
	return (FALSE);

}


/*
 *
 * SCSA tran_setcap entry point
 *
 */

static int
atapi_tran_setcap(
	struct scsi_address *ap,
	char *capstr,
	int value,
	int whom)
{
	gtgt_t	  *gtgtp = ADDR2GTGTP(ap);
	ata_tgt_t *tgtp = GTGTP2ATATGTP(gtgtp);

	ADBG_TRACE(("atapi_tran_setcap entered\n"));

	switch (scsi_hba_lookup_capstr(capstr)) {
	case SCSI_CAP_SECTOR_SIZE:
		tgtp->at_dma_attr.dma_attr_granular = (uint_t)value;
		return (TRUE);

	case SCSI_CAP_ARQ:
		if (whom) {
			tgtp->at_arq = value;
			return (TRUE);
		}
		break;

	case SCSI_CAP_TOTAL_SECTORS:
		tgtp->at_total_sectors = value;
		return (TRUE);
	}
	return (FALSE);
}


/*
 *
 * SCSA tran_getcap entry point
 *
 */

static int
atapi_tran_getcap(
	struct scsi_address *ap,
	char *capstr,
	int whom)
{
	struct ata_id	 ata_id;
	struct ata_id	*ata_idp;
	ata_ctl_t	*ata_ctlp;
	ata_drv_t	*ata_drvp;
	gtgt_t		*gtgtp;
	int		 rval = -1;

	ADBG_TRACE(("atapi_tran_getcap entered\n"));

	if (capstr == NULL || whom == 0)
		return (-1);

	ata_ctlp = ADDR2CTL(ap);

	switch (scsi_hba_lookup_capstr(capstr)) {
	case SCSI_CAP_ARQ:
		rval = TRUE;
		break;

	case SCSI_CAP_INITIATOR_ID:
		rval = 7;
		break;

	case SCSI_CAP_DMA_MAX:
		/* XXX - what should the real limit be?? */
		/* limit to 64K ??? */
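		/*
		 * This appears to assume one PRD entry per 4 KB page,
		 * holding one entry in reserve (presumably for a
		 * transfer whose first page is not page aligned).
		 */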
		rval = 4096 * (ATA_DMA_NSEGS - 1);
		break;

	case SCSI_CAP_GEOMETRY:
		/* Default geometry */
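		/*
		 * SCSI_CAP_GEOMETRY is encoded with the number of
		 * heads in the upper 16 bits and sectors per track
		 * in the lower 16 bits; the target driver derives
		 * the cylinder count from this and the capacity.
		 */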
		if (atapi_use_static_geometry) {
			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
			break;
		}

		/* this code is currently not used */

		ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
		gtgtp = ADDR2GTGTP(ap);

		/*
		 * retrieve the current IDENTIFY PACKET DEVICE info
		 */
		if (!ata_queue_cmd(atapi_id_update, &ata_id, ata_ctlp,
		    ata_drvp, gtgtp)) {
			ADBG_TRACE(("atapi_tran_getcap geometry failed\n"));
			return (0);
		}

		/*
		 * save the new response data
		 */
		ata_idp = &ata_drvp->ad_id;
		*ata_idp = ata_id;

		switch ((ata_idp->ai_config >> 8) & 0xf) {
		case DTYPE_RODIRECT:
			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
			break;
		case DTYPE_DIRECT:
		case DTYPE_OPTICAL:
			rval = (ata_idp->ai_curheads << 16) |
			    ata_idp->ai_cursectrk;
			break;
		default:
			rval = 0;
		}
		break;
	}

	return (rval);
}



/*
 *
 * SCSA tran_init_pkt entry point
 *
 */

static struct scsi_pkt *
atapi_tran_init_pkt(
	struct scsi_address *ap,
	struct scsi_pkt	*spktp,
	struct buf	*bp,
	int		 cmdlen,
	int		 statuslen,
	int		 tgtlen,
	int		 flags,
	int		(*callback)(caddr_t),
	caddr_t		 arg)
{
	gtgt_t		*gtgtp = ADDR2GTGTP(ap);
	ata_tgt_t	*ata_tgtp = GTGTP2ATATGTP(gtgtp);
	ata_ctl_t	*ata_ctlp = ADDR2CTL(ap);
	ata_pkt_t	*ata_pktp;
	struct scsi_pkt	*new_spktp;
	ddi_dma_attr_t	*sg_attrp;
	int		 bytes;

	ADBG_TRACE(("atapi_tran_init_pkt entered\n"));


	/*
	 * Determine whether to do PCI-IDE DMA setup, start out by
	 * assuming we're not.
	 */
	sg_attrp = NULL;

	if (bp == NULL) {
		/* no data to transfer */
		goto skip_dma_setup;
	}

	if (bp->b_bcount == 0) {
		/* no data to transfer */
		goto skip_dma_setup;
	}

	if (GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_pciide_dma == ATA_DMA_OFF) {
		goto skip_dma_setup;
	}

	if (ata_dma_disabled)
		goto skip_dma_setup;


	/*
	 * The PCI-IDE DMA engine is brain-damaged and can't
	 * DMA non-aligned buffers.
	 */
	if (((bp->b_flags & B_PAGEIO) == 0) &&
	    ((uintptr_t)bp->b_un.b_addr) & PCIIDE_PRDE_ADDR_MASK) {
		/*
		 * if the virtual address isn't aligned, then the
		 * physical address also isn't aligned.
		 */
		goto skip_dma_setup;
	}

	/*
	 * It also insists that the byte count must be even.
	 */
	if (bp->b_bcount & 1) {
		/* something odd here */
		goto skip_dma_setup;
	}

	/*
	 * Huzza! We're really going to do it
	 */
	sg_attrp = &ata_tgtp->at_dma_attr;


skip_dma_setup:

	/*
	 * Call GHD packet init function
	 */

	new_spktp = ghd_tran_init_pkt_attr(&ata_ctlp->ac_ccc, ap, spktp, bp,
	    cmdlen, statuslen, tgtlen, flags,
	    callback, arg, sizeof (ata_pkt_t), sg_attrp);

	if (new_spktp == NULL)
		return (NULL);

	ata_pktp = SPKT2APKT(new_spktp);
	ata_pktp->ap_cdbp = new_spktp->pkt_cdbp;
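	/*
	 * ap_statuslen is narrowed to a uchar_t below, so clamp
	 * oversized requests to the size of a full ARQ status first.
	 */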
	if (statuslen > 255) {
		statuslen = sizeof (struct scsi_arq_status);
	}
	ata_pktp->ap_statuslen = (uchar_t)statuslen;

	/* reset data direction flags */
	if (spktp)
		ata_pktp->ap_flags &= ~(AP_READ | AP_WRITE);

	/*
	 * check for ARQ mode
	 */
	if (atapi_arq_enable == TRUE &&
	    ata_tgtp->at_arq == TRUE &&
	    ata_pktp->ap_statuslen >= sizeof (struct scsi_arq_status)) {
		ADBG_TRACE(("atapi_tran_init_pkt ARQ\n"));
		ata_pktp->ap_scbp =
		    (struct scsi_arq_status *)new_spktp->pkt_scbp;
		ata_pktp->ap_flags |= AP_ARQ_ON_ERROR;
	}

	/*
	 * fill these with zeros for ATA/ATAPI-4 compatibility
	 */
	ata_pktp->ap_sec = 0;
	ata_pktp->ap_count = 0;

	if (ata_pktp->ap_sg_cnt) {
		ASSERT(bp != NULL);
		/* determine direction to program the DMA engine later */
		if (bp->b_flags & B_READ) {
			ata_pktp->ap_flags |= AP_READ;
		} else {
			ata_pktp->ap_flags |= AP_WRITE;
		}
		ata_pktp->ap_pciide_dma = TRUE;
		ata_pktp->ap_hicyl = 0;
		ata_pktp->ap_lwcyl = 0;
		return (new_spktp);
	}

	/*
	 * Since we're not using DMA, we need to map the buffer into
	 * kernel address space
	 */

	ata_pktp->ap_pciide_dma = FALSE;
	if (bp && bp->b_bcount) {
		/*
		 * If this is a fresh request map the buffer and
		 * reset the ap_baddr pointer and the current offset
		 * and byte count.
		 *
		 * The ap_boffset is used to set the ap_v_addr ptr at
		 * the start of each I/O request.
		 *
		 * The ap_bcount is used to update ap_boffset when the
		 * target driver requests the next segment.
		 *
		 */
		if (cmdlen) {
			bp_mapin(bp);
			ata_pktp->ap_baddr = bp->b_un.b_addr;
			ata_pktp->ap_bcount = 0;
			ata_pktp->ap_boffset = 0;
		}
		ASSERT(ata_pktp->ap_baddr != NULL);

		/* determine direction for the PIO FSM */
		if (bp->b_flags & B_READ) {
			ata_pktp->ap_flags |= AP_READ;
		} else {
			ata_pktp->ap_flags |= AP_WRITE;
		}

		/*
		 * If the drive has the Single Sector bug, limit
		 * the transfer to a single sector. This assumes
		 * ATAPI CD drives always use 2k sectors.
		 */
		if (GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_flags & AD_1SECTOR) {
			size_t resid;
			size_t tmp;

			/* adjust offset based on prior request */
			ata_pktp->ap_boffset += ata_pktp->ap_bcount;

			/* compute number of bytes left to transfer */
			resid = bp->b_bcount - ata_pktp->ap_boffset;

			/* limit the transfer to 2k */
			tmp = MIN(2048, resid);
			ata_pktp->ap_bcount = tmp;

			/* tell target driver how much is left for next time */
			new_spktp->pkt_resid = resid - tmp;
		} else {
			/* do the whole request in one swell foop */
			ata_pktp->ap_bcount = bp->b_bcount;
			new_spktp->pkt_resid = 0;
		}

	} else {
		ata_pktp->ap_baddr = NULL;
		ata_pktp->ap_bcount = 0;
		ata_pktp->ap_boffset = 0;
	}

	/*
	 * determine the size of each partial data transfer
	 * to/from the drive
	 */
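	/*
	 * For PIO the byte count limit is passed to the drive in the
	 * cylinder low/high registers of the PACKET command; the drive
	 * then splits the transfer into DRQ bursts no larger than this.
	 */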
	bytes = min(ata_pktp->ap_bcount, ATAPI_MAX_BYTES_PER_DRQ);
	ata_pktp->ap_hicyl = (uchar_t)(bytes >> 8);
	ata_pktp->ap_lwcyl = (uchar_t)bytes;
	return (new_spktp);
}


/*
 * GHD ccballoc callback
 *
 *	Initialize the ata_pkt and return the ptr to the gcmd_t to GHD.
 *
 */

/* ARGSUSED */
int
atapi_ccballoc(
	gtgt_t	*gtgtp,
	gcmd_t	*gcmdp,
	int	 cmdlen,
	int	 statuslen,
	int	 tgtlen,
	int	 ccblen)

{
	ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
	ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);

	ADBG_TRACE(("atapi_ccballoc entered\n"));

	/* set the back ptr from the ata_pkt to the gcmd_t */
	ata_pktp->ap_gcmdp = gcmdp;

	/* check that the SCSI CDB isn't larger than the drive expects */

	if (cmdlen > ata_drvp->ad_cdb_len) {
		ADBG_WARN(("atapi_ccballoc: SCSI CDB too large!\n"));
		return (FALSE);
	}

	/*
	 * save the length of the SCSI CDB and calculate the CDB padding;
	 * note that for convenience, the padding is expressed in shorts.
	 */

	ata_pktp->ap_cdb_len = (uchar_t)cmdlen;
	ata_pktp->ap_cdb_pad =
	    ((unsigned)(ata_drvp->ad_cdb_len - cmdlen)) >> 1;

	/* set up callback functions */

	ata_pktp->ap_start = atapi_fsm_start;
	ata_pktp->ap_intr = atapi_fsm_intr;
	ata_pktp->ap_complete = atapi_complete;

	/* set-up for start */

	ata_pktp->ap_flags = AP_ATAPI;
	ata_pktp->ap_hd = ata_drvp->ad_drive_bits;
	ata_pktp->ap_cmd = ATC_PACKET;

	return (TRUE);
}



/*
 *
 * SCSA tran_destroy_pkt entry point
 *
 */

static void
atapi_tran_destroy_pkt(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);

	ADBG_TRACE(("atapi_tran_destroy_pkt entered\n"));

	if (gcmdp->cmd_dma_handle != NULL) {
		ghd_dmafree_attr(gcmdp);
	}

	ghd_pktfree(&ADDR2CTL(ap)->ac_ccc, ap, spktp);
}



/*
 *
 * GHD ccbfree callback function
 *
 */

/* ARGSUSED */
void
atapi_ccbfree(
	gcmd_t *gcmdp)
{
	ADBG_TRACE(("atapi_ccbfree entered\n"));

	/* nothing to do */
}


/*
 *
 * SCSA tran_dmafree entry point
 *
 */

/*ARGSUSED*/
static void
atapi_tran_dmafree(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);

	ADBG_TRACE(("atapi_tran_dmafree entered\n"));

	if (gcmdp->cmd_dma_handle != NULL) {
		ghd_dmafree_attr(gcmdp);
	}
}



/*
 *
 * SCSA tran_sync_pkt entry point
 *
 */

/*ARGSUSED*/
static void
atapi_tran_sync_pkt(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{

	ADBG_TRACE(("atapi_tran_sync_pkt entered\n"));

	if (PKTP2GCMDP(spktp)->cmd_dma_handle != NULL) {
		ghd_tran_sync_pkt(ap, spktp);
	}
}



/*
 *
 * SCSA tran_start entry point
 *
 */

/* ARGSUSED */
static int
atapi_tran_start(
	struct scsi_address *ap,
	struct scsi_pkt *spktp)
{
	ata_pkt_t *ata_pktp = SPKT2APKT(spktp);
	ata_drv_t *ata_drvp = APKT2DRV(ata_pktp);
	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
	gcmd_t	  *gcmdp = APKT2GCMD(ata_pktp);
	int	   polled = FALSE;
	int	   rc;

	ADBG_TRACE(("atapi_tran_start entered\n"));

	/*
	 * Basic initialization is performed each and every time a
	 * scsi_pkt is submitted. A single scsi_pkt may be submitted
	 * multiple times, so this routine has to be idempotent. One-time
	 * initializations don't belong here.
	 */

	/*
	 * The ap_v_addr pointer is incremented by the PIO data
	 * transfer routine as each word is transferred. Therefore, we
	 * need to reset ap_v_addr here (rather than in
	 * atapi_tran_init_pkt()) in case the target driver resubmits
	 * the same pkt multiple times (which is permitted by SCSA).
	 */
	ata_pktp->ap_v_addr = ata_pktp->ap_baddr + ata_pktp->ap_boffset;

	/* ap_resid is decremented as the data transfer progresses */
	ata_pktp->ap_resid = ata_pktp->ap_bcount;

	/* clear error flags */
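	/*
	 * (only the packet-type, data-direction and ARQ-enable bits
	 * survive a resubmission; all error and state bits are cleared)
	 */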
	ata_pktp->ap_flags &= (AP_ATAPI | AP_READ | AP_WRITE | AP_ARQ_ON_ERROR);
	spktp->pkt_reason = 0;
	spktp->pkt_state = 0;
	spktp->pkt_statistics = 0;

	/*
	 * check for polling pkt
	 */
	if (spktp->pkt_flags & FLAG_NOINTR) {
		polled = TRUE;
	}

#ifdef ___just_ignore_unsupported_flags___
	/* driver cannot accept tagged commands */

	if (spktp->pkt_flags & (FLAG_HTAG|FLAG_OTAG|FLAG_STAG)) {
		spktp->pkt_reason = CMD_TRAN_ERR;
		return (TRAN_BADPKT);
	}
#endif

	/* call common transport routine */

	rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
	    spktp->pkt_time, polled, NULL);

	/* return GHD's disposition of the pkt */

	return (rc);
}


/*
 *
 * GHD packet complete callback
 *
 */
/* ARGSUSED */
static void
atapi_complete(
	ata_drv_t *ata_drvp,
	ata_pkt_t *ata_pktp,
	int do_callback)
{
	struct scsi_pkt *spktp = APKT2SPKT(ata_pktp);
	struct scsi_status *scsi_stat = (struct scsi_status *)spktp->pkt_scbp;

	ADBG_TRACE(("atapi_complete entered\n"));
	ADBG_TRANSPORT(("atapi_complete: pkt = 0x%p\n", ata_pktp));

	/* update resid */

	spktp->pkt_resid = ata_pktp->ap_resid;

	if (ata_pktp->ap_flags & AP_SENT_CMD) {
		spktp->pkt_state |=
		    STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
	}
	if (ata_pktp->ap_flags & AP_XFERRED_DATA) {
		spktp->pkt_state |= STATE_XFERRED_DATA;
	}

	if (ata_pktp->ap_flags & AP_GOT_STATUS) {
		spktp->pkt_state |= STATE_GOT_STATUS;
	}

	/* check for fatal errors */

	if (ata_pktp->ap_flags & AP_TRAN_ERROR) {
		spktp->pkt_reason = CMD_TRAN_ERR;
	} else if (ata_pktp->ap_flags & AP_BUS_RESET) {
		spktp->pkt_reason = CMD_RESET;
		spktp->pkt_statistics |= STAT_BUS_RESET;
	} else if (ata_pktp->ap_flags & AP_DEV_RESET) {
		spktp->pkt_reason = CMD_RESET;
		spktp->pkt_statistics |= STAT_DEV_RESET;
	} else if (ata_pktp->ap_flags & AP_ABORT) {
		spktp->pkt_reason = CMD_ABORTED;
		spktp->pkt_statistics |= STAT_ABORTED;
	} else if (ata_pktp->ap_flags & AP_TIMEOUT) {
		spktp->pkt_reason = CMD_TIMEOUT;
		spktp->pkt_statistics |= STAT_TIMEOUT;
	} else {
		spktp->pkt_reason = CMD_CMPLT;
	}

	/* non-fatal errors */

	if (ata_pktp->ap_flags & AP_ERROR)
		scsi_stat->sts_chk = 1;
	else
		scsi_stat->sts_chk = 0;

	if (ata_pktp->ap_flags & AP_ARQ_ERROR) {
		ADBG_ARQ(("atapi_complete ARQ error 0x%p\n", ata_pktp));
		spktp->pkt_reason = CMD_TRAN_ERR;

	} else if (ata_pktp->ap_flags & AP_ARQ_OKAY) {
		static struct scsi_status zero_scsi_status = { 0 };
		struct scsi_arq_status *arqp;

		ADBG_ARQ(("atapi_complete ARQ okay 0x%p\n", ata_pktp));
		spktp->pkt_state |= STATE_ARQ_DONE;
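		/*
		 * The sense data was gathered by the HBA's own ARQ pkt,
		 * so synthesize a successfully completed REQUEST SENSE
		 * pkt status for the target driver to consume.
		 */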
		arqp = ata_pktp->ap_scbp;
		arqp->sts_rqpkt_reason = CMD_CMPLT;
		arqp->sts_rqpkt_state = STATE_XFERRED_DATA;
		arqp->sts_rqpkt_status = zero_scsi_status;
		arqp->sts_rqpkt_resid = 0;
		arqp->sts_rqpkt_statistics = 0;

	}

	ADBG_TRANSPORT(("atapi_complete: reason = 0x%x stats = 0x%x "
	    "sts_chk = %d\n", spktp->pkt_reason, spktp->pkt_statistics,
	    scsi_stat->sts_chk));

	if (do_callback && (spktp->pkt_comp))
		(*spktp->pkt_comp)(spktp);
}



/*
 * Update the IDENTIFY PACKET DEVICE info
 */

static int
atapi_id_update(
	ata_ctl_t	*ata_ctlp,
	ata_drv_t	*ata_drvp,
	ata_pkt_t	*ata_pktp)
{
	ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
	caddr_t		 ioaddr1 = ata_ctlp->ac_ioaddr1;
	ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
	caddr_t		 ioaddr2 = ata_ctlp->ac_ioaddr2;
	int	rc;

	/*
	 * select the appropriate drive and LUN
	 */
	ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD,
	    ata_drvp->ad_drive_bits);
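	/*
	 * The ATA/ATAPI spec calls for about a 400ns delay after
	 * writing the drive/head register before the status register
	 * contents are valid for the newly selected drive.
	 */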
	ata_nsecwait(400);

	/*
	 * make certain the drive is selected, and wait for not busy
	 */
	if (!ata_wait(io_hdl2, ioaddr2, ATS_DRDY, ATS_BSY, 5 * 1000000)) {
		ADBG_ERROR(("atapi_id_update: select failed\n"));
		ata_pktp->ap_flags |= AP_ERROR;
		return (ATA_FSM_RC_FINI);
	}

	rc = atapi_id(ata_ctlp->ac_iohandle1, ata_ctlp->ac_ioaddr1,
	    ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2,
	    (struct ata_id *)ata_pktp->ap_v_addr);

	if (!rc) {
		ata_pktp->ap_flags |= AP_ERROR;
	} else {
		ata_pktp->ap_flags |= AP_XFERRED_DATA;
	}
	return (ATA_FSM_RC_FINI);
}



/*
 * Both drives on the controller share a common pkt to do
 * ARQ processing. Therefore the pkt is only partially
 * initialized here. The rest of initialization occurs
 * just before starting the ARQ pkt when an error is
 * detected.
 */

void
atapi_init_arq(
	ata_ctl_t *ata_ctlp)
{
	ata_pkt_t *arq_pktp = ata_ctlp->ac_arq_pktp;

	arq_pktp->ap_cdbp = ata_ctlp->ac_arq_cdb;
	arq_pktp->ap_cdb_len = sizeof (ata_ctlp->ac_arq_cdb);
	arq_pktp->ap_start = atapi_fsm_start;
	arq_pktp->ap_intr = atapi_fsm_intr;
	arq_pktp->ap_complete = atapi_complete;
	arq_pktp->ap_flags = AP_ATAPI;
	arq_pktp->ap_cmd = ATC_PACKET;

	ata_ctlp->ac_arq_cdb[0] = SCMD_REQUEST_SENSE;
}