1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 
31 #include "ata_common.h"
32 #include "atapi.h"
33 
34 /* SCSA entry points */
35 
36 static int atapi_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
37     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
38 static int atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void));
39 static void atapi_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
40     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
41 static int atapi_tran_abort(struct scsi_address *ap, struct scsi_pkt *spktp);
42 static int atapi_tran_reset(struct scsi_address *ap, int level);
43 static int atapi_tran_getcap(struct scsi_address *ap, char *capstr, int whom);
44 static int atapi_tran_setcap(struct scsi_address *ap, char *capstr,
45     int value, int whom);
46 static struct scsi_pkt	*atapi_tran_init_pkt(struct scsi_address *ap,
47     struct scsi_pkt *spktp, struct buf *bp, int cmdlen, int statuslen,
48     int tgtlen, int flags, int (*callback)(caddr_t), caddr_t arg);
49 static void atapi_tran_destroy_pkt(struct scsi_address *ap,
50     struct scsi_pkt *spktp);
51 static void atapi_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *spktp);
52 static void atapi_tran_sync_pkt(struct scsi_address *ap,
53     struct scsi_pkt *spktp);
54 static int atapi_tran_start(struct scsi_address *ap, struct scsi_pkt *spktp);
55 
56 /*
57  * packet callbacks
58  */
59 static void atapi_complete(ata_drv_t *ata_drvp, ata_pkt_t *ata_pktp,
60     int do_callback);
61 static int atapi_id_update(ata_ctl_t *ata_ctlp, ata_drv_t *ata_drvp,
62     ata_pkt_t *ata_pktp);
63 
64 
65 /* external dependencies */
66 
67 char _depends_on[] = "misc/scsi";
68 
69 /*
70  * Local static data
71  */
72 
73 #if 0
74 static ddi_dma_lim_t atapi_dma_limits = {
75 	0,		/* address low				*/
76 	0xffffffffU,	/* address high				*/
77 	0,		/* counter max				*/
78 	1,		/* burstsize				*/
79 	DMA_UNIT_8,	/* minimum xfer				*/
80 	0,		/* dma speed				*/
81 	(uint_t)DMALIM_VER0,	/* version			*/
82 	0xffffffffU,	/* address register			*/
83 	0xffffffffU,	/* counter register			*/
84 	1,		/* granular				*/
85 	1,		/* scatter/gather list length		*/
86 	0xffffffffU	/* request size				*/
87 };
88 #endif
89 
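/*
 * Tunables: atapi_use_static_geometry makes SCSI_CAP_GEOMETRY report the
 * fixed ATAPI_HEADS x ATAPI_SECTORS_PER_TRK geometry instead of querying
 * the drive; atapi_arq_enable globally enables automatic request sense
 * handling (see atapi_tran_init_pkt()).
 */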
90 static	int	atapi_use_static_geometry = TRUE;
91 static	int	atapi_arq_enable = TRUE;
92 
93 
94 /*
95  *
 * Attach the ATAPI half of the driver to the SCSA framework
97  *
98  */
99 
100 int
101 atapi_attach(ata_ctl_t *ata_ctlp)
102 {
103 	dev_info_t	*dip = ata_ctlp->ac_dip;
104 	scsi_hba_tran_t *tran;
105 
	ADBG_TRACE(("atapi_attach entered\n"));
107 
108 	/* allocate transport structure */
109 
110 	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
111 
112 	if (tran == NULL) {
		ADBG_WARN(("atapi_attach: scsi_hba_tran_alloc failed\n"));
114 		goto errout;
115 	}
116 
117 	ata_ctlp->ac_atapi_tran = tran;
118 	ata_ctlp->ac_flags |= AC_SCSI_HBA_TRAN_ALLOC;
119 
120 	/* initialize transport structure */
121 
122 	tran->tran_hba_private = ata_ctlp;
123 	tran->tran_tgt_private = NULL;
124 
125 	tran->tran_tgt_init = atapi_tran_tgt_init;
126 	tran->tran_tgt_probe = atapi_tran_tgt_probe;
127 	tran->tran_tgt_free = atapi_tran_tgt_free;
128 	tran->tran_start = atapi_tran_start;
129 	tran->tran_reset = atapi_tran_reset;
130 	tran->tran_abort = atapi_tran_abort;
131 	tran->tran_getcap = atapi_tran_getcap;
132 	tran->tran_setcap = atapi_tran_setcap;
133 	tran->tran_init_pkt = atapi_tran_init_pkt;
134 	tran->tran_destroy_pkt = atapi_tran_destroy_pkt;
135 	tran->tran_dmafree = atapi_tran_dmafree;
136 	tran->tran_sync_pkt = atapi_tran_sync_pkt;
137 
138 	if (scsi_hba_attach_setup(ata_ctlp->ac_dip, &ata_pciide_dma_attr, tran,
139 		SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		ADBG_WARN(("atapi_attach: scsi_hba_attach_setup failed\n"));
141 		goto errout;
142 	}
143 
144 	ata_ctlp->ac_flags |= AC_SCSI_HBA_ATTACH;
145 
146 	return (TRUE);
147 
148 errout:
149 	atapi_detach(ata_ctlp);
150 	return (FALSE);
151 }
152 
153 
154 /*
155  *
156  * destroy the atapi sub-system
157  *
158  */
159 
160 void
161 atapi_detach(
162 	ata_ctl_t *ata_ctlp)
163 {
164 	ADBG_TRACE(("atapi_detach entered\n"));
165 
166 	if (ata_ctlp->ac_flags & AC_SCSI_HBA_ATTACH)
167 		(void) scsi_hba_detach(ata_ctlp->ac_dip);
168 
169 	if (ata_ctlp->ac_flags & AC_SCSI_HBA_TRAN_ALLOC)
170 		scsi_hba_tran_free(ata_ctlp->ac_atapi_tran);
171 }
172 
173 
174 
175 /*
176  *
177  * initialize the ATAPI drive's soft-state based on the
178  * response to IDENTIFY PACKET DEVICE command
179  *
180  */
181 
182 int
183 atapi_init_drive(
184 	ata_drv_t *ata_drvp)
185 {
186 	ADBG_TRACE(("atapi_init_drive entered\n"));
187 
188 	/* Determine ATAPI CDB size */
189 
190 	switch (ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_PKT_SZ) {
191 
192 	case ATAPI_ID_CFG_PKT_12B:
193 		ata_drvp->ad_cdb_len = 12;
194 		break;
195 	case ATAPI_ID_CFG_PKT_16B:
196 		ata_drvp->ad_cdb_len = 16;
197 		break;
198 	default:
199 		ADBG_WARN(("atapi_init_drive: bad pkt size support\n"));
200 		return (FALSE);
201 	}
202 
203 	/* determine if drive gives an intr when it wants the CDB */
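	/*
	 * Drives that report the "interrupt DRQ" protocol raise an
	 * interrupt when they are ready to accept the CDB; for any other
	 * DRQ type the host must poll for DRQ and write the CDB itself,
	 * which is what AD_NO_CDB_INTR tells the packet FSM to do.
	 */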
204 
205 	if ((ata_drvp->ad_id.ai_config & ATAPI_ID_CFG_DRQ_TYPE) !=
206 	    ATAPI_ID_CFG_DRQ_INTR)
207 		ata_drvp->ad_flags |= AD_NO_CDB_INTR;
208 
209 	return (TRUE);
210 }
211 
212 
213 /*
214  *
215  * destroy an atapi drive
216  *
217  */
218 
219 /* ARGSUSED */
220 void
221 atapi_uninit_drive(
222 	ata_drv_t *ata_drvp)
223 {
224 	ADBG_TRACE(("atapi_uninit_drive entered\n"));
225 }
226 
227 /*
228  *
229  * Issue an IDENTIFY PACKET (ATAPI) DEVICE command
230  *
231  */
232 
233 int
234 atapi_id(
235 	ddi_acc_handle_t io_hdl1,
236 	caddr_t		 ioaddr1,
237 	ddi_acc_handle_t io_hdl2,
238 	caddr_t		 ioaddr2,
239 	struct ata_id	*ata_idp)
240 {
241 	int	rc;
242 
243 	ADBG_TRACE(("atapi_id entered\n"));
244 
245 	rc = ata_id_common(ATC_ID_PACKET_DEVICE, FALSE, io_hdl1, ioaddr1,
246 		io_hdl2, ioaddr2, ata_idp);
247 
248 	if (!rc)
249 		return (FALSE);
250 
251 	if ((ata_idp->ai_config & ATAC_ATAPI_TYPE_MASK) != ATAC_ATAPI_TYPE)
252 		return (FALSE);
253 
254 	return (TRUE);
255 }
256 
257 
258 /*
259  *
260  * Check the device's register block for the ATAPI signature.
261  *
262  * Although the spec says the sector count, sector number and device/head
263  * registers are also part of the signature, for some unknown reason, this
264  * routine only checks the cyl hi and cyl low registers. I'm just
265  * guessing, but it might be because ATA and ATAPI devices return
266  * identical values in those registers and we actually rely on the
267  * IDENTIFY DEVICE and IDENTIFY PACKET DEVICE commands to recognize the
268  * device type.
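 *
 * For reference, the ATAPI signature leaves 0x14 in the cylinder low
 * register and 0xEB in the cylinder high register after reset;
 * ATAPI_SIG_LO and ATAPI_SIG_HI presumably encode those values.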
269  *
270  */
271 
272 int
273 atapi_signature(
274 	ddi_acc_handle_t io_hdl,
275 	caddr_t ioaddr)
276 {
277 	int	rc = FALSE;
278 	ADBG_TRACE(("atapi_signature entered\n"));
279 
	if (ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_HCYL) == ATAPI_SIG_HI &&
	    ddi_get8(io_hdl, (uchar_t *)ioaddr + AT_LCYL) == ATAPI_SIG_LO)
		rc = TRUE;
283 
284 	/*
285 	 * The following is a little bit of bullet proofing.
286 	 *
287 	 * When some drives are configured on a master-only bus they
288 	 * "shadow" their registers for the not-present slave drive.
289 	 * This is bogus and if you're not careful it may cause a
290 	 * master-only drive to be mistakenly recognized as both
291 	 * master and slave. By clearing the signature registers here
292 	 * I can make certain that when ata_drive_type() switches from
293 	 * the master to slave drive that I'll read back non-signature
294 	 * values regardless of whether the master-only drive does
295 	 * the "shadow" register trick. This prevents a bogus
296 	 * IDENTIFY PACKET DEVICE command from being issued which
297 	 * a really bogus master-only drive will return "shadow"
298 	 * data for.
299 	 */
300 	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_HCYL, 0);
301 	ddi_put8(io_hdl, (uchar_t *)ioaddr + AT_LCYL, 0);
302 
303 	return (rc);
304 }
305 
306 
307 /*
308  *
309  * SCSA tran_tgt_init entry point
310  *
311  */
312 
313 /* ARGSUSED */
314 static int
315 atapi_tran_tgt_init(
316 	dev_info_t	*hba_dip,
317 	dev_info_t	*tgt_dip,
318 	scsi_hba_tran_t *hba_tran,
319 	struct scsi_device *sd)
320 {
321 	gtgt_t	  *gtgtp;	/* GHD's per-target-instance structure */
322 	ata_ctl_t *ata_ctlp;
323 	ata_tgt_t *ata_tgtp;
324 	ata_drv_t *ata_drvp;
325 	struct scsi_address *ap;
326 	int	rc = DDI_SUCCESS;
327 
328 	ADBG_TRACE(("atapi_tran_tgt_init entered\n"));
329 
330 	/*
	 * Qualification of targ, lun, and ATAPI device presence
	 * has already been taken care of by ata_bus_ctl
333 	 */
334 
335 	/* store pointer to drive struct in cloned tran struct */
336 
337 	ata_ctlp = TRAN2CTL(hba_tran);
338 	ap = &sd->sd_address;
339 
340 	ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
341 
342 	/*
343 	 * Create the "atapi" property so the target driver knows
344 	 * to use the correct set of SCSI commands
345 	 */
346 	if (!ata_prop_create(tgt_dip, ata_drvp, "atapi")) {
347 		return (DDI_FAILURE);
348 	}
349 
350 	gtgtp = ghd_target_init(hba_dip, tgt_dip, &ata_ctlp->ac_ccc,
351 	    sizeof (ata_tgt_t), ata_ctlp,
352 	    ap->a_target, ap->a_lun);
353 
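	/*
	 * The HBA was attached with SCSI_HBA_TRAN_CLONE, so each target
	 * gets its own copy of the scsi_hba_tran_t and tran_tgt_private
	 * can safely carry per-target state.
	 */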
354 	/* tran_tgt_private points to gtgt_t */
355 	hba_tran->tran_tgt_private = gtgtp;
356 
357 	/* gt_tgt_private points to ata_tgt_t */
358 	ata_tgtp = GTGTP2ATATGTP(gtgtp);
359 
360 	/* initialize the per-target-instance data */
361 	ata_tgtp->at_drvp = ata_drvp;
362 	ata_tgtp->at_dma_attr = ata_pciide_dma_attr;
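	/* ac_max_transfer is in sectors; scale it to bytes */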
363 	ata_tgtp->at_dma_attr.dma_attr_maxxfer =
364 	    ata_ctlp->ac_max_transfer << SCTRSHFT;
365 
366 	return (rc);
367 }
368 
369 
370 /*
371  *
372  * SCSA tran_tgt_probe entry point
373  *
374  */
375 
376 static int
377 atapi_tran_tgt_probe(struct scsi_device *sd, int (*callback)(void))
378 {
379 	ADBG_TRACE(("atapi_tran_tgt_probe entered\n"));
380 
381 	return (scsi_hba_probe(sd, callback));
382 }
383 
384 
385 /*
386  *
387  * SCSA tran_tgt_free entry point
388  *
389  */
390 
391 /* ARGSUSED */
392 static void
393 atapi_tran_tgt_free(
394 	dev_info_t	*hba_dip,
395 	dev_info_t	*tgt_dip,
396 	scsi_hba_tran_t	*hba_tran,
397 	struct scsi_device *sd)
398 {
399 	ADBG_TRACE(("atapi_tran_tgt_free entered\n"));
400 
401 	ghd_target_free(hba_dip, tgt_dip, &TRAN2ATAP(hba_tran)->ac_ccc,
402 		TRAN2GTGTP(hba_tran));
403 	hba_tran->tran_tgt_private = NULL;
404 }
405 
406 
407 
408 /*
409  *
410  * SCSA tran_abort entry point
411  *
412  */
413 
414 /* ARGSUSED */
415 static int
416 atapi_tran_abort(
417 	struct scsi_address *ap,
418 	struct scsi_pkt *spktp)
419 {
420 	ADBG_TRACE(("atapi_tran_abort entered\n"));
421 
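	/*
	 * Per the SCSA tran_abort contract, a NULL pkt means abort
	 * everything outstanding on this target/LUN.
	 */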
422 	if (spktp) {
423 		return (ghd_tran_abort(&ADDR2CTL(ap)->ac_ccc, PKTP2GCMDP(spktp),
424 			ADDR2GTGTP(ap), NULL));
425 	}
426 
427 	return (ghd_tran_abort_lun(&ADDR2CTL(ap)->ac_ccc, ADDR2GTGTP(ap),
428 		NULL));
429 }
430 
431 
432 /*
433  *
434  * SCSA tran_reset entry point
435  *
436  */
437 
438 /* ARGSUSED */
439 static int
440 atapi_tran_reset(
441 	struct scsi_address *ap,
442 	int level)
443 {
444 	ADBG_TRACE(("atapi_tran_reset entered\n"));
445 
446 	if (level == RESET_TARGET)
447 		return (ghd_tran_reset_target(&ADDR2CTL(ap)->ac_ccc,
448 		    ADDR2GTGTP(ap), NULL));
449 	if (level == RESET_ALL)
450 		return (ghd_tran_reset_bus(&ADDR2CTL(ap)->ac_ccc,
451 		    ADDR2GTGTP(ap), NULL));
	return (FALSE);
}
455 
456 
457 /*
458  *
459  * SCSA tran_setcap entry point
460  *
461  */
462 
463 static int
464 atapi_tran_setcap(
465 	struct scsi_address *ap,
466 	char *capstr,
467 	int value,
468 	int whom)
469 {
470 	gtgt_t	  *gtgtp = ADDR2GTGTP(ap);
471 	ata_tgt_t *tgtp = GTGTP2ATATGTP(gtgtp);
472 
473 	ADBG_TRACE(("atapi_tran_setcap entered\n"));
474 
475 	switch (scsi_hba_lookup_capstr(capstr)) {
476 		case SCSI_CAP_SECTOR_SIZE:
477 			tgtp->at_dma_attr.dma_attr_granular = (uint_t)value;
478 			return (TRUE);
479 
480 		case SCSI_CAP_ARQ:
481 			if (whom) {
482 				tgtp->at_arq = value;
483 				return (TRUE);
484 			}
485 			break;
486 
487 		case SCSI_CAP_TOTAL_SECTORS:
488 			tgtp->at_total_sectors = value;
489 			return (TRUE);
490 	}
491 	return (FALSE);
492 }
493 
494 
495 /*
496  *
497  * SCSA tran_getcap entry point
498  *
499  */
500 
501 static int
502 atapi_tran_getcap(
503 	struct scsi_address *ap,
504 	char *capstr,
505 	int whom)
506 {
507 	struct ata_id	 ata_id;
508 	struct ata_id	*ata_idp;
509 	ata_ctl_t	*ata_ctlp;
510 	ata_drv_t	*ata_drvp;
511 	gtgt_t		*gtgtp;
512 	int		 rval = -1;
513 
514 	ADBG_TRACE(("atapi_tran_getcap entered\n"));
515 
516 	if (capstr == NULL || whom == 0)
517 		return (-1);
518 
519 	ata_ctlp = ADDR2CTL(ap);
520 
521 	switch (scsi_hba_lookup_capstr(capstr)) {
522 	case SCSI_CAP_ARQ:
523 		rval = TRUE;
524 		break;
525 
526 	case SCSI_CAP_INITIATOR_ID:
527 		rval = 7;
528 		break;
529 
530 	case SCSI_CAP_DMA_MAX:
531 		/* XXX - what should the real limit be?? */
532 		/* limit to 64K ??? */
533 		rval = 4096 * (ATA_DMA_NSEGS - 1);
534 		break;
535 
536 	case SCSI_CAP_GEOMETRY:
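		/*
		 * SCSA encodes geometry as (heads << 16) | sectors-per-track;
		 * the target driver derives cylinders from the capacity.
		 */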
537 		/* Default geometry */
538 		if (atapi_use_static_geometry) {
539 			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
540 			break;
541 		}
542 
543 		/* this code is currently not used */
544 
545 		ata_drvp = CTL2DRV(ata_ctlp, ap->a_target, ap->a_lun);
546 		gtgtp = ADDR2GTGTP(ap);
547 
548 		/*
549 		 * retrieve the current IDENTIFY PACKET DEVICE info
550 		 */
551 		if (!ata_queue_cmd(atapi_id_update, &ata_id, ata_ctlp,
552 			ata_drvp, gtgtp)) {
553 			ADBG_TRACE(("atapi_tran_getcap geometry failed"));
554 			return (0);
555 		}
556 
557 		/*
558 		 * save the new response data
559 		 */
560 		ata_idp = &ata_drvp->ad_id;
561 		*ata_idp = ata_id;
562 
563 		switch ((ata_idp->ai_config >> 8) & 0xf) {
564 		case DTYPE_RODIRECT:
565 			rval = ATAPI_HEADS << 16 | ATAPI_SECTORS_PER_TRK;
566 			break;
567 		case DTYPE_DIRECT:
568 		case DTYPE_OPTICAL:
569 			rval = (ata_idp->ai_curheads << 16) |
570 				ata_idp->ai_cursectrk;
571 			break;
572 		default:
573 			rval = 0;
574 		}
575 		break;
576 	}
577 
578 	return (rval);
579 }
580 
581 
582 
583 /*
584  *
585  * SCSA tran_init_pkt entry point
586  *
587  */
588 
589 static struct scsi_pkt *
590 atapi_tran_init_pkt(
591 	struct scsi_address *ap,
592 	struct scsi_pkt	*spktp,
593 	struct buf	*bp,
594 	int		 cmdlen,
595 	int		 statuslen,
596 	int		 tgtlen,
597 	int		 flags,
598 	int		(*callback)(caddr_t),
599 	caddr_t		 arg)
600 {
601 	gtgt_t		*gtgtp = ADDR2GTGTP(ap);
602 	ata_tgt_t	*ata_tgtp = GTGTP2ATATGTP(gtgtp);
603 	ata_ctl_t	*ata_ctlp = ADDR2CTL(ap);
604 	ata_pkt_t	*ata_pktp;
605 	struct scsi_pkt	*new_spktp;
606 	ddi_dma_attr_t	*sg_attrp;
607 	int		 bytes;
608 
609 	ADBG_TRACE(("atapi_tran_init_pkt entered\n"));
610 
611 
612 	/*
613 	 * Determine whether to do PCI-IDE DMA setup, start out by
614 	 * assuming we're not.
615 	 */
616 	sg_attrp = NULL;
617 
618 	if (bp == NULL) {
619 		/* no data to transfer */
620 		goto skip_dma_setup;
621 	}
622 
623 	if (bp->b_bcount == 0) {
624 		/* no data to transfer */
625 		goto skip_dma_setup;
626 	}
627 
628 	if ((GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_pciide_dma == ATA_DMA_OFF)) {
629 		goto skip_dma_setup;
630 	}
631 
632 	if (ata_dma_disabled)
633 		goto skip_dma_setup;
634 
635 
636 	/*
637 	 * The PCI-IDE DMA engine is brain-damaged and can't
638 	 * DMA non-aligned buffers.
639 	 */
640 	if (((bp->b_flags & B_PAGEIO) == 0) &&
641 	    ((uintptr_t)bp->b_un.b_addr) & PCIIDE_PRDE_ADDR_MASK) {
642 		/*
643 		 * if the virtual address isn't aligned, then the
644 		 * physical address also isn't aligned.
645 		 */
646 		goto skip_dma_setup;
647 	}
648 
649 	/*
650 	 * It also insists that the byte count must be even.
651 	 */
652 	if (bp->b_bcount & 1) {
653 		/* something odd here */
654 		goto skip_dma_setup;
655 	}
656 
657 	/*
658 	 * Huzza! We're really going to do it
659 	 */
660 	sg_attrp = &ata_tgtp->at_dma_attr;
661 
662 
663 skip_dma_setup:
664 
665 	/*
666 	 * Call GHD packet init function
667 	 */
668 
669 	new_spktp = ghd_tran_init_pkt_attr(&ata_ctlp->ac_ccc, ap, spktp, bp,
670 		cmdlen, statuslen, tgtlen, flags,
671 		callback, arg, sizeof (ata_pkt_t), sg_attrp);
672 
673 	if (new_spktp == NULL)
674 		return (NULL);
675 
676 	ata_pktp = SPKT2APKT(new_spktp);
677 	ata_pktp->ap_cdbp = new_spktp->pkt_cdbp;
678 	ata_pktp->ap_statuslen = (uchar_t)statuslen;
679 
680 	/* reset data direction flags */
681 	if (spktp)
682 		ata_pktp->ap_flags &= ~(AP_READ | AP_WRITE);
683 
684 	/*
685 	 * check for ARQ mode
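	 *
	 * When granted, AP_ARQ_ON_ERROR tells the packet FSM to run the
	 * shared REQUEST SENSE pkt set up in atapi_init_arq() on a CHECK
	 * CONDITION and report the result through ap_scbp.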
686 	 */
687 	if (atapi_arq_enable == TRUE &&
688 		ata_tgtp->at_arq == TRUE &&
689 		ata_pktp->ap_statuslen >= sizeof (struct scsi_arq_status)) {
690 		ADBG_TRACE(("atapi_tran_init_pkt ARQ\n"));
691 		ata_pktp->ap_scbp =
692 		    (struct scsi_arq_status *)new_spktp->pkt_scbp;
693 		ata_pktp->ap_flags |= AP_ARQ_ON_ERROR;
694 	}
695 
696 	/*
697 	 * fill these with zeros for ATA/ATAPI-4 compatibility
698 	 */
699 	ata_pktp->ap_sec = 0;
700 	ata_pktp->ap_count = 0;
701 
702 	if (ata_pktp->ap_sg_cnt) {
703 		ASSERT(bp != NULL);
704 		/* determine direction to program the DMA engine later */
705 		if (bp->b_flags & B_READ) {
706 			ata_pktp->ap_flags |= AP_READ;
707 		} else {
708 			ata_pktp->ap_flags |= AP_WRITE;
709 		}
710 		ata_pktp->ap_pciide_dma = TRUE;
711 		ata_pktp->ap_hicyl = 0;
712 		ata_pktp->ap_lwcyl = 0;
713 		return (new_spktp);
714 	}
715 
716 	/*
717 	 * Since we're not using DMA, we need to map the buffer into
718 	 * kernel address space
719 	 */
720 
721 	ata_pktp->ap_pciide_dma = FALSE;
722 	if (bp && bp->b_bcount) {
723 		/*
724 		 * If this is a fresh request map the buffer and
725 		 * reset the ap_baddr pointer and the current offset
726 		 * and byte count.
727 		 *
728 		 * The ap_boffset is used to set the ap_v_addr ptr at
729 		 * the start of each I/O request.
730 		 *
731 		 * The ap_bcount is used to update ap_boffset when the
732 		 * target driver requests the next segment.
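		 *
		 * Note that a fresh request arrives with a non-zero cmdlen;
		 * "next segment" requests for the same bp come in with
		 * cmdlen == 0, so the mapping and offsets established here
		 * are preserved across them.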
733 		 *
734 		 */
735 		if (cmdlen) {
736 			bp_mapin(bp);
737 			ata_pktp->ap_baddr = bp->b_un.b_addr;
738 			ata_pktp->ap_bcount = 0;
739 			ata_pktp->ap_boffset = 0;
740 		}
741 		ASSERT(ata_pktp->ap_baddr != NULL);
742 
743 		/* determine direction for the PIO FSM */
744 		if (bp->b_flags & B_READ) {
745 			ata_pktp->ap_flags |= AP_READ;
746 		} else {
747 			ata_pktp->ap_flags |= AP_WRITE;
748 		}
749 
750 		/*
751 		 * If the drive has the Single Sector bug, limit
752 		 * the transfer to a single sector. This assumes
753 		 * ATAPI CD drives always use 2k sectors.
754 		 */
755 		if (GTGTP2ATADRVP(ADDR2GTGTP(ap))->ad_flags & AD_1SECTOR) {
756 			size_t resid;
757 			size_t tmp;
758 
759 			/* adjust offset based on prior request */
760 			ata_pktp->ap_boffset += ata_pktp->ap_bcount;
761 
762 			/* compute number of bytes left to transfer */
763 			resid = bp->b_bcount - ata_pktp->ap_boffset;
764 
765 			/* limit the transfer to 2k */
766 			tmp = MIN(2048, resid);
767 			ata_pktp->ap_bcount = tmp;
768 
769 			/* tell target driver how much is left for next time */
770 			new_spktp->pkt_resid = resid - tmp;
771 		} else {
772 			/* do the whole request in one swell foop */
773 			ata_pktp->ap_bcount = bp->b_bcount;
774 			new_spktp->pkt_resid = 0;
775 		}
776 
777 	} else {
778 		ata_pktp->ap_baddr = NULL;
779 		ata_pktp->ap_bcount = 0;
780 		ata_pktp->ap_boffset = 0;
781 	}
782 
783 	/*
784 	 * determine the size of each partial data transfer
785 	 * to/from the drive
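	 *
	 * For ATAPI the limit is handed to the device in the byte count
	 * (cylinder low/high) registers, hence ap_lwcyl and ap_hicyl.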
786 	 */
787 	bytes = min(ata_pktp->ap_bcount, ATAPI_MAX_BYTES_PER_DRQ);
788 	ata_pktp->ap_hicyl = (uchar_t)(bytes >> 8);
789 	ata_pktp->ap_lwcyl = (uchar_t)bytes;
790 	return (new_spktp);
791 }
792 
793 
794 /*
795  * GHD ccballoc callback
796  *
 *	Initialize the ata_pkt and return the ptr to the gcmd_t to GHD.
798  *
799  */
800 
801 /* ARGSUSED */
802 int
803 atapi_ccballoc(
804 	gtgt_t	*gtgtp,
805 	gcmd_t	*gcmdp,
806 	int	 cmdlen,
807 	int	 statuslen,
808 	int	 tgtlen,
809 	int	 ccblen)
810 
811 {
812 	ata_drv_t *ata_drvp = GTGTP2ATADRVP(gtgtp);
813 	ata_pkt_t *ata_pktp = GCMD2APKT(gcmdp);
814 
815 	ADBG_TRACE(("atapi_ccballoc entered\n"));
816 
817 	/* set the back ptr from the ata_pkt to the gcmd_t */
818 	ata_pktp->ap_gcmdp = gcmdp;
819 
	/* check that the SCSI CDB is not larger than the drive expects */
821 
822 	if (cmdlen > ata_drvp->ad_cdb_len) {
823 		ADBG_WARN(("atapi_ccballoc: SCSI CDB too large!\n"));
824 		return (FALSE);
825 	}
826 
827 	/*
828 	 * save length of the SCSI CDB, and calculate CDB padding
829 	 * note that for convenience, padding is expressed in shorts.
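	 * The CDB is written to the drive as 16-bit words, so the pad is
	 * the number of extra words needed to fill out the drive's 12- or
	 * 16-byte packet.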
830 	 */
831 
832 	ata_pktp->ap_cdb_len = (uchar_t)cmdlen;
833 	ata_pktp->ap_cdb_pad =
834 		((unsigned)(ata_drvp->ad_cdb_len - cmdlen)) >> 1;
835 
836 	/* set up callback functions */
837 
838 	ata_pktp->ap_start = atapi_fsm_start;
839 	ata_pktp->ap_intr = atapi_fsm_intr;
840 	ata_pktp->ap_complete = atapi_complete;
841 
842 	/* set-up for start */
843 
844 	ata_pktp->ap_flags = AP_ATAPI;
845 	ata_pktp->ap_hd = ata_drvp->ad_drive_bits;
846 	ata_pktp->ap_cmd = ATC_PACKET;
847 
848 	return (TRUE);
849 }
850 
851 
852 
853 /*
854  *
855  * SCSA tran_destroy_pkt entry point
856  *
857  */
858 
859 static void
860 atapi_tran_destroy_pkt(
861 	struct scsi_address *ap,
862 	struct scsi_pkt *spktp)
863 {
864 	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);
865 
866 	ADBG_TRACE(("atapi_tran_destroy_pkt entered\n"));
867 
868 	if (gcmdp->cmd_dma_handle != NULL) {
869 		ghd_dmafree_attr(gcmdp);
870 	}
871 
872 	ghd_pktfree(&ADDR2CTL(ap)->ac_ccc, ap, spktp);
873 }
874 
875 
876 
877 /*
878  *
879  * GHD ccbfree callback function
880  *
881  */
882 
883 /* ARGSUSED */
884 void
885 atapi_ccbfree(
886 	gcmd_t *gcmdp)
887 {
888 	ADBG_TRACE(("atapi_ccbfree entered\n"));
889 
890 	/* nothing to do */
891 }
892 
893 
894 /*
895  *
896  * SCSA tran_dmafree entry point
897  *
898  */
899 
900 /*ARGSUSED*/
901 static void
902 atapi_tran_dmafree(
903 	struct scsi_address *ap,
904 	struct scsi_pkt *spktp)
905 {
906 	gcmd_t	  *gcmdp = PKTP2GCMDP(spktp);
907 
908 	ADBG_TRACE(("atapi_tran_dmafree entered\n"));
909 
910 	if (gcmdp->cmd_dma_handle != NULL) {
911 		ghd_dmafree_attr(gcmdp);
912 	}
913 }
914 
915 
916 
917 /*
918  *
919  * SCSA tran_sync_pkt entry point
920  *
921  */
922 
923 /*ARGSUSED*/
924 static void
925 atapi_tran_sync_pkt(
926 	struct scsi_address *ap,
927 	struct scsi_pkt *spktp)
928 {
929 
930 	ADBG_TRACE(("atapi_tran_sync_pkt entered\n"));
931 
932 	if (PKTP2GCMDP(spktp)->cmd_dma_handle != NULL) {
933 		ghd_tran_sync_pkt(ap, spktp);
934 	}
935 }
936 
937 
938 
939 /*
940  *
941  * SCSA tran_start entry point
942  *
943  */
944 
945 /* ARGSUSED */
946 static int
947 atapi_tran_start(
948 	struct scsi_address *ap,
949 	struct scsi_pkt *spktp)
950 {
951 	ata_pkt_t *ata_pktp = SPKT2APKT(spktp);
952 	ata_drv_t *ata_drvp = APKT2DRV(ata_pktp);
953 	ata_ctl_t *ata_ctlp = ata_drvp->ad_ctlp;
954 	gcmd_t	  *gcmdp = APKT2GCMD(ata_pktp);
955 	int	   polled = FALSE;
956 	int	   rc;
957 
958 	ADBG_TRACE(("atapi_tran_start entered\n"));
959 
960 	/*
961 	 * Basic initialization performed each and every time a
962 	 * scsi_pkt is submitted. A single scsi_pkt may be submitted
963 	 * multiple times so this routine has to be idempotent. One
964 	 * time initializations don't belong here.
965 	 */
966 
967 	/*
968 	 * The ap_v_addr pointer is incremented by the PIO data
969 	 * transfer routine as each word is transferred. Therefore, need
970 	 * to reset ap_v_addr here (rather than atapi_tran_init_pkt())
971 	 * in case the target resubmits the same pkt multiple times
972 	 * (which is permitted by SCSA).
973 	 */
974 	ata_pktp->ap_v_addr = ata_pktp->ap_baddr + ata_pktp->ap_boffset;
975 
976 	/* ap_resid is decremented as the data transfer progresses */
977 	ata_pktp->ap_resid = ata_pktp->ap_bcount;
978 
979 	/* clear error flags */
980 	ata_pktp->ap_flags &= (AP_ATAPI | AP_READ | AP_WRITE | AP_ARQ_ON_ERROR);
981 	spktp->pkt_reason = 0;
982 	spktp->pkt_state = 0;
983 	spktp->pkt_statistics = 0;
984 
985 	/*
986 	 * check for polling pkt
987 	 */
988 	if (spktp->pkt_flags & FLAG_NOINTR) {
989 		polled = TRUE;
990 	}
991 
992 #ifdef ___just_ignore_unsupported_flags___
993 	/* driver cannot accept tagged commands */
994 
995 	if (spktp->pkt_flags & (FLAG_HTAG|FLAG_OTAG|FLAG_STAG)) {
996 		spktp->pkt_reason = CMD_TRAN_ERR;
997 		return (TRAN_BADPKT);
998 	}
999 #endif
1000 
1001 	/* call common transport routine */
1002 
1003 	rc = ghd_transport(&ata_ctlp->ac_ccc, gcmdp, gcmdp->cmd_gtgtp,
1004 		spktp->pkt_time, polled, NULL);
1005 
1006 	/* see if pkt was not accepted */
1007 
1008 	if (rc != TRAN_ACCEPT)
1009 		return (rc);
1010 
1011 	return (rc);
1012 }
1013 
1014 
1015 /*
1016  *
1017  * GHD packet complete callback
1018  *
1019  */
1020 /* ARGSUSED */
1021 static void
1022 atapi_complete(
1023 	ata_drv_t *ata_drvp,
1024 	ata_pkt_t *ata_pktp,
1025 	int do_callback)
1026 {
1027 	struct scsi_pkt *spktp = APKT2SPKT(ata_pktp);
1028 	struct scsi_status *scsi_stat = (struct scsi_status *)spktp->pkt_scbp;
1029 
1030 	ADBG_TRACE(("atapi_complete entered\n"));
1031 	ADBG_TRANSPORT(("atapi_complete: pkt = 0x%p\n", ata_pktp));
1032 
1033 	/* update resid */
1034 
1035 	spktp->pkt_resid = ata_pktp->ap_resid;
1036 
1037 	if (ata_pktp->ap_flags & AP_SENT_CMD) {
1038 		spktp->pkt_state |=
1039 			STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
1040 	}
1041 	if (ata_pktp->ap_flags & AP_XFERRED_DATA) {
1042 		spktp->pkt_state |= STATE_XFERRED_DATA;
1043 	}
1044 
1045 	if (ata_pktp->ap_flags & AP_GOT_STATUS) {
1046 		spktp->pkt_state |= STATE_GOT_STATUS;
1047 	}
1048 
1049 	/* check for fatal errors */
1050 
1051 	if (ata_pktp->ap_flags & AP_TRAN_ERROR) {
1052 		spktp->pkt_reason = CMD_TRAN_ERR;
1053 	} else if (ata_pktp->ap_flags & AP_BUS_RESET) {
1054 		spktp->pkt_reason = CMD_RESET;
1055 		spktp->pkt_statistics |= STAT_BUS_RESET;
1056 	} else if (ata_pktp->ap_flags & AP_DEV_RESET) {
1057 		spktp->pkt_reason = CMD_RESET;
1058 		spktp->pkt_statistics |= STAT_DEV_RESET;
1059 	} else if (ata_pktp->ap_flags & AP_ABORT) {
1060 		spktp->pkt_reason = CMD_ABORTED;
1061 		spktp->pkt_statistics |= STAT_ABORTED;
1062 	} else if (ata_pktp->ap_flags & AP_TIMEOUT) {
1063 		spktp->pkt_reason = CMD_TIMEOUT;
1064 		spktp->pkt_statistics |= STAT_TIMEOUT;
1065 	} else {
1066 		spktp->pkt_reason = CMD_CMPLT;
1067 	}
1068 
1069 	/* non-fatal errors */
1070 
1071 	if (ata_pktp->ap_flags & AP_ERROR)
1072 		scsi_stat->sts_chk = 1;
1073 	else
1074 		scsi_stat->sts_chk = 0;
1075 
1076 	if (ata_pktp->ap_flags & AP_ARQ_ERROR) {
1077 		ADBG_ARQ(("atapi_complete ARQ error 0x%p\n", ata_pktp));
1078 		spktp->pkt_reason = CMD_TRAN_ERR;
1079 
1080 	} else if (ata_pktp->ap_flags & AP_ARQ_OKAY) {
1081 		static struct scsi_status zero_scsi_status = { 0 };
1082 		struct scsi_arq_status *arqp;
1083 
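		/*
		 * AP_ARQ_OKAY means the FSM already ran the shared REQUEST
		 * SENSE pkt successfully; fabricate a successful request
		 * sense completion here for the target driver.
		 */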
1084 		ADBG_ARQ(("atapi_complete ARQ okay 0x%p\n", ata_pktp));
1085 		spktp->pkt_state |= STATE_ARQ_DONE;
1086 		arqp = ata_pktp->ap_scbp;
1087 		arqp->sts_rqpkt_reason = CMD_CMPLT;
1088 		arqp->sts_rqpkt_state = STATE_XFERRED_DATA;
1089 		arqp->sts_rqpkt_status = zero_scsi_status;
1090 		arqp->sts_rqpkt_resid = 0;
1091 		arqp->sts_rqpkt_statistics = 0;
1092 
1093 	}
1094 
1095 	ADBG_TRANSPORT(("atapi_complete: reason = 0x%x stats = 0x%x "
1096 	    "sts_chk = %d\n", spktp->pkt_reason, spktp->pkt_statistics,
1097 	    scsi_stat->sts_chk));
1098 
1099 	if (do_callback && (spktp->pkt_comp))
1100 		(*spktp->pkt_comp)(spktp);
1101 }
1102 
1103 
1104 
1105 /*
1106  * Update the IDENTIFY PACKET DEVICE info
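 *
 * Runs as a queued command function (see the ata_queue_cmd() call in
 * atapi_tran_getcap()); ap_v_addr points at the caller's ata_id buffer.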
1107  */
1108 
1109 static int
1110 atapi_id_update(
1111 	ata_ctl_t	*ata_ctlp,
1112 	ata_drv_t	*ata_drvp,
1113 	ata_pkt_t	*ata_pktp)
1114 {
1115 	ddi_acc_handle_t io_hdl1 = ata_ctlp->ac_iohandle1;
1116 	caddr_t		 ioaddr1 = ata_ctlp->ac_ioaddr1;
1117 	ddi_acc_handle_t io_hdl2 = ata_ctlp->ac_iohandle2;
1118 	caddr_t		 ioaddr2 = ata_ctlp->ac_ioaddr2;
1119 	int	rc;
1120 
1121 	/*
1122 	 * select the appropriate drive and LUN
1123 	 */
1124 	ddi_put8(io_hdl1, (uchar_t *)ioaddr1 + AT_DRVHD,
1125 		ata_drvp->ad_drive_bits);
1126 	ata_nsecwait(400);
1127 
1128 	/*
1129 	 * make certain the drive is selected, and wait for not busy
1130 	 */
1131 	if (!ata_wait(io_hdl2, ioaddr2, ATS_DRDY, ATS_BSY, 5 * 1000000)) {
1132 		ADBG_ERROR(("atapi_id_update: select failed\n"));
1133 		ata_pktp->ap_flags |= AP_ERROR;
1134 		return (ATA_FSM_RC_FINI);
1135 	}
1136 
1137 	rc = atapi_id(ata_ctlp->ac_iohandle1, ata_ctlp->ac_ioaddr1,
1138 		ata_ctlp->ac_iohandle2, ata_ctlp->ac_ioaddr2,
1139 		(struct ata_id *)ata_pktp->ap_v_addr);
1140 
1141 	if (!rc) {
1142 		ata_pktp->ap_flags |= AP_ERROR;
1143 	} else {
1144 		ata_pktp->ap_flags |= AP_XFERRED_DATA;
1145 	}
1146 	return (ATA_FSM_RC_FINI);
1147 }
1148 
1149 
1150 
1151 /*
1152  * Both drives on the controller share a common pkt to do
1153  * ARQ processing. Therefore the pkt is only partially
1154  * initialized here. The rest of initialization occurs
1155  * just before starting the ARQ pkt when an error is
1156  * detected.
1157  */
1158 
1159 void
1160 atapi_init_arq(
1161 	ata_ctl_t *ata_ctlp)
1162 {
1163 	ata_pkt_t *arq_pktp = ata_ctlp->ac_arq_pktp;
1164 
1165 	arq_pktp->ap_cdbp = ata_ctlp->ac_arq_cdb;
1166 	arq_pktp->ap_cdb_len = sizeof (ata_ctlp->ac_arq_cdb);
1167 	arq_pktp->ap_start = atapi_fsm_start;
1168 	arq_pktp->ap_intr = atapi_fsm_intr;
1169 	arq_pktp->ap_complete = atapi_complete;
1170 	arq_pktp->ap_flags = AP_ATAPI;
1171 	arq_pktp->ap_cmd = ATC_PACKET;
1172 
1173 	ata_ctlp->ac_arq_cdb[0] = SCMD_REQUEST_SENSE;
1174 }
1175