/*
 *********************************************************************
 *	FILE NAME  : amd.c
 *	     BY    : C.L. Huang 	(ching@tekram.com.tw)
 *		     Erich Chen     (erich@tekram.com.tw)
 *	Description: Device Driver for the amd53c974 PCI Bus Master
 *		     SCSI Host adapter found on cards such as
 *		     the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.14 2007/12/23 07:00:56 pavalos Exp $
 */

/*
 *********************************************************************
 *	HISTORY:
 *
 *	REV#	DATE	NAME    	DESCRIPTION
 *	1.00  07/02/96	CLH	        First release for RELEASE-2.1.0
 *	1.01  08/20/96	CLH	        Update for RELEASE-2.1.5
 *	1.02  11/06/96	CLH	        Fixed more than 1 LUN scanning
 *	1.03  12/20/96	CLH	        Modify to support 2.2-ALPHA
 *	1.04  12/26/97	CLH	        Modify to support RELEASE-2.2.5
 *	1.05  01/01/99  ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */

/* #define AMD_DEBUG0           */
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974 	0x20201022ul
#define PCI_BASE_ADDR0	    		0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}

/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void	amd_action(struct cam_sim *sim, union ccb *ccb);
static void	amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};

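/*
 * Dispatch sketch for the tables above: amd_intr() indexes
 * amd_SCSI_phase0[] with the phase the bus just left (post-processing)
 * and then amd_SCSI_phase1[] with the phase the target is entering.
 * The index is the 3-bit MSG/C-D/I-O phase code from SCSISTATREG:
 *
 *	scsistat = amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
 *	amd->last_phase = scsistat & SCSI_PHASE_MASK;
 *	amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
 */
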
/*
 * EEProm/BIOS negotiation periods.  Entries are SCSI synchronous
 * transfer periods in 4ns units (e.g. 25 * 4ns = 100ns => 10.0MHz).
 */
u_int8_t   eeprom_period[] = {
	 25,	/* 10.0MHz */
	 32,	/*  8.0MHz */
	 38,	/*  6.6MHz */
	 44,	/*  5.7MHz */
	 50,	/*  5.0MHz */
	 63,	/*  4.0MHz */
	 83,	/*  3.0MHz */
	125	/*  2.0MHz */
};

/*
 * Mapping from chip clock setting (table index + 4) to the SCSI
 * synchronous transfer period, again in 4ns units.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/* 8.0 */
	38,	/* 6.6 */
	44,	/* 5.7 */
	50,	/* 5.0 */
	57,	/* 4.4 */
	63,	/* 4.0 */
	70,	/* 3.6 */
	76,	/* 3.3 */
	83	/* 3.0 */
};

static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	struct amd_srb *    pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}

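/*
 * Load the CDB for the current transaction into the chip's SCSI FIFO.
 * For automatic REQUEST SENSE a 6-byte CDB is built locally; the LUN
 * goes in bits 5-7 of byte 2, pre-SCSI-2 style, matching the IDENTIFY
 * message sent at selection.
 */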
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd)
{
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}

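/*
 * bus_dmamap_load() callback.  Copy the returned DMA segments into the
 * SRB's scatter/gather list, sync the map for the transfer direction,
 * reset the per-transaction bookkeeping and queue the SRB.  On error
 * (anything other than EFBIG is unexpected from bus_dmamap_load) the
 * CCB is failed and the device queue frozen so the peripheral driver
 * sees the error in order.
 */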
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 amd_srb *srb;
	union	 ccb *ccb;
	struct	 amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	crit_enter();

	/*
	 * Last chance to check whether this CCB needs to be
	 * aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    amdtimeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}

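/*
 * CAM SIM action entry point.  XPT_SCSI_IO allocates an SRB, copies in
 * the CDB, and runs the data buffer (virtual, physical, or a
 * caller-supplied S/G list) through amdexecutesrb(); the remaining
 * function codes are serviced inline.
 */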
static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc *    amd;
	u_int   target_id, target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;
	target_lun = pccb->ccb_h.target_lun;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *    pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
#ifdef	CAM_NEW_TRAN_CODE
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int     i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info = &amd->tinfo[target_id];
		struct amd_transinfo *tinfo;
#ifdef	CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		crit_enter();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->current;
		} else {
			/* default (user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->user;
		}
		spi->sync_period = tinfo->period;
		spi->sync_offset = tinfo->offset;
		crit_exit();

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
#else
		crit_enter();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;	/* no tag & disconnect */
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->current;
		} else {
			/* default (user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->user;
		}

		cts->sync_period = tinfo->period;
		cts->sync_offset = tinfo->offset;
		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		crit_exit();

		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
#endif
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
#ifdef  CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->flags & CCB_TRANS_USER_SETTINGS)
#endif
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info;
		u_int  update_type = 0;
		int    last_entry;

#ifdef  CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;
#endif
		if (IS_CURRENT_SETTINGS(cts)) {
			update_type |= AMD_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts)) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

#ifdef	CAM_NEW_TRAN_CODE
		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_offset = targ_info->goal.offset;
			else
				spi->sync_offset = targ_info->user.offset;
		}

		if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
			spi->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_period = targ_info->goal.period;
			else
				spi->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((spi->sync_period != 0)
		 && (spi->sync_period < tinfo_sync_period[0]))
			spi->sync_period = tinfo_sync_period[0];
		if (spi->sync_period > tinfo_sync_period[last_entry])
			spi->sync_period = 0;
		if (spi->sync_offset == 0)
			spi->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = spi->sync_period;
			targ_info->user.offset = spi->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = spi->sync_period;
			targ_info->goal.offset = spi->sync_offset;
		}
#else
		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_offset = targ_info->goal.offset;
			else
				cts->sync_offset = targ_info->user.offset;
		}

		if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
			cts->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_period = targ_info->goal.period;
			else
				cts->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((cts->sync_period != 0)
		 && (cts->sync_period < tinfo_sync_period[0]))
			cts->sync_period = tinfo_sync_period[0];
		if (cts->sync_period > tinfo_sync_period[last_entry])
			cts->sync_period = 0;
		if (cts->sync_offset == 0)
			cts->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = cts->sync_period;
			targ_info->user.offset = cts->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = cts->sync_period;
			targ_info->goal.offset = cts->sync_offset;
		}
#endif
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int     extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}

static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}

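/*
 * Translate the current transfer position back into a kernel virtual
 * address: start from the CCB's data pointer, skip the S/G entries
 * that have already completed, and add the partial count for the
 * current entry.  This assumes the data buffer is virtually
 * contiguous, which holds for the mapped-buffer case this is used in
 * (recovering the residual byte in amd_DataInPhase0()).
 */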
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	uintptr_t dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t   i;
	struct amd_sg *    pseg;

	pcsio = &pSRB->pccb->csio;

	dataPtr = (uintptr_t) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (uintptr_t) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (uintptr_t) xferCnt;
	return ((u_int8_t *) dataPtr);
}

static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

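/*
 * Record new synchronous transfer parameters for a target.  Depending
 * on "type" this updates the current, goal and/or user settings; for
 * an active update the chip registers are reprogrammed immediately and
 * the change is broadcast to interested peripherals via
 * AC_TRANSFER_NEG.
 */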
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;
#ifdef	CAM_NEW_TRAN_CODE
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;
#endif
			memset(&neg, 0, sizeof (neg));
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
#ifdef	CAM_NEW_TRAN_CODE
			spi->sync_period = period;
			spi->sync_offset = offset;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				  | CTS_SPI_VALID_SYNC_OFFSET;
#else
			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
#endif
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}

#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t   bval;
	u_int16_t  i;

#ifdef AMD_DEBUG0
	kprintf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif

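/*
 * Set up and fire a selection for the given SRB.  The target's cached
 * sync and control register values are loaded, the IDENTIFY byte (plus
 * tag message bytes, when tagging) is staged in the SCSI FIFO, and one
 * of three select commands is chosen: SEL_W_ATN_STOP when an SDTR must
 * be negotiated first, SEL_W_ATN2 for tagged commands, else SEL_W_ATN.
 * Returns 0 on success or 1 if an interrupt was pending on the chip,
 * in which case the caller leaves the SRB on the waiting queue.
 */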
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;
	int tagged;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	tagged = 0;
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
		tagged++;
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}

/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void
amd_intr(void   *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: amd NULL return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}

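/*
 * Post-process a data-out phase.  If the transfer counter reached zero
 * the current S/G entry completed and we advance to the next one;
 * otherwise the target changed phase early and the residual is
 * recovered from the chip: bytes still in the FIFO (CURRENTFIFOREG &
 * 0x1f) plus the 24-bit transfer counter (CTC low/mid/high).
 */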
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}

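/*
 * Post-process a data-in phase.  On an early phase change the DMA FIFO
 * is drained with a BLAST command before the residual is computed, and
 * a possible odd residual byte is pulled out of the SCSI FIFO by hand
 * and stored through phystovirt().
 */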
static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}

static int
amdparsemsg(struct amd_softc *amd)
{
	struct	amd_target_info *targ_info;
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	targ_info = &amd->tinfo[amd->cur_target];

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			kprintf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				kprintf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}

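/*
 * Map a requested sync period onto the chip's clock-rate setting.  The
 * index of the first table entry we can match maps to a clock rate of
 * index + 4; e.g. a request of 100ns (25 in 4ns units) matches entry 0
 * and yields clock rate 4.  A return of 0 means fall back to async.
 */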
static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}

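/*
 * Append a complete SDTR to the outgoing message buffer.  The result
 * is the standard 5-byte SCSI-2 extended message:
 *
 *	{ MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR, period, offset }
 *	i.e. { 0x01, 0x03, 0x01, period, offset }
 */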
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}

static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct	amd_srb *srb;
	struct	amd_target_info *targ_info;
	int	response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		kprintf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct  ccb_trans_settings neg;
#ifdef CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;
#endif

		kprintf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		memset(&neg, 0, sizeof (neg));
#ifdef	CAM_NEW_TRAN_CODE
		scsi->valid = CTS_SCSI_VALID_TQ;
#else
		neg.flags = 0;
		neg.valid = CCB_TRANS_TQ_VALID;
#endif
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				    | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		kprintf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}

#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			goto min6;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			goto min6;
		} else {
			goto min6;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK; /* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif

static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

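/*
 * Program a data transfer for the current S/G entry: the 24-bit SCSI
 * transfer counter (CTC low/mid/high) and the DMA engine's count and
 * address are loaded from SGToBeXferLen/SGPhysAddr, then a DMA
 * information-transfer command is issued.  With no S/G entries left
 * the target wants more data than we have, so pad bytes are
 * transferred instead and the overrun is flagged.
 */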
1811 static void
1812 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1813 {
1814 	struct amd_sg *    psgl;
1815 	u_int32_t   lval;
1816 
1817 	if (pSRB->SGIndex < pSRB->SGcount) {
1818 		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1819 
1820 		if (!pSRB->SGToBeXferLen) {
1821 			psgl = pSRB->pSGlist;
1822 			pSRB->SGPhysAddr = psgl->SGXPtr;
1823 			pSRB->SGToBeXferLen = psgl->SGXLen;
1824 		}
1825 		lval = pSRB->SGToBeXferLen;
1826 		amd_write8(amd, CTCREG_LOW, lval);
1827 		amd_write8(amd, CTCREG_MID, lval >> 8);
1828 		amd_write8(amd, CURTXTCNTREG, lval >> 16);
1829 
1830 		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1831 
1832 		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1833 
1834 		pSRB->SRBState = SRB_DATA_XFER;
1835 
1836 		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1837 
1838 		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1839 
1840 		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1841 	} else {		/* xfer pad */
1842 		if (pSRB->SGcount) {
1843 			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1844 			pSRB->SRBStatus |= OVER_RUN;
1845 		}
1846 		amd_write8(amd, CTCREG_LOW, 0);
1847 		amd_write8(amd, CTCREG_MID, 0);
1848 		amd_write8(amd, CURTXTCNTREG, 0);
1849 
1850 		pSRB->SRBState |= SRB_XFERPAD;
1851 		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1852 	}
1853 }
1854 
1855 static u_int
1856 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1857 {
1858 	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1859 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1860 
1861 	amdsetupcommand(amd, srb);
1862 
1863 	srb->SRBState = SRB_COMMAND;
1864 	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1865 	return (scsistat);
1866 }
1867 
1868 static u_int
1869 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1870 {
1871 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1872 	pSRB->SRBState = SRB_STATUS;
1873 	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1874 	return (scsistat);
1875 }
1876 
1877 static u_int
1878 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1879 {
1880 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1881 
1882 	if (amd->msgout_len == 0) {
1883 		amd->msgout_buf[0] = MSG_NOOP;
1884 		amd->msgout_len = 1;
1885 	}
1886 	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1887 	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1888 	return (scsistat);
1889 }
1890 
1891 static u_int
1892 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1893 {
1894 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1895 	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1896 	return (scsistat);
1897 }
1898 
1899 static u_int
1900 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1901 {
1902 	return (scsistat);
1903 }
1904 
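/*
 * Handle a bus-free (disconnect) condition.  Depending on the state
 * of the active SRB this is a selection timeout, a legal disconnect
 * (an untagged SRB is parked for the later reselection), a completed
 * abort, or a normally completed command.
 */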
1905 static void
1906 amd_Disconnect(struct amd_softc * amd)
1907 {
1908 	struct	amd_srb *srb;
1909 	int	target;
1910 	int	lun;
1911 
1912 	srb = amd->active_srb;
1913 	amd->active_srb = NULL;
1914 	amd->last_phase = SCSI_BUS_FREE;
1915 	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1916 	target = amd->cur_target;
1917 	lun = amd->cur_lun;
1918 
1919 	if (srb == NULL) {
1920 		/* Invalid reselection */
1921 		amdrunwaiting(amd);
1922 	} else if (srb->SRBState & SRB_ABORT_SENT) {
1923 		/* Clean up and complete this SRB */
1924 #if 0
1925 		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1926 			/* XXX What about "done'ing" these srbs??? */
1927 			if (pSRB->pSRBDCB == pDCB) {
1928 				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1929 				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1930 			}
1931 		}
1932 		amdrunwaiting(amd);
1933 #endif
1934 	} else {
1935 		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1936 		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1937 			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1938 			goto disc1;
1939 		} else if (srb->SRBState & SRB_DISCONNECT) {
1940 			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1941 				amd->untagged_srbs[target][lun] = srb;
1942 			amdrunwaiting(amd);
1943 		} else if (srb->SRBState & SRB_COMPLETED) {
1944 	disc1:
1945 			srb->SRBState = SRB_FREE;
1946 			SRBdone(amd, srb);
1947 		}
1948 	}
1949 	return;
1950 }
1951 
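/*
 * Handle reselection by a target: requeue any selection we were
 * attempting, decode the reselecting target and LUN from the FIFO,
 * and reload the chip with that target's negotiated parameters
 * before dropping /ACK.  A reselection with no disconnected command
 * outstanding is answered with MSG_ABORT.
 */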
1952 static void
1953 amd_Reselect(struct amd_softc *amd)
1954 {
1955 	struct amd_target_info *tinfo;
1956 	u_int16_t disc_count;
1957 
1958 	amd_clear_msg_state(amd);
1959 	if (amd->active_srb != NULL) {
1960 		/* Requeue the SRB for our attempted Selection */
1961 		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1962 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1963 		amd->active_srb = NULL;
1964 	}
1965 	/* get ID */
1966 	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1967 	amd->cur_target ^= amd->HostID_Bit;
1968 	amd->cur_target = ffs(amd->cur_target) - 1;
1969 	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1970 	tinfo = &amd->tinfo[amd->cur_target];
1971 	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1972 	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1973 	if (disc_count == 0) {
1974 		kprintf("amd%d: Unexpected reselection for target %d, "
1975 		       "Issuing Abort\n", amd->unit, amd->cur_target);
1976 		amd->msgout_buf[0] = MSG_ABORT;
1977 		amd->msgout_len = 1;
1978 		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1979 	}
1980 	if (amd->active_srb != NULL) {
1981 		amd->disc_count[amd->cur_target][amd->cur_lun]--;
1982 		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1983 	}
1984 
1985 	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1986 	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1987 	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1988 	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1989 	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1990 	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1991 	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1992 	amd->last_phase = SCSI_NOP0;
1993 }
1994 
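/*
 * Final completion of an SRB: sync and unload its DMA map, translate
 * the adapter/target status into CAM status (starting an automatic
 * REQUEST SENSE on CHECK CONDITION when allowed), then return the SRB
 * to the free list and pass the CCB back to CAM via xpt_done().
 */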
1995 static void
1996 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1997 {
1998 	u_int8_t   bval, i, status;
1999 	union ccb *pccb;
2000 	struct ccb_scsiio *pcsio;
2001 	struct amd_sg *ptr2;
2002 	u_int32_t   swlval;
2003 	u_int   target_id, target_lun;
2004 
2005 	pccb = pSRB->pccb;
2006 	pcsio = &pccb->csio;
2007 	target_id = pSRB->pccb->ccb_h.target_id;
2008 	target_lun = pSRB->pccb->ccb_h.target_lun;
2009 
2010 	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
2011 		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
2012 
2013 	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2014 		bus_dmasync_op_t op;
2015 
2016 		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2017 			op = BUS_DMASYNC_POSTREAD;
2018 		else
2019 			op = BUS_DMASYNC_POSTWRITE;
2020 		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
2021 		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
2022 	}
2023 
2024 	status = pSRB->TargetStatus;
2025 	pccb->ccb_h.status = CAM_REQ_CMP;
2027 	if (pSRB->SRBFlag & AUTO_REQSENSE) {
2028 		pSRB->SRBFlag &= ~AUTO_REQSENSE;
2029 		pSRB->AdaptStatus = 0;
2030 		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
2031 
2032 		if (status == SCSI_STATUS_CHECK_COND) {
2033 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
2034 			goto ckc_e;
2035 		}
2036 		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
2037 
2038 		pcsio->sense_resid = pcsio->sense_len
2039 				   - pSRB->TotalXferredLen;
2040 		pSRB->TotalXferredLen = pSRB->Segment1[1];
2041 		if (pSRB->TotalXferredLen) {
2042 			/* XXX residual based on the original transfer length */
2043 			pcsio->resid = pcsio->dxfer_len
2044 				     - pSRB->TotalXferredLen;
2045 			/* The resid field contains valid data;  */
2046 			/* residual bytes are flushed on completion. */
2047 		} else {
2048 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
2049 		}
2050 		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
2051 		goto ckc_e;
2052 	}
2053 	if (status) {
2054 		if (status == SCSI_STATUS_CHECK_COND) {
2055 
2056 			if ((pSRB->SGIndex < pSRB->SGcount)
2057 			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
2058 				bval = pSRB->SGcount;
2059 				swlval = pSRB->SGToBeXferLen;
2060 				ptr2 = pSRB->pSGlist;
2061 				ptr2++;
2062 				for (i = pSRB->SGIndex + 1; i < bval; i++) {
2063 					swlval += ptr2->SGXLen;
2064 					ptr2++;
2065 				}
2066 				/* XXX bytes left in current and following segments */
2067 				pcsio->resid = (u_int32_t) swlval;
2068 
2069 #ifdef	AMD_DEBUG0
2070 				kprintf("XferredLen=%8x,NotYetXferLen=%8x,",
2071 					pSRB->TotalXferredLen, swlval);
2072 #endif
2073 			}
2074 			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
2075 #ifdef	AMD_DEBUG0
2076 				kprintf("RequestSense..................\n");
2077 #endif
2078 				RequestSense(amd, pSRB);
2079 				return;
2080 			}
2081 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
2082 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2083 			goto ckc_e;
2084 		} else if (status == SCSI_STATUS_QUEUE_FULL) {
2085 			pSRB->AdaptStatus = 0;
2086 			pSRB->TargetStatus = 0;
2087 			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
2088 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2089 			goto ckc_e;
2090 		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
2091 			pSRB->AdaptStatus = H_SEL_TIMEOUT;
2092 			pSRB->TargetStatus = 0;
2093 
2094 			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
2095 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
2096 		} else if (status == SCSI_STATUS_BUSY) {
2097 #ifdef AMD_DEBUG0
2098 			kprintf("DC390: target busy at %s %d\n",
2099 			       __FILE__, __LINE__);
2100 #endif
2101 			pcsio->scsi_status = SCSI_STATUS_BUSY;
2102 			pccb->ccb_h.status = CAM_SCSI_BUSY;
2103 		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
2104 #ifdef AMD_DEBUG0
2105 			kprintf("DC390: target reserved at %s %d\n",
2106 			       __FILE__, __LINE__);
2107 #endif
2108 			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
2109 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
2110 		} else {
2111 			pSRB->AdaptStatus = 0;
2112 #ifdef AMD_DEBUG0
2113 			kprintf("DC390: driver stuffup at %s %d\n",
2114 			       __FILE__, __LINE__);
2115 #endif
2116 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2117 		}
2118 	} else {
2119 		status = pSRB->AdaptStatus;
2120 		if (status & H_OVER_UNDER_RUN) {
2121 			pSRB->TargetStatus = 0;
2122 
2123 			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
2124 		} else if (pSRB->SRBStatus & PARITY_ERROR) {
2125 #ifdef AMD_DEBUG0
2126 			kprintf("DC390: driver stuffup %s %d\n",
2127 			       __FILE__, __LINE__);
2128 #endif
2129 			/* Driver failed to perform operation	  */
2130 			pccb->ccb_h.status = CAM_UNCOR_PARITY;
2131 		} else {	/* No error */
2132 			pSRB->AdaptStatus = 0;
2133 			pSRB->TargetStatus = 0;
2134 			pcsio->resid = 0;
2135 			/* There is no error; sense data is not valid. */
2136 		}
2137 	}
2138 ckc_e:
2139 	crit_enter();
2140 	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2141 		/* CAM request not complete => freeze the device queue */
2142 		xpt_freeze_devq(pccb->ccb_h.path, 1);
2143 		pccb->ccb_h.status |= CAM_DEV_QFRZN;
2144 	}
2145 	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2146 	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
2147 	amdrunwaiting(amd);
2148 	crit_exit();
2149 	xpt_done(pccb);
2150 
2151 }
2152 
2153 static void
2154 amd_ResetSCSIBus(struct amd_softc * amd)
2155 {
2156 	crit_enter();
2157 	amd->ACBFlag |= RESET_DEV;
2158 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2159 	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2160 	crit_exit();
2161 	return;
2162 }
2163 
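/*
 * A SCSI bus reset was detected.  Give the bus roughly a second to
 * settle, then either note that our own requested reset has completed
 * or, for an externally generated reset, flush all running and waiting
 * commands back to CAM with CAM_SCSI_BUS_RESET and restart the queues.
 */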
2164 static void
2165 amd_ScsiRstDetect(struct amd_softc * amd)
2166 {
2167 	u_int32_t   wlval;
2168 
2169 #ifdef AMD_DEBUG0
2170 	kprintf("amd_ScsiRstDetect \n");
2171 #endif
2172 
2173 	wlval = 1000;
2174 	while (--wlval) {	/* delay 1 sec */
2175 		DELAY(1000);
2176 	}
2177 	crit_enter();
2178 
2179 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2180 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2181 
2182 	if (amd->ACBFlag & RESET_DEV) {
2183 		amd->ACBFlag |= RESET_DONE;
2184 	} else {
2185 		amd->ACBFlag |= RESET_DETECT;
2186 		ResetDevParam(amd);
2187 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2188 				 AMD_TAG_WILDCARD, &amd->running_srbs,
2189 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2190 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2191 				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2192 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2193 		amd->active_srb = NULL;
2194 		amd->ACBFlag = 0;
2195 		amdrunwaiting(amd);
2196 	}
2197 	crit_exit();
2198 	return;
2199 }
2200 
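/*
 * Start an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION.  The original CDB and transfer counters are parked in
 * Segment0/Segment1, a 6-byte REQUEST SENSE CDB (opcode 0x03) is built
 * in place (the 32-bit store assumes a little-endian host), and the
 * SRB is restarted with the sense buffer as its sole S/G element.
 */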
2201 static void
2202 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2203 {
2204 	union ccb *pccb;
2205 	struct ccb_scsiio *pcsio;
2206 
2207 	pccb = pSRB->pccb;
2208 	pcsio = &pccb->csio;
2209 
2210 	pSRB->SRBFlag |= AUTO_REQSENSE;
2211 	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2212 	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2213 	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2214 	pSRB->Segment1[1] = pSRB->TotalXferredLen;
2215 
2216 	pSRB->AdaptStatus = 0;
2217 	pSRB->TargetStatus = 0;
2218 
2219 	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2220 	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2221 
2222 	pSRB->pSGlist = &pSRB->Segmentx;
2223 	pSRB->SGcount = 1;
2224 	pSRB->SGIndex = 0;
2225 
2226 	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2227 	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2228 	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2229 	pSRB->ScsiCmdLen = 6;
2230 
2231 	pSRB->TotalXferredLen = 0;
2232 	pSRB->SGToBeXferLen = 0;
2233 	if (amdstart(amd, pSRB) != 0) {
2234 		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2235 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2236 	}
2237 }
2238 
2239 static void
2240 amd_InvalidCmd(struct amd_softc * amd)
2241 {
2242 	struct amd_srb *srb;
2243 
2244 	srb = amd->active_srb;
2245 	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2246 		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2247 }
2248 
2249 void
2250 amd_linkSRB(struct amd_softc *amd)
2251 {
2252 	u_int16_t count, i;
2253 	struct amd_srb *psrb;
2254 	int error;
2255 
2256 	count = amd->SRBCount;
2257 
2258 	for (i = 0; i < count; i++) {
2259 		psrb = (struct amd_srb *)&amd->SRB_array[i];
2260 		psrb->TagNumber = i;
2261 
2262 		/*
2263 		 * Create the dmamap.  This is no longer optional!
2264 		 */
2265 		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
2266 		if (error) {
2267 			device_printf(amd->dev, "Error %d creating buffer "
2268 					"dmamap!\n", error);
2269 			break;
2270 		}
2271 		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2272 	}
2273 }
2274 
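/*
 * Serial EEPROM support.  The board's configuration EEPROM (apparently
 * a 93C46-style part holding 64 16-bit words) is bit-banged through a
 * vendor-specific byte in PCI configuration space: the helpers below
 * toggle chip enable, clock address/data bits out, and sample the
 * data-out line one bit at a time.
 */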
2275 void
2276 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2277 {
2278 	if (mode == ENABLE_CE) {
2279 		*regval = 0xc0;
2280 	} else {
2281 		*regval = 0x80;
2282 	}
2283 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2284 	if (mode == DISABLE_CE) {
2285 		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2286 	}
2287 	DELAY(160);
2288 }
2289 
2290 void
2291 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2292 {
2293 	u_int bval;
2294 
2295 	bval = 0;
2296 	if (Carry) {
2297 		bval = 0x40;
2298 		*regval = 0x80;
2299 		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2300 	}
2301 	DELAY(160);
2302 	bval |= 0x80;
2303 	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2304 	DELAY(160);
2305 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2306 	DELAY(160);
2307 }
2308 
2309 static int
2310 amd_EEpromInDO(struct amd_softc *amd)
2311 {
2312 	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2313 	DELAY(160);
2314 	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2315 	DELAY(160);
2316 	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2317 		return (1);
2318 	return (0);
2319 }
2320 
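/*
 * Clock in one 16-bit data word from the EEPROM, most significant
 * bit first.
 */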
2321 static u_int16_t
2322 EEpromGetData1(struct amd_softc *amd)
2323 {
2324 	u_int	  i;
2325 	u_int	  carryFlag;
2326 	u_int16_t wval;
2327 
2328 	wval = 0;
2329 	for (i = 0; i < 16; i++) {
2330 		wval <<= 1;
2331 		carryFlag = amd_EEpromInDO(amd);
2332 		wval |= carryFlag;
2333 	}
2334 	return (wval);
2335 }
2336 
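/*
 * Shift out the start bit followed by the 8-bit EEPROM opcode and
 * address, MSB first (nine clocks in total).
 */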
2337 static void
2338 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2339 {
2340 	u_int i, j;
2341 	int carryFlag;
2342 
2343 	carryFlag = 1;
2344 	j = 0x80;
2345 	for (i = 0; i < 9; i++) {
2346 		amd_EEpromOutDI(amd, regval, carryFlag);
2347 		carryFlag = (EEpromCmd & j) ? 1 : 0;
2348 		j >>= 1;
2349 	}
2350 }
2351 
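/*
 * Read the entire EEPROM (64 16-bit words) into eepromBuf, asserting
 * and deasserting chip enable around each word.
 */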
2352 static void
2353 amd_ReadEEprom(struct amd_softc *amd)
2354 {
2355 	int	   regval;
2356 	u_int	   i;
2357 	u_int16_t *ptr;
2358 	u_int8_t   cmd;
2359 
2360 	ptr = (u_int16_t *)&amd->eepromBuf[0];
2361 	cmd = EEPROM_READ;
2362 	for (i = 0; i < 0x40; i++) {
2363 		amd_EnDisableCE(amd, ENABLE_CE, &regval);
2364 		amd_Prepare(amd, &regval, cmd);
2365 		*ptr = EEpromGetData1(amd);
2366 		ptr++;
2367 		cmd++;
2368 		amd_EnDisableCE(amd, DISABLE_CE, &regval);
2369 	}
2370 }
2371 
2372 static void
2373 amd_load_defaults(struct amd_softc *amd)
2374 {
2375 	int target;
2376 
2377 	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2378 	for (target = 0; target < MAX_SCSI_ID; target++)
2379 		amd->eepromBuf[target << 2] =
2380 		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2381 	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2382 	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2383 	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2384 }
2385 
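/*
 * Read the SEEPROM and verify the 16-bit additive checksum over its
 * contents; if it does not match EE_CHECKSUM, fall back to the
 * compiled-in defaults from amd_load_defaults().
 */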
2386 static void
2387 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2388 {
2389 	u_int16_t  wval, *ptr;
2390 	u_int8_t   i;
2391 
2392 	amd_ReadEEprom(amd);
2393 	wval = 0;
2394 	ptr = (u_int16_t *) & amd->eepromBuf[0];
2395 	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2396 		wval += *ptr;
2397 
2398 	if (wval != EE_CHECKSUM) {
2399 		if (bootverbose)
2400 			kprintf("amd%d: SEEPROM data unavailable.  "
2401 			       "Using default device parameters.\n",
2402 			       amd->unit);
2403 		amd_load_defaults(amd);
2404 	}
2405 }
2406 
2407 /*
2408  **********************************************************************
2409  * Function      : static int amd_init (device_t dev)
2410  * Purpose       : initialize the internal structures for a given SCSI host
2411  * Inputs        : dev - the device handle for this host adapter
2412  **********************************************************************
2413  */
2414 static int
2415 amd_init(device_t dev)
2416 {
2417 	struct amd_softc *amd = device_get_softc(dev);
2418 	struct resource	*iores;
2419 	int	i, rid;
2420 	u_int	bval;
2421 
2422 	rid = PCI_BASE_ADDR0;
2423 	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2424 				   RF_ACTIVE);
2425 	if (iores == NULL) {
2426 		if (bootverbose)
2427 			kprintf("amd_init: bus_alloc_resource failure!\n");
2428 		return ENXIO;
2429 	}
2430 	amd->tag = rman_get_bustag(iores);
2431 	amd->bsh = rman_get_bushandle(iores);
2432 
2433 	/* DMA tag for mapping buffers into device visible space. */
2434 	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2435 			       /*boundary*/0,
2436 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2437 			       /*highaddr*/BUS_SPACE_MAXADDR,
2438 			       /*filter*/NULL, /*filterarg*/NULL,
2439 			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2440 			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2441 			       /*flags*/BUS_DMA_ALLOCNOW,
2442 			       &amd->buffer_dmat) != 0) {
2443 		if (bootverbose)
2444 			kprintf("amd_init: bus_dma_tag_create failure!\n");
2445 		return ENXIO;
2446 	}
2447 	TAILQ_INIT(&amd->free_srbs);
2448 	TAILQ_INIT(&amd->running_srbs);
2449 	TAILQ_INIT(&amd->waiting_srbs);
2450 	amd->last_phase = SCSI_BUS_FREE;
2451 	amd->dev = dev;
2452 	amd->unit = device_get_unit(dev);
2453 	amd->SRBCount = MAX_SRB_CNT;
2454 	amd->status = 0;
2455 	amd_load_eeprom_or_defaults(amd);
2456 	amd->max_id = 7;
2457 	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2458 		amd->max_lun = 7;
2459 	} else {
2460 		amd->max_lun = 0;
2461 	}
2462 	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2463 	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2464 	amd->AdaptSCSILUN = 0;
2465 	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2466 	amd->ACBFlag = 0;
2467 	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2468 	amd_linkSRB(amd);
2469 	for (i = 0; i <= amd->max_id; i++) {
2470 
2471 		if (amd->AdaptSCSIID != i) {
2472 			struct amd_target_info *tinfo;
2473 			PEEprom prom;
2474 
2475 			tinfo = &amd->tinfo[i];
2476 			prom = (PEEprom)&amd->eepromBuf[i << 2];
2477 			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2478 				tinfo->disc_tag |= AMD_USR_DISCENB;
2479 				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2480 					tinfo->disc_tag |= AMD_USR_TAGENB;
2481 			}
2482 			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2483 				tinfo->user.period =
2484 				    eeprom_period[prom->EE_SPEED];
2485 				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2486 			}
2487 			tinfo->CtrlR1 = amd->AdaptSCSIID;
2488 			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2489 				tinfo->CtrlR1 |= PARITY_ERR_REPO;
2490 			tinfo->CtrlR3 = FAST_CLK;
2491 			tinfo->CtrlR4 = EATER_25NS;
2492 			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2493 				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2494 		}
2495 	}
2496 	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2497 	/* Conversion factor = 0 , 40MHz clock */
2498 	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2499 	/* NOP cmd - clear command register */
2500 	amd_write8(amd, SCSICMDREG, NOP_CMD);
2501 	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2502 	amd_write8(amd, CNTLREG3, FAST_CLK);
2503 	bval = EATER_25NS;
2504 	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2505 		bval |= NEGATE_REQACKDATA;
2506 	}
2507 	amd_write8(amd, CNTLREG4, bval);
2508 
2509 	/* Disable SCSI bus reset interrupt */
2510 	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2511 
2512 	return 0;
2513 }
2514 
2515 /*
2516  * attach and init a host adapter
2517  */
2518 static int
2519 amd_attach(device_t dev)
2520 {
2521 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
2522 	u_int8_t	intstat;
2523 	struct amd_softc *amd = device_get_softc(dev);
2524 	int		unit = device_get_unit(dev);
2525 	int		rid;
2526 	void		*ih;
2527 	struct resource	*irqres;
2528 
2529 	if (amd_init(dev)) {
2530 		if (bootverbose)
2531 			kprintf("amd_attach: amd_init failure!\n");
2532 		return ENXIO;
2533 	}
2534 
2535 	/* Reset Pending INT */
2536 	intstat = amd_read8(amd, INTSTATREG);
2537 
2538 	/* After setting up the adapter, map our interrupt */
2539 	rid = 0;
2540 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2541 				    RF_SHAREABLE | RF_ACTIVE);
2542 	if (irqres == NULL ||
2543 	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
2544 	) {
2545 		if (bootverbose)
2546 			kprintf("amd%d: unable to register interrupt handler!\n",
2547 			       unit);
2548 		return ENXIO;
2549 	}
2550 
2551 	/*
2552 	 * Now let the CAM generic SCSI layer find the SCSI devices on
2553 	 * the bus and start the queues from the idle loop.  Create the
2554 	 * device queue for our SIM; (MAX_START_JOB - 1) is the maximum
2555 	 * number of simultaneous transactions.
2556 	 */
2557 	devq = cam_simq_alloc(MAX_START_JOB);
2558 	if (devq == NULL) {
2559 		if (bootverbose)
2560 			kprintf("amd_attach: cam_simq_alloc failure!\n");
2561 		return ENXIO;
2562 	}
2563 
2564 	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2565 				  amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2566 				  devq);
2567 	cam_simq_release(devq);
2568 	if (amd->psim == NULL) {
2569 		if (bootverbose)
2570 			kprintf("amd_attach: cam_sim_alloc failure!\n");
2571 		return ENXIO;
2572 	}
2573 
2574 	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2575 		cam_sim_free(amd->psim);
2576 		if (bootverbose)
2577 			kprintf("amd_attach: xpt_bus_register failure!\n");
2578 		return ENXIO;
2579 	}
2580 
2581 	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2582 			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2583 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2584 		xpt_bus_deregister(cam_sim_path(amd->psim));
2585 		cam_sim_free(amd->psim);
2586 		if (bootverbose)
2587 			kprintf("amd_attach: xpt_create_path failure!\n");
2588 		return ENXIO;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
2594 static int
2595 amd_probe(device_t dev)
2596 {
2597 	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2598 		device_set_desc(dev,
2599 			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2600 		return 0;
2601 	}
2602 	return ENXIO;
2603 }
2604 
2605 static device_method_t amd_methods[] = {
2606 	/* Device interface */
2607 	DEVMETHOD(device_probe,		amd_probe),
2608 	DEVMETHOD(device_attach,	amd_attach),
2609 	{ 0, 0 }
2610 };
2611 
2612 static driver_t amd_driver = {
2613 	"amd", amd_methods, sizeof(struct amd_softc)
2614 };
2615 
2616 static devclass_t amd_devclass;
2617 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
2618