/*
 *********************************************************************
 *	FILE NAME  : amd.c
 *	     BY    : C.L. Huang 	(ching@tekram.com.tw)
 *		     Erich Chen     (erich@tekram.com.tw)
 *	Description: Device Driver for the amd53c974 PCI Bus Master
 *		     SCSI Host adapter found on cards such as
 *		     the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 * $DragonFly: src/sys/dev/disk/amd/amd.c,v 1.9 2005/10/12 17:35:50 dillon Exp $
 */

/*
 *********************************************************************
 *	HISTORY:
 *
 *	REV#	DATE	NAME    	DESCRIPTION
 *	1.00  07/02/96	CLH	        First release for RELEASE-2.1.0
 *	1.01  08/20/96	CLH	        Update for RELEASE-2.1.5
 *	1.02  11/06/96	CLH	        Fixed more than 1 LUN scanning
 *	1.03  12/20/96	CLH	        Modify to support 2.2-ALPHA
 *	1.04  12/26/97	CLH	        Modify to support RELEASE-2.2.5
 *	1.05  01/01/99  ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */

/* #define AMD_DEBUG0           */
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>

#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974 	0x20201022ul
#define PCI_BASE_ADDR0	    		0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;
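/*
 * Each phase handler is handed the softc, the currently active SRB (which
 * may be NULL during reselection processing), and the last value read from
 * SCSISTATREG; the value it returns is used to index the handler tables
 * below for the next stage.  See amd_intr() for the dispatch loop.
 */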

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}

/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void	amd_action(struct cam_sim *sim, union ccb *ccb);
static void	amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};
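/*
 * The tables above are indexed by the three phase bits (MSG, C/D, I/O)
 * latched in SCSISTATREG, which is the standard SCSI phase encoding:
 * 0 Data-Out, 1 Data-In, 2 Command, 3 Status, 6 Message-Out, 7 Message-In
 * (4 and 5 are reserved, hence the NOP entries).  The phase0 handlers
 * post-process the phase that just ended; the phase1 handlers set up the
 * phase that was just entered.
 */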

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t   eeprom_period[] = {
	 25,	/* 10.0MHz */
	 32,	/*  8.0MHz */
	 38,	/*  6.6MHz */
	 44,	/*  5.7MHz */
	 50,	/*  5.0MHz */
	 63,	/*  4.0MHz */
	 83,	/*  3.0MHz */
	125	/*  2.0MHz */
};
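/*
 * The entries above appear to be synchronous transfer periods expressed
 * in 4ns units, i.e. period_ns = entry * 4, so 25 -> 100ns -> 10.0MHz
 * and 125 -> 500ns -> 2.0MHz, matching the frequency comments.
 */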

/*
 * chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/* 8.0 */
	38,	/* 6.6 */
	44,	/* 5.7 */
	50,	/* 5.0 */
	57,	/* 4.4 */
	63,	/* 4.0 */
	70,	/* 3.6 */
	76,	/* 3.3 */
	83	/* 3.0 */
};
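/*
 * Same 4ns period encoding as eeprom_period above.  amdfindclockrate()
 * maps a requested SDTR period onto an index into this table and reports
 * the chip clock setting as index + 4, presumably the value the chip
 * expects in its synchronous period register.
 */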

static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	struct amd_srb *    pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}

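/*
 * Feed the CDB for the current transaction to the chip's SCSI FIFO.  For
 * auto request sense a REQUEST SENSE CDB is synthesized on the stack;
 * otherwise the CDB recorded in the SRB is sent verbatim.
 */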
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd)
{
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}

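/*
 * bus_dmamap_load() callback: copy the returned DMA segments into the
 * SRB's scatter/gather list, reset the per-transfer bookkeeping, and
 * queue the SRB, unless the CCB was aborted (or the mapping failed)
 * while the load was in progress.
 */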
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 amd_srb *srb;
	union	 ccb *ccb;
	struct	 amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	crit_enter();

	/*
	 * Last chance to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    amdtimeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}

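/*
 * CAM SIM action entry point: dispatch on the CCB function code.  Only
 * XPT_SCSI_IO involves real work; the remaining function codes are
 * either answered directly from the softc or rejected as invalid.
 */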
static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc *    amd;
	u_int   target_id, target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;
	target_lun = pccb->ccb_h.target_lun;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *    pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int     i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct amd_target_info *targ_info;
		struct amd_transinfo *tinfo;

		cts = &pccb->cts;
		crit_enter();
		targ_info = &amd->tinfo[target_id];
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;	/* no tag & disconnect */
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->current;
		} else {
			/* default(user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				cts->flags = CCB_TRANS_DISC_ENB;
			} else {
				cts->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			tinfo = &targ_info->user;
		}

		cts->sync_period = tinfo->period;
		cts->sync_offset = tinfo->offset;
		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		crit_exit();
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct amd_target_info *targ_info;
		u_int  update_type;
		int    last_entry;

		cts = &pccb->cts;
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AMD_TRANS_GOAL;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_offset = targ_info->goal.offset;
			else
				cts->sync_offset = targ_info->user.offset;
		}

		if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
			cts->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				cts->sync_period = targ_info->goal.period;
			else
				cts->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((cts->sync_period != 0)
		 && (cts->sync_period < tinfo_sync_period[0]))
			cts->sync_period = tinfo_sync_period[0];
		if (cts->sync_period > tinfo_sync_period[last_entry])
			cts->sync_period = 0;
		if (cts->sync_offset == 0)
			cts->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = cts->sync_period;
			targ_info->user.offset = cts->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = cts->sync_period;
			targ_info->goal.offset = cts->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int     extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}

static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}

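/*
 * Recover the kernel virtual address that lies xferCnt bytes into the
 * current scatter/gather element.  Note that the arithmetic is done in a
 * plain int, which is only safe where pointers are 32 bits wide.
 */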
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	int     dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t   i;
	struct amd_sg *    pseg;

	pcsio = &pSRB->pccb->csio;

	dataPtr = (int) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}

static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;

			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!\n");
}

#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t   bval;
	u_int16_t  i;

#ifdef AMD_DEBUG0
	printf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif

static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;
	int tagged;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	tagged = 0;
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
		tagged++;
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}

/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		printf("amd_intr: NULL softc, returning\n");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		printf("amd_intr: no interrupt pending, returning\n");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	printf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	printf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}

static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}

static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}

static int
amdparsemsg(struct amd_softc *amd)
{
	struct	amd_target_info *targ_info;
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	targ_info = &amd->tinfo[amd->cur_target];

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			printf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				printf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}

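/*
 * Map a requested SDTR period onto the first table entry that is at
 * least as long (i.e. the fastest rate we support without exceeding the
 * request) and return the corresponding chip clock setting, index + 4.
 * A request slower than every table entry yields 0, forcing async mode.
 */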
static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}

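/*
 * Append a complete five byte SDTR message to the outgoing message
 * buffer, using the standard SCSI extended message layout:
 * MSG_EXTENDED, length (MSG_EXT_SDTR_LEN == 3), MSG_EXT_SDTR, period,
 * offset.
 */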
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}

static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct	amd_srb *srb;
	struct	amd_target_info *targ_info;
	int	response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		printf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct  ccb_trans_settings neg;

		printf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		neg.flags = 0;
		neg.valid = CCB_TRANS_TQ_VALID;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				    | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}

#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			goto min6;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			goto min6;
		} else {
			goto min6;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK; /* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif

static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *    psgl;
	u_int32_t   lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}

static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}

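/*
 * Handle a bus-free transition.  Re-enable selection/reselection and
 * decide what the disconnect meant: an invalid reselection, the tail end
 * of an aborted command, a selection timeout, a legitimate disconnect
 * from a target that will reselect us later, or command completion.
 */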
static void
amd_Disconnect(struct amd_softc * amd)
{
	struct	amd_srb *srb;
	int	target;
	int	lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and done this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
	return;
}

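/*
 * Handle reselection by a target.  The chip leaves the reselecting ID
 * bitmask and the identify message in the SCSI FIFO; we clear our own ID
 * bit, convert the remaining bit to a target number with ffs(), and take
 * the LUN from the low three bits of the identify byte.
 */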
static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/* get ID */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		printf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);	/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}

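/*
 * Final post-processing for a completed SRB: sync and unload its data
 * DMA map, translate the SCSI status byte into a CAM status (starting
 * auto request sense on CHECK CONDITION when the CCB allows it), and
 * complete the CCB back to CAM.
 */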
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t   bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_sg *ptr2;
	u_int32_t   swlval;
	u_int   target_id, target_lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target_id = pSRB->pccb->ccb_h.target_id;
	target_lun = pSRB->pccb->ccb_h.target_lun;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
1866 	pccb->ccb_h.status = CAM_REQ_CMP;
1868 	if (pSRB->SRBFlag & AUTO_REQSENSE) {
1869 		pSRB->SRBFlag &= ~AUTO_REQSENSE;
1870 		pSRB->AdaptStatus = 0;
1871 		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1872 
1873 		if (status == SCSI_STATUS_CHECK_COND) {
1874 			pccb->ccb_h.status = CAM_SEL_TIMEOUT; /* XXX CAM_AUTOSENSE_FAIL? */
1875 			goto ckc_e;
1876 		}
1877 		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1878 
1879 		pcsio->sense_resid = pcsio->sense_len
1880 				   - pSRB->TotalXferredLen;
1881 		pSRB->TotalXferredLen = pSRB->Segment1[1];
1882 		if (pSRB->TotalXferredLen) {
1883 			/* Restore the original command's residual */
1884 			pcsio->resid = pcsio->dxfer_len
1885 				     - pSRB->TotalXferredLen;
1886 			/* The resid field contains valid data	 */
1887 			/* Flush resid bytes on complete        */
1888 		} else {
1889 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1890 		}
1891 		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
1892 		goto ckc_e;
1893 	}
1894 	if (status) {
1895 		if (status == SCSI_STATUS_CHECK_COND) {
1896 
1897 			if ((pSRB->SGIndex < pSRB->SGcount)
1898 			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1899 				bval = pSRB->SGcount;
1900 				swlval = pSRB->SGToBeXferLen;
1901 				ptr2 = pSRB->pSGlist;
1902 				ptr2++;
1903 				for (i = pSRB->SGIndex + 1; i < bval; i++) {
1904 					swlval += ptr2->SGXLen;
1905 					ptr2++;
1906 				}
1907 				/* Residual: rest of the current segment plus all following ones */
1908 				pcsio->resid = (u_int32_t) swlval;
1909 
1910 #ifdef	AMD_DEBUG0
1911 				printf("XferredLen=%8x,NotYetXferLen=%8x,",
1912 					pSRB->TotalXferredLen, swlval);
1913 #endif
1914 			}
1915 			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1916 #ifdef	AMD_DEBUG0
1917 				printf("RequestSense..................\n");
1918 #endif
1919 				RequestSense(amd, pSRB);
1920 				return;
1921 			}
1922 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1923 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1924 			goto ckc_e;
1925 		} else if (status == SCSI_STATUS_QUEUE_FULL) {
1926 			pSRB->AdaptStatus = 0;
1927 			pSRB->TargetStatus = 0;
1928 			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1929 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1930 			goto ckc_e;
1931 		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1932 			pSRB->AdaptStatus = H_SEL_TIMEOUT;
1933 			pSRB->TargetStatus = 0;
1934 
1935 			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1936 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1937 		} else if (status == SCSI_STATUS_BUSY) {
1938 #ifdef AMD_DEBUG0
1939 			printf("DC390: target busy at %s %d\n",
1940 			       __FILE__, __LINE__);
1941 #endif
1942 			pcsio->scsi_status = SCSI_STATUS_BUSY;
1943 			pccb->ccb_h.status = CAM_SCSI_BUSY;
1944 		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1945 #ifdef AMD_DEBUG0
1946 			printf("DC390: target reserved at %s %d\n",
1947 			       __FILE__, __LINE__);
1948 #endif
1949 			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1950 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1951 		} else {
1952 			pSRB->AdaptStatus = 0;
1953 #ifdef AMD_DEBUG0
1954 			printf("DC390: driver stuffup at %s %d\n",
1955 			       __FILE__, __LINE__);
1956 #endif
1957 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1958 		}
1959 	} else {
1960 		status = pSRB->AdaptStatus;
1961 		if (status & H_OVER_UNDER_RUN) {
1962 			pSRB->TargetStatus = 0;
1963 
1964 			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
1965 		} else if (pSRB->SRBStatus & PARITY_ERROR) {
1966 #ifdef AMD_DEBUG0
1967 			printf("DC390: driver stuffup %s %d\n",
1968 			       __FILE__, __LINE__);
1969 #endif
1970 			/* Driver failed to perform operation	  */
1971 			pccb->ccb_h.status = CAM_UNCOR_PARITY;
1972 		} else {	/* No error */
1973 			pSRB->AdaptStatus = 0;
1974 			pSRB->TargetStatus = 0;
1975 			pcsio->resid = 0;
1976 			/* there is no error, (sense is invalid)  */
1977 		}
1978 	}
1979 ckc_e:
1980 	crit_enter();
1981 	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1982 		/* CAM request not complete => freeze the device queue */
1983 		xpt_freeze_devq(pccb->ccb_h.path, 1);
1984 		pccb->ccb_h.status |= CAM_DEV_QFRZN;
1985 	}
1986 	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1987 	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1988 	amdrunwaiting(amd);
1989 	crit_exit();
1990 	xpt_done(pccb);
1991 
1992 }
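
/*
 * CAM convention applied at ckc_e above: a completion status other
 * than CAM_REQ_CMP must freeze the device queue and set CAM_DEV_QFRZN
 * so the peripheral driver controls when the queue is released.
 */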
1993 
1994 static void
1995 amd_ResetSCSIBus(struct amd_softc * amd)
1996 {
1997 	crit_enter();
1998 	amd->ACBFlag |= RESET_DEV;
1999 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2000 	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2001 	crit_exit();
2002 	return;
2003 }
2004 
2005 static void
2006 amd_ScsiRstDetect(struct amd_softc * amd)
2007 {
2008 	u_int32_t   wlval;
2009 
2010 #ifdef AMD_DEBUG0
2011 	printf("amd_ScsiRstDetect \n");
2012 #endif
2013 
2014 	wlval = 1000;
2015 	while (--wlval) {	/* delay ~1 sec (999 x 1ms) */
2016 		DELAY(1000);
2017 	}
2018 	crit_enter();
2019 
2020 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2021 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2022 
2023 	if (amd->ACBFlag & RESET_DEV) {
2024 		amd->ACBFlag |= RESET_DONE;
2025 	} else {
2026 		amd->ACBFlag |= RESET_DETECT;
2027 		ResetDevParam(amd);
2028 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2029 				 AMD_TAG_WILDCARD, &amd->running_srbs,
2030 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2031 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2032 				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2033 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2034 		amd->active_srb = NULL;
2035 		amd->ACBFlag = 0;
2036 		amdrunwaiting(amd);
2037 	}
2038 	crit_exit();
2039 	return;
2040 }
2041 
2042 static void
2043 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2044 {
2045 	union ccb *pccb;
2046 	struct ccb_scsiio *pcsio;
2047 
2048 	pccb = pSRB->pccb;
2049 	pcsio = &pccb->csio;
2050 
2051 	pSRB->SRBFlag |= AUTO_REQSENSE;
2052 	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2053 	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2054 	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2055 	pSRB->Segment1[1] = pSRB->TotalXferredLen;
2056 
2057 	pSRB->AdaptStatus = 0;
2058 	pSRB->TargetStatus = 0;
2059 
2060 	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2061 	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2062 
2063 	pSRB->pSGlist = &pSRB->Segmentx;
2064 	pSRB->SGcount = 1;
2065 	pSRB->SGIndex = 0;
2066 
2067 	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2068 	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2069 	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2070 	pSRB->ScsiCmdLen = 6;
2071 
2072 	pSRB->TotalXferredLen = 0;
2073 	pSRB->SGToBeXferLen = 0;
2074 	if (amdstart(amd, pSRB) != 0) {
2075 		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2076 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2077 	}
2078 }
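
/*
 * Layout of the REQUEST SENSE CDB built above: on this little-endian
 * host the 32-bit store of 0x00000003 sets CmdBlock[0] to 0x03 (the
 * REQUEST SENSE opcode) and zeroes CmdBlock[1..3]; CmdBlock[1] is then
 * overwritten with the LUN in its top three bits.  The 16-bit store
 * places the allocation length in CmdBlock[4] and its high byte in
 * CmdBlock[5] (the control byte), which is only correct while
 * sense_len fits in eight bits.
 */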
2079 
2080 static void
2081 amd_InvalidCmd(struct amd_softc * amd)
2082 {
2083 	struct amd_srb *srb;
2084 
2085 	srb = amd->active_srb;
2086 	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2087 		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2088 }
2089 
2090 void
2091 amd_linkSRB(struct amd_softc *amd)
2092 {
2093 	u_int16_t count, i;
2094 	struct amd_srb *psrb;
2095 	int error;
2096 
2097 	count = amd->SRBCount;
2098 
2099 	for (i = 0; i < count; i++) {
2100 		psrb = (struct amd_srb *)&amd->SRB_array[i];
2101 		psrb->TagNumber = i;
2102 
2103 		/*
2104 		 * Create the dmamap.  This is no longer optional!
2105 		 */
2106 		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
2107 		if (error) {
2108 			device_printf(amd->dev, "Error %d creating buffer "
2109 					"dmamap!\n", error);
2110 			break;
2111 		}
2112 		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2113 	}
2114 }
2115 
2116 void
2117 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2118 {
2119 	if (mode == ENABLE_CE) {
2120 		*regval = 0xc0;
2121 	} else {
2122 		*regval = 0x80;
2123 	}
2124 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2125 	if (mode == DISABLE_CE) {
2126 		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2127 	}
2128 	DELAY(160);
2129 }
2130 
2131 void
2132 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2133 {
2134 	u_int bval;
2135 
2136 	bval = 0;
2137 	if (Carry) {
2138 		bval = 0x40;
2139 		*regval = 0x80;
2140 		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2141 	}
2142 	DELAY(160);
2143 	bval |= 0x80;
2144 	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2145 	DELAY(160);
2146 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2147 	DELAY(160);
2148 }
2149 
2150 static int
2151 amd_EEpromInDO(struct amd_softc *amd)
2152 {
2153 	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2154 	DELAY(160);
2155 	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2156 	DELAY(160);
2157 	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2158 		return (1);
2159 	return (0);
2160 }
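
/*
 * Illustrative note: 0x22 is the low byte of AMD's PCI vendor ID
 * (0x1022).  The config register 0 read-back above presumably
 * reflects the SEEPROM's data-out line through the chip's bit-bang
 * interface, the unmodified vendor ID byte indicating a 1 bit.
 */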
2161 
2162 static u_int16_t
2163 EEpromGetData1(struct amd_softc *amd)
2164 {
2165 	u_int	  i;
2166 	u_int	  carryFlag;
2167 	u_int16_t wval;
2168 
2169 	wval = 0;
2170 	for (i = 0; i < 16; i++) {
2171 		wval <<= 1;
2172 		carryFlag = amd_EEpromInDO(amd);
2173 		wval |= carryFlag;
2174 	}
2175 	return (wval);
2176 }
2177 
2178 static void
2179 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2180 {
2181 	u_int i, j;
2182 	int carryFlag;
2183 
2184 	carryFlag = 1;
2185 	j = 0x80;
2186 	for (i = 0; i < 9; i++) {
2187 		amd_EEpromOutDI(amd, regval, carryFlag);
2188 		carryFlag = (EEpromCmd & j) ? 1 : 0;
2189 		j >>= 1;
2190 	}
2191 }
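
/*
 * amd_Prepare() clocks out nine bits: a start bit of 1 followed by
 * the eight bits of EEpromCmd, MSB first.  With EEPROM_READ carrying
 * the READ opcode in its high bits and amd_ReadEEprom() adding the
 * word address into the low six bits, this matches the command
 * framing of a 93C46-style 64x16 serial EEPROM (an assumption about
 * the part; the bit sequence itself is what the code implements).
 */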
2192 
2193 static void
2194 amd_ReadEEprom(struct amd_softc *amd)
2195 {
2196 	int	   regval;
2197 	u_int	   i;
2198 	u_int16_t *ptr;
2199 	u_int8_t   cmd;
2200 
2201 	ptr = (u_int16_t *)&amd->eepromBuf[0];
2202 	cmd = EEPROM_READ;
2203 	for (i = 0; i < 0x40; i++) {
2204 		amd_EnDisableCE(amd, ENABLE_CE, &regval);
2205 		amd_Prepare(amd, &regval, cmd);
2206 		*ptr = EEpromGetData1(amd);
2207 		ptr++;
2208 		cmd++;
2209 		amd_EnDisableCE(amd, DISABLE_CE, &regval);
2210 	}
2211 }
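
/*
 * Read cycle summary: for each of the 0x40 words amd_ReadEEprom()
 * raises chip enable, shifts out the start bit and READ command for
 * the next address (amd_Prepare), shifts in 16 data bits MSB first
 * (EEpromGetData1), and drops chip enable before moving on.
 */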
2212 
2213 static void
2214 amd_load_defaults(struct amd_softc *amd)
2215 {
2216 	int target;
2217 
2218 	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2219 	for (target = 0; target < MAX_SCSI_ID; target++)
2220 		amd->eepromBuf[target << 2] =
2221 		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2222 	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2223 	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2224 	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2225 }
2226 
2227 static void
2228 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2229 {
2230 	u_int16_t  wval, *ptr;
2231 	u_int8_t   i;
2232 
2233 	amd_ReadEEprom(amd);
2234 	wval = 0;
2235 	ptr = (u_int16_t *) & amd->eepromBuf[0];
2236 	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2237 		wval += *ptr;
2238 
2239 	if (wval != EE_CHECKSUM) {
2240 		if (bootverbose)
2241 			printf("amd%d: SEEPROM data unavailable.  "
2242 			       "Using default device parameters.\n",
2243 			       amd->unit);
2244 		amd_load_defaults(amd);
2245 	}
2246 }
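
/*
 * Minimal sketch of the validity test above (illustrative, not
 * compiled): the SEEPROM image is summed as little-endian 16-bit
 * words and the truncated sum must equal EE_CHECKSUM, so a valid
 * image has one word programmed to force the total.
 */
#if 0
static u_int16_t
amd_example_eeprom_sum(const u_int8_t *buf)
{
	u_int16_t sum = 0;
	u_int	  i;

	for (i = 0; i < EE_DATA_SIZE; i += 2)
		sum += *(const u_int16_t *)&buf[i];
	return (sum);		/* valid image: sum == EE_CHECKSUM */
}
#endif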
2247 
2248 /*
2249  **********************************************************************
2250  * Function      : static int amd_init (device_t dev)
2251  * Purpose       : initialize the internal structures for a given SCSI host
2252  * Inputs        : dev - this host adapter's bus device
2253  **********************************************************************
2254  */
2255 static int
2256 amd_init(device_t dev)
2257 {
2258 	struct amd_softc *amd = device_get_softc(dev);
2259 	struct resource	*iores;
2260 	int	i, rid;
2261 	u_int	bval;
2262 
2263 	rid = PCI_BASE_ADDR0;
2264 	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2265 				   RF_ACTIVE);
2266 	if (iores == NULL) {
2267 		if (bootverbose)
2268 			printf("amd_init: bus_alloc_resource failure!\n");
2269 		return ENXIO;
2270 	}
2271 	amd->tag = rman_get_bustag(iores);
2272 	amd->bsh = rman_get_bushandle(iores);
2273 
2274 	/* DMA tag for mapping buffers into device visible space. */
2275 	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2276 			       /*boundary*/0,
2277 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2278 			       /*highaddr*/BUS_SPACE_MAXADDR,
2279 			       /*filter*/NULL, /*filterarg*/NULL,
2280 			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2281 			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2282 			       /*flags*/BUS_DMA_ALLOCNOW,
2283 			       &amd->buffer_dmat) != 0) {
2284 		if (bootverbose)
2285 			printf("amd_init: bus_dma_tag_create failure!\n");
2286 		return ENXIO;
2287 	}
2288 	TAILQ_INIT(&amd->free_srbs);
2289 	TAILQ_INIT(&amd->running_srbs);
2290 	TAILQ_INIT(&amd->waiting_srbs);
2291 	amd->last_phase = SCSI_BUS_FREE;
2292 	amd->dev = dev;
2293 	amd->unit = device_get_unit(dev);
2294 	amd->SRBCount = MAX_SRB_CNT;
2295 	amd->status = 0;
2296 	amd_load_eeprom_or_defaults(amd);
2297 	amd->max_id = 7;
2298 	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2299 		amd->max_lun = 7;
2300 	} else {
2301 		amd->max_lun = 0;
2302 	}
2303 	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2304 	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2305 	amd->AdaptSCSILUN = 0;
2306 	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2307 	amd->ACBFlag = 0;
2308 	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2309 	amd_linkSRB(amd);
2310 	for (i = 0; i <= amd->max_id; i++) {
2311 
2312 		if (amd->AdaptSCSIID != i) {
2313 			struct amd_target_info *tinfo;
2314 			PEEprom prom;
2315 
2316 			tinfo = &amd->tinfo[i];
2317 			prom = (PEEprom)&amd->eepromBuf[i << 2];
2318 			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2319 				tinfo->disc_tag |= AMD_USR_DISCENB;
2320 				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2321 					tinfo->disc_tag |= AMD_USR_TAGENB;
2322 			}
2323 			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2324 				tinfo->user.period =
2325 				    eeprom_period[prom->EE_SPEED];
2326 				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2327 			}
2328 			tinfo->CtrlR1 = amd->AdaptSCSIID;
2329 			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2330 				tinfo->CtrlR1 |= PARITY_ERR_REPO;
2331 			tinfo->CtrlR3 = FAST_CLK;
2332 			tinfo->CtrlR4 = EATER_25NS;
2333 			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2334 				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2335 		}
2336 	}
2337 	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
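	/*
	 * Where 153 comes from (assuming the usual Am53c974/FAS select
	 * timeout formula, timeout = reg * 8192 * CCF / clock): at 40MHz
	 * the clock conversion factor is 8 -- encoded as 0 in CLKFACTREG
	 * below -- so 153 * 8192 * 8 / 40000000 =~ 0.25s.
	 */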
2338 	/* Conversion factor = 0 , 40MHz clock */
2339 	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2340 	/* NOP cmd - clear command register */
2341 	amd_write8(amd, SCSICMDREG, NOP_CMD);
2342 	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2343 	amd_write8(amd, CNTLREG3, FAST_CLK);
2344 	bval = EATER_25NS;
2345 	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2346 		bval |= NEGATE_REQACKDATA;
2347 	}
2348 	amd_write8(amd, CNTLREG4, bval);
2349 
2350 	/* Disable SCSI bus reset interrupt */
2351 	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2352 
2353 	return 0;
2354 }
2355 
2356 /*
2357  * attach and init a host adapter
2358  */
2359 static int
2360 amd_attach(device_t dev)
2361 {
2362 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
2363 	u_int8_t	intstat;
2364 	struct amd_softc *amd = device_get_softc(dev);
2365 	int		unit = device_get_unit(dev);
2366 	int		rid;
2367 	void		*ih;
2368 	struct resource	*irqres;
2369 
2370 	if (amd_init(dev)) {
2371 		if (bootverbose)
2372 			printf("amd_attach: amd_init failure!\n");
2373 		return ENXIO;
2374 	}
2375 
2376 	/* Reset Pending INT */
2377 	intstat = amd_read8(amd, INTSTATREG);
2378 
2379 	/* After setting up the adapter, map our interrupt */
2380 	rid = 0;
2381 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2382 				    RF_SHAREABLE | RF_ACTIVE);
2383 	if (irqres == NULL ||
2384 	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
2385 	) {
2386 		if (bootverbose)
2387 			printf("amd%d: unable to register interrupt handler!\n",
2388 			       unit);
2389 		return ENXIO;
2390 	}
2391 
2392 	/*
2393 	 * Now let the CAM generic SCSI layer find the SCSI devices on
2394 	 * the bus and kick the start queue back to the idle loop.
2395 	 * Create the device queue for our SIM(s); (MAX_START_JOB - 1)
2396 	 * is max_sim_transactions, the simultaneous transaction limit.
2397 	 */
2398 	devq = cam_simq_alloc(MAX_START_JOB);
2399 	if (devq == NULL) {
2400 		if (bootverbose)
2401 			printf("amd_attach: cam_simq_alloc failure!\n");
2402 		return ENXIO;
2403 	}
2404 
2405 	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2406 				  amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2407 				  devq);
2408 	cam_simq_release(devq);
2409 	if (amd->psim == NULL) {
2410 		if (bootverbose)
2411 			printf("amd_attach: cam_sim_alloc failure!\n");
2412 		return ENXIO;
2413 	}
2414 
2415 	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2416 		cam_sim_free(amd->psim);
2417 		if (bootverbose)
2418 			printf("amd_attach: xpt_bus_register failure!\n");
2419 		return ENXIO;
2420 	}
2421 
2422 	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2423 			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2424 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2425 		xpt_bus_deregister(cam_sim_path(amd->psim));
2426 		cam_sim_free(amd->psim);
2427 		if (bootverbose)
2428 			printf("amd_attach: xpt_create_path failure!\n");
2429 		return ENXIO;
2430 	}
2431 
2432 	return 0;
2433 }
2434 
2435 static int
2436 amd_probe(device_t dev)
2437 {
2438 	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2439 		device_set_desc(dev,
2440 			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2441 		return 0;
2442 	}
2443 	return ENXIO;
2444 }
2445 
2446 static device_method_t amd_methods[] = {
2447 	/* Device interface */
2448 	DEVMETHOD(device_probe,		amd_probe),
2449 	DEVMETHOD(device_attach,	amd_attach),
2450 	{ 0, 0 }
2451 };
2452 
2453 static driver_t amd_driver = {
2454 	"amd", amd_methods, sizeof(struct amd_softc)
2455 };
2456 
2457 static devclass_t amd_devclass;
2458 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
2459