xref: /dragonfly/sys/dev/disk/amd/amd.c (revision 984263bc)
1 /*
2  *********************************************************************
3  *	FILE NAME  : amd.c
4  *	     BY    : C.L. Huang 	(ching@tekram.com.tw)
5  *		     Erich Chen     (erich@tekram.com.tw)
6  *	Description: Device Driver for the amd53c974 PCI Bus Master
7  *		     SCSI Host adapter found on cards such as
8  *		     the Tekram DC-390(T).
9  * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *********************************************************************
33  * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
34  */
35 
36 /*
37  *********************************************************************
38  *	HISTORY:
39  *
40  *	REV#	DATE	NAME    	DESCRIPTION
41  *	1.00  07/02/96	CLH	        First release for RELEASE-2.1.0
42  *	1.01  08/20/96	CLH	        Update for RELEASE-2.1.5
43  *	1.02  11/06/96	CLH	        Fixed more than 1 LUN scanning
44  *	1.03  12/20/96	CLH	        Modify to support 2.2-ALPHA
45  *	1.04  12/26/97	CLH	        Modify to support RELEASE-2.2.5
46  *	1.05  01/01/99  ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
47  *********************************************************************
48  */
49 
50 /* #define AMD_DEBUG0           */
51 /* #define AMD_DEBUG_SCSI_PHASE */
52 
53 #include <sys/param.h>
54 
55 #include <sys/systm.h>
56 #include <sys/malloc.h>
57 #include <sys/queue.h>
58 #include <sys/buf.h>
59 #include <sys/kernel.h>
60 
61 #include <vm/vm.h>
62 #include <vm/pmap.h>
63 
64 #include <machine/bus_pio.h>
65 #include <machine/bus.h>
66 #include <machine/clock.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70 
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_sim.h>
74 #include <cam/cam_xpt_sim.h>
75 #include <cam/cam_debug.h>
76 
77 #include <cam/scsi/scsi_all.h>
78 #include <cam/scsi/scsi_message.h>
79 
80 #include <pci/pcivar.h>
81 #include <pci/pcireg.h>
82 #include <pci/amd.h>
83 
84 #define PCI_DEVICE_ID_AMD53C974 	0x20201022ul
85 #define PCI_BASE_ADDR0	    		0x10
86 
87 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
88 typedef phase_handler_t *phase_handler_func_t;
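/*
 * Phase handler convention: each handler receives the softc, the active
 * SRB (which may be NULL) and the last SCSISTATREG value, and returns a
 * status whose SCSI_PHASE_MASK bits select the next handler.  amd_intr()
 * first runs the "phase0" handler for the phase just completed, then the
 * "phase1" handler for the phase the bus is entering.
 */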
89 
90 static void amd_intr(void *vamd);
91 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
92 static phase_handler_t amd_NopPhase;
93 
94 static phase_handler_t amd_DataOutPhase0;
95 static phase_handler_t amd_DataInPhase0;
96 #define amd_CommandPhase0 amd_NopPhase
97 static phase_handler_t amd_StatusPhase0;
98 static phase_handler_t amd_MsgOutPhase0;
99 static phase_handler_t amd_MsgInPhase0;
100 static phase_handler_t amd_DataOutPhase1;
101 static phase_handler_t amd_DataInPhase1;
102 static phase_handler_t amd_CommandPhase1;
103 static phase_handler_t amd_StatusPhase1;
104 static phase_handler_t amd_MsgOutPhase1;
105 static phase_handler_t amd_MsgInPhase1;
106 
107 static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
108 static int	amdparsemsg(struct amd_softc *amd);
109 static int	amdhandlemsgreject(struct amd_softc *amd);
110 static void	amdconstructsdtr(struct amd_softc *amd,
111 				 u_int period, u_int offset);
112 static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
113 static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
114 
115 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
116 static void amd_Disconnect(struct amd_softc *amd);
117 static void amd_Reselect(struct amd_softc *amd);
118 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
119 static void amd_ScsiRstDetect(struct amd_softc *amd);
120 static void amd_ResetSCSIBus(struct amd_softc *amd);
121 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
122 static void amd_InvalidCmd(struct amd_softc *amd);
123 
124 #if 0
125 static void amd_timeout(void *arg1);
126 static void amd_reset(struct amd_softc *amd);
127 #endif
128 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
129 
130 void    amd_linkSRB(struct amd_softc *amd);
131 static int amd_init(device_t);
132 static void amd_load_defaults(struct amd_softc *amd);
133 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
134 static int amd_EEpromInDO(struct amd_softc *amd);
135 static u_int16_t EEpromGetData1(struct amd_softc *amd);
136 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
137 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
138 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
139 static void amd_ReadEEprom(struct amd_softc *amd);
140 
141 static int amd_probe(device_t);
142 static int amd_attach(device_t);
143 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
144 			     lun_id_t lun, u_int tag, struct srb_queue *queue,
145 			     cam_status status);
146 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
147 		       u_int period, u_int offset, u_int type);
148 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
149 
150 static __inline void amd_clear_msg_state(struct amd_softc *amd);
151 
152 static __inline void
153 amd_clear_msg_state(struct amd_softc *amd)
154 {
155 	amd->msgout_len = 0;
156 	amd->msgout_index = 0;
157 	amd->msgin_index = 0;
158 }
159 
160 /* CAM SIM entry points */
161 #define ccb_srb_ptr spriv_ptr0
162 #define ccb_amd_ptr spriv_ptr1
163 static void	amd_action(struct cam_sim *sim, union ccb *ccb);
164 static void	amd_poll(struct cam_sim *sim);
165 
166 /*
167  * State engine function tables indexed by SCSI phase number
168  */
169 phase_handler_func_t amd_SCSI_phase0[] = {
170 	amd_DataOutPhase0,
171 	amd_DataInPhase0,
172 	amd_CommandPhase0,
173 	amd_StatusPhase0,
174 	amd_NopPhase,
175 	amd_NopPhase,
176 	amd_MsgOutPhase0,
177 	amd_MsgInPhase0
178 };
179 
180 phase_handler_func_t amd_SCSI_phase1[] = {
181 	amd_DataOutPhase1,
182 	amd_DataInPhase1,
183 	amd_CommandPhase1,
184 	amd_StatusPhase1,
185 	amd_NopPhase,
186 	amd_NopPhase,
187 	amd_MsgOutPhase1,
188 	amd_MsgInPhase1
189 };
190 
191 /*
192  * EEProm/BIOS negotiation periods
193  */
194 u_int8_t   eeprom_period[] = {
195 	 25,	/* 10.0MHz */
196 	 32,	/*  8.0MHz */
197 	 38,	/*  6.6MHz */
198 	 44,	/*  5.7MHz */
199 	 50,	/*  5.0MHz */
200 	 63,	/*  4.0MHz */
201 	 83,	/*  3.0MHz */
202 	125	/*  2.0MHz */
203 };
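/*
 * The table entries are SCSI SDTR periods in 4ns units; the rate labels
 * above follow from 1000 / (4 * entry) MHz.  For example, 25 * 4ns gives
 * a 100ns synchronous period, i.e. 10.0MHz.
 */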
204 
205 /*
206  * chip clock setting to SCSI specified sync parameter table.
207  */
208 u_int8_t tinfo_sync_period[] = {
209 	25,	/* 10.0 */
210 	32,	/* 8.0 */
211 	38,	/* 6.6 */
212 	44,	/* 5.7 */
213 	50,	/* 5.0 */
214 	57,	/* 4.4 */
215 	63,	/* 4.0 */
216 	70,	/* 3.6 */
217 	76,	/* 3.3 */
218 	83	/* 3.0 */
219 };
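/*
 * amdfindclockrate() maps a requested period to the smallest table entry
 * that can hold it and returns (index + 4) as the value programmed into
 * SYNCPERIOREG; amdsetsync() treats values above 7 (entries of 5.0MHz
 * and slower) as needing the 25ns clock eater rather than fast-SCSI
 * timings.
 */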
220 
221 static __inline struct amd_srb *
222 amdgetsrb(struct amd_softc * amd)
223 {
224 	int     intflag;
225 	struct amd_srb *    pSRB;
226 
227 	intflag = splcam();
228 	pSRB = TAILQ_FIRST(&amd->free_srbs);
229 	if (pSRB)
230 		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
231 	splx(intflag);
232 	return (pSRB);
233 }
234 
235 static void
236 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
237 {
238 	struct scsi_request_sense sense_cmd;
239 	struct ccb_scsiio *csio;
240 	u_int8_t *cdb;
241 	u_int cdb_len;
242 
243 	csio = &srb->pccb->csio;
244 
245 	if (srb->SRBFlag & AUTO_REQSENSE) {
246 		sense_cmd.opcode = REQUEST_SENSE;
247 		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
248 		sense_cmd.unused[0] = 0;
249 		sense_cmd.unused[1] = 0;
250 		sense_cmd.length = csio->sense_len;
251 		sense_cmd.control = 0;
252 		cdb = &sense_cmd.opcode;
253 		cdb_len = sizeof(sense_cmd);
254 	} else {
255 		cdb = &srb->CmdBlock[0];
256 		cdb_len = srb->ScsiCmdLen;
257 	}
258 	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
259 }
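/*
 * For reference, the auto-sense path above emits a standard 6-byte
 * REQUEST SENSE CDB.  With LUN 2 and an 18-byte sense buffer, for
 * example, it would be { 0x03, 0x40, 0x00, 0x00, 0x12, 0x00 }: opcode,
 * LUN in the top three bits of byte 2, two unused bytes, allocation
 * length, control.
 */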
260 
261 /*
262  * Attempt to start a waiting transaction.  Interrupts must be disabled
263  * upon entry to this function.
264  */
265 static void
266 amdrunwaiting(struct amd_softc *amd) {
267 	struct amd_srb *srb;
268 
269 	if (amd->last_phase != SCSI_BUS_FREE)
270 		return;
271 
272 	srb = TAILQ_FIRST(&amd->waiting_srbs);
273 	if (srb == NULL)
274 		return;
275 
276 	if (amdstart(amd, srb) == 0) {
277 		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
278 		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
279 	}
280 }
281 
282 static void
283 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
284 {
285 	struct	 amd_srb *srb;
286 	union	 ccb *ccb;
287 	struct	 amd_softc *amd;
288 	int	 s;
289 
290 	srb = (struct amd_srb *)arg;
291 	ccb = srb->pccb;
292 	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
293 
294 	if (error != 0) {
295 		if (error != EFBIG)
			printf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
298 		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
299 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
300 			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
301 		}
302 		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
303 		xpt_done(ccb);
304 		return;
305 	}
306 
307 	if (nseg != 0) {
308 		struct amd_sg *sg;
309 		bus_dma_segment_t *end_seg;
310 		bus_dmasync_op_t op;
311 
312 		end_seg = dm_segs + nseg;
313 
314 		/* Copy the segments into our SG list */
315 		srb->pSGlist = &srb->SGsegment[0];
316 		sg = srb->pSGlist;
317 		while (dm_segs < end_seg) {
318 			sg->SGXLen = dm_segs->ds_len;
319 			sg->SGXPtr = dm_segs->ds_addr;
320 			sg++;
321 			dm_segs++;
322 		}
323 
324 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
325 			op = BUS_DMASYNC_PREREAD;
326 		else
327 			op = BUS_DMASYNC_PREWRITE;
328 
329 		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
330 
331 	}
332 	srb->SGcount = nseg;
333 	srb->SGIndex = 0;
334 	srb->AdaptStatus = 0;
335 	srb->TargetStatus = 0;
336 	srb->MsgCnt = 0;
337 	srb->SRBStatus = 0;
338 	srb->SRBFlag = 0;
339 	srb->SRBState = 0;
340 	srb->TotalXferredLen = 0;
341 	srb->SGPhysAddr = 0;
342 	srb->SGToBeXferLen = 0;
343 	srb->EndMessage = 0;
344 
345 	s = splcam();
346 
347 	/*
348 	 * Last time we need to check if this CCB needs to
349 	 * be aborted.
350 	 */
351 	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
352 		if (nseg != 0)
353 			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
354 		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
355 		xpt_done(ccb);
356 		splx(s);
357 		return;
358 	}
359 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
360 #if 0
361 	/* XXX Need a timeout handler */
362 	ccb->ccb_h.timeout_ch =
363 	    timeout(amdtimeout, (caddr_t)srb,
364 		    (ccb->ccb_h.timeout * hz) / 1000);
365 #endif
366 	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
367 	amdrunwaiting(amd);
368 	splx(s);
369 }
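/*
 * Note that amdexecutesrb() may be called back synchronously from
 * bus_dmamap_load() or deferred (EINPROGRESS) until the mapping is
 * available, which is why amd_action() freezes the SIM queue in the
 * deferred case to preserve ordering.
 */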
370 
371 static void
372 amd_action(struct cam_sim * psim, union ccb * pccb)
373 {
374 	struct amd_softc *    amd;
375 	u_int   target_id, target_lun;
376 
377 	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
378 
379 	amd = (struct amd_softc *) cam_sim_softc(psim);
380 	target_id = pccb->ccb_h.target_id;
381 	target_lun = pccb->ccb_h.target_lun;
382 
383 	switch (pccb->ccb_h.func_code) {
384 	case XPT_SCSI_IO:
385 	{
386 		struct amd_srb *    pSRB;
387 		struct ccb_scsiio *pcsio;
388 
389 		pcsio = &pccb->csio;
390 
391 		/*
392 		 * Assign an SRB and connect it with this ccb.
393 		 */
394 		pSRB = amdgetsrb(amd);
395 
396 		if (!pSRB) {
397 			/* Freeze SIMQ */
398 			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
399 			xpt_done(pccb);
400 			return;
401 		}
402 		pSRB->pccb = pccb;
403 		pccb->ccb_h.ccb_srb_ptr = pSRB;
404 		pccb->ccb_h.ccb_amd_ptr = amd;
405 		pSRB->ScsiCmdLen = pcsio->cdb_len;
406 		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
407 		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
408 			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
409 				/*
410 				 * We've been given a pointer
411 				 * to a single buffer.
412 				 */
413 				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
414 					int s;
415 					int error;
416 
417 					s = splsoftvm();
418 					error =
419 					    bus_dmamap_load(amd->buffer_dmat,
420 							    pSRB->dmamap,
421 							    pcsio->data_ptr,
422 							    pcsio->dxfer_len,
423 							    amdexecutesrb,
424 							    pSRB, /*flags*/0);
425 					if (error == EINPROGRESS) {
426 						/*
427 						 * So as to maintain
428 						 * ordering, freeze the
429 						 * controller queue
430 						 * until our mapping is
431 						 * returned.
432 						 */
433 						xpt_freeze_simq(amd->psim, 1);
434 						pccb->ccb_h.status |=
435 						    CAM_RELEASE_SIMQ;
436 					}
437 					splx(s);
438 				} else {
439 					struct bus_dma_segment seg;
440 
441 					/* Pointer to physical buffer */
442 					seg.ds_addr =
443 					    (bus_addr_t)pcsio->data_ptr;
444 					seg.ds_len = pcsio->dxfer_len;
445 					amdexecutesrb(pSRB, &seg, 1, 0);
446 				}
447 			} else {
448 				struct bus_dma_segment *segs;
449 
450 				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
451 				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
452 					TAILQ_INSERT_HEAD(&amd->free_srbs,
453 							  pSRB, links);
454 					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
455 					xpt_done(pccb);
456 					return;
457 				}
458 
459 				/* Just use the segments provided */
460 				segs =
461 				    (struct bus_dma_segment *)pcsio->data_ptr;
462 				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
463 			}
464 		} else
465 			amdexecutesrb(pSRB, NULL, 0, 0);
466 		break;
467 	}
468 	case XPT_PATH_INQ:
469 	{
470 		struct ccb_pathinq *cpi = &pccb->cpi;
471 
472 		cpi->version_num = 1;
473 		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
474 		cpi->target_sprt = 0;
475 		cpi->hba_misc = 0;
476 		cpi->hba_eng_cnt = 0;
477 		cpi->max_target = 7;
478 		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
479 		cpi->initiator_id = amd->AdaptSCSIID;
480 		cpi->bus_id = cam_sim_bus(psim);
481 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
482 		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
483 		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
484 		cpi->unit_number = cam_sim_unit(psim);
485 		cpi->ccb_h.status = CAM_REQ_CMP;
486 		xpt_done(pccb);
487 		break;
488 	}
489 	case XPT_ABORT:
490 		pccb->ccb_h.status = CAM_REQ_INVALID;
491 		xpt_done(pccb);
492 		break;
493 	case XPT_RESET_BUS:
494 	{
495 
496 		int     i;
497 
498 		amd_ResetSCSIBus(amd);
499 		amd->ACBFlag = 0;
500 
501 		for (i = 0; i < 500; i++) {
502 			DELAY(1000);	/* Wait until our interrupt
503 					 * handler sees it */
504 		}
505 
506 		pccb->ccb_h.status = CAM_REQ_CMP;
507 		xpt_done(pccb);
508 		break;
509 	}
510 	case XPT_RESET_DEV:
511 		pccb->ccb_h.status = CAM_REQ_INVALID;
512 		xpt_done(pccb);
513 		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
517 	case XPT_GET_TRAN_SETTINGS:
518 	{
519 		struct ccb_trans_settings *cts;
520 		struct amd_target_info *targ_info;
521 		struct amd_transinfo *tinfo;
522 		int     intflag;
523 
524 		cts = &pccb->cts;
525 		intflag = splcam();
526 		targ_info = &amd->tinfo[target_id];
527 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
528 			/* current transfer settings */
529 			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
530 				cts->flags = CCB_TRANS_DISC_ENB;
531 			} else {
532 				cts->flags = 0;	/* no tag & disconnect */
533 			}
534 			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
535 				cts->flags |= CCB_TRANS_TAG_ENB;
536 			}
537 			tinfo = &targ_info->current;
538 		} else {
539 			/* default(user) transfer settings */
540 			if (targ_info->disc_tag & AMD_USR_DISCENB) {
541 				cts->flags = CCB_TRANS_DISC_ENB;
542 			} else {
543 				cts->flags = 0;
544 			}
545 			if (targ_info->disc_tag & AMD_USR_TAGENB) {
546 				cts->flags |= CCB_TRANS_TAG_ENB;
547 			}
548 			tinfo = &targ_info->user;
549 		}
550 
551 		cts->sync_period = tinfo->period;
552 		cts->sync_offset = tinfo->offset;
553 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
554 		splx(intflag);
555 		cts->valid = CCB_TRANS_SYNC_RATE_VALID
556 			   | CCB_TRANS_SYNC_OFFSET_VALID
557 			   | CCB_TRANS_BUS_WIDTH_VALID
558 			   | CCB_TRANS_DISC_VALID
559 			   | CCB_TRANS_TQ_VALID;
560 		pccb->ccb_h.status = CAM_REQ_CMP;
561 		xpt_done(pccb);
562 		break;
563 	}
564 	case XPT_SET_TRAN_SETTINGS:
565 	{
566 		struct ccb_trans_settings *cts;
567 		struct amd_target_info *targ_info;
568 		u_int  update_type;
569 		int    intflag;
570 		int    last_entry;
571 
572 		cts = &pccb->cts;
573 		update_type = 0;
574 		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
575 			update_type |= AMD_TRANS_GOAL;
576 		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
577 			update_type |= AMD_TRANS_USER;
578 		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}
584 
585 		intflag = splcam();
586 		targ_info = &amd->tinfo[target_id];
587 
588 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
589 			if (update_type & AMD_TRANS_GOAL) {
590 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
591 					targ_info->disc_tag |= AMD_CUR_DISCENB;
592 				} else {
593 					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
594 				}
595 			}
596 			if (update_type & AMD_TRANS_USER) {
597 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
598 					targ_info->disc_tag |= AMD_USR_DISCENB;
599 				} else {
600 					targ_info->disc_tag &= ~AMD_USR_DISCENB;
601 				}
602 			}
603 		}
604 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
605 			if (update_type & AMD_TRANS_GOAL) {
606 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
607 					targ_info->disc_tag |= AMD_CUR_TAGENB;
608 				} else {
609 					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
610 				}
611 			}
612 			if (update_type & AMD_TRANS_USER) {
613 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
614 					targ_info->disc_tag |= AMD_USR_TAGENB;
615 				} else {
616 					targ_info->disc_tag &= ~AMD_USR_TAGENB;
617 				}
618 			}
619 		}
620 
621 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
622 			if (update_type & AMD_TRANS_GOAL)
623 				cts->sync_offset = targ_info->goal.offset;
624 			else
625 				cts->sync_offset = targ_info->user.offset;
626 		}
627 
628 		if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
629 			cts->sync_offset = AMD_MAX_SYNC_OFFSET;
630 
631 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
632 			if (update_type & AMD_TRANS_GOAL)
633 				cts->sync_period = targ_info->goal.period;
634 			else
635 				cts->sync_period = targ_info->user.period;
636 		}
637 
638 		last_entry = sizeof(tinfo_sync_period) - 1;
639 		if ((cts->sync_period != 0)
640 		 && (cts->sync_period < tinfo_sync_period[0]))
641 			cts->sync_period = tinfo_sync_period[0];
642 		if (cts->sync_period > tinfo_sync_period[last_entry])
643 		 	cts->sync_period = 0;
644 		if (cts->sync_offset == 0)
645 			cts->sync_period = 0;
646 
647 		if ((update_type & AMD_TRANS_USER) != 0) {
648 			targ_info->user.period = cts->sync_period;
649 			targ_info->user.offset = cts->sync_offset;
650 		}
651 		if ((update_type & AMD_TRANS_GOAL) != 0) {
652 			targ_info->goal.period = cts->sync_period;
653 			targ_info->goal.offset = cts->sync_offset;
654 		}
655 		splx(intflag);
656 		pccb->ccb_h.status = CAM_REQ_CMP;
657 		xpt_done(pccb);
658 		break;
659 	}
660 	case XPT_CALC_GEOMETRY:
661 	{
662 		struct ccb_calc_geometry *ccg;
663 		u_int32_t size_mb;
664 		u_int32_t secs_per_cylinder;
665 		int     extended;
666 
667 		ccg = &pccb->ccg;
668 		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
669 		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
670 
671 		if (size_mb > 1024 && extended) {
672 			ccg->heads = 255;
673 			ccg->secs_per_track = 63;
674 		} else {
675 			ccg->heads = 64;
676 			ccg->secs_per_track = 32;
677 		}
678 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
679 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
680 		pccb->ccb_h.status = CAM_REQ_CMP;
681 		xpt_done(pccb);
682 		break;
683 	}
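	/*
	 * Worked example: a 4GB disk with 512-byte blocks has
	 * volume_size = 8388608 sectors, so size_mb = 8388608 / 2048
	 * = 4096.  With the GREATER_1G EEPROM option set this selects
	 * the 255 head / 63 sector translation, giving 8388608 / 16065
	 * = 522 cylinders.
	 */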
684 	default:
685 		pccb->ccb_h.status = CAM_REQ_INVALID;
686 		xpt_done(pccb);
687 		break;
688 	}
689 }
690 
691 static void
692 amd_poll(struct cam_sim * psim)
693 {
694 	amd_intr(cam_sim_softc(psim));
695 }
696 
697 static u_int8_t *
698 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
699 {
700 	int     dataPtr;
701 	struct ccb_scsiio *pcsio;
702 	u_int8_t   i;
703 	struct amd_sg *    pseg;
704 
	pcsio = &pSRB->pccb->csio;

	dataPtr = (int) pcsio->data_ptr;
709 	pseg = pSRB->SGsegment;
710 	for (i = 0; i < pSRB->SGIndex; i++) {
711 		dataPtr += (int) pseg->SGXLen;
712 		pseg++;
713 	}
714 	dataPtr += (int) xferCnt;
715 	return ((u_int8_t *) dataPtr);
716 }
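/*
 * phystovirt() recovers the kernel virtual address that corresponds to
 * the current transfer position: the data_ptr base plus every fully
 * completed S/G element plus xferCnt into the active element.  It is
 * only used to stash the single residual byte left in the SCSI FIFO by
 * amd_DataInPhase0(), and assumes data_ptr is a mapped virtual address.
 */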
717 
718 static void
719 ResetDevParam(struct amd_softc * amd)
720 {
721 	u_int target;
722 
723 	for (target = 0; target <= amd->max_id; target++) {
724 		if (amd->AdaptSCSIID != target) {
725 			amdsetsync(amd, target, /*clockrate*/0,
726 				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
727 		}
728 	}
729 }
730 
731 static void
732 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
733 		 u_int tag, struct srb_queue *queue, cam_status status)
734 {
735 	struct amd_srb *srb;
736 	struct amd_srb *next_srb;
737 
738 	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
739 		union ccb *ccb;
740 
741 		next_srb = TAILQ_NEXT(srb, links);
742 		if (srb->pccb->ccb_h.target_id != target
743 		 && target != CAM_TARGET_WILDCARD)
744 			continue;
745 
746 		if (srb->pccb->ccb_h.target_lun != lun
747 		 && lun != CAM_LUN_WILDCARD)
748 			continue;
749 
750 		if (srb->TagNumber != tag
751 		 && tag != AMD_TAG_WILDCARD)
752 			continue;
753 
754 		ccb = srb->pccb;
755 		TAILQ_REMOVE(queue, srb, links);
756 		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
757 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
758 		 && (status & CAM_DEV_QFRZN) != 0)
759 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
760 		ccb->ccb_h.status = status;
761 		xpt_done(ccb);
762 	}
763 
764 }
765 
766 static void
767 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
768 	   u_int period, u_int offset, u_int type)
769 {
770 	struct amd_target_info *tinfo;
771 	u_int old_period;
772 	u_int old_offset;
773 
774 	tinfo = &amd->tinfo[target];
775 	old_period = tinfo->current.period;
776 	old_offset = tinfo->current.offset;
777 	if ((type & AMD_TRANS_CUR) != 0
778 	 && (old_period != period || old_offset != offset)) {
779 		struct cam_path *path;
780 
781 		tinfo->current.period = period;
782 		tinfo->current.offset = offset;
783 		tinfo->sync_period_reg = clockrate;
784 		tinfo->sync_offset_reg = offset;
785 		tinfo->CtrlR3 &= ~FAST_SCSI;
786 		tinfo->CtrlR4 &= ~EATER_25NS;
787 		if (clockrate > 7)
788 			tinfo->CtrlR4 |= EATER_25NS;
789 		else
790 			tinfo->CtrlR3 |= FAST_SCSI;
791 
792 		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
793 			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
794 			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
795 			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
796 			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
797 		}
798 		/* If possible, update the XPT's notion of our transfer rate */
799 		if (xpt_create_path(&path, /*periph*/NULL,
800 				    cam_sim_path(amd->psim), target,
801 				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
802 			struct ccb_trans_settings neg;
803 
804 			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
805 			neg.sync_period = period;
806 			neg.sync_offset = offset;
807 			neg.valid = CCB_TRANS_SYNC_RATE_VALID
808 				  | CCB_TRANS_SYNC_OFFSET_VALID;
809 			xpt_async(AC_TRANSFER_NEG, path, &neg);
810 			xpt_free_path(path);
811 		}
812 	}
813 	if ((type & AMD_TRANS_GOAL) != 0) {
814 		tinfo->goal.period = period;
815 		tinfo->goal.offset = offset;
816 	}
817 
818 	if ((type & AMD_TRANS_USER) != 0) {
819 		tinfo->user.period = period;
820 		tinfo->user.offset = offset;
821 	}
822 }
823 
824 static void
825 amdsettags(struct amd_softc *amd, u_int target, int tagenb)
826 {
827 	panic("Implement me!\n");
828 }
829 
830 
831 #if 0
832 /*
833  **********************************************************************
834  * Function : amd_reset (struct amd_softc * amd)
835  * Purpose  : perform a hard reset on the SCSI bus( and AMD chip).
836  * Inputs   : cmd - command which caused the SCSI RESET
837  **********************************************************************
838  */
839 static void
840 amd_reset(struct amd_softc * amd)
841 {
842 	int	   intflag;
843 	u_int8_t   bval;
844 	u_int16_t  i;
845 
846 
847 #ifdef AMD_DEBUG0
848 	printf("DC390: RESET");
849 #endif
850 
851 	intflag = splcam();
852 	bval = amd_read8(amd, CNTLREG1);
853 	bval |= DIS_INT_ON_SCSI_RST;
854 	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
855 	amd_ResetSCSIBus(amd);
856 
857 	for (i = 0; i < 500; i++) {
858 		DELAY(1000);
859 	}
860 
861 	bval = amd_read8(amd, CNTLREG1);
862 	bval &= ~DIS_INT_ON_SCSI_RST;
863 	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */
864 
865 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
866 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
867 
868 	ResetDevParam(amd);
869 	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
870 			 AMD_TAG_WILDCARD, &amd->running_srbs,
871 			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
872 	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
873 			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
874 			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
875 	amd->active_srb = NULL;
876 	amd->ACBFlag = 0;
877 	splx(intflag);
878 	return;
879 }
880 
881 void
882 amd_timeout(void *arg1)
883 {
884 	struct amd_srb *    pSRB;
885 
886 	pSRB = (struct amd_srb *) arg1;
887 }
888 #endif
889 
890 static int
891 amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
892 {
893 	union ccb *pccb;
894 	struct ccb_scsiio *pcsio;
895 	struct amd_target_info *targ_info;
896 	u_int identify_msg;
897 	u_int command;
898 	u_int target;
899 	u_int lun;
900 	int tagged;
901 
902 	pccb = pSRB->pccb;
903 	pcsio = &pccb->csio;
904 	target = pccb->ccb_h.target_id;
905 	lun = pccb->ccb_h.target_lun;
906 	targ_info = &amd->tinfo[target];
907 
908 	amd_clear_msg_state(amd);
909 	amd_write8(amd, SCSIDESTIDREG, target);
910 	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
911 	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
912 	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
913 	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
914 	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
915 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
916 
917 	identify_msg = MSG_IDENTIFYFLAG | lun;
918 	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
919 	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
920 	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
921 	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
922 		identify_msg |= MSG_IDENTIFY_DISCFLAG;
923 
924 	amd_write8(amd, SCSIFIFOREG, identify_msg);
925 	tagged = 0;
926 	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
927 	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
928 		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
929 	if (targ_info->current.period != targ_info->goal.period
930 	 || targ_info->current.offset != targ_info->goal.offset) {
931 		command = SEL_W_ATN_STOP;
932 		amdconstructsdtr(amd, targ_info->goal.period,
933 				 targ_info->goal.offset);
934 	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
935 		command = SEL_W_ATN2;
936 		pSRB->SRBState = SRB_START;
937 		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
938 		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
939 		tagged++;
940 	} else {
941 		command = SEL_W_ATN;
942 		pSRB->SRBState = SRB_START;
943 	}
944 	if (command != SEL_W_ATN_STOP)
945 		amdsetupcommand(amd, pSRB);
946 
947 	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
948 		pSRB->SRBState = SRB_READY;
949 		return (1);
950 	} else {
951 		amd->last_phase = SCSI_ARBITRATING;
952 		amd_write8(amd, SCSICMDREG, command);
953 		amd->active_srb = pSRB;
954 		amd->cur_target = target;
955 		amd->cur_lun = lun;
956 		return (0);
957 	}
958 }
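/*
 * Selection command choice in amdstart(): SEL_W_ATN_STOP is used while a
 * sync negotiation is still pending, so the queued SDTR (and later the
 * CDB, via amd_CommandPhase1()) go out under driver control; SEL_W_ATN2
 * sends the two extra tag-message bytes after the identify; plain
 * SEL_W_ATN covers the untagged case.  A non-zero return means an
 * interrupt was pending and the SRB stays queued for another attempt.
 */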
959 
960 /*
961  *  Catch an interrupt from the adapter.
962  *  Process pending device interrupts.
963  */
964 static void
965 amd_intr(void   *arg)
966 {
967 	struct amd_softc *amd;
968 	struct amd_srb *pSRB;
969 	u_int  internstat = 0;
970 	u_int  scsistat;
971 	u_int  intstat;
972 
973 	amd = (struct amd_softc *)arg;
974 
975 	if (amd == NULL) {
976 #ifdef AMD_DEBUG0
977 		printf("amd_intr: amd NULL return......");
978 #endif
979 		return;
980 	}
981 
982 	scsistat = amd_read8(amd, SCSISTATREG);
983 	if (!(scsistat & INTERRUPT)) {
984 #ifdef AMD_DEBUG0
		printf("amd_intr: no interrupt pending, returning......");
986 #endif
987 		return;
988 	}
989 #ifdef AMD_DEBUG_SCSI_PHASE
990 	printf("scsistat=%2x,", scsistat);
991 #endif
992 
993 	internstat = amd_read8(amd, INTERNSTATREG);
994 	intstat = amd_read8(amd, INTSTATREG);
995 
996 #ifdef AMD_DEBUG_SCSI_PHASE
997 	printf("intstat=%2x,", intstat);
998 #endif
999 
1000 	if (intstat & DISCONNECTED) {
1001 		amd_Disconnect(amd);
1002 		return;
1003 	}
1004 	if (intstat & RESELECTED) {
1005 		amd_Reselect(amd);
1006 		return;
1007 	}
1008 	if (intstat & INVALID_CMD) {
1009 		amd_InvalidCmd(amd);
1010 		return;
1011 	}
1012 	if (intstat & SCSI_RESET_) {
1013 		amd_ScsiRstDetect(amd);
1014 		return;
1015 	}
1016 	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
1017 		pSRB = amd->active_srb;
1018 		/*
1019 		 * Run our state engine.  First perform
1020 		 * post processing for the last phase we
1021 		 * were in, followed by any processing
1022 		 * required to handle the current phase.
1023 		 */
1024 		scsistat =
1025 		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
1026 		amd->last_phase = scsistat & SCSI_PHASE_MASK;
1027 		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
1028 	}
1029 }
1030 
1031 static u_int
1032 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1033 {
1034 	struct amd_sg *psgl;
1035 	u_int32_t   ResidCnt, xferCnt;
1036 
1037 	if (!(pSRB->SRBState & SRB_XFERPAD)) {
1038 		if (scsistat & PARITY_ERR) {
1039 			pSRB->SRBStatus |= PARITY_ERROR;
1040 		}
1041 		if (scsistat & COUNT_2_ZERO) {
1042 			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
1043 				;
1044 			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1045 			pSRB->SGIndex++;
1046 			if (pSRB->SGIndex < pSRB->SGcount) {
1047 				pSRB->pSGlist++;
1048 				psgl = pSRB->pSGlist;
1049 				pSRB->SGPhysAddr = psgl->SGXPtr;
1050 				pSRB->SGToBeXferLen = psgl->SGXLen;
1051 			} else {
1052 				pSRB->SGToBeXferLen = 0;
1053 			}
1054 		} else {
1055 			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
1056 			ResidCnt += amd_read8(amd, CTCREG_LOW)
1057 				  | (amd_read8(amd, CTCREG_MID) << 8)
1058 				  | (amd_read8(amd, CURTXTCNTREG) << 16);
1059 
1060 			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1061 			pSRB->SGPhysAddr += xferCnt;
1062 			pSRB->TotalXferredLen += xferCnt;
1063 			pSRB->SGToBeXferLen = ResidCnt;
1064 		}
1065 	}
1066 	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
1067 	return (scsistat);
1068 }
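/*
 * Residual math for the write case above, as an illustration: if a
 * 4096-byte segment ends early with 256 left in the 24-bit transfer
 * counter and 5 bytes still in the SCSI FIFO, ResidCnt = 261 and
 * xferCnt = 4096 - 261 = 3835 bytes actually reached the target.
 */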
1069 
1070 static u_int
1071 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1072 {
1073 	u_int8_t bval;
1074 	u_int16_t  i, residual;
1075 	struct amd_sg *psgl;
1076 	u_int32_t   ResidCnt, xferCnt;
1077 	u_int8_t *  ptr;
1078 
1079 	if (!(pSRB->SRBState & SRB_XFERPAD)) {
1080 		if (scsistat & PARITY_ERR) {
1081 			pSRB->SRBStatus |= PARITY_ERROR;
1082 		}
1083 		if (scsistat & COUNT_2_ZERO) {
1084 			while (1) {
1085 				bval = amd_read8(amd, DMA_Status);
1086 				if ((bval & DMA_XFER_DONE) != 0)
1087 					break;
1088 			}
1089 			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1090 
1091 			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1092 			pSRB->SGIndex++;
1093 			if (pSRB->SGIndex < pSRB->SGcount) {
1094 				pSRB->pSGlist++;
1095 				psgl = pSRB->pSGlist;
1096 				pSRB->SGPhysAddr = psgl->SGXPtr;
1097 				pSRB->SGToBeXferLen = psgl->SGXLen;
1098 			} else {
1099 				pSRB->SGToBeXferLen = 0;
1100 			}
1101 		} else {	/* phase changed */
1102 			residual = 0;
1103 			bval = amd_read8(amd, CURRENTFIFOREG);
1104 			while (bval & 0x1f) {
1105 				if ((bval & 0x1f) == 1) {
1106 					for (i = 0; i < 0x100; i++) {
1107 						bval = amd_read8(amd, CURRENTFIFOREG);
1108 						if (!(bval & 0x1f)) {
1109 							goto din_1;
1110 						} else if (i == 0x0ff) {
1111 							residual = 1;
1112 							goto din_1;
1113 						}
1114 					}
1115 				} else {
1116 					bval = amd_read8(amd, CURRENTFIFOREG);
1117 				}
1118 			}
1119 	din_1:
1120 			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
1121 			for (i = 0; i < 0x8000; i++) {
1122 				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
1123 					break;
1124 			}
1125 			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1126 
1127 			ResidCnt = amd_read8(amd, CTCREG_LOW)
1128 				 | (amd_read8(amd, CTCREG_MID) << 8)
1129 				 | (amd_read8(amd, CURTXTCNTREG) << 16);
1130 			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1131 			pSRB->SGPhysAddr += xferCnt;
1132 			pSRB->TotalXferredLen += xferCnt;
1133 			pSRB->SGToBeXferLen = ResidCnt;
1134 			if (residual) {
1135 				/* get residual byte */
1136 				bval = amd_read8(amd, SCSIFIFOREG);
1137 				ptr = phystovirt(pSRB, xferCnt);
1138 				*ptr = bval;
1139 				pSRB->SGPhysAddr++;
1140 				pSRB->TotalXferredLen++;
1141 				pSRB->SGToBeXferLen--;
1142 			}
1143 		}
1144 	}
1145 	return (scsistat);
1146 }
1147 
1148 static u_int
1149 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1150 {
1151 	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
1152 	/* get message */
1153 	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
1154 	pSRB->SRBState = SRB_COMPLETED;
1155 	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1156 	return (SCSI_NOP0);
1157 }
1158 
1159 static u_int
1160 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1161 {
1162 	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
1163 		scsistat = SCSI_NOP0;
1164 	}
1165 	return (scsistat);
1166 }
1167 
1168 static u_int
1169 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1170 {
1171 	int done;
1172 
1173 	amd->msgin_buf[amd->msgin_index]  = amd_read8(amd, SCSIFIFOREG);
1174 
1175 	done = amdparsemsg(amd);
1176 	if (done)
1177 		amd->msgin_index = 0;
1178 	else
1179 		amd->msgin_index++;
1180 	return (SCSI_NOP0);
1181 }
1182 
1183 static int
1184 amdparsemsg(struct amd_softc *amd)
1185 {
1186 	struct	amd_target_info *targ_info;
1187 	int	reject;
1188 	int	done;
1189 	int	response;
1190 
1191 	done = FALSE;
1192 	response = FALSE;
1193 	reject = FALSE;
1194 
1195 	targ_info = &amd->tinfo[amd->cur_target];
1196 
	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
1204 	switch (amd->msgin_buf[0]) {
1205 	case MSG_DISCONNECT:
1206 		amd->active_srb->SRBState = SRB_DISCONNECT;
1207 		amd->disc_count[amd->cur_target][amd->cur_lun]++;
1208 		done = TRUE;
1209 		break;
1210 	case MSG_SIMPLE_Q_TAG:
1211 	{
1212 		struct amd_srb *disc_srb;
1213 
1214 		if (amd->msgin_index < 1)
1215 			break;
1216 		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
1217 		if (amd->active_srb != NULL
1218 		 || disc_srb->SRBState != SRB_DISCONNECT
1219 		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
1220 		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
1221 			printf("amd%d: Unexpected tagged reselection "
1222 			       "for target %d, Issuing Abort\n", amd->unit,
1223 			       amd->cur_target);
1224 			amd->msgout_buf[0] = MSG_ABORT;
1225 			amd->msgout_len = 1;
1226 			response = TRUE;
1227 			break;
1228 		}
1229 		amd->active_srb = disc_srb;
1230 		amd->disc_count[amd->cur_target][amd->cur_lun]--;
1231 		done = TRUE;
1232 		break;
1233 	}
1234 	case MSG_MESSAGE_REJECT:
1235 		response = amdhandlemsgreject(amd);
1236 		if (response == FALSE)
1237 			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1238 		/* FALLTHROUGH */
1239 	case MSG_NOOP:
1240 		done = TRUE;
1241 		break;
1242 	case MSG_EXTENDED:
1243 	{
1244 		u_int clockrate;
1245 		u_int period;
1246 		u_int offset;
1247 		u_int saved_offset;
1248 
1249 		/* Wait for enough of the message to begin validation */
1250 		if (amd->msgin_index < 1)
1251 			break;
1252 		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
1253 			reject = TRUE;
1254 			break;
1255 		}
1256 
1257 		/* Wait for opcode */
1258 		if (amd->msgin_index < 2)
1259 			break;
1260 
1261 		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
1262 			reject = TRUE;
1263 			break;
1264 		}
1265 
1266 		/*
1267 		 * Wait until we have both args before validating
1268 		 * and acting on this message.
1269 		 *
1270 		 * Add one to MSG_EXT_SDTR_LEN to account for
1271 		 * the extended message preamble.
1272 		 */
1273 		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
1274 			break;
1275 
1276 		period = amd->msgin_buf[3];
1277 		saved_offset = offset = amd->msgin_buf[4];
1278 		clockrate = amdfindclockrate(amd, &period);
1279 		if (offset > AMD_MAX_SYNC_OFFSET)
1280 			offset = AMD_MAX_SYNC_OFFSET;
1281 		if (period == 0 || offset == 0) {
1282 			offset = 0;
1283 			period = 0;
1284 			clockrate = 0;
1285 		}
1286 		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
1287 			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1288 
1289 		/*
1290 		 * See if we initiated Sync Negotiation
1291 		 * and didn't have to fall down to async
1292 		 * transfers.
1293 		 */
1294 		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
1295 			/* We started it */
1296 			if (saved_offset != offset) {
1297 				/* Went too low - force async */
1298 				reject = TRUE;
1299 			}
1300 		} else {
1301 			/*
1302 			 * Send our own SDTR in reply
1303 			 */
1304 			if (bootverbose)
1305 				printf("Sending SDTR!\n");
1306 			amd->msgout_index = 0;
1307 			amd->msgout_len = 0;
1308 			amdconstructsdtr(amd, period, offset);
1309 			amd->msgout_index = 0;
1310 			response = TRUE;
1311 		}
1312 		done = TRUE;
1313 		break;
1314 	}
1315 	case MSG_SAVEDATAPOINTER:
1316 	case MSG_RESTOREPOINTERS:
1317 		/* XXX Implement!!! */
1318 		done = TRUE;
1319 		break;
1320 	default:
1321 		reject = TRUE;
1322 		break;
1323 	}
1324 
1325 	if (reject) {
1326 		amd->msgout_index = 0;
1327 		amd->msgout_len = 1;
1328 		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
1329 		done = TRUE;
1330 		response = TRUE;
1331 	}
1332 
1333 	if (response)
1334 		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1335 
1336 	if (done && !response)
1337 		/* Clear the outgoing message buffer */
1338 		amd->msgout_len = 0;
1339 
1340 	/* Drop Ack */
1341 	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1342 
1343 	return (done);
1344 }
1345 
1346 static u_int
1347 amdfindclockrate(struct amd_softc *amd, u_int *period)
1348 {
1349 	u_int i;
1350 	u_int clockrate;
1351 
1352 	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1353 		u_int8_t *table_entry;
1354 
1355 		table_entry = &tinfo_sync_period[i];
1356 		if (*period <= *table_entry) {
1357 			/*
1358 			 * When responding to a target that requests
1359 			 * sync, the requested rate may fall between
1360 			 * two rates that we can output, but still be
1361 			 * a rate that we can receive.  Because of this,
1362 			 * we want to respond to the target with
1363 			 * the same rate that it sent to us even
1364 			 * if the period we use to send data to it
1365 			 * is lower.  Only lower the response period
1366 			 * if we must.
1367 			 */
1368 			if (i == 0) {
1369 				*period = *table_entry;
1370 			}
1371 			break;
1372 		}
1373 	}
1374 
1375 	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
1377 		*period = 0;
1378 		clockrate = 0;
1379 	} else
1380 		clockrate = i + 4;
1381 
1382 	return (clockrate);
1383 }
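/*
 * Example: a target requesting a 200ns period (SDTR value 50) matches
 * table index 4, so amdfindclockrate() returns clockrate 8 and leaves
 * the period at 50; amdsetsync() then programs the 25ns clock eater
 * because the rate is 5.0MHz or slower.
 */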
1384 
1385 /*
1386  * See if we sent a particular extended message to the target.
1387  * If "full" is true, the target saw the full message.
1388  * If "full" is false, the target saw at least the first
1389  * byte of the message.
1390  */
1391 static int
1392 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1393 {
1394 	int found;
1395 	int index;
1396 
1397 	found = FALSE;
1398 	index = 0;
1399 
1400 	while (index < amd->msgout_len) {
1401 		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1402 		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1403 			index++;
1404 		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1405 		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1406 			/* Skip tag type and tag id */
1407 			index += 2;
1408 		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1409 			/* Found a candidate */
1410 			if (amd->msgout_buf[index+2] == msgtype) {
1411 				u_int end_index;
1412 
1413 				end_index = index + 1
1414 					  + amd->msgout_buf[index + 1];
1415 				if (full) {
1416 					if (amd->msgout_index > end_index)
1417 						found = TRUE;
1418 				} else if (amd->msgout_index > index)
1419 					found = TRUE;
1420 			}
1421 			break;
1422 		} else {
1423 			panic("amdsentmsg: Inconsistent msg buffer");
1424 		}
1425 	}
1426 	return (found);
1427 }
1428 
1429 static void
1430 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1431 {
1432 	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1433 	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1434 	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1435 	amd->msgout_buf[amd->msgout_index++] = period;
1436 	amd->msgout_buf[amd->msgout_index++] = offset;
1437 	amd->msgout_len += 5;
1438 }
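/*
 * The queued extended message is the standard five-byte SDTR sequence,
 * e.g. { MSG_EXTENDED, MSG_EXT_SDTR_LEN, MSG_EXT_SDTR, 25, 15 }
 * (0x01 0x03 0x01 0x19 0x0f) to request a 100ns period at offset 15.
 */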
1439 
1440 static int
1441 amdhandlemsgreject(struct amd_softc *amd)
1442 {
1443 	/*
1444 	 * If we had an outstanding SDTR for this
1445 	 * target, this is a signal that the target
1446 	 * is refusing negotiation.  Also watch out
1447 	 * for rejected tag messages.
1448 	 */
1449 	struct	amd_srb *srb;
1450 	struct	amd_target_info *targ_info;
1451 	int	response = FALSE;
1452 
1453 	srb = amd->active_srb;
1454 	targ_info = &amd->tinfo[amd->cur_target];
1455 	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1456 		/* note asynch xfers and clear flag */
1457 		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1458 			   /*period*/0, /*offset*/0,
1459 			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1460 		printf("amd%d:%d: refuses synchronous negotiation. "
1461 		       "Using asynchronous transfers\n",
1462 		       amd->unit, amd->cur_target);
1463 	} else if ((srb != NULL)
1464 		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1465 		struct  ccb_trans_settings neg;
1466 
1467 		printf("amd%d:%d: refuses tagged commands.  Performing "
1468 		       "non-tagged I/O\n", amd->unit, amd->cur_target);
1469 
1470 		amdsettags(amd, amd->cur_target, FALSE);
1471 		neg.flags = 0;
1472 		neg.valid = CCB_TRANS_TQ_VALID;
1473 		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1474 		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1475 
1476 		/*
1477 		 * Resend the identify for this CCB as the target
1478 		 * may believe that the selection is invalid otherwise.
1479 		 */
1480 		if (amd->msgout_len != 0)
1481 			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1482 			      amd->msgout_len);
1483 		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1484 				    | srb->pccb->ccb_h.target_lun;
1485 		amd->msgout_len++;
1486 		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1487 		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1488 			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1489 
1490 		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1491 
1492 		/*
1493 		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
1495 		 * converted to untagged commands.
1496 		 */
1497 		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1498 				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1499 				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1500 	} else {
1501 		/*
1502 		 * Otherwise, we ignore it.
1503 		 */
1504 		printf("amd%d:%d: Message reject received -- ignored\n",
1505 		       amd->unit, amd->cur_target);
1506 	}
1507 	return (response);
1508 }
1509 
1510 #if 0
1511 	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1512 		if (bval == MSG_DISCONNECT) {
1513 			pSRB->SRBState = SRB_DISCONNECT;
1514 		} else if (bval == MSG_SAVEDATAPOINTER) {
1515 			goto min6;
1516 		} else if ((bval == MSG_EXTENDED)
1517 			|| ((bval >= MSG_SIMPLE_Q_TAG)
1518 			 && (bval <= MSG_ORDERED_Q_TAG))) {
1519 			pSRB->SRBState |= SRB_MSGIN_MULTI;
1520 			pSRB->MsgInBuf[0] = bval;
1521 			pSRB->MsgCnt = 1;
1522 			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1523 		} else if (bval == MSG_MESSAGE_REJECT) {
1524 			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1525 
1526 			if (pSRB->SRBState & DO_SYNC_NEGO) {
1527 				goto set_async;
1528 			}
1529 		} else if (bval == MSG_RESTOREPOINTERS) {
1530 			goto min6;
1531 		} else {
1532 			goto min6;
1533 		}
1534 	} else {		/* minx: */
1535 		*pSRB->pMsgPtr = bval;
1536 		pSRB->MsgCnt++;
1537 		pSRB->pMsgPtr++;
1538 		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1539 		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1540 			if (pSRB->MsgCnt == 2) {
1541 				pSRB->SRBState = 0;
1542 				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1544 					pSRB = amd->pTmpSRB;
1545 					pSRB->SRBState = SRB_UNEXPECT_RESEL;
1546 					pDCB->pActiveSRB = pSRB;
1547 					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1548 					EnableMsgOut2(amd, pSRB);
1549 				} else {
1550 					if (pDCB->DCBFlag & ABORT_DEV_) {
1551 						pSRB->SRBState = SRB_ABORT_SENT;
1552 						EnableMsgOut1(amd, pSRB);
1553 					}
1554 					pDCB->pActiveSRB = pSRB;
1555 					pSRB->SRBState = SRB_DATA_XFER;
1556 				}
1557 			}
1558 		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1559 			&& (pSRB->MsgCnt == 5)) {
1560 			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1561 			if ((pSRB->MsgInBuf[1] != 3)
1562 			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
1563 				pSRB->MsgCnt = 1;
1564 				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1565 				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1566 			} else if (!(pSRB->MsgInBuf[3])
1567 				|| !(pSRB->MsgInBuf[4])) {
1568 		set_async:	/* set async */
1569 
1570 				pDCB = pSRB->pSRBDCB;
1571 				/* disable sync & sync nego */
1572 				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1573 				pDCB->SyncPeriod = 0;
1574 				pDCB->SyncOffset = 0;
1575 
1576 				pDCB->tinfo.goal.period = 0;
1577 				pDCB->tinfo.goal.offset = 0;
1578 
1579 				pDCB->tinfo.current.period = 0;
1580 				pDCB->tinfo.current.offset = 0;
1581 				pDCB->tinfo.current.width =
1582 				    MSG_EXT_WDTR_BUS_8_BIT;
1583 
1584 				pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1585 				pDCB->CtrlR4 &= 0x3f;
1586 				pDCB->CtrlR4 |= EATER_25NS;
1587 				goto re_prog;
1588 			} else {/* set sync */
1589 
1590 				pDCB = pSRB->pSRBDCB;
1591 				/* enable sync & sync nego */
1592 				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1593 
1594 				/* set sync offset */
1595 				pDCB->SyncOffset &= 0x0f0;
1596 				pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1597 
1598 				/* set sync period */
1599 				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1600 
1601 				wval = (u_int16_t) pSRB->MsgInBuf[3];
1602 				wval = wval << 2;
1603 				wval--;
1604 				wval1 = wval / 25;
1605 				if ((wval1 * 25) != wval) {
1606 					wval1++;
1607 				}
1608 				bval = FAST_CLK|FAST_SCSI;
1609 				pDCB->CtrlR4 &= 0x3f;
1610 				if (wval1 >= 8) {
1611 					/* Fast SCSI */
1612 					wval1--;
1613 					bval = FAST_CLK;
1614 					pDCB->CtrlR4 |= EATER_25NS;
1615 				}
1616 				pDCB->CtrlR3 = bval;
1617 				pDCB->SyncPeriod = (u_int8_t) wval1;
1618 
1619 				pDCB->tinfo.goal.period =
1620 				    tinfo_sync_period[pDCB->SyncPeriod - 4];
1621 				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1622 				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
1624 				pDCB->tinfo.current.offset = pDCB->SyncOffset;
1625 
1626 				/*
1627 				 * program SCSI control register
1628 				 */
1629 		re_prog:
1630 				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1631 				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1632 				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1633 				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1634 			}
1635 		}
1636 	}
1637 min6:
1638 	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1639 	return (SCSI_NOP0);
1640 }
1641 #endif
1642 
1643 static u_int
1644 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1645 {
1646 	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1647 	return (scsistat);
1648 }
1649 
1650 static u_int
1651 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1652 {
1653 	DataIO_Comm(amd, pSRB, READ_DIRECTION);
1654 	return (scsistat);
1655 }
1656 
1657 static void
1658 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1659 {
1660 	struct amd_sg *    psgl;
1661 	u_int32_t   lval;
1662 
1663 	if (pSRB->SGIndex < pSRB->SGcount) {
1664 		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1665 
1666 		if (!pSRB->SGToBeXferLen) {
1667 			psgl = pSRB->pSGlist;
1668 			pSRB->SGPhysAddr = psgl->SGXPtr;
1669 			pSRB->SGToBeXferLen = psgl->SGXLen;
1670 		}
1671 		lval = pSRB->SGToBeXferLen;
1672 		amd_write8(amd, CTCREG_LOW, lval);
1673 		amd_write8(amd, CTCREG_MID, lval >> 8);
1674 		amd_write8(amd, CURTXTCNTREG, lval >> 16);
1675 
1676 		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1677 
1678 		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1679 
1680 		pSRB->SRBState = SRB_DATA_XFER;
1681 
1682 		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1683 
1684 		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1685 
1686 		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1687 	} else {		/* xfer pad */
1688 		if (pSRB->SGcount) {
1689 			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1690 			pSRB->SRBStatus |= OVER_RUN;
1691 		}
1692 		amd_write8(amd, CTCREG_LOW, 0);
1693 		amd_write8(amd, CTCREG_MID, 0);
1694 		amd_write8(amd, CURTXTCNTREG, 0);
1695 
1696 		pSRB->SRBState |= SRB_XFERPAD;
1697 		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1698 	}
1699 }
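/*
 * DataIO_Comm() loads the transfer length twice: the low/mid/high writes
 * fill the chip's 24-bit SCSI transfer counter (e.g. 0x012345 becomes
 * CTCREG_LOW 0x45, CTCREG_MID 0x23, CURTXTCNTREG 0x01), while the single
 * 32-bit DMA_XferCnt write programs the PCI DMA engine with the same
 * count before the transfer command is issued.
 */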
1700 
1701 static u_int
1702 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1703 {
1704 	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1705 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1706 
1707 	amdsetupcommand(amd, srb);
1708 
1709 	srb->SRBState = SRB_COMMAND;
1710 	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1711 	return (scsistat);
1712 }
1713 
1714 static u_int
1715 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1716 {
1717 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1718 	pSRB->SRBState = SRB_STATUS;
1719 	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1720 	return (scsistat);
1721 }
1722 
1723 static u_int
1724 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1725 {
1726 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1727 
1728 	if (amd->msgout_len == 0) {
1729 		amd->msgout_buf[0] = MSG_NOOP;
1730 		amd->msgout_len = 1;
1731 	}
1732 	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1733 	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1734 	return (scsistat);
1735 }
1736 
1737 static u_int
1738 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1739 {
1740 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1741 	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1742 	return (scsistat);
1743 }
1744 
1745 static u_int
1746 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1747 {
1748 	return (scsistat);
1749 }
1750 
1751 static void
1752 amd_Disconnect(struct amd_softc * amd)
1753 {
1754 	struct	amd_srb *srb;
1755 	int	target;
1756 	int	lun;
1757 
1758 	srb = amd->active_srb;
1759 	amd->active_srb = NULL;
1760 	amd->last_phase = SCSI_BUS_FREE;
1761 	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1762 	target = amd->cur_target;
1763 	lun = amd->cur_lun;
1764 
1765 	if (srb == NULL) {
1766 		/* Invalid reselection */
1767 		amdrunwaiting(amd);
1768 	} else if (srb->SRBState & SRB_ABORT_SENT) {
1769 		/* Clean up and done this srb */
1770 #if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1772 			/* XXX What about "done'ing" these srbs??? */
1773 			if (pSRB->pSRBDCB == pDCB) {
1774 				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1775 				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1776 			}
1777 		}
1778 		amdrunwaiting(amd);
1779 #endif
1780 	} else {
1781 		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1782 		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1783 			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1784 			goto disc1;
1785 		} else if (srb->SRBState & SRB_DISCONNECT) {
1786 			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1787 				amd->untagged_srbs[target][lun] = srb;
1788 			amdrunwaiting(amd);
1789 		} else if (srb->SRBState & SRB_COMPLETED) {
1790 	disc1:
1791 			srb->SRBState = SRB_FREE;
1792 			SRBdone(amd, srb);
1793 		}
1794 	}
1795 	return;
1796 }
1797 
1798 static void
1799 amd_Reselect(struct amd_softc *amd)
1800 {
1801 	struct amd_target_info *tinfo;
1802 	u_int16_t disc_count;
1803 
1804 	amd_clear_msg_state(amd);
1805 	if (amd->active_srb != NULL) {
1806 		/* Requeue the SRB for our attempted Selection */
1807 		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1808 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1809 		amd->active_srb = NULL;
1810 	}
	/*
	 * Get the reselecting target's ID.  The first FIFO byte holds a
	 * bitmask of the reselector's ID and our own; XORing out our
	 * HostID_Bit and taking ffs()-1 yields the target ID.  The second
	 * byte is the identify message, with the LUN in its low 3 bits.
	 */
1812 	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1813 	amd->cur_target ^= amd->HostID_Bit;
1814 	amd->cur_target = ffs(amd->cur_target) - 1;
1815 	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1816 	tinfo = &amd->tinfo[amd->cur_target];
1817 	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1818 	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1819 	if (disc_count == 0) {
1820 		printf("amd%d: Unexpected reselection for target %d, "
1821 		       "Issuing Abort\n", amd->unit, amd->cur_target);
1822 		amd->msgout_buf[0] = MSG_ABORT;
1823 		amd->msgout_len = 1;
1824 		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1825 	}
1826 	if (amd->active_srb != NULL) {
1827 		amd->disc_count[amd->cur_target][amd->cur_lun]--;
1828 		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1829 	}
1830 
1831 	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1832 	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1833 	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1834 	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1835 	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1836 	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1837 	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1838 	amd->last_phase = SCSI_NOP0;
1839 }
1840 
1841 static void
1842 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1843 {
1844 	u_int8_t   bval, i, status;
1845 	union ccb *pccb;
1846 	struct ccb_scsiio *pcsio;
1847 	int	   intflag;
1848 	struct amd_sg *ptr2;
1849 	u_int32_t   swlval;
1850 	u_int   target_id, target_lun;
1851 
1852 	pccb = pSRB->pccb;
1853 	pcsio = &pccb->csio;
1854 	target_id = pSRB->pccb->ccb_h.target_id;
1855 	target_lun = pSRB->pccb->ccb_h.target_lun;
1856 
1857 	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1858 		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1859 
1860 	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1861 		bus_dmasync_op_t op;
1862 
1863 		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1864 			op = BUS_DMASYNC_POSTREAD;
1865 		else
1866 			op = BUS_DMASYNC_POSTWRITE;
1867 		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1868 		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1869 	}
1870 
1871 	status = pSRB->TargetStatus;
1872 	pccb->ccb_h.status = CAM_REQ_CMP;
1874 	if (pSRB->SRBFlag & AUTO_REQSENSE) {
1875 		pSRB->SRBFlag &= ~AUTO_REQSENSE;
1876 		pSRB->AdaptStatus = 0;
1877 		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1878 
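		/*
		 * XXX The REQUEST SENSE itself returned CHECK CONDITION;
		 * it is reported here as a selection timeout.
		 */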
1879 		if (status == SCSI_STATUS_CHECK_COND) {
1880 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1881 			goto ckc_e;
1882 		}
1883 		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1884 
1885 		pcsio->sense_resid = pcsio->sense_len
1886 				   - pSRB->TotalXferredLen;
1887 		pSRB->TotalXferredLen = pSRB->Segment1[1];
1888 		if (pSRB->TotalXferredLen) {
1889 			/* XXX Residual of the original command */
1890 			pcsio->resid = pcsio->dxfer_len
1891 				     - pSRB->TotalXferredLen;
1892 			/* The resid field contains valid data	 */
1893 			/* Flush resid bytes on complete        */
1894 		} else {
1895 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1896 		}
1897 		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
1898 		goto ckc_e;
1899 	}
1900 	if (status) {
1901 		if (status == SCSI_STATUS_CHECK_COND) {
1902 
1903 			if ((pSRB->SGIndex < pSRB->SGcount)
1904 			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1905 				bval = pSRB->SGcount;
1906 				swlval = pSRB->SGToBeXferLen;
1907 				ptr2 = pSRB->pSGlist;
1908 				ptr2++;
1909 				for (i = pSRB->SGIndex + 1; i < bval; i++) {
1910 					swlval += ptr2->SGXLen;
1911 					ptr2++;
1912 				}
1913 				/* XXX Sum of the S/G lengths not yet transferred */
1914 				pcsio->resid = (u_int32_t) swlval;
1915 
1916 #ifdef	AMD_DEBUG0
1917 				printf("XferredLen=%8x,NotYetXferLen=%8x,",
1918 					pSRB->TotalXferredLen, swlval);
1919 #endif
1920 			}
1921 			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1922 #ifdef	AMD_DEBUG0
1923 				printf("RequestSense..................\n");
1924 #endif
1925 				RequestSense(amd, pSRB);
1926 				return;
1927 			}
1928 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1929 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1930 			goto ckc_e;
1931 		} else if (status == SCSI_STATUS_QUEUE_FULL) {
1932 			pSRB->AdaptStatus = 0;
1933 			pSRB->TargetStatus = 0;
1934 			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1935 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1936 			goto ckc_e;
1937 		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1938 			pSRB->AdaptStatus = H_SEL_TIMEOUT;
1939 			pSRB->TargetStatus = 0;
1940 
1941 			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1942 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1943 		} else if (status == SCSI_STATUS_BUSY) {
1944 #ifdef AMD_DEBUG0
1945 			printf("DC390: target busy at %s %d\n",
1946 			       __FILE__, __LINE__);
1947 #endif
1948 			pcsio->scsi_status = SCSI_STATUS_BUSY;
1949 			pccb->ccb_h.status = CAM_SCSI_BUSY;
1950 		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1951 #ifdef AMD_DEBUG0
1952 			printf("DC390: target reserved at %s %d\n",
1953 			       __FILE__, __LINE__);
1954 #endif
1955 			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1956 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1957 		} else {
1958 			pSRB->AdaptStatus = 0;
1959 #ifdef AMD_DEBUG0
1960 			printf("DC390: driver stuffup at %s %d\n",
1961 			       __FILE__, __LINE__);
1962 #endif
1963 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1964 		}
1965 	} else {
1966 		status = pSRB->AdaptStatus;
1967 		if (status & H_OVER_UNDER_RUN) {
1968 			pSRB->TargetStatus = 0;
1969 
1970 			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
1971 		} else if (pSRB->SRBStatus & PARITY_ERROR) {
1972 #ifdef AMD_DEBUG0
1973 			printf("DC390: driver stuffup %s %d\n",
1974 			       __FILE__, __LINE__);
1975 #endif
1976 			/* Driver failed to perform operation	  */
1977 			pccb->ccb_h.status = CAM_UNCOR_PARITY;
1978 		} else {	/* No error */
1979 			pSRB->AdaptStatus = 0;
1980 			pSRB->TargetStatus = 0;
1981 			pcsio->resid = 0;
1982 			/* there is no error, (sense is invalid)  */
1983 		}
1984 	}
1985 ckc_e:
1986 	intflag = splcam();
1987 	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1988 		/* CAM request not complete => freeze the device queue */
1989 		xpt_freeze_devq(pccb->ccb_h.path, 1);
1990 		pccb->ccb_h.status |= CAM_DEV_QFRZN;
1991 	}
1992 	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1993 	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1994 	amdrunwaiting(amd);
1995 	splx(intflag);
1996 	xpt_done(pccb);
1997 
1998 }
1999 
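/*
 * Reset the SCSI bus: flag that the reset originated with us so the
 * subsequent reset interrupt is recognized, idle the DMA engine, and
 * issue the chip's reset-bus command.
 */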
2000 static void
2001 amd_ResetSCSIBus(struct amd_softc * amd)
2002 {
2003 	int     intflag;
2004 
2005 	intflag = splcam();
2006 	amd->ACBFlag |= RESET_DEV;
2007 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2008 	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2009 	splx(intflag);
2010 	return;
2011 }
2012 
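/*
 * A SCSI bus reset was detected.  Wait roughly a second for the bus
 * to settle, then either note completion of a reset we initiated or,
 * for a reset issued by another initiator, restore default transfer
 * parameters and fail back all running and waiting requests with
 * CAM_SCSI_BUS_RESET.
 */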
2013 static void
2014 amd_ScsiRstDetect(struct amd_softc * amd)
2015 {
2016 	int     intflag;
2017 	u_int32_t   wlval;
2018 
2019 #ifdef AMD_DEBUG0
2020 	printf("amd_ScsiRstDetect\n");
2021 #endif
2022 
2023 	wlval = 1000;
2024 	while (--wlval) {	/* delay 1 sec */
2025 		DELAY(1000);
2026 	}
2027 	intflag = splcam();
2028 
2029 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2030 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2031 
2032 	if (amd->ACBFlag & RESET_DEV) {
2033 		amd->ACBFlag |= RESET_DONE;
2034 	} else {
2035 		amd->ACBFlag |= RESET_DETECT;
2036 		ResetDevParam(amd);
2037 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2038 				 AMD_TAG_WILDCARD, &amd->running_srbs,
2039 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2040 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2041 				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2042 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2043 		amd->active_srb = NULL;
2044 		amd->ACBFlag = 0;
2045 		amdrunwaiting(amd);
2046 	}
2047 	splx(intflag);
2048 	return;
2049 }
2050 
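/*
 * Issue an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION.  The original CDB and transfer state are stashed in
 * Segment0/Segment1 so SRBdone() can restore them, and the SRB's
 * embedded S/G element is pointed at the CCB's sense buffer.  Note
 * that the 32- and 16-bit stores that build the CDB assume a
 * little-endian host.
 */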
2051 static void
2052 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2053 {
2054 	union ccb *pccb;
2055 	struct ccb_scsiio *pcsio;
2056 
2057 	pccb = pSRB->pccb;
2058 	pcsio = &pccb->csio;
2059 
2060 	pSRB->SRBFlag |= AUTO_REQSENSE;
2061 	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2062 	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2063 	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2064 	pSRB->Segment1[1] = pSRB->TotalXferredLen;
2065 
2066 	pSRB->AdaptStatus = 0;
2067 	pSRB->TargetStatus = 0;
2068 
2069 	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2070 	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2071 
2072 	pSRB->pSGlist = &pSRB->Segmentx;
2073 	pSRB->SGcount = 1;
2074 	pSRB->SGIndex = 0;
2075 
2076 	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2077 	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2078 	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2079 	pSRB->ScsiCmdLen = 6;
2080 
2081 	pSRB->TotalXferredLen = 0;
2082 	pSRB->SGToBeXferLen = 0;
2083 	if (amdstart(amd, pSRB) != 0) {
2084 		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2085 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2086 	}
2087 }
2088 
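/*
 * The chip rejected the last command sequence.  If we were in the
 * middle of a selection or message-out, clear the FIFO so the next
 * sequence starts clean.
 */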
2089 static void
2090 amd_InvalidCmd(struct amd_softc * amd)
2091 {
2092 	struct amd_srb *srb;
2093 
2094 	srb = amd->active_srb;
2095 	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2096 		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2097 }
2098 
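/*
 * Assign a tag number to each SRB in the softc's array and place
 * them all on the free list.
 */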
2099 void
2100 amd_linkSRB(struct amd_softc *amd)
2101 {
2102 	u_int16_t  count, i;
2103 	struct amd_srb *psrb;
2104 
2105 	count = amd->SRBCount;
2106 
2107 	for (i = 0; i < count; i++) {
2108 		psrb = (struct amd_srb *)&amd->SRB_array[i];
2109 		psrb->TagNumber = i;
2110 		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2111 	}
2112 }
2113 
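/*
 * Enable or disable the serial EEPROM's chip select.  The EEPROM is
 * apparently bit-banged through vendor-specific PCI config registers:
 * *regval is left holding the register offset (0xc0 while enabled,
 * 0x80 once disabled) that the other EEPROM routines write through.
 */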
2114 void
2115 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2116 {
2117 	if (mode == ENABLE_CE) {
2118 		*regval = 0xc0;
2119 	} else {
2120 		*regval = 0x80;
2121 	}
2122 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2123 	if (mode == DISABLE_CE) {
2124 		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2125 	}
2126 	DELAY(160);
2127 }
2128 
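/*
 * Clock one data bit (Carry) out to the EEPROM's DI line; 0x40
 * appears to carry the data bit and 0x80 the clock, which is pulsed
 * high and then dropped again.
 */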
2129 void
2130 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2131 {
2132 	u_int bval;
2133 
2134 	bval = 0;
2135 	if (Carry) {
2136 		bval = 0x40;
2137 		*regval = 0x80;
2138 		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2139 	}
2140 	DELAY(160);
2141 	bval |= 0x80;
2142 	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2143 	DELAY(160);
2144 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2145 	DELAY(160);
2146 }
2147 
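/*
 * Pulse the EEPROM clock and sample one data bit from the DO line,
 * returning it as 0 or 1.
 */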
2148 static int
2149 amd_EEpromInDO(struct amd_softc *amd)
2150 {
2151 	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2152 	DELAY(160);
2153 	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2154 	DELAY(160);
2155 	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2156 		return (1);
2157 	return (0);
2158 }
2159 
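/*
 * Shift in one 16-bit EEPROM word, MSB first.
 */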
2160 static u_int16_t
2161 EEpromGetData1(struct amd_softc *amd)
2162 {
2163 	u_int	  i;
2164 	u_int	  carryFlag;
2165 	u_int16_t wval;
2166 
2167 	wval = 0;
2168 	for (i = 0; i < 16; i++) {
2169 		wval <<= 1;
2170 		carryFlag = amd_EEpromInDO(amd);
2171 		wval |= carryFlag;
2172 	}
2173 	return (wval);
2174 }
2175 
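/*
 * Send a command to the EEPROM: a start bit followed by the eight
 * command/address bits of EEpromCmd, MSB first.
 */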
2176 static void
2177 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2178 {
2179 	u_int i, j;
2180 	int carryFlag;
2181 
2182 	carryFlag = 1;
2183 	j = 0x80;
2184 	for (i = 0; i < 9; i++) {
2185 		amd_EEpromOutDI(amd, regval, carryFlag);
2186 		carryFlag = (EEpromCmd & j) ? 1 : 0;
2187 		j >>= 1;
2188 	}
2189 }
2190 
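/*
 * Read all 64 16-bit words of the serial EEPROM into eepromBuf,
 * raising and dropping chip enable around each word and bumping the
 * address in the read command as we go.
 */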
2191 static void
2192 amd_ReadEEprom(struct amd_softc *amd)
2193 {
2194 	int	   regval;
2195 	u_int	   i;
2196 	u_int16_t *ptr;
2197 	u_int8_t   cmd;
2198 
2199 	ptr = (u_int16_t *)&amd->eepromBuf[0];
2200 	cmd = EEPROM_READ;
2201 	for (i = 0; i < 0x40; i++) {
2202 		amd_EnDisableCE(amd, ENABLE_CE, &regval);
2203 		amd_Prepare(amd, &regval, cmd);
2204 		*ptr = EEpromGetData1(amd);
2205 		ptr++;
2206 		cmd++;
2207 		amd_EnDisableCE(amd, DISABLE_CE, &regval);
2208 	}
2209 }
2210 
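/*
 * Populate eepromBuf with conservative defaults for use when the
 * SEEPROM contents cannot be trusted: tagged queuing, disconnection,
 * sync negotiation, and parity checking for every target, host ID 7.
 */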
2211 static void
2212 amd_load_defaults(struct amd_softc *amd)
2213 {
2214 	int target;
2215 
2216 	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2217 	for (target = 0; target < MAX_SCSI_ID; target++)
2218 		amd->eepromBuf[target << 2] =
2219 		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2220 	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2221 	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2222 	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2223 }
2224 
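/*
 * Read the SEEPROM and verify its 16-bit-word sum against
 * EE_CHECKSUM; fall back to the built-in defaults when the check
 * fails.
 */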
2225 static void
2226 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2227 {
2228 	u_int16_t  wval, *ptr;
2229 	u_int8_t   i;
2230 
2231 	amd_ReadEEprom(amd);
2232 	wval = 0;
2233 	ptr = (u_int16_t *) & amd->eepromBuf[0];
2234 	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2235 		wval += *ptr;
2236 
2237 	if (wval != EE_CHECKSUM) {
2238 		if (bootverbose)
2239 			printf("amd%d: SEEPROM data unavailable.  "
2240 			       "Using default device parameters.\n",
2241 			       amd->unit);
2242 		amd_load_defaults(amd);
2243 	}
2244 }
2245 
2246 /*
2247  **********************************************************************
2248  * Function      : static int amd_init (device_t dev)
2249  * Purpose       : initialize the internal structures for a given SCSI host
2250  * Inputs        : dev - the host adapter's bus device
2251  **********************************************************************
2252  */
2253 static int
2254 amd_init(device_t dev)
2255 {
2256 	struct amd_softc *amd = device_get_softc(dev);
2257 	struct resource	*iores;
2258 	int	i, rid;
2259 	u_int	bval;
2260 
2261 	rid = PCI_BASE_ADDR0;
2262 	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2263 				   RF_ACTIVE);
2264 	if (iores == NULL) {
2265 		if (bootverbose)
2266 			printf("amd_init: bus_alloc_resource failure!\n");
2267 		return ENXIO;
2268 	}
2269 	amd->tag = rman_get_bustag(iores);
2270 	amd->bsh = rman_get_bushandle(iores);
2271 
2272 	/* DMA tag for mapping buffers into device visible space. */
2273 	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2274 			       /*boundary*/0,
2275 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2276 			       /*highaddr*/BUS_SPACE_MAXADDR,
2277 			       /*filter*/NULL, /*filterarg*/NULL,
2278 			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2279 			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2280 			       /*flags*/BUS_DMA_ALLOCNOW,
2281 			       &amd->buffer_dmat) != 0) {
2282 		if (bootverbose)
2283 			printf("amd_init: bus_dma_tag_create failure!\n");
2284 		return ENXIO;
2285 	}
2286 	TAILQ_INIT(&amd->free_srbs);
2287 	TAILQ_INIT(&amd->running_srbs);
2288 	TAILQ_INIT(&amd->waiting_srbs);
2289 	amd->last_phase = SCSI_BUS_FREE;
2290 	amd->dev = dev;
2291 	amd->unit = device_get_unit(dev);
2292 	amd->SRBCount = MAX_SRB_CNT;
2293 	amd->status = 0;
2294 	amd_load_eeprom_or_defaults(amd);
2295 	amd->max_id = 7;
2296 	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2297 		amd->max_lun = 7;
2298 	} else {
2299 		amd->max_lun = 0;
2300 	}
2301 	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2302 	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2303 	amd->AdaptSCSILUN = 0;
2304 	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2305 	amd->ACBFlag = 0;
2306 	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2307 	amd_linkSRB(amd);
2308 	for (i = 0; i <= amd->max_id; i++) {
2309 
2310 		if (amd->AdaptSCSIID != i) {
2311 			struct amd_target_info *tinfo;
2312 			PEEprom prom;
2313 
2314 			tinfo = &amd->tinfo[i];
2315 			prom = (PEEprom)&amd->eepromBuf[i << 2];
2316 			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2317 				tinfo->disc_tag |= AMD_USR_DISCENB;
2318 				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2319 					tinfo->disc_tag |= AMD_USR_TAGENB;
2320 			}
2321 			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2322 				tinfo->user.period =
2323 				    eeprom_period[prom->EE_SPEED];
2324 				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2325 			}
2326 			tinfo->CtrlR1 = amd->AdaptSCSIID;
2327 			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2328 				tinfo->CtrlR1 |= PARITY_ERR_REPO;
2329 			tinfo->CtrlR3 = FAST_CLK;
2330 			tinfo->CtrlR4 = EATER_25NS;
2331 			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2332 				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2333 		}
2334 	}
2335 	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2336 	/* Conversion factor = 0 , 40MHz clock */
2337 	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2338 	/* NOP cmd - clear command register */
2339 	amd_write8(amd, SCSICMDREG, NOP_CMD);
2340 	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2341 	amd_write8(amd, CNTLREG3, FAST_CLK);
2342 	bval = EATER_25NS;
2343 	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2344 		bval |= NEGATE_REQACKDATA;
2345 	}
2346 	amd_write8(amd, CNTLREG4, bval);
2347 
2348 	/* Disable SCSI bus reset interrupt */
2349 	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2350 
2351 	return 0;
2352 }
2353 
2354 /*
2355  * attach and init a host adapter
2356  */
2357 static int
2358 amd_attach(device_t dev)
2359 {
2360 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
2361 	u_int8_t	intstat;
2362 	struct amd_softc *amd = device_get_softc(dev);
2363 	int		unit = device_get_unit(dev);
2364 	int		rid;
2365 	void		*ih;
2366 	struct resource	*irqres;
2367 
2368 	if (amd_init(dev)) {
2369 		if (bootverbose)
2370 			printf("amd_attach: amd_init failure!\n");
2371 		return ENXIO;
2372 	}
2373 
2374 	/* Reset Pending INT */
2375 	intstat = amd_read8(amd, INTSTATREG);
2376 
2377 	/* After setting up the adapter, map our interrupt */
2378 	rid = 0;
2379 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2380 				    RF_SHAREABLE | RF_ACTIVE);
2381 	if (irqres == NULL ||
2382 	    bus_setup_intr(dev, irqres, INTR_TYPE_CAM, amd_intr, amd, &ih)) {
2383 		if (bootverbose)
2384 			printf("amd%d: unable to register interrupt handler!\n",
2385 			       unit);
2386 		return ENXIO;
2387 	}
2388 
2389 	/*
2390 	 * Now let the CAM generic SCSI layer find the SCSI devices on
2391 	 * the bus and start the queues.  The SIM's device queue is
2392 	 * sized by MAX_START_JOB, which bounds the number of
2393 	 * simultaneous transactions.
2394 	 */
2395 	devq = cam_simq_alloc(MAX_START_JOB);
2396 	if (devq == NULL) {
2397 		if (bootverbose)
2398 			printf("amd_attach: cam_simq_alloc failure!\n");
2399 		return ENXIO;
2400 	}
2401 
2402 	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2403 				  amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2404 				  devq);
2405 	if (amd->psim == NULL) {
2406 		cam_simq_free(devq);
2407 		if (bootverbose)
2408 			printf("amd_attach: cam_sim_alloc failure!\n");
2409 		return ENXIO;
2410 	}
2411 
2412 	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2413 		cam_sim_free(amd->psim, /*free_devq*/TRUE);
2414 		if (bootverbose)
2415 			printf("amd_attach: xpt_bus_register failure!\n");
2416 		return ENXIO;
2417 	}
2418 
2419 	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2420 			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2421 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2422 		xpt_bus_deregister(cam_sim_path(amd->psim));
2423 		cam_sim_free(amd->psim, /* free_simq */ TRUE);
2424 		if (bootverbose)
2425 			printf("amd_attach: xpt_create_path failure!\n");
2426 		return ENXIO;
2427 	}
2428 
2429 	return 0;
2430 }
2431 
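/*
 * Match the AMD53C974 by PCI device ID.
 */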
2432 static int
2433 amd_probe(device_t dev)
2434 {
2435 	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2436 		device_set_desc(dev,
2437 			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2438 		return 0;
2439 	}
2440 	return ENXIO;
2441 }
2442 
2443 static device_method_t amd_methods[] = {
2444 	/* Device interface */
2445 	DEVMETHOD(device_probe,		amd_probe),
2446 	DEVMETHOD(device_attach,	amd_attach),
2447 	{ 0, 0 }
2448 };
2449 
2450 static driver_t amd_driver = {
2451 	"amd", amd_methods, sizeof(struct amd_softc)
2452 };
2453 
2454 static devclass_t amd_devclass;
2455 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
2456