xref: /dragonfly/sys/dev/disk/amd/amd.c (revision c69bf40f)
/*
 *********************************************************************
 *	FILE NAME  : amd.c
 *	     BY    : C.L. Huang 	(ching@tekram.com.tw)
 *		     Erich Chen     (erich@tekram.com.tw)
 *	Description: Device Driver for the amd53c974 PCI Bus Master
 *		     SCSI Host adapter found on cards such as
 *		     the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 */

/*
 *********************************************************************
 *	HISTORY:
 *
 *	REV#	DATE	NAME    	DESCRIPTION
 *	1.00  07/02/96	CLH	        First release for RELEASE-2.1.0
 *	1.01  08/20/96	CLH	        Update for RELEASE-2.1.5
 *	1.02  11/06/96	CLH	        Fixed more than 1 LUN scanning
 *	1.03  12/20/96	CLH	        Modify to support 2.2-ALPHA
 *	1.04  12/26/97	CLH	        Modify to support RELEASE-2.2.5
 *	1.05  01/01/99  ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */

/* #define AMD_DEBUG0           */
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974 	0x20201022ul
#define PCI_BASE_ADDR0	    		0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}

/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void	amd_action(struct cam_sim *sim, union ccb *ccb);
static void	amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t   eeprom_period[] = {
	 25,	/* 10.0MHz */
	 32,	/*  8.0MHz */
	 38,	/*  6.6MHz */
	 44,	/*  5.7MHz */
	 50,	/*  5.0MHz */
	 63,	/*  4.0MHz */
	 83,	/*  3.0MHz */
	125	/*  2.0MHz */
};

/*
 * Table mapping chip clock settings to the SCSI-specified
 * synchronous transfer period parameters.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/* 8.0 */
	38,	/* 6.6 */
	44,	/* 5.7 */
	50,	/* 5.0 */
	57,	/* 4.4 */
	63,	/* 4.0 */
	70,	/* 3.6 */
	76,	/* 3.3 */
	83	/* 3.0 */
};

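/*
 * Pop a free SRB from the adapter's free list, or return NULL if none
 * is available.  The list is shared with the interrupt path, so the
 * removal is done inside a critical section.
 */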
static __inline struct amd_srb *
amdgetsrb(struct amd_softc * amd)
{
	struct amd_srb *    pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}

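/*
 * Feed the CDB for this transaction to the chip's SCSI FIFO.  For an
 * automatic REQUEST SENSE a sense CDB is built on the stack; otherwise
 * the CDB already stored in the SRB is used.
 */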
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd) {
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}

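/*
 * bus_dmamap_load() callback.  Copy the DMA segments into the SRB's
 * scatter/gather list, initialize the SRB state, and queue the SRB for
 * execution unless the CCB was aborted while the mapping was pending.
 */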
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 amd_srb *srb;
	union	 ccb *ccb;
	struct	 amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);

	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	crit_enter();

	/*
	 * Last chance to check whether this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    amdtimeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}

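/*
 * CAM SIM action entry point.  SCSI I/O requests are mapped onto an
 * SRB and queued; path inquiries, bus resets, geometry calculations
 * and transfer-setting changes are handled inline.
 */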
static void
amd_action(struct cam_sim * psim, union ccb * pccb)
{
	struct amd_softc *    amd;
	u_int   target_id;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *    pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int     i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info = &amd->tinfo[target_id];
		struct amd_transinfo *tinfo;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		crit_enter();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->current;
		} else {
			/* default (user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->user;
		}
		spi->sync_period = tinfo->period;
		spi->sync_offset = tinfo->offset;
		crit_exit();

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info;
		u_int  update_type = 0;
		int    last_entry;

		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;
		if (IS_CURRENT_SETTINGS(cts)) {
			update_type |= AMD_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts)) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			break;
		}

		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_offset = targ_info->goal.offset;
			else
				spi->sync_offset = targ_info->user.offset;
		}

		if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
			spi->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_period = targ_info->goal.period;
			else
				spi->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((spi->sync_period != 0)
		 && (spi->sync_period < tinfo_sync_period[0]))
			spi->sync_period = tinfo_sync_period[0];
		if (spi->sync_period > tinfo_sync_period[last_entry])
			spi->sync_period = 0;
		if (spi->sync_offset == 0)
			spi->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = spi->sync_period;
			targ_info->user.offset = spi->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = spi->sync_period;
			targ_info->goal.offset = spi->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int     extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}

static void
amd_poll(struct cam_sim * psim)
{
	amd_intr(cam_sim_softc(psim));
}

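/*
 * Translate a transfer offset within the current S/G element back into
 * a kernel virtual address by walking the lengths of the already
 * completed S/G entries of the CCB's data buffer.  Used to deposit a
 * residual byte left in the SCSI FIFO.
 */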
static u_int8_t *
phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
{
	intptr_t     dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t   i;
	struct amd_sg *    pseg;

	dataPtr = 0;
	pcsio = &pSRB->pccb->csio;

	dataPtr = (intptr_t) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}

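/*
 * After a bus reset, revert every target except ourselves to
 * asynchronous transfers; sync parameters must be renegotiated.
 */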
static void
ResetDevParam(struct amd_softc * amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

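/*
 * Complete, with the given CAM status, every SRB on the queue matching
 * the target/lun/tag tuple.  The CAM_*_WILDCARD and AMD_TAG_WILDCARD
 * values match anything, so a whole queue can be flushed at once.
 */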
static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

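/*
 * Record new synchronous transfer parameters for a target.  Depending
 * on "type" this updates the current, goal, or user settings; active
 * updates are also written to the chip and announced to the XPT.
 */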
static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings neg;
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;
			memset(&neg, 0, sizeof (neg));
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			spi->sync_period = period;
			spi->sync_offset = offset;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				  | CTS_SPI_VALID_SYNC_OFFSET;
			xpt_async(AC_TRANSFER_NEG, path, &neg);
			xpt_free_path(path);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!");
}


#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : amd - softc of the adapter to reset
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t   bval;
	u_int16_t  i;

#ifdef AMD_DEBUG0
	kprintf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif

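/*
 * Attempt to select a target and start an SRB.  Loads the identify
 * (and possibly tag or SDTR) messages into the FIFO and issues the
 * appropriate select-with-ATN command.  Returns 0 on success, or 1 if
 * the chip raised an interrupt first and the caller must retry.
 */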
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}

/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: amd NULL return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: scsistat = NULL, return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}

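/*
 * Post-process a data-out phase: on S/G element completion advance to
 * the next element; otherwise compute the residual from the FIFO and
 * transfer counters and adjust the current element accordingly.
 */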
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}

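/*
 * Post-process a data-in phase.  Mirrors amd_DataOutPhase0, but a
 * phase change may leave bytes in the FIFO; these are flushed with a
 * DMA blast command and any final residual byte is copied out by hand.
 */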
static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}

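/*
 * Incrementally parse the message-in buffer, one byte per call.
 * Returns TRUE once a complete message has been consumed; unsupported
 * messages are answered with MESSAGE REJECT.
 */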
static int
amdparsemsg(struct amd_softc *amd)
{
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			kprintf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				kprintf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}

static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}

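/*
 * Append an SDTR message (extended message header plus period and
 * offset) to the outgoing message buffer.
 */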
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}

static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct	amd_srb *srb;
	struct	amd_target_info *targ_info;
	int	response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		kprintf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct  ccb_trans_settings neg;
		struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;

		kprintf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		amdsettags(amd, amd->cur_target, FALSE);
		memset(&neg, 0, sizeof (neg));
		scsi->valid = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				    | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		kprintf("amd%d:%d: Message reject received -- ignored\n",
		       amd->unit, amd->cur_target);
	}
	return (response);
}

#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			goto min6;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			goto min6;
		} else {
			goto min6;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK; /* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif

static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

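/*
 * Program the SCSI and DMA engines for the next data transfer chunk.
 * If the S/G list is exhausted the transfer pads instead, and an
 * over/under-run is flagged on the SRB.
 */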
static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *    psgl;
	u_int32_t   lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}

static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}

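/*
 * Handle a bus-free condition.  Depending on the state of the active
 * SRB this is a completion, a disconnection to be resumed later, a
 * selection timeout, or an invalid reselection.
 */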
static void
amd_Disconnect(struct amd_softc * amd)
{
	struct	amd_srb *srb;
	int	target;
	int	lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and complete this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
			/* XXX What about "done'ing" these srbs??? */
			if (pSRB->pSRBDCB == pDCB) {
				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
			}
		}
		amdrunwaiting(amd);
#endif
	} else {
		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
			goto disc1;
		} else if (srb->SRBState & SRB_DISCONNECT) {
			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
				amd->untagged_srbs[target][lun] = srb;
			amdrunwaiting(amd);
		} else if (srb->SRBState & SRB_COMPLETED) {
	disc1:
			srb->SRBState = SRB_FREE;
			SRBdone(amd, srb);
		}
	}
	return;
}

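/*
 * Handle reselection by a previously disconnected target.  Decode the
 * target and lun from the FIFO, reload that target's transfer settings
 * into the chip, and reinstate the SRB that was waiting for it.
 */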
1808 static void
1809 amd_Reselect(struct amd_softc *amd)
1810 {
1811 	struct amd_target_info *tinfo;
1812 	u_int16_t disc_count;
1813 
1814 	amd_clear_msg_state(amd);
1815 	if (amd->active_srb != NULL) {
1816 		/* Requeue the SRB for our attempted Selection */
1817 		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1818 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1819 		amd->active_srb = NULL;
1820 	}
1821 	/* get ID */
1822 	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1823 	amd->cur_target ^= amd->HostID_Bit;
1824 	amd->cur_target = ffs(amd->cur_target) - 1;
1825 	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1826 	tinfo = &amd->tinfo[amd->cur_target];
1827 	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1828 	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1829 	if (disc_count == 0) {
1830 		kprintf("amd%d: Unexpected reselection for target %d, "
1831 		       "Issuing Abort\n", amd->unit, amd->cur_target);
1832 		amd->msgout_buf[0] = MSG_ABORT;
1833 		amd->msgout_len = 1;
1834 		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1835 	}
1836 	if (amd->active_srb != NULL) {
1837 		amd->disc_count[amd->cur_target][amd->cur_lun]--;
1838 		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1839 	}
1840 
1841 	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1842 	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1843 	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1844 	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1845 	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1846 	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1847 	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1848 	amd->last_phase = SCSI_NOP0;
1849 }
1850 
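/*
 * Complete an SRB: sync and unload its data buffer DMA map, translate
 * the target/adapter status into a CAM status, kick off an automatic
 * REQUEST SENSE when needed, and hand the CCB back to CAM.
 */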
1851 static void
1852 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1853 {
1854 	u_int8_t   bval, i, status;
1855 	union ccb *pccb;
1856 	struct ccb_scsiio *pcsio;
1857 	struct amd_sg *ptr2;
1858 	u_int32_t   swlval;
1859 
1860 	pccb = pSRB->pccb;
1861 	pcsio = &pccb->csio;
1862 
1863 	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1864 		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1865 
1866 	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1867 		bus_dmasync_op_t op;
1868 
1869 		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1870 			op = BUS_DMASYNC_POSTREAD;
1871 		else
1872 			op = BUS_DMASYNC_POSTWRITE;
1873 		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1874 		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1875 	}
1876 
1877 	status = pSRB->TargetStatus;
1878 	pccb->ccb_h.status = CAM_REQ_CMP;
1879 	if (pSRB->SRBFlag & AUTO_REQSENSE) {
1880 		pSRB->SRBFlag &= ~AUTO_REQSENSE;
1881 		pSRB->AdaptStatus = 0;
1882 		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1883 
1884 		if (status == SCSI_STATUS_CHECK_COND) {
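			/*
			 * The REQUEST SENSE itself returned CHECK
			 * CONDITION.  XXX CAM_AUTOSENSE_FAIL would seem
			 * more accurate here than a selection timeout.
			 */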
1885 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1886 			goto ckc_e;
1887 		}
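		/*
		 * Restore the first four bytes of the original CDB saved
		 * by RequestSense().
		 */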
1888 		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1889 
1890 		pcsio->sense_resid = pcsio->sense_len
1891 				   - pSRB->TotalXferredLen;
1892 		pSRB->TotalXferredLen = pSRB->Segment1[1];
1893 		if (pSRB->TotalXferredLen) {
1894 			/* Residual of the original command's data transfer. */
1895 			pcsio->resid = pcsio->dxfer_len
1896 				     - pSRB->TotalXferredLen;
1897 			/* The resid field contains valid data	 */
1898 			/* Flush resid bytes on complete        */
1899 		} else {
1900 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1901 		}
1902 		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
1903 		goto ckc_e;
1904 	}
1905 	if (status) {
1906 		if (status == SCSI_STATUS_CHECK_COND) {
1907 
1908 			if ((pSRB->SGIndex < pSRB->SGcount)
1909 			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1910 				bval = pSRB->SGcount;
1911 				swlval = pSRB->SGToBeXferLen;
1912 				ptr2 = pSRB->pSGlist;
1913 				ptr2++;
1914 				for (i = pSRB->SGIndex + 1; i < bval; i++) {
1915 					swlval += ptr2->SGXLen;
1916 					ptr2++;
1917 				}
1918 				/* Untransferred remainder of the S/G list. */
1919 				pcsio->resid = swlval;
1920 
1921 #ifdef	AMD_DEBUG0
1922 				kprintf("XferredLen=%8x,NotYetXferLen=%8x,",
1923 					pSRB->TotalXferredLen, swlval);
1924 #endif
1925 			}
1926 			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1927 #ifdef	AMD_DEBUG0
1928 				kprintf("RequestSense..................\n");
1929 #endif
1930 				RequestSense(amd, pSRB);
1931 				return;
1932 			}
1933 			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1934 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1935 			goto ckc_e;
1936 		} else if (status == SCSI_STATUS_QUEUE_FULL) {
1937 			pSRB->AdaptStatus = 0;
1938 			pSRB->TargetStatus = 0;
1939 			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1940 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1941 			goto ckc_e;
1942 		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1943 			pSRB->AdaptStatus = H_SEL_TIMEOUT;
1944 			pSRB->TargetStatus = 0;
1945 
1946 			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1947 			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1948 		} else if (status == SCSI_STATUS_BUSY) {
1949 #ifdef AMD_DEBUG0
1950 			kprintf("DC390: target busy at %s %d\n",
1951 			       __FILE__, __LINE__);
1952 #endif
1953 			pcsio->scsi_status = SCSI_STATUS_BUSY;
1954 			pccb->ccb_h.status = CAM_SCSI_BUSY;
1955 		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1956 #ifdef AMD_DEBUG0
1957 			kprintf("DC390: target reserved at %s %d\n",
1958 			       __FILE__, __LINE__);
1959 #endif
1960 			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1961 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1962 		} else {
1963 			pSRB->AdaptStatus = 0;
1964 #ifdef AMD_DEBUG0
1965 			kprintf("DC390: driver stuffup at %s %d\n",
1966 			       __FILE__, __LINE__);
1967 #endif
1968 			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1969 		}
1970 	} else {
1971 		status = pSRB->AdaptStatus;
1972 		if (status & H_OVER_UNDER_RUN) {
1973 			pSRB->TargetStatus = 0;
1974 
1975 			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
1976 		} else if (pSRB->SRBStatus & PARITY_ERROR) {
1977 #ifdef AMD_DEBUG0
1978 			kprintf("DC390: driver stuffup %s %d\n",
1979 			       __FILE__, __LINE__);
1980 #endif
1981 			/* Driver failed to perform operation	  */
1982 			pccb->ccb_h.status = CAM_UNCOR_PARITY;
1983 		} else {	/* No error */
1984 			pSRB->AdaptStatus = 0;
1985 			pSRB->TargetStatus = 0;
1986 			pcsio->resid = 0;
1987 			/* there is no error, (sense is invalid)  */
1988 		}
1989 	}
1990 ckc_e:
1991 	crit_enter();
1992 	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1993 		/* The request did not complete cleanly; freeze the device queue. */
1994 		xpt_freeze_devq(pccb->ccb_h.path, 1);
1995 		pccb->ccb_h.status |= CAM_DEV_QFRZN;
1996 	}
1997 	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1998 	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1999 	amdrunwaiting(amd);
2000 	crit_exit();
2001 	xpt_done(pccb);
2002 
2003 }
2004 
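/*
 * Force a SCSI bus reset.  The resulting reset interrupt is handled
 * by amd_ScsiRstDetect(), which sees RESET_DEV set and records the
 * reset as our own.
 */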
2005 static void
2006 amd_ResetSCSIBus(struct amd_softc * amd)
2007 {
2008 	crit_enter();
2009 	amd->ACBFlag |= RESET_DEV;
2010 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2011 	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2012 	crit_exit();
2013 	return;
2014 }
2015 
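/*
 * A SCSI bus reset was detected.  If we initiated it, just record
 * completion; otherwise reset our transfer parameters and return all
 * running and waiting SRBs to CAM with a bus-reset status.
 */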
2016 static void
2017 amd_ScsiRstDetect(struct amd_softc * amd)
2018 {
2019 	u_int32_t   wlval;
2020 
2021 #ifdef AMD_DEBUG0
2022 	kprintf("amd_ScsiRstDetect\n");
2023 #endif
2024 
2025 	wlval = 1000;
2026 	while (--wlval) {	/* delay 1 sec */
2027 		DELAY(1000);
2028 	}
2029 	crit_enter();
2030 
2031 	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2032 	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2033 
2034 	if (amd->ACBFlag & RESET_DEV) {
2035 		amd->ACBFlag |= RESET_DONE;
2036 	} else {
2037 		amd->ACBFlag |= RESET_DETECT;
2038 		ResetDevParam(amd);
2039 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2040 				 AMD_TAG_WILDCARD, &amd->running_srbs,
2041 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2042 		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2043 				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2044 				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2045 		amd->active_srb = NULL;
2046 		amd->ACBFlag = 0;
2047 		amdrunwaiting(amd);
2048 	}
2049 	crit_exit();
2050 	return;
2051 }
2052 
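/*
 * Issue an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION.  The original CDB and transfer counts are stashed in
 * Segment0/Segment1 so SRBdone() can restore them once the sense data
 * has been fetched, and the SRB is rewritten into a 6-byte REQUEST
 * SENSE targeting the CCB's sense buffer.
 */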
2053 static void
2054 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2055 {
2056 	union ccb *pccb;
2057 	struct ccb_scsiio *pcsio;
2058 
2059 	pccb = pSRB->pccb;
2060 	pcsio = &pccb->csio;
2061 
2062 	pSRB->SRBFlag |= AUTO_REQSENSE;
2063 	pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2064 	pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2065 	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2066 	pSRB->Segment1[1] = pSRB->TotalXferredLen;
2067 
2068 	pSRB->AdaptStatus = 0;
2069 	pSRB->TargetStatus = 0;
2070 
2071 	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2072 	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2073 
2074 	pSRB->pSGlist = &pSRB->Segmentx;
2075 	pSRB->SGcount = 1;
2076 	pSRB->SGIndex = 0;
2077 
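	/*
	 * Build the 6-byte REQUEST SENSE CDB in place.  The 32-bit store
	 * puts opcode 0x03 in CmdBlock[0] and zeroes bytes 1-3 (this,
	 * like the Segment0 save above, assumes little-endian byte
	 * order).
	 */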
2078 	*((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2079 	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2080 	*((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2081 	pSRB->ScsiCmdLen = 6;
2082 
2083 	pSRB->TotalXferredLen = 0;
2084 	pSRB->SGToBeXferLen = 0;
2085 	if (amdstart(amd, pSRB) != 0) {
2086 		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2087 		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2088 	}
2089 }
2090 
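/*
 * The chip flagged an invalid command.  If we were selecting or
 * sending a message, flush the FIFO.
 */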
2091 static void
2092 amd_InvalidCmd(struct amd_softc * amd)
2093 {
2094 	struct amd_srb *srb;
2095 
2096 	srb = amd->active_srb;
2097 	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2098 		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2099 }
2100 
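/*
 * Initialize the SRB array: assign each SRB its tag number, create
 * its DMA map, and place it on the free list.
 */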
2101 void
2102 amd_linkSRB(struct amd_softc *amd)
2103 {
2104 	u_int16_t count, i;
2105 	struct amd_srb *psrb;
2106 	int error;
2107 
2108 	count = amd->SRBCount;
2109 
2110 	for (i = 0; i < count; i++) {
2111 		psrb = (struct amd_srb *)&amd->SRB_array[i];
2112 		psrb->TagNumber = i;
2113 
2114 		/*
2115 		 * Create the dmamap.  This is no longer optional!
2116 		 */
2117 		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
2118 		if (error) {
2119 			device_printf(amd->dev, "Error %d creating buffer "
2120 					"dmamap!\n", error);
2121 			break;
2122 		}
2123 		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2124 	}
2125 }
2126 
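/*
 * The DC-390 gives access to its serial EEPROM (a 93C46-style part)
 * by bit-banging a register in PCI configuration space.  The helpers
 * below toggle the chip enable, clock command/address bits out on the
 * DI line, and sample result bits from the DO line.
 */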
2127 static void
2128 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2129 {
2130 	if (mode == ENABLE_CE) {
2131 		*regval = 0xc0;
2132 	} else {
2133 		*regval = 0x80;
2134 	}
2135 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2136 	if (mode == DISABLE_CE) {
2137 		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2138 	}
2139 	DELAY(160);
2140 }
2141 
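/*
 * Shift one bit (Carry) out to the EEPROM's DI line and pulse the
 * clock.
 */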
2142 static void
2143 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2144 {
2145 	u_int bval;
2146 
2147 	bval = 0;
2148 	if (Carry) {
2149 		bval = 0x40;
2150 		*regval = 0x80;
2151 		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2152 	}
2153 	DELAY(160);
2154 	bval |= 0x80;
2155 	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2156 	DELAY(160);
2157 	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2158 	DELAY(160);
2159 }
2160 
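/*
 * Clock the EEPROM and sample one data bit; the DO state is read
 * back through PCI config space, where 0x22 indicates a one bit.
 */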
2161 static int
2162 amd_EEpromInDO(struct amd_softc *amd)
2163 {
2164 	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2165 	DELAY(160);
2166 	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2167 	DELAY(160);
2168 	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2169 		return (1);
2170 	return (0);
2171 }
2172 
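/* Shift in one 16-bit EEPROM word, most significant bit first. */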
2173 static u_int16_t
2174 EEpromGetData1(struct amd_softc *amd)
2175 {
2176 	u_int	  i;
2177 	u_int	  carryFlag;
2178 	u_int16_t wval;
2179 
2180 	wval = 0;
2181 	for (i = 0; i < 16; i++) {
2182 		wval <<= 1;
2183 		carryFlag = amd_EEpromInDO(amd);
2184 		wval |= carryFlag;
2185 	}
2186 	return (wval);
2187 }
2188 
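/*
 * Send the start bit followed by the 8-bit command/address byte to
 * the EEPROM, most significant bit first.
 */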
2189 static void
2190 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2191 {
2192 	u_int i, j;
2193 	int carryFlag;
2194 
2195 	carryFlag = 1;
2196 	j = 0x80;
2197 	for (i = 0; i < 9; i++) {
2198 		amd_EEpromOutDI(amd, regval, carryFlag);
2199 		carryFlag = (EEpromCmd & j) ? 1 : 0;
2200 		j >>= 1;
2201 	}
2202 }
2203 
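/*
 * Read all 64 16-bit words of the EEPROM into eepromBuf, one
 * chip-enable/read-command cycle per word.
 */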
2204 static void
2205 amd_ReadEEprom(struct amd_softc *amd)
2206 {
2207 	int	   regval;
2208 	u_int	   i;
2209 	u_int16_t *ptr;
2210 	u_int8_t   cmd;
2211 
2212 	ptr = (u_int16_t *)&amd->eepromBuf[0];
2213 	cmd = EEPROM_READ;
2214 	for (i = 0; i < 0x40; i++) {
2215 		amd_EnDisableCE(amd, ENABLE_CE, &regval);
2216 		amd_Prepare(amd, &regval, cmd);
2217 		*ptr = EEpromGetData1(amd);
2218 		ptr++;
2219 		cmd++;
2220 		amd_EnDisableCE(amd, DISABLE_CE, &regval);
2221 	}
2222 }
2223 
2224 static void
2225 amd_load_defaults(struct amd_softc *amd)
2226 {
2227 	int target;
2228 
2229 	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2230 	for (target = 0; target < MAX_SCSI_ID; target++)
2231 		amd->eepromBuf[target << 2] =
2232 		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2233 	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2234 	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2235 	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2236 }
2237 
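/*
 * Read the SEEPROM and use its contents only if the 16-bit word sum
 * over the whole buffer matches EE_CHECKSUM; otherwise fall back to
 * the defaults above.
 */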
2238 static void
2239 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2240 {
2241 	u_int16_t  wval, *ptr;
2242 	u_int8_t   i;
2243 
2244 	amd_ReadEEprom(amd);
2245 	wval = 0;
2246 	ptr = (u_int16_t *) & amd->eepromBuf[0];
2247 	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2248 		wval += *ptr;
2249 
2250 	if (wval != EE_CHECKSUM) {
2251 		if (bootverbose)
2252 			kprintf("amd%d: SEEPROM data unavailable.  "
2253 			       "Using default device parameters.\n",
2254 			       amd->unit);
2255 		amd_load_defaults(amd);
2256 	}
2257 }
2258 
2259 /*
2260  **********************************************************************
2261  * Function      : static int amd_init (device_t dev)
2262  * Purpose       : initialize the internal structures for a given adapter
2263  * Inputs        : dev - this adapter's device_t handle
2264  **********************************************************************
2265  */
2266 static int
2267 amd_init(device_t dev)
2268 {
2269 	struct amd_softc *amd = device_get_softc(dev);
2270 	struct resource	*iores;
2271 	int	i, rid;
2272 	u_int	bval;
2273 
2274 	rid = PCI_BASE_ADDR0;
2275 	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2276 				   RF_ACTIVE);
2277 	if (iores == NULL) {
2278 		if (bootverbose)
2279 			kprintf("amd_init: bus_alloc_resource failure!\n");
2280 		return ENXIO;
2281 	}
2282 	amd->tag = rman_get_bustag(iores);
2283 	amd->bsh = rman_get_bushandle(iores);
2284 
2285 	/* DMA tag for mapping buffers into device visible space. */
2286 	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2287 			       /*boundary*/0,
2288 			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2289 			       /*highaddr*/BUS_SPACE_MAXADDR,
2290 			       /*filter*/NULL, /*filterarg*/NULL,
2291 			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2292 			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2293 			       /*flags*/BUS_DMA_ALLOCNOW,
2294 			       &amd->buffer_dmat) != 0) {
2295 		if (bootverbose)
2296 			kprintf("amd_init: bus_dma_tag_create failure!\n");
2297 		return ENXIO;
2298 	}
2299 	TAILQ_INIT(&amd->free_srbs);
2300 	TAILQ_INIT(&amd->running_srbs);
2301 	TAILQ_INIT(&amd->waiting_srbs);
2302 	amd->last_phase = SCSI_BUS_FREE;
2303 	amd->dev = dev;
2304 	amd->unit = device_get_unit(dev);
2305 	amd->SRBCount = MAX_SRB_CNT;
2306 	amd->status = 0;
2307 	amd_load_eeprom_or_defaults(amd);
2308 	amd->max_id = 7;
2309 	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2310 		amd->max_lun = 7;
2311 	} else {
2312 		amd->max_lun = 0;
2313 	}
2314 	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2315 	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2316 	amd->AdaptSCSILUN = 0;
2317 	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2318 	amd->ACBFlag = 0;
2319 	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2320 	amd_linkSRB(amd);
2321 	for (i = 0; i <= amd->max_id; i++) {
2322 
2323 		if (amd->AdaptSCSIID != i) {
2324 			struct amd_target_info *tinfo;
2325 			PEEprom prom;
2326 
2327 			tinfo = &amd->tinfo[i];
2328 			prom = (PEEprom)&amd->eepromBuf[i << 2];
2329 			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2330 				tinfo->disc_tag |= AMD_USR_DISCENB;
2331 				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2332 					tinfo->disc_tag |= AMD_USR_TAGENB;
2333 			}
2334 			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2335 				tinfo->user.period =
2336 				    eeprom_period[prom->EE_SPEED];
2337 				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2338 			}
2339 			tinfo->CtrlR1 = amd->AdaptSCSIID;
2340 			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2341 				tinfo->CtrlR1 |= PARITY_ERR_REPO;
2342 			tinfo->CtrlR3 = FAST_CLK;
2343 			tinfo->CtrlR4 = EATER_25NS;
2344 			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2345 				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2346 		}
2347 	}
2348 	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2349 	/* Conversion factor = 0, 40MHz clock */
2350 	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2351 	/* NOP cmd - clear command register */
2352 	amd_write8(amd, SCSICMDREG, NOP_CMD);
2353 	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2354 	amd_write8(amd, CNTLREG3, FAST_CLK);
2355 	bval = EATER_25NS;
2356 	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2357 		bval |= NEGATE_REQACKDATA;
2358 	}
2359 	amd_write8(amd, CNTLREG4, bval);
2360 
2361 	/* Disable SCSI bus reset interrupt */
2362 	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2363 
2364 	return 0;
2365 }
2366 
2367 /*
2368  * attach and init a host adapter
2369  */
2370 static int
2371 amd_attach(device_t dev)
2372 {
2373 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
2374 	u_int8_t	intstat;
2375 	struct amd_softc *amd = device_get_softc(dev);
2376 	int		unit = device_get_unit(dev);
2377 	int		rid;
2378 	void		*ih;
2379 	struct resource	*irqres;
2380 
2381 	if (amd_init(dev)) {
2382 		if (bootverbose)
2383 			kprintf("amd_attach: amd_init failure!\n");
2384 		return ENXIO;
2385 	}
2386 
2387 	/* Reset Pending INT */
2388 	intstat = amd_read8(amd, INTSTATREG);
2389 
2390 	/* After setting up the adapter, map our interrupt */
2391 	rid = 0;
2392 	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2393 				    RF_SHAREABLE | RF_ACTIVE);
2394 	if (irqres == NULL ||
2395 	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)
2396 	) {
2397 		if (bootverbose)
2398 			kprintf("amd%d: unable to register interrupt handler!\n",
2399 			       unit);
2400 		return ENXIO;
2401 	}
2402 
2403 	/*
2404 	 * Create the device queue for our SIM and register with the CAM
2405 	 * generic SCSI layer so it can find the SCSI devices on the bus.
2406 	 * (MAX_START_JOB - 1) bounds the number of simultaneous
2407 	 * transactions the SIM will accept.
2408 	 */
2409 	devq = cam_simq_alloc(MAX_START_JOB);
2410 	if (devq == NULL) {
2411 		if (bootverbose)
2412 			kprintf("amd_attach: cam_simq_alloc failure!\n");
2413 		return ENXIO;
2414 	}
2415 
2416 	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2417 				  amd, amd->unit, &sim_mplock, 1,
2418 				  MAX_TAGS_CMD_QUEUE, devq);
2419 	cam_simq_release(devq);
2420 	if (amd->psim == NULL) {
2421 		if (bootverbose)
2422 			kprintf("amd_attach: cam_sim_alloc failure!\n");
2423 		return ENXIO;
2424 	}
2425 
2426 	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2427 		cam_sim_free(amd->psim);
2428 		if (bootverbose)
2429 			kprintf("amd_attach: xpt_bus_register failure!\n");
2430 		return ENXIO;
2431 	}
2432 
2433 	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2434 			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2435 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2436 		xpt_bus_deregister(cam_sim_path(amd->psim));
2437 		cam_sim_free(amd->psim);
2438 		if (bootverbose)
2439 			kprintf("amd_attach: xpt_create_path failure!\n");
2440 		return ENXIO;
2441 	}
2442 
2443 	return 0;
2444 }
2445 
2446 static int
2447 amd_probe(device_t dev)
2448 {
2449 	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2450 		device_set_desc(dev,
2451 			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2452 		return 0;
2453 	}
2454 	return ENXIO;
2455 }
2456 
2457 static device_method_t amd_methods[] = {
2458 	/* Device interface */
2459 	DEVMETHOD(device_probe,		amd_probe),
2460 	DEVMETHOD(device_attach,	amd_attach),
2461 	DEVMETHOD_END
2462 };
2463 
2464 static driver_t amd_driver = {
2465 	"amd", amd_methods, sizeof(struct amd_softc)
2466 };
2467 
2468 static devclass_t amd_devclass;
2469 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, NULL, NULL);
2470