/*
 *********************************************************************
 *	FILE NAME  : amd.c
 *	     BY    : C.L. Huang 	(ching@tekram.com.tw)
 *		     Erich Chen     (erich@tekram.com.tw)
 *	Description: Device Driver for the amd53c974 PCI Bus Master
 *		     SCSI Host adapter found on cards such as
 *		     the Tekram DC-390(T).
 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************
 * $FreeBSD: src/sys/pci/amd.c,v 1.3.2.2 2001/06/02 04:32:50 nyan Exp $
 */

/*
 *********************************************************************
 *	HISTORY:
 *
 *	REV#	DATE	NAME    	DESCRIPTION
 *	1.00  07/02/96	CLH	        First release for RELEASE-2.1.0
 *	1.01  08/20/96	CLH	        Update for RELEASE-2.1.5
 *	1.02  11/06/96	CLH	        Fixed more than 1 LUN scanning
 *	1.03  12/20/96	CLH	        Modify to support 2.2-ALPHA
 *	1.04  12/26/97	CLH	        Modify to support RELEASE-2.2.5
 *	1.05  01/01/99  ERICH CHEN	Modify to support RELEASE-3.0.x (CAM)
 *********************************************************************
 */

/* #define AMD_DEBUG0           */
/* #define AMD_DEBUG_SCSI_PHASE */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include "amd.h"

#define PCI_DEVICE_ID_AMD53C974 	0x20201022ul
#define PCI_BASE_ADDR0	    		0x10

typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
typedef phase_handler_t *phase_handler_func_t;

static void amd_intr(void *vamd);
static int amdstart(struct amd_softc *amd, struct amd_srb *pSRB);
static phase_handler_t amd_NopPhase;

static phase_handler_t amd_DataOutPhase0;
static phase_handler_t amd_DataInPhase0;
#define amd_CommandPhase0 amd_NopPhase
static phase_handler_t amd_StatusPhase0;
static phase_handler_t amd_MsgOutPhase0;
static phase_handler_t amd_MsgInPhase0;
static phase_handler_t amd_DataOutPhase1;
static phase_handler_t amd_DataInPhase1;
static phase_handler_t amd_CommandPhase1;
static phase_handler_t amd_StatusPhase1;
static phase_handler_t amd_MsgOutPhase1;
static phase_handler_t amd_MsgInPhase1;

static void	amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
static int	amdparsemsg(struct amd_softc *amd);
static int	amdhandlemsgreject(struct amd_softc *amd);
static void	amdconstructsdtr(struct amd_softc *amd,
				 u_int period, u_int offset);
static u_int	amdfindclockrate(struct amd_softc *amd, u_int *period);
static int	amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);

static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
static void amd_Disconnect(struct amd_softc *amd);
static void amd_Reselect(struct amd_softc *amd);
static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_ScsiRstDetect(struct amd_softc *amd);
static void amd_ResetSCSIBus(struct amd_softc *amd);
static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
static void amd_InvalidCmd(struct amd_softc *amd);

#if 0
static void amd_timeout(void *arg1);
static void amd_reset(struct amd_softc *amd);
#endif
static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);

void    amd_linkSRB(struct amd_softc *amd);
static int amd_init(device_t);
static void amd_load_defaults(struct amd_softc *amd);
static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
static int amd_EEpromInDO(struct amd_softc *amd);
static u_int16_t EEpromGetData1(struct amd_softc *amd);
static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
static void amd_ReadEEprom(struct amd_softc *amd);

static int amd_probe(device_t);
static int amd_attach(device_t);
static void amdcompletematch(struct amd_softc *amd, target_id_t target,
			     lun_id_t lun, u_int tag, struct srb_queue *queue,
			     cam_status status);
static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
		       u_int period, u_int offset, u_int type);
static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);

static __inline void amd_clear_msg_state(struct amd_softc *amd);

static __inline void
amd_clear_msg_state(struct amd_softc *amd)
{
	amd->msgout_len = 0;
	amd->msgout_index = 0;
	amd->msgin_index = 0;
}

/* CAM SIM entry points */
#define ccb_srb_ptr spriv_ptr0
#define ccb_amd_ptr spriv_ptr1
static void	amd_action(struct cam_sim *sim, union ccb *ccb);
static void	amd_poll(struct cam_sim *sim);

/*
 * State engine function tables indexed by SCSI phase number
 */
phase_handler_func_t amd_SCSI_phase0[] = {
	amd_DataOutPhase0,
	amd_DataInPhase0,
	amd_CommandPhase0,
	amd_StatusPhase0,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase0,
	amd_MsgInPhase0
};

phase_handler_func_t amd_SCSI_phase1[] = {
	amd_DataOutPhase1,
	amd_DataInPhase1,
	amd_CommandPhase1,
	amd_StatusPhase1,
	amd_NopPhase,
	amd_NopPhase,
	amd_MsgOutPhase1,
	amd_MsgInPhase1
};

/*
 * EEProm/BIOS negotiation periods
 */
u_int8_t   eeprom_period[] = {
	 25,	/* 10.0MHz */
	 32,	/*  8.0MHz */
	 38,	/*  6.6MHz */
	 44,	/*  5.7MHz */
	 50,	/*  5.0MHz */
	 63,	/*  4.0MHz */
	 83,	/*  3.0MHz */
	125	/*  2.0MHz */
};

/*
 * chip clock setting to SCSI specified sync parameter table.
 */
u_int8_t tinfo_sync_period[] = {
	25,	/* 10.0 */
	32,	/* 8.0 */
	38,	/* 6.6 */
	44,	/* 5.7 */
	50,	/* 5.0 */
	57,	/* 4.4 */
	63,	/* 4.0 */
	70,	/* 3.6 */
	76,	/* 3.3 */
	83	/* 3.0 */
};

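/*
 * Grab the next free SRB from the pool.  Returns NULL when the pool is
 * exhausted; the caller is expected to fail the request gracefully
 * (amd_action reports CAM_RESRC_UNAVAIL in that case).
 */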
static __inline struct amd_srb *
amdgetsrb(struct amd_softc *amd)
{
	struct amd_srb *    pSRB;

	crit_enter();
	pSRB = TAILQ_FIRST(&amd->free_srbs);
	if (pSRB)
		TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
	crit_exit();
	return (pSRB);
}

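/*
 * Load the CDB for the current request into the chip's SCSI FIFO.  For
 * auto-request-sense SRBs a REQUEST SENSE CDB is built on the fly;
 * otherwise the CDB supplied with the CCB is used verbatim.
 */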
static void
amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
{
	struct scsi_request_sense sense_cmd;
	struct ccb_scsiio *csio;
	u_int8_t *cdb;
	u_int cdb_len;

	csio = &srb->pccb->csio;

	if (srb->SRBFlag & AUTO_REQSENSE) {
		sense_cmd.opcode = REQUEST_SENSE;
		sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
		sense_cmd.unused[0] = 0;
		sense_cmd.unused[1] = 0;
		sense_cmd.length = csio->sense_len;
		sense_cmd.control = 0;
		cdb = &sense_cmd.opcode;
		cdb_len = sizeof(sense_cmd);
	} else {
		cdb = &srb->CmdBlock[0];
		cdb_len = srb->ScsiCmdLen;
	}
	amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
}

/*
 * Attempt to start a waiting transaction.  Interrupts must be disabled
 * upon entry to this function.
 */
static void
amdrunwaiting(struct amd_softc *amd)
{
	struct amd_srb *srb;

	if (amd->last_phase != SCSI_BUS_FREE)
		return;

	srb = TAILQ_FIRST(&amd->waiting_srbs);
	if (srb == NULL)
		return;

	if (amdstart(amd, srb) == 0) {
		TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
		TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
	}
}

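/*
 * bus_dma callback: copy the DMA segments into the SRB's scatter/gather
 * list, initialize the SRB transfer state, and queue the SRB.  Runs the
 * waiting queue once the SRB has been appended.
 */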
static void
amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 amd_srb *srb;
	union	 ccb *ccb;
	struct	 amd_softc *amd;

	srb = (struct amd_srb *)arg;
	ccb = srb->pccb;
	amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;

	if (error != 0) {
		if (error != EFBIG)
			kprintf("amd%d: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", amd->unit, error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		struct amd_sg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		srb->pSGlist = &srb->SGsegment[0];
		sg = srb->pSGlist;
		while (dm_segs < end_seg) {
			sg->SGXLen = dm_segs->ds_len;
			sg->SGXPtr = dm_segs->ds_addr;
			sg++;
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
	}
	srb->SGcount = nseg;
	srb->SGIndex = 0;
	srb->AdaptStatus = 0;
	srb->TargetStatus = 0;
	srb->MsgCnt = 0;
	srb->SRBStatus = 0;
	srb->SRBFlag = 0;
	srb->SRBState = 0;
	srb->TotalXferredLen = 0;
	srb->SGPhysAddr = 0;
	srb->SGToBeXferLen = 0;
	srb->EndMessage = 0;

	crit_enter();

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		xpt_done(ccb);
		crit_exit();
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
#if 0
	/* XXX Need a timeout handler */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    amdtimeout, srb);
#endif
	TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
	amdrunwaiting(amd);
	crit_exit();
}

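/*
 * CAM SIM action entry point.  Dispatches on the CCB function code:
 * SCSI I/O is mapped for DMA and queued, while path inquiries, transfer
 * setting changes, bus resets and geometry calculations are handled
 * inline.
 */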
static void
amd_action(struct cam_sim *psim, union ccb *pccb)
{
	struct amd_softc *    amd;
	u_int   target_id;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));

	amd = (struct amd_softc *) cam_sim_softc(psim);
	target_id = pccb->ccb_h.target_id;

	switch (pccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct amd_srb *    pSRB;
		struct ccb_scsiio *pcsio;

		pcsio = &pccb->csio;

		/*
		 * Assign an SRB and connect it with this ccb.
		 */
		pSRB = amdgetsrb(amd);

		if (!pSRB) {
			/* Freeze SIMQ */
			pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(pccb);
			return;
		}
		pSRB->pccb = pccb;
		pccb->ccb_h.ccb_srb_ptr = pSRB;
		pccb->ccb_h.ccb_amd_ptr = amd;
		pSRB->ScsiCmdLen = pcsio->cdb_len;
		bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
		if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
					int error;

					crit_enter();
					error =
					    bus_dmamap_load(amd->buffer_dmat,
							    pSRB->dmamap,
							    pcsio->data_ptr,
							    pcsio->dxfer_len,
							    amdexecutesrb,
							    pSRB, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain
						 * ordering, freeze the
						 * controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(amd->psim, 1);
						pccb->ccb_h.status |=
						    CAM_RELEASE_SIMQ;
					}
					crit_exit();
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)pcsio->data_ptr;
					seg.ds_len = pcsio->dxfer_len;
					amdexecutesrb(pSRB, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
				 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
					TAILQ_INSERT_HEAD(&amd->free_srbs,
							  pSRB, links);
					pccb->ccb_h.status = CAM_PROVIDE_FAIL;
					xpt_done(pccb);
					return;
				}

				/* Just use the segments provided */
				segs =
				    (struct bus_dma_segment *)pcsio->data_ptr;
				amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
			}
		} else
			amdexecutesrb(pSRB, NULL, 0, 0);
		break;
	}
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &pccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = amd->max_lun;	/* 7 or 0 */
		cpi->initiator_id = amd->AdaptSCSIID;
		cpi->bus_id = cam_sim_bus(psim);
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(psim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_ABORT:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_RESET_BUS:
	{
		int     i;

		amd_ResetSCSIBus(amd);
		amd->ACBFlag = 0;

		for (i = 0; i < 500; i++) {
			DELAY(1000);	/* Wait until our interrupt
					 * handler sees it */
		}

		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_RESET_DEV:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_TERM_IO:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info = &amd->tinfo[target_id];
		struct amd_transinfo *tinfo;
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		crit_enter();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			/* current transfer settings */
			if (targ_info->disc_tag & AMD_CUR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_CUR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->current;
		} else {
			/* default(user) transfer settings */
			if (targ_info->disc_tag & AMD_USR_DISCENB) {
				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			} else {
				spi->flags = 0;
			}
			if (targ_info->disc_tag & AMD_USR_TAGENB) {
				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			} else {
				scsi->flags = 0;
			}
			tinfo = &targ_info->user;
		}
		spi->sync_period = tinfo->period;
		spi->sync_offset = tinfo->offset;
		crit_exit();

		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &pccb->cts;
		struct amd_target_info *targ_info;
		u_int  update_type = 0;
		int    last_entry;

		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;
		if (IS_CURRENT_SETTINGS(cts)) {
			update_type |= AMD_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts)) {
			update_type |= AMD_TRANS_USER;
		}
		if (update_type == 0
		 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
			cts->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(pccb);
			/* Invalid request; don't fall through and
			 * complete this CCB a second time. */
			break;
		}

		crit_enter();
		targ_info = &amd->tinfo[target_id];

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_DISCENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_USR_DISCENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_DISCENB;
				}
			}
		}
		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if (update_type & AMD_TRANS_GOAL) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				   != 0) {
					targ_info->disc_tag |= AMD_CUR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_CUR_TAGENB;
				}
			}
			if (update_type & AMD_TRANS_USER) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)
				    != 0) {
					targ_info->disc_tag |= AMD_USR_TAGENB;
				} else {
					targ_info->disc_tag &= ~AMD_USR_TAGENB;
				}
			}
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_offset = targ_info->goal.offset;
			else
				spi->sync_offset = targ_info->user.offset;
		}

		if (spi->sync_offset > AMD_MAX_SYNC_OFFSET)
			spi->sync_offset = AMD_MAX_SYNC_OFFSET;

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type & AMD_TRANS_GOAL)
				spi->sync_period = targ_info->goal.period;
			else
				spi->sync_period = targ_info->user.period;
		}

		last_entry = sizeof(tinfo_sync_period) - 1;
		if ((spi->sync_period != 0)
		 && (spi->sync_period < tinfo_sync_period[0]))
			spi->sync_period = tinfo_sync_period[0];
		if (spi->sync_period > tinfo_sync_period[last_entry])
			spi->sync_period = 0;
		if (spi->sync_offset == 0)
			spi->sync_period = 0;

		if ((update_type & AMD_TRANS_USER) != 0) {
			targ_info->user.period = spi->sync_period;
			targ_info->user.offset = spi->sync_offset;
		}
		if ((update_type & AMD_TRANS_GOAL) != 0) {
			targ_info->goal.period = spi->sync_period;
			targ_info->goal.offset = spi->sync_offset;
		}
		crit_exit();
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int     extended;

		ccg = &pccb->ccg;
		size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
		extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		pccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(pccb);
		break;
	}
	default:
		pccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(pccb);
		break;
	}
}

static void
amd_poll(struct cam_sim *psim)
{
	amd_intr(cam_sim_softc(psim));
}

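/*
 * Convert the current transfer position of an SRB into a kernel virtual
 * address by walking the completed S/G entries.  This assumes the CCB's
 * data pointer is a virtual address, which holds for the residual-byte
 * fixup path in amd_DataInPhase0 that calls it.
 */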
static u_int8_t *
phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt)
{
	intptr_t     dataPtr;
	struct ccb_scsiio *pcsio;
	u_int8_t   i;
	struct amd_sg *    pseg;

	dataPtr = 0;
	pcsio = &pSRB->pccb->csio;

	dataPtr = (intptr_t) pcsio->data_ptr;
	pseg = pSRB->SGsegment;
	for (i = 0; i < pSRB->SGIndex; i++) {
		dataPtr += (int) pseg->SGXLen;
		pseg++;
	}
	dataPtr += (int) xferCnt;
	return ((u_int8_t *) dataPtr);
}

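/*
 * After a bus reset, drop every target (other than the adapter itself)
 * back to asynchronous transfers.
 */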
static void
ResetDevParam(struct amd_softc *amd)
{
	u_int target;

	for (target = 0; target <= amd->max_id; target++) {
		if (amd->AdaptSCSIID != target) {
			amdsetsync(amd, target, /*clockrate*/0,
				   /*period*/0, /*offset*/0, AMD_TRANS_CUR);
		}
	}
}

static void
amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
		 u_int tag, struct srb_queue *queue, cam_status status)
{
	struct amd_srb *srb;
	struct amd_srb *next_srb;

	for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
		union ccb *ccb;

		next_srb = TAILQ_NEXT(srb, links);
		if (srb->pccb->ccb_h.target_id != target
		 && target != CAM_TARGET_WILDCARD)
			continue;

		if (srb->pccb->ccb_h.target_lun != lun
		 && lun != CAM_LUN_WILDCARD)
			continue;

		if (srb->TagNumber != tag
		 && tag != AMD_TAG_WILDCARD)
			continue;

		ccb = srb->pccb;
		TAILQ_REMOVE(queue, srb, links);
		TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
		 && (status & CAM_DEV_QFRZN) != 0)
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status = status;
		xpt_done(ccb);
	}
}

static void
amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
	   u_int period, u_int offset, u_int type)
{
	struct amd_target_info *tinfo;
	u_int old_period;
	u_int old_offset;

	tinfo = &amd->tinfo[target];
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	if ((type & AMD_TRANS_CUR) != 0
	 && (old_period != period || old_offset != offset)) {
		struct cam_path *path;

		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->sync_period_reg = clockrate;
		tinfo->sync_offset_reg = offset;
		tinfo->CtrlR3 &= ~FAST_SCSI;
		tinfo->CtrlR4 &= ~EATER_25NS;
		if (clockrate > 7)
			tinfo->CtrlR4 |= EATER_25NS;
		else
			tinfo->CtrlR3 |= FAST_SCSI;

		if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
			amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
			amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
			amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
			amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
		}
		/* If possible, update the XPT's notion of our transfer rate */
		if (xpt_create_path(&path, /*periph*/NULL,
				    cam_sim_path(amd->psim), target,
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			struct ccb_trans_settings *neg;
			struct ccb_trans_settings_spi *spi;

			neg = &xpt_alloc_ccb()->cts;
			spi = &neg->xport_specific.spi;
			xpt_setup_ccb(&neg->ccb_h, path, /*priority*/1);
			spi->sync_period = period;
			spi->sync_offset = offset;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				  | CTS_SPI_VALID_SYNC_OFFSET;
			xpt_async(AC_TRANSFER_NEG, path, neg);
			xpt_free_path(path);
			xpt_free_ccb(&neg->ccb_h);
		}
	}
	if ((type & AMD_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & AMD_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

static void
amdsettags(struct amd_softc *amd, u_int target, int tagenb)
{
	panic("Implement me!");
}

#if 0
/*
 **********************************************************************
 * Function : amd_reset (struct amd_softc * amd)
 * Purpose  : perform a hard reset on the SCSI bus (and AMD chip).
 * Inputs   : cmd - command which caused the SCSI RESET
 **********************************************************************
 */
static void
amd_reset(struct amd_softc * amd)
{
	u_int8_t   bval;
	u_int16_t  i;

#ifdef AMD_DEBUG0
	kprintf("DC390: RESET");
#endif

	crit_enter();
	bval = amd_read8(amd, CNTLREG1);
	bval |= DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* disable interrupt */
	amd_ResetSCSIBus(amd);

	for (i = 0; i < 500; i++) {
		DELAY(1000);
	}

	bval = amd_read8(amd, CNTLREG1);
	bval &= ~DIS_INT_ON_SCSI_RST;
	amd_write8(amd, CNTLREG1, bval);	/* re-enable interrupt */

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	ResetDevParam(amd);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->running_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
			 AMD_TAG_WILDCARD, &amd->waiting_srbs,
			 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
	amd->active_srb = NULL;
	amd->ACBFlag = 0;
	crit_exit();
	return;
}

void
amd_timeout(void *arg1)
{
	struct amd_srb *    pSRB;

	pSRB = (struct amd_srb *) arg1;
}
#endif

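/*
 * Program the chip for selection of the SRB's target: load the identify
 * (and optional tag or SDTR) messages into the FIFO and issue the
 * appropriate select-with-ATN command.  Returns 0 if selection was
 * started, or 1 if the chip had a pending interrupt and the SRB must be
 * retried later.
 */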
static int
amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_target_info *targ_info;
	u_int identify_msg;
	u_int command;
	u_int target;
	u_int lun;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;
	target = pccb->ccb_h.target_id;
	lun = pccb->ccb_h.target_lun;
	targ_info = &amd->tinfo[target];

	amd_clear_msg_state(amd);
	amd_write8(amd, SCSIDESTIDREG, target);
	amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
	amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
	amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
	amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	identify_msg = MSG_IDENTIFYFLAG | lun;
	if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
	  && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
	  && (pSRB->CmdBlock[0] != REQUEST_SENSE)
	  && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
		identify_msg |= MSG_IDENTIFY_DISCFLAG;

	amd_write8(amd, SCSIFIFOREG, identify_msg);
	if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
	  || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
		pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
	if (targ_info->current.period != targ_info->goal.period
	 || targ_info->current.offset != targ_info->goal.offset) {
		command = SEL_W_ATN_STOP;
		amdconstructsdtr(amd, targ_info->goal.period,
				 targ_info->goal.offset);
	} else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		command = SEL_W_ATN2;
		pSRB->SRBState = SRB_START;
		amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
		amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
	} else {
		command = SEL_W_ATN;
		pSRB->SRBState = SRB_START;
	}
	if (command != SEL_W_ATN_STOP)
		amdsetupcommand(amd, pSRB);

	if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
		pSRB->SRBState = SRB_READY;
		return (1);
	} else {
		amd->last_phase = SCSI_ARBITRATING;
		amd_write8(amd, SCSICMDREG, command);
		amd->active_srb = pSRB;
		amd->cur_target = target;
		amd->cur_lun = lun;
		return (0);
	}
}

/*
 *  Catch an interrupt from the adapter.
 *  Process pending device interrupts.
 */
static void
amd_intr(void *arg)
{
	struct amd_softc *amd;
	struct amd_srb *pSRB;
	u_int  internstat = 0;
	u_int  scsistat;
	u_int  intstat;

	amd = (struct amd_softc *)arg;

	if (amd == NULL) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: amd NULL return......");
#endif
		return;
	}

	scsistat = amd_read8(amd, SCSISTATREG);
	if (!(scsistat & INTERRUPT)) {
#ifdef AMD_DEBUG0
		kprintf("amd_intr: scsistat = NULL ,return......");
#endif
		return;
	}
#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("scsistat=%2x,", scsistat);
#endif

	internstat = amd_read8(amd, INTERNSTATREG);
	intstat = amd_read8(amd, INTSTATREG);

#ifdef AMD_DEBUG_SCSI_PHASE
	kprintf("intstat=%2x,", intstat);
#endif

	if (intstat & DISCONNECTED) {
		amd_Disconnect(amd);
		return;
	}
	if (intstat & RESELECTED) {
		amd_Reselect(amd);
		return;
	}
	if (intstat & INVALID_CMD) {
		amd_InvalidCmd(amd);
		return;
	}
	if (intstat & SCSI_RESET_) {
		amd_ScsiRstDetect(amd);
		return;
	}
	if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
		pSRB = amd->active_srb;
		/*
		 * Run our state engine.  First perform
		 * post processing for the last phase we
		 * were in, followed by any processing
		 * required to handle the current phase.
		 */
		scsistat =
		    amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
		amd->last_phase = scsistat & SCSI_PHASE_MASK;
		(void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
	}
}

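/*
 * Data-out phase post-processing: account for the bytes the chip moved.
 * On a completed segment, advance to the next S/G entry; on an early
 * phase change, compute the residual from the FIFO level and the
 * transfer counter registers.
 */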
static u_int
amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
				;
			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {
			ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
			ResidCnt += amd_read8(amd, CTCREG_LOW)
				  | (amd_read8(amd, CTCREG_MID) << 8)
				  | (amd_read8(amd, CURTXTCNTREG) << 16);

			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
		}
	}
	amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
	return (scsistat);
}

static u_int
amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	u_int8_t bval;
	u_int16_t  i, residual;
	struct amd_sg *psgl;
	u_int32_t   ResidCnt, xferCnt;
	u_int8_t *  ptr;

	if (!(pSRB->SRBState & SRB_XFERPAD)) {
		if (scsistat & PARITY_ERR) {
			pSRB->SRBStatus |= PARITY_ERROR;
		}
		if (scsistat & COUNT_2_ZERO) {
			while (1) {
				bval = amd_read8(amd, DMA_Status);
				if ((bval & DMA_XFER_DONE) != 0)
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
			pSRB->SGIndex++;
			if (pSRB->SGIndex < pSRB->SGcount) {
				pSRB->pSGlist++;
				psgl = pSRB->pSGlist;
				pSRB->SGPhysAddr = psgl->SGXPtr;
				pSRB->SGToBeXferLen = psgl->SGXLen;
			} else {
				pSRB->SGToBeXferLen = 0;
			}
		} else {	/* phase changed */
			residual = 0;
			bval = amd_read8(amd, CURRENTFIFOREG);
			while (bval & 0x1f) {
				if ((bval & 0x1f) == 1) {
					for (i = 0; i < 0x100; i++) {
						bval = amd_read8(amd, CURRENTFIFOREG);
						if (!(bval & 0x1f)) {
							goto din_1;
						} else if (i == 0x0ff) {
							residual = 1;
							goto din_1;
						}
					}
				} else {
					bval = amd_read8(amd, CURRENTFIFOREG);
				}
			}
	din_1:
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
			for (i = 0; i < 0x8000; i++) {
				if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
					break;
			}
			amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);

			ResidCnt = amd_read8(amd, CTCREG_LOW)
				 | (amd_read8(amd, CTCREG_MID) << 8)
				 | (amd_read8(amd, CURTXTCNTREG) << 16);
			xferCnt = pSRB->SGToBeXferLen - ResidCnt;
			pSRB->SGPhysAddr += xferCnt;
			pSRB->TotalXferredLen += xferCnt;
			pSRB->SGToBeXferLen = ResidCnt;
			if (residual) {
				/* get residual byte */
				bval = amd_read8(amd, SCSIFIFOREG);
				ptr = phystovirt(pSRB, xferCnt);
				*ptr = bval;
				pSRB->SGPhysAddr++;
				pSRB->TotalXferredLen++;
				pSRB->SGToBeXferLen--;
			}
		}
	}
	return (scsistat);
}

static u_int
amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
	/* get message */
	pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
	pSRB->SRBState = SRB_COMPLETED;
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}

static u_int
amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
		scsistat = SCSI_NOP0;
	}
	return (scsistat);
}

static u_int
amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	int done;

	amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);

	done = amdparsemsg(amd);
	if (done)
		amd->msgin_index = 0;
	else
		amd->msgin_index++;
	return (SCSI_NOP0);
}

static int
amdparsemsg(struct amd_softc *amd)
{
	int	reject;
	int	done;
	int	response;

	done = FALSE;
	response = FALSE;
	reject = FALSE;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return TRUE indicating that we have
	 * parsed an entire message.
	 */
	switch (amd->msgin_buf[0]) {
	case MSG_DISCONNECT:
		amd->active_srb->SRBState = SRB_DISCONNECT;
		amd->disc_count[amd->cur_target][amd->cur_lun]++;
		done = TRUE;
		break;
	case MSG_SIMPLE_Q_TAG:
	{
		struct amd_srb *disc_srb;

		if (amd->msgin_index < 1)
			break;
		disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
		if (amd->active_srb != NULL
		 || disc_srb->SRBState != SRB_DISCONNECT
		 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
		 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
			kprintf("amd%d: Unexpected tagged reselection "
			       "for target %d, Issuing Abort\n", amd->unit,
			       amd->cur_target);
			amd->msgout_buf[0] = MSG_ABORT;
			amd->msgout_len = 1;
			response = TRUE;
			break;
		}
		amd->active_srb = disc_srb;
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		done = TRUE;
		break;
	}
	case MSG_MESSAGE_REJECT:
		response = amdhandlemsgreject(amd);
		if (response == FALSE)
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = TRUE;
		break;
	case MSG_EXTENDED:
	{
		u_int clockrate;
		u_int period;
		u_int offset;
		u_int saved_offset;

		/* Wait for enough of the message to begin validation */
		if (amd->msgin_index < 1)
			break;
		if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
			reject = TRUE;
			break;
		}

		/* Wait for opcode */
		if (amd->msgin_index < 2)
			break;

		if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
			reject = TRUE;
			break;
		}

		/*
		 * Wait until we have both args before validating
		 * and acting on this message.
		 *
		 * Add one to MSG_EXT_SDTR_LEN to account for
		 * the extended message preamble.
		 */
		if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
			break;

		period = amd->msgin_buf[3];
		saved_offset = offset = amd->msgin_buf[4];
		clockrate = amdfindclockrate(amd, &period);
		if (offset > AMD_MAX_SYNC_OFFSET)
			offset = AMD_MAX_SYNC_OFFSET;
		if (period == 0 || offset == 0) {
			offset = 0;
			period = 0;
			clockrate = 0;
		}
		amdsetsync(amd, amd->cur_target, clockrate, period, offset,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);

		/*
		 * See if we initiated Sync Negotiation
		 * and didn't have to fall down to async
		 * transfers.
		 */
		if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
			/* We started it */
			if (saved_offset != offset) {
				/* Went too low - force async */
				reject = TRUE;
			}
		} else {
			/*
			 * Send our own SDTR in reply
			 */
			if (bootverbose)
				kprintf("Sending SDTR!\n");
			amd->msgout_index = 0;
			amd->msgout_len = 0;
			amdconstructsdtr(amd, period, offset);
			amd->msgout_index = 0;
			response = TRUE;
		}
		done = TRUE;
		break;
	}
	case MSG_SAVEDATAPOINTER:
	case MSG_RESTOREPOINTERS:
		/* XXX Implement!!! */
		done = TRUE;
		break;
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		amd->msgout_index = 0;
		amd->msgout_len = 1;
		amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = TRUE;
		response = TRUE;
	}

	if (response)
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);

	if (done && !response)
		/* Clear the outgoing message buffer */
		amd->msgout_len = 0;

	/* Drop Ack */
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);

	return (done);
}

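/*
 * Translate an SDTR period into the chip's clock-rate setting
 * (table index + 4).  A period faster than our fastest rate is bumped
 * up to that rate; one slower than our slowest entry forces async.
 */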
static u_int
amdfindclockrate(struct amd_softc *amd, u_int *period)
{
	u_int i;
	u_int clockrate;

	for (i = 0; i < sizeof(tinfo_sync_period); i++) {
		u_int8_t *table_entry;

		table_entry = &tinfo_sync_period[i];
		if (*period <= *table_entry) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (i == 0) {
				*period = *table_entry;
			}
			break;
		}
	}

	if (i == sizeof(tinfo_sync_period)) {
		/* Too slow for us.  Use async transfers. */
		*period = 0;
		clockrate = 0;
	} else
		clockrate = i + 4;

	return (clockrate);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, the target saw the full message.
 * If "full" is false, the target saw at least the first
 * byte of the message.
 */
static int
amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
{
	int found;
	int index;

	found = FALSE;
	index = 0;

	while (index < amd->msgout_len) {
		if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
		 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
			index++;
		else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
		      && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
			/* Skip tag type and tag id */
			index += 2;
		} else if (amd->msgout_buf[index] == MSG_EXTENDED) {
			/* Found a candidate */
			if (amd->msgout_buf[index+2] == msgtype) {
				u_int end_index;

				end_index = index + 1
					  + amd->msgout_buf[index + 1];
				if (full) {
					if (amd->msgout_index > end_index)
						found = TRUE;
				} else if (amd->msgout_index > index)
					found = TRUE;
			}
			break;
		} else {
			panic("amdsentmsg: Inconsistent msg buffer");
		}
	}
	return (found);
}

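/*
 * Append a five-byte extended SDTR message (preamble, length, opcode,
 * period, offset) to the outgoing message buffer.
 */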
static void
amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
{
	amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
	amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
	amd->msgout_buf[amd->msgout_index++] = period;
	amd->msgout_buf[amd->msgout_index++] = offset;
	amd->msgout_len += 5;
}

static int
amdhandlemsgreject(struct amd_softc *amd)
{
	/*
	 * If we had an outstanding SDTR for this
	 * target, this is a signal that the target
	 * is refusing negotiation.  Also watch out
	 * for rejected tag messages.
	 */
	struct	amd_srb *srb;
	struct	amd_target_info *targ_info;
	int	response = FALSE;

	srb = amd->active_srb;
	targ_info = &amd->tinfo[amd->cur_target];
	if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		amdsetsync(amd, amd->cur_target, /*clockrate*/0,
			   /*period*/0, /*offset*/0,
			   AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
		kprintf("amd%d:%d: refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       amd->unit, amd->cur_target);
	} else if ((srb != NULL)
		&& (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		struct ccb_trans_settings *neg;
		struct ccb_trans_settings_scsi *scsi;

		kprintf("amd%d:%d: refuses tagged commands.  Performing "
		       "non-tagged I/O\n", amd->unit, amd->cur_target);

		neg = &xpt_alloc_ccb()->cts;
		scsi = &neg->proto_specific.scsi;
		amdsettags(amd, amd->cur_target, FALSE);
		scsi->valid = CTS_SCSI_VALID_TQ;
		xpt_setup_ccb(&neg->ccb_h, srb->pccb->ccb_h.path, /*pri*/1);
		xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, neg);
		xpt_free_ccb(&neg->ccb_h);
		neg = NULL; /* safety */
		scsi = NULL; /* safety */

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		if (amd->msgout_len != 0)
			bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
			      amd->msgout_len);
		amd->msgout_buf[0] = MSG_IDENTIFYFLAG
				    | srb->pccb->ccb_h.target_lun;
		amd->msgout_len++;
		if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
		  && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
			amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;

		srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;

1510 		 * Requeue all tagged commands for this target
1511 		 * currently in our posession so they can be
1512 		 * converted to untagged commands.
1513 		 */
1514 		amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1515 				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1516 				 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1517 	} else {
1518 		/*
1519 		 * Otherwise, we ignore it.
1520 		 */
1521 		kprintf("amd%d:%d: Message reject received -- ignored\n",
1522 		       amd->unit, amd->cur_target);
1523 	}
1524 	return (response);
1525 }
1526 
#if 0
	if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
		if (bval == MSG_DISCONNECT) {
			pSRB->SRBState = SRB_DISCONNECT;
		} else if (bval == MSG_SAVEDATAPOINTER) {
			goto min6;
		} else if ((bval == MSG_EXTENDED)
			|| ((bval >= MSG_SIMPLE_Q_TAG)
			 && (bval <= MSG_ORDERED_Q_TAG))) {
			pSRB->SRBState |= SRB_MSGIN_MULTI;
			pSRB->MsgInBuf[0] = bval;
			pSRB->MsgCnt = 1;
			pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
		} else if (bval == MSG_MESSAGE_REJECT) {
			amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);

			if (pSRB->SRBState & DO_SYNC_NEGO) {
				goto set_async;
			}
		} else if (bval == MSG_RESTOREPOINTERS) {
			goto min6;
		} else {
			goto min6;
		}
	} else {		/* minx: */
		*pSRB->pMsgPtr = bval;
		pSRB->MsgCnt++;
		pSRB->pMsgPtr++;
		if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
		 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
			if (pSRB->MsgCnt == 2) {
				pSRB->SRBState = 0;
				pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
					pSRB = amd->pTmpSRB;
					pSRB->SRBState = SRB_UNEXPECT_RESEL;
					pDCB->pActiveSRB = pSRB;
					pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
					EnableMsgOut2(amd, pSRB);
				} else {
					if (pDCB->DCBFlag & ABORT_DEV_) {
						pSRB->SRBState = SRB_ABORT_SENT;
						EnableMsgOut1(amd, pSRB);
					}
					pDCB->pActiveSRB = pSRB;
					pSRB->SRBState = SRB_DATA_XFER;
				}
			}
		} else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
			&& (pSRB->MsgCnt == 5)) {
			pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
			if ((pSRB->MsgInBuf[1] != 3)
			 || (pSRB->MsgInBuf[2] != 1)) {	/* reject_msg: */
				pSRB->MsgCnt = 1;
				pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
				amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
			} else if (!(pSRB->MsgInBuf[3])
				|| !(pSRB->MsgInBuf[4])) {
		set_async:	/* set async */

				pDCB = pSRB->pSRBDCB;
				/* disable sync & sync nego */
				pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
				pDCB->SyncPeriod = 0;
				pDCB->SyncOffset = 0;

				pDCB->tinfo.goal.period = 0;
				pDCB->tinfo.goal.offset = 0;

				pDCB->tinfo.current.period = 0;
				pDCB->tinfo.current.offset = 0;
				pDCB->tinfo.current.width =
				    MSG_EXT_WDTR_BUS_8_BIT;

				pDCB->CtrlR3 = FAST_CLK; /* non_fast */
				pDCB->CtrlR4 &= 0x3f;
				pDCB->CtrlR4 |= EATER_25NS;
				goto re_prog;
			} else {/* set sync */

				pDCB = pSRB->pSRBDCB;
				/* enable sync & sync nego */
				pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;

				/* set sync offset */
				pDCB->SyncOffset &= 0x0f0;
				pDCB->SyncOffset |= pSRB->MsgInBuf[4];

				/* set sync period */
				pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];

				wval = (u_int16_t) pSRB->MsgInBuf[3];
				wval = wval << 2;
				wval--;
				wval1 = wval / 25;
				if ((wval1 * 25) != wval) {
					wval1++;
				}
				bval = FAST_CLK|FAST_SCSI;
				pDCB->CtrlR4 &= 0x3f;
				if (wval1 >= 8) {
					/* Fast SCSI */
					wval1--;
					bval = FAST_CLK;
					pDCB->CtrlR4 |= EATER_25NS;
				}
				pDCB->CtrlR3 = bval;
				pDCB->SyncPeriod = (u_int8_t) wval1;

				pDCB->tinfo.goal.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.goal.offset = pDCB->SyncOffset;
				pDCB->tinfo.current.period =
				    tinfo_sync_period[pDCB->SyncPeriod - 4];
				pDCB->tinfo.current.offset = pDCB->SyncOffset;

				/*
				 * program SCSI control register
				 */
		re_prog:
				amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
				amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
				amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
				amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
			}
		}
	}
min6:
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
	return (SCSI_NOP0);
}
#endif

static u_int
amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
	return (scsistat);
}

static u_int
amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	DataIO_Comm(amd, pSRB, READ_DIRECTION);
	return (scsistat);
}

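/*
 * Common data-phase setup: program the SCSI transfer counter and the
 * DMA engine for the current S/G segment.  When the S/G list is
 * exhausted, fall back to a transfer-pad command and flag the overrun.
 */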
static void
DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
{
	struct amd_sg *    psgl;
	u_int32_t   lval;

	if (pSRB->SGIndex < pSRB->SGcount) {
		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */

		if (!pSRB->SGToBeXferLen) {
			psgl = pSRB->pSGlist;
			pSRB->SGPhysAddr = psgl->SGXPtr;
			pSRB->SGToBeXferLen = psgl->SGXLen;
		}
		lval = pSRB->SGToBeXferLen;
		amd_write8(amd, CTCREG_LOW, lval);
		amd_write8(amd, CTCREG_MID, lval >> 8);
		amd_write8(amd, CURTXTCNTREG, lval >> 16);

		amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);

		amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);

		pSRB->SRBState = SRB_DATA_XFER;

		amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);

		amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */

		amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
	} else {		/* xfer pad */
		if (pSRB->SGcount) {
			pSRB->AdaptStatus = H_OVER_UNDER_RUN;
			pSRB->SRBStatus |= OVER_RUN;
		}
		amd_write8(amd, CTCREG_LOW, 0);
		amd_write8(amd, CTCREG_MID, 0);
		amd_write8(amd, CURTXTCNTREG, 0);

		pSRB->SRBState |= SRB_XFERPAD;
		amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
	}
}

static u_int
amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	amdsetupcommand(amd, srb);

	srb->SRBState = SRB_COMMAND;
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	pSRB->SRBState = SRB_STATUS;
	amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
	return (scsistat);
}

static u_int
amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->msgout_len == 0) {
		amd->msgout_buf[0] = MSG_NOOP;
		amd->msgout_len = 1;
	}
	amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
	amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
	return (scsistat);
}

static u_int
amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
{
	return (scsistat);
}

static void
amd_Disconnect(struct amd_softc *amd)
{
	struct	amd_srb *srb;
	int	target;
	int	lun;

	srb = amd->active_srb;
	amd->active_srb = NULL;
	amd->last_phase = SCSI_BUS_FREE;
	amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
	target = amd->cur_target;
	lun = amd->cur_lun;

	if (srb == NULL) {
		/* Invalid reselection */
		amdrunwaiting(amd);
	} else if (srb->SRBState & SRB_ABORT_SENT) {
		/* Clean up and done this srb */
#if 0
		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1789 			/* XXX What about "done'ing" these srbs??? */
1790 			if (pSRB->pSRBDCB == pDCB) {
1791 				TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1792 				TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1793 			}
1794 		}
1795 		amdrunwaiting(amd);
1796 #endif
1797 	} else {
1798 		if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1799 		 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1800 			srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1801 			goto disc1;
1802 		} else if (srb->SRBState & SRB_DISCONNECT) {
1803 			if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1804 				amd->untagged_srbs[target][lun] = srb;
1805 			amdrunwaiting(amd);
1806 		} else if (srb->SRBState & SRB_COMPLETED) {
1807 	disc1:
1808 			srb->SRBState = SRB_FREE;
1809 			SRBdone(amd, srb);
1810 		}
1811 	}
1812 	return;
1813 }
1814 
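/*
 * Handle reselection by a previously disconnected target.  The chip
 * leaves the reselecting target's ID bit and the IDENTIFY message in
 * the FIFO: clearing our own ID bit and taking ffs() recovers the
 * target number, and the low three bits of the IDENTIFY byte give the
 * LUN.  The target's transfer parameters are then restored.
 */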
static void
amd_Reselect(struct amd_softc *amd)
{
	struct amd_target_info *tinfo;
	u_int16_t disc_count;

	amd_clear_msg_state(amd);
	if (amd->active_srb != NULL) {
		/* Requeue the SRB for our attempted Selection */
		TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
		amd->active_srb = NULL;
	}
	/* get ID */
	amd->cur_target = amd_read8(amd, SCSIFIFOREG);
	amd->cur_target ^= amd->HostID_Bit;
	amd->cur_target = ffs(amd->cur_target) - 1;
	amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
	tinfo = &amd->tinfo[amd->cur_target];
	amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
	disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
	if (disc_count == 0) {
		kprintf("amd%d: Unexpected reselection for target %d, "
		       "Issuing Abort\n", amd->unit, amd->cur_target);
		amd->msgout_buf[0] = MSG_ABORT;
		amd->msgout_len = 1;
		amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
	}
	if (amd->active_srb != NULL) {
		amd->disc_count[amd->cur_target][amd->cur_lun]--;
		amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
	}

	amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
	amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
	amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
	amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
	amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
	amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
	amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
	amd->last_phase = SCSI_NOP0;
}

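/*
 * Complete an SRB: sync and unload its DMA map, translate the SCSI
 * and adapter status into a CAM status code (starting auto request
 * sense on CHECK CONDITION when allowed), then return the SRB to the
 * free list and hand the CCB back to CAM.
 */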
static void
SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
{
	u_int8_t   bval, i, status;
	union ccb *pccb;
	struct ccb_scsiio *pcsio;
	struct amd_sg *ptr2;
	u_int32_t   swlval;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("SRBdone - TagNumber %d\n", pSRB->TagNumber));

	if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
		bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
	}

	status = pSRB->TargetStatus;
	pccb->ccb_h.status = CAM_REQ_CMP;
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		pSRB->SRBFlag &= ~AUTO_REQSENSE;
		pSRB->AdaptStatus = 0;
		pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;

		if (status == SCSI_STATUS_CHECK_COND) {
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
			goto ckc_e;
		}
		*((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];

		pcsio->sense_resid = pcsio->sense_len
				   - pSRB->TotalXferredLen;
		pSRB->TotalXferredLen = pSRB->Segment1[1];
		if (pSRB->TotalXferredLen) {
			/* XXX residual of the original command? */
			pcsio->resid = pcsio->dxfer_len
				     - pSRB->TotalXferredLen;
			/* The resid field contains valid data */
			/* Flush resid bytes on complete */
		} else {
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
		}
		pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
		goto ckc_e;
	}
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {
			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				/* XXX resid from untransferred S/G bytes? */
				pcsio->resid = swlval;

#ifdef	AMD_DEBUG0
				kprintf("XferredLen=%8x,NotYetXferLen=%8x,",
					pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef	AMD_DEBUG0
				kprintf("RequestSense..................\n");
#endif
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			kprintf("DC390: target busy at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			kprintf("DC390: target reserved at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			kprintf("DC390: driver stuffup at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;

			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			kprintf("DC390: driver stuffup %s %d\n",
			       __FILE__, __LINE__);
#endif
			/* Driver failed to perform operation */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->resid = 0;
			/* No error; sense data is not valid */
		}
	}
ckc_e:
	crit_enter();
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* CAM request not completed successfully => freeze devq */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	crit_exit();
	xpt_done(pccb);
}

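/*
 * Force a SCSI bus reset.  The resulting reset interrupt is fielded
 * by amd_ScsiRstDetect(), which sees RESET_DEV set and treats the
 * reset as one we generated ourselves.
 */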
static void
amd_ResetSCSIBus(struct amd_softc *amd)
{
	crit_enter();
	amd->ACBFlag |= RESET_DEV;
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
	crit_exit();
}

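/*
 * A SCSI bus reset was detected.  After letting the bus settle we
 * distinguish a reset we requested (RESET_DEV set above) from an
 * external one; for the latter, all running and waiting commands are
 * completed back to CAM with CAM_SCSI_BUS_RESET.
 */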
static void
amd_ScsiRstDetect(struct amd_softc *amd)
{
	u_int32_t   wlval;

#ifdef AMD_DEBUG0
	kprintf("amd_ScsiRstDetect\n");
#endif

	wlval = 1000;
	while (--wlval) {	/* delay ~1 second */
		DELAY(1000);
	}
	crit_enter();

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->ACBFlag & RESET_DEV) {
		amd->ACBFlag |= RESET_DONE;
	} else {
		amd->ACBFlag |= RESET_DETECT;
		ResetDevParam(amd);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->running_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amd->active_srb = NULL;
		amd->ACBFlag = 0;
		amdrunwaiting(amd);
	}
	crit_exit();
}

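/*
 * Start a manual REQUEST SENSE for a command that returned CHECK
 * CONDITION.  The original CDB and transfer counts are stashed in
 * Segment0/Segment1 so SRBdone() can restore them, the single S/G
 * entry is pointed at the CCB's sense buffer, and a 6-byte REQUEST
 * SENSE CDB (opcode 0x03) is built in place before the SRB is
 * restarted.
 */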
static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	pSRB->Segment0[0] = *((u_int32_t *)&(pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *)&(pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	*((u_int32_t *)&(pSRB->CmdBlock[0])) = 0x00000003;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	*((u_int16_t *)&(pSRB->CmdBlock[4])) = pcsio->sense_len;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}

static void
amd_InvalidCmd(struct amd_softc *amd)
{
	struct amd_srb *srb;

	srb = amd->active_srb;
	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
}

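/*
 * Initialize the SRB pool: tag each SRB, create its DMA map, and put
 * it on the free list.  An SRB whose DMA map cannot be created (and
 * any that would follow it) is simply left out of the pool.
 */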
void
amd_linkSRB(struct amd_softc *amd)
{
	u_int16_t count, i;
	struct amd_srb *psrb;
	int error;

	count = amd->SRBCount;

	for (i = 0; i < count; i++) {
		psrb = (struct amd_srb *)&amd->SRB_array[i];
		psrb->TagNumber = i;

		/*
		 * Create the dmamap.  This is no longer optional!
		 */
		error = bus_dmamap_create(amd->buffer_dmat, 0, &psrb->dmamap);
		if (error) {
			device_printf(amd->dev, "Error %d creating buffer "
					"dmamap!\n", error);
			break;
		}
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}

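/*
 * Serial EEPROM access.  The board's configuration lives in a small
 * serial EEPROM (a 93C46-style part) that is bit-banged through a
 * vendor register at PCI config offset 0x80; judging from the code
 * below, 0x40 drives the data-in line and 0x80 acts as the clock.
 * The helpers raise and lower chip enable, shift command bits out,
 * and sample data bits in one at a time.
 */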
static void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}

static void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;
		*regval = 0x80;
		pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;
	pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
	DELAY(160);
}

static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
		return (1);
	return (0);
}

static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int	  i;
	u_int	  carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}

static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int i, j;
	int carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ? 1 : 0;
		j >>= 1;
	}
}

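/*
 * Read the entire EEPROM image: 64 sequential 16-bit words.  Each word
 * is fetched by raising chip enable, clocking out a 9-bit READ command
 * (amd_Prepare), shifting in 16 data bits, and dropping chip enable.
 */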
static void
amd_ReadEEprom(struct amd_softc *amd)
{
	int	   regval;
	u_int	   i;
	u_int16_t *ptr;
	u_int8_t   cmd;

	ptr = (u_int16_t *)&amd->eepromBuf[0];
	cmd = EEPROM_READ;
	for (i = 0; i < 0x40; i++) {
		amd_EnDisableCE(amd, ENABLE_CE, &regval);
		amd_Prepare(amd, &regval, cmd);
		*ptr = EEpromGetData1(amd);
		ptr++;
		cmd++;
		amd_EnDisableCE(amd, DISABLE_CE, &regval);
	}
}

static void
amd_load_defaults(struct amd_softc *amd)
{
	int target;

	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
	for (target = 0; target < MAX_SCSI_ID; target++)
		amd->eepromBuf[target << 2] =
		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
}

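/*
 * Validate the EEPROM image: the 16-bit words of a good image sum to
 * EE_CHECKSUM.  If they do not, fall back to the conservative defaults
 * loaded above.
 */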
static void
amd_load_eeprom_or_defaults(struct amd_softc *amd)
{
	u_int16_t  wval, *ptr;
	u_int8_t   i;

	amd_ReadEEprom(amd);
	wval = 0;
	ptr = (u_int16_t *)&amd->eepromBuf[0];
	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
		wval += *ptr;

	if (wval != EE_CHECKSUM) {
		if (bootverbose)
			kprintf("amd%d: SEEPROM data unavailable.  "
			       "Using default device parameters.\n",
			       amd->unit);
		amd_load_defaults(amd);
	}
}

/*
 **********************************************************************
 * Function      : static int amd_init(device_t dev)
 * Purpose       : initialize the internal structures for a given SCSI host
 * Inputs        : dev - this host adapter's device
 **********************************************************************
 */
static int
amd_init(device_t dev)
{
	struct amd_softc *amd = device_get_softc(dev);
	struct resource	*iores;
	int	i, rid;
	u_int	bval;

	rid = PCI_BASE_ADDR0;
	iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				   RF_ACTIVE);
	if (iores == NULL) {
		if (bootverbose)
			kprintf("amd_init: bus_alloc_resource failure!\n");
		return ENXIO;
	}
	amd->tag = rman_get_bustag(iores);
	amd->bsh = rman_get_bushandle(iores);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &amd->buffer_dmat) != 0) {
		if (bootverbose)
			kprintf("amd_init: bus_dma_tag_create failure!\n");
		return ENXIO;
	}
	TAILQ_INIT(&amd->free_srbs);
	TAILQ_INIT(&amd->running_srbs);
	TAILQ_INIT(&amd->waiting_srbs);
	amd->last_phase = SCSI_BUS_FREE;
	amd->dev = dev;
	amd->unit = device_get_unit(dev);
	amd->SRBCount = MAX_SRB_CNT;
	amd->status = 0;
	amd_load_eeprom_or_defaults(amd);
	amd->max_id = 7;
	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
		amd->max_lun = 7;
	} else {
		amd->max_lun = 0;
	}
	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
	amd->AdaptSCSILUN = 0;
	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
	amd->ACBFlag = 0;
	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
	amd_linkSRB(amd);
	for (i = 0; i <= amd->max_id; i++) {
		if (amd->AdaptSCSIID != i) {
			struct amd_target_info *tinfo;
			PEEprom prom;

			tinfo = &amd->tinfo[i];
			prom = (PEEprom)&amd->eepromBuf[i << 2];
			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
				tinfo->disc_tag |= AMD_USR_DISCENB;
				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
					tinfo->disc_tag |= AMD_USR_TAGENB;
			}
			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
				tinfo->user.period =
				    eeprom_period[prom->EE_SPEED];
				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
			}
			tinfo->CtrlR1 = amd->AdaptSCSIID;
			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
				tinfo->CtrlR1 |= PARITY_ERR_REPO;
			tinfo->CtrlR3 = FAST_CLK;
			tinfo->CtrlR4 = EATER_25NS;
			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
		}
	}
	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
	/* Conversion factor = 0, 40MHz clock */
	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
	/* NOP cmd - clear command register */
	amd_write8(amd, SCSICMDREG, NOP_CMD);
	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
	amd_write8(amd, CNTLREG3, FAST_CLK);
	bval = EATER_25NS;
	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
		bval |= NEGATE_REQACKDATA;
	}
	amd_write8(amd, CNTLREG4, bval);

	/* Disable SCSI bus reset interrupt */
	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);

	return 0;
}

/*
 * Attach and initialize a host adapter.
 */
static int
amd_attach(device_t dev)
{
	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
	u_int8_t	intstat;
	struct amd_softc *amd = device_get_softc(dev);
	int		unit = device_get_unit(dev);
	int		rid;
	void		*ih;
	struct resource	*irqres;

	if (amd_init(dev)) {
		if (bootverbose)
			kprintf("amd_attach: amd_init failure!\n");
		return ENXIO;
	}

	/* Reset Pending INT */
	intstat = amd_read8(amd, INTSTATREG);

	/* After setting up the adapter, map our interrupt */
	rid = 0;
	irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
				    RF_SHAREABLE | RF_ACTIVE);
	if (irqres == NULL ||
	    bus_setup_intr(dev, irqres, 0, amd_intr, amd, &ih, NULL)) {
		if (bootverbose)
			kprintf("amd%d: unable to register interrupt handler!\n",
			       unit);
		return ENXIO;
	}

	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus.  Create the device queue for our SIM; MAX_START_JOB
	 * bounds the number of simultaneously queued transactions.
	 */
	devq = cam_simq_alloc(MAX_START_JOB);
	if (devq == NULL) {
		if (bootverbose)
			kprintf("amd_attach: cam_simq_alloc failure!\n");
		return ENXIO;
	}

	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
				  amd, amd->unit, &sim_mplock, 1,
				  MAX_TAGS_CMD_QUEUE, devq);
	cam_simq_release(devq);
	if (amd->psim == NULL) {
		if (bootverbose)
			kprintf("amd_attach: cam_sim_alloc failure!\n");
		return ENXIO;
	}

	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
		cam_sim_free(amd->psim);
		if (bootverbose)
			kprintf("amd_attach: xpt_bus_register failure!\n");
		return ENXIO;
	}

	if (xpt_create_path(&amd->ppath, /* periph */ NULL,
			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(amd->psim));
		cam_sim_free(amd->psim);
		if (bootverbose)
			kprintf("amd_attach: xpt_create_path failure!\n");
		return ENXIO;
	}

	return 0;
}

static int
amd_probe(device_t dev)
{
	if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
		device_set_desc(dev,
			"Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
		return 0;
	}
	return ENXIO;
}

static device_method_t amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_probe),
	DEVMETHOD(device_attach,	amd_attach),
	DEVMETHOD_END
};

static driver_t amd_driver = {
	"amd", amd_methods, sizeof(struct amd_softc)
};

static devclass_t amd_devclass;
DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, NULL, NULL);