1 /*
2  * Low level routines for the Advanced Systems Inc. SCSI controller chips
3  *
4  * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
32  */
33 /*
34  * Ported from:
35  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36  *
37  * Copyright (c) 1995-1996 Advanced System Products, Inc.
38  * All Rights Reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that redistributions of source
42  * code retain the above copyright notice and this comment without
43  * modification.
44  */
45 
46 #include <sys/param.h>
47 #include <sys/kernel.h>
48 #include <sys/systm.h>
49 #include <sys/thread2.h>
50 #include <sys/bus.h>
51 #include <sys/rman.h>
52 
53 #include <machine/clock.h>
54 
55 #include <bus/cam/cam.h>
56 #include <bus/cam/cam_ccb.h>
57 #include <bus/cam/cam_sim.h>
58 #include <bus/cam/cam_xpt_sim.h>
59 
60 #include <bus/cam/scsi/scsi_all.h>
61 #include <bus/cam/scsi/scsi_message.h>
62 #include <bus/cam/scsi/scsi_da.h>
63 #include <bus/cam/scsi/scsi_cd.h>
64 
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/pmap.h>
68 
69 #include "advansys.h"
70 #include "advmcode.h"
71 
72 struct adv_quirk_entry {
73 	struct scsi_inquiry_pattern inq_pat;
74 	u_int8_t quirks;
75 #define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
76 #define ADV_QUIRK_FIX_ASYN_XFER		0x02
77 };
78 
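/*
 * cam_quirkmatch() returns the first matching entry, so more specific
 * entries must precede the catch-all default at the end of the table.
 */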
79 static struct adv_quirk_entry adv_quirk_table[] =
80 {
81 	{
82 		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
83 		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
84 	},
85 	{
86 		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
87 		0
88 	},
89 	{
90 		{
91 		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
92 		  "TANDBERG", " TDC 36", "*"
93 		},
94 		0
95 	},
96 	{
97 		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
98 		0
99 	},
100 	{
101 		{
102 		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
103 		  "*", "*", "*"
104 		},
105 		0
106 	},
107 	{
108 		{
109 		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
110 		  "*", "*", "*"
111 		},
112 		0
113 	},
114 	{
115 		/* Default quirk entry */
116 		{
117 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
118 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
119 		},
120 		ADV_QUIRK_FIX_ASYN_XFER,
121 	}
122 };
123 
124 /*
125  * Allowable periods, in SCSI SDTR period factor units (4ns)
126  */
127 static u_int8_t adv_sdtr_period_tbl[] =
128 {
129 	25,
130 	30,
131 	35,
132 	40,
133 	50,
134 	60,
135 	70,
136 	85
137 };
138 
139 static u_int8_t adv_sdtr_period_tbl_ultra[] =
140 {
141 	12,
142 	19,
143 	25,
144 	32,
145 	38,
146 	44,
147 	50,
148 	57,
149 	63,
150 	69,
151 	75,
152 	82,
153 	88,
154 	94,
155 	100,
156 	107
157 };
158 
159 struct ext_msg {
160 	u_int8_t msg_type;
161 	u_int8_t msg_len;
162 	u_int8_t msg_req;
163 	union {
164 		struct {
165 			u_int8_t sdtr_xfer_period;
166 			u_int8_t sdtr_req_ack_offset;
167 		} sdtr;
168 		struct {
169 			u_int8_t wdtr_width;
170 		} wdtr;
171 		struct {
172 			u_int8_t mdp[4];
173 		} mdp;
174 	} u_ext_msg;
175 	u_int8_t res;
176 };
177 
178 #define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
179 #define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
180 #define	wdtr_width	u_ext_msg.wdtr.wdtr_width
181 #define	mdp_b3		u_ext_msg.mdp.mdp[3]
182 #define	mdp_b2		u_ext_msg.mdp.mdp[2]
183 #define	mdp_b1		u_ext_msg.mdp.mdp[1]
184 #define	mdp_b0		u_ext_msg.mdp.mdp[0]
185 
186 /*
187  * Some of the early PCI adapters have problems with
188  * async transfers.  Instead use an offset of 1.
189  */
190 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
191 
192 /* LRAM routines */
193 static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
194 					u_int16_t *buffer, int count);
195 static void	 adv_write_lram_16_multi(struct adv_softc *adv,
196 					 u_int16_t s_addr, u_int16_t *buffer,
197 					 int count);
198 static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
199 				  u_int16_t set_value, int count);
200 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
201 				  int count);
202 
203 static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
204 					      u_int16_t addr, u_int16_t value);
205 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
206 
207 
208 static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
209 				   u_int32_t value);
210 static void	 adv_write_lram_32_multi(struct adv_softc *adv,
211 					 u_int16_t s_addr, u_int32_t *buffer,
212 					 int count);
213 
214 /* EEPROM routines */
215 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
216 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
217 				     u_int16_t value);
218 static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
219 					  u_int8_t cmd_reg);
220 static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
221 					    struct adv_eeprom_config *eeconfig);
222 
223 /* Initialization */
224 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
225 				    u_int16_t *mcode_buf, u_int16_t mcode_size);
226 
227 static void	 adv_reinit_lram(struct adv_softc *adv);
228 static void	 adv_init_lram(struct adv_softc *adv);
229 static int	 adv_init_microcode_var(struct adv_softc *adv);
230 static void	 adv_init_qlink_var(struct adv_softc *adv);
231 
232 /* Interrupts */
233 static void	 adv_disable_interrupt(struct adv_softc *adv);
234 static void	 adv_enable_interrupt(struct adv_softc *adv);
235 static void	 adv_toggle_irq_act(struct adv_softc *adv);
236 
237 /* Chip Control */
238 static int	 adv_host_req_chip_halt(struct adv_softc *adv);
239 static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
240 #if 0 /* UNUSED */
241 static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
242 #endif
243 
244 /* Queue handling and execution */
245 static __inline int
246 		 adv_sgcount_to_qcount(int sgcount);
247 
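/*
 * Convert a scatter/gather segment count into the number of microcode
 * queues required: one for the request itself plus enough SG list
 * queues, each holding up to ADV_SG_LIST_PER_Q elements, to cover the
 * remaining segments.  E.g. if ADV_SG_LIST_PER_Q is 7, an 8 segment
 * transfer needs 1 + 1 = 2 queues.
 */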
248 static __inline int
249 adv_sgcount_to_qcount(int sgcount)
250 {
251 	int	n_sg_list_qs;
252 
253 	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
254 	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
255 		n_sg_list_qs++;
256 	return (n_sg_list_qs + 1);
257 }
258 
259 static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
260 				u_int16_t *inbuf, int words);
261 static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
262 static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
263 				       u_int8_t free_q_head, u_int8_t n_free_q);
264 static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
265 				      u_int8_t free_q_head);
266 static int	 adv_send_scsi_queue(struct adv_softc *adv,
267 				     struct adv_scsi_q *scsiq,
268 				     u_int8_t n_q_required);
269 static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
270 					     struct adv_scsi_q *scsiq,
271 					     u_int q_no);
272 static void	 adv_put_ready_queue(struct adv_softc *adv,
273 				     struct adv_scsi_q *scsiq, u_int q_no);
274 static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
275 			       u_int16_t *buffer, int words);
276 
277 /* Messages */
278 static void	 adv_handle_extmsg_in(struct adv_softc *adv,
279 				      u_int16_t halt_q_addr, u_int8_t q_cntl,
280 				      target_bit_vector target_id,
281 				      int tid);
282 static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
283 				 u_int8_t sdtr_offset);
284 static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
285 					u_int8_t sdtr_data);
286 
287 
288 /* Exported functions first */
289 
290 void
291 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
292 {
293 	struct adv_softc *adv;
294 
295 	adv = (struct adv_softc *)callback_arg;
296 	switch (code) {
297 	case AC_FOUND_DEVICE:
298 	{
299 		struct ccb_getdev *cgd;
300 		target_bit_vector target_mask;
301 		int num_entries;
302 		caddr_t match;
303 		struct adv_quirk_entry *entry;
304 		struct adv_target_transinfo* tinfo;
305 
306 		cgd = (struct ccb_getdev *)arg;
307 
308 		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
309 
310 		num_entries = NELEM(adv_quirk_table);
311 		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
312 				       (caddr_t)adv_quirk_table,
313 				       num_entries, sizeof(*adv_quirk_table),
314 				       scsi_inquiry_match);
315 
316 		if (match == NULL)
317 			panic("advasync: device didn't match wildcard entry!!");
318 
319 		entry = (struct adv_quirk_entry *)match;
320 
321 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
322 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
323 				adv->fix_asyn_xfer_always |= target_mask;
324 			else
325 				adv->fix_asyn_xfer_always &= ~target_mask;
326 			/*
327 			 * We start out life with all bits set and clear them
328 			 * after we've determined that the fix isn't necessary.
329 			 * It may well be that we've already cleared a target
330 			 * before the full inquiry session completes, so don't
331 			 * gratuitously set a target bit even if it has this
332 			 * quirk.  But, if the quirk exonerates a device, clear
333 			 * the bit now.
334 			 */
335 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
336 				adv->fix_asyn_xfer &= ~target_mask;
337 		}
338 		/*
339 		 * Reset our sync settings now that we've determined
340 		 * what quirks are in effect for the device.
341 		 */
342 		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
343 		adv_set_syncrate(adv, cgd->ccb_h.path,
344 				 cgd->ccb_h.target_id,
345 				 tinfo->current.period,
346 				 tinfo->current.offset,
347 				 ADV_TRANS_CUR);
348 		break;
349 	}
350 	case AC_LOST_DEVICE:
351 	{
352 		u_int target_mask;
353 
354 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
355 			target_mask = 0x01 << xpt_path_target_id(path);
356 			adv->fix_asyn_xfer |= target_mask;
357 		}
358 
359 		/*
360 		 * Revert to async transfers
361 		 * for the next device.
362 		 */
363 		adv_set_syncrate(adv, /*path*/NULL,
364 				 xpt_path_target_id(path),
365 				 /*period*/0,
366 				 /*offset*/0,
367 				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
368 	}
369 	default:
370 		break;
371 	}
372 }
373 
374 void
375 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
376 {
377 	u_int8_t control;
378 
379 	/*
380 	 * Start out with the bank reset to 0
381 	 */
382 	control = ADV_INB(adv, ADV_CHIP_CTRL)
383 		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
384 			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
385 			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
386 	if (bank == 1) {
387 		control |= ADV_CC_BANK_ONE;
388 	} else if (bank == 2) {
389 		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
390 	}
391 	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
392 }
393 
394 u_int8_t
395 adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
396 {
397 	u_int8_t   byte_data;
398 	u_int16_t  word_data;
399 
400 	/*
401 	 * LRAM is accessed on 16bit boundaries.
402 	 */
403 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
404 	word_data = ADV_INW(adv, ADV_LRAM_DATA);
405 	if (addr & 1) {
406 #if BYTE_ORDER == BIG_ENDIAN
407 		byte_data = (u_int8_t)(word_data & 0xFF);
408 #else
409 		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
410 #endif
411 	} else {
412 #if BYTE_ORDER == BIG_ENDIAN
413 		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
414 #else
415 		byte_data = (u_int8_t)(word_data & 0xFF);
416 #endif
417 	}
418 	return (byte_data);
419 }
420 
421 void
422 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
423 {
424 	u_int16_t word_data;
425 
426 	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
427 	if (addr & 1) {
428 		word_data &= 0x00FF;
429 		word_data |= (((u_int8_t)value << 8) & 0xFF00);
430 	} else {
431 		word_data &= 0xFF00;
432 		word_data |= ((u_int8_t)value & 0x00FF);
433 	}
434 	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
435 }
436 
437 
438 u_int16_t
439 adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
440 {
441 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
442 	return (ADV_INW(adv, ADV_LRAM_DATA));
443 }
444 
445 void
446 adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
447 {
448 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
449 	ADV_OUTW(adv, ADV_LRAM_DATA, value);
450 }
451 
452 /*
453  * Determine if there is a board at "iobase" by looking
454  * for the AdvanSys signatures.  Return 1 if a board is
455  * found, 0 otherwise.
456  */
457 int
458 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
459 {
460 	u_int16_t signature;
461 
462 	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
463 		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
464 		if ((signature == ADV_1000_ID0W)
465 		 || (signature == ADV_1000_ID0W_FIX))
466 			return (1);
467 	}
468 	return (0);
469 }
470 
471 void
472 adv_lib_init(struct adv_softc *adv)
473 {
474 	if ((adv->type & ADV_ULTRA) != 0) {
475 		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
476 		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
477 	} else {
478 		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
479 		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
480 	}
481 }
482 
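/*
 * Read the board configuration out of the serial EEPROM into
 * *eeprom_config.  Returns the 16 bit sum of the configuration words,
 * which the caller can compare against the checksum word stored in
 * the last EEPROM location.
 */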
483 u_int16_t
484 adv_get_eeprom_config(struct adv_softc *adv,
485 		      struct adv_eeprom_config *eeprom_config)
486 {
487 	u_int16_t	sum;
488 	u_int16_t	*wbuf;
489 	u_int8_t	cfg_beg;
490 	u_int8_t	cfg_end;
491 	u_int8_t	s_addr;
492 
493 	wbuf = (u_int16_t *)eeprom_config;
494 	sum = 0;
495 
496 	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
497 		*wbuf = adv_read_eeprom_16(adv, s_addr);
498 		sum += *wbuf;
499 	}
500 
501 	if (adv->type & ADV_VL) {
502 		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
503 		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
504 	} else {
505 		cfg_beg = ADV_EEPROM_CFG_BEG;
506 		cfg_end = ADV_EEPROM_MAX_ADDR;
507 	}
508 
509 	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
510 		*wbuf = adv_read_eeprom_16(adv, s_addr);
511 		sum += *wbuf;
512 #if ADV_DEBUG_EEPROM
513 		kprintf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
514 #endif
515 	}
516 	*wbuf = adv_read_eeprom_16(adv, s_addr);
517 	return (sum);
518 }
519 
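/*
 * Write a new EEPROM configuration, retrying failed attempts up to
 * ADV_EEPROM_MAX_RETRY times.  Returns non-zero on failure.
 */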
520 int
521 adv_set_eeprom_config(struct adv_softc *adv,
522 		      struct adv_eeprom_config *eeprom_config)
523 {
524 	int	retry;
525 
526 	retry = 0;
527 	while (1) {
528 		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
529 			break;
530 		}
531 		if (++retry > ADV_EEPROM_MAX_RETRY) {
532 			break;
533 		}
534 	}
535 	return (retry > ADV_EEPROM_MAX_RETRY);
536 }
537 
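/*
 * Reset the chip, and optionally the SCSI bus as well, leaving the
 * chip halted.  Returns non-zero if the chip is halted on exit.
 */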
538 int
539 adv_reset_chip(struct adv_softc *adv, int reset_bus)
540 {
541 	adv_stop_chip(adv);
542 	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
543 				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
544 	DELAY(60);
545 
546 	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
547 	adv_set_chip_ih(adv, ADV_INS_HALT);
548 
549 	if (reset_bus)
550 		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);
551 
552 	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
553 	if (reset_bus)
554 		DELAY(200 * 1000);
555 
556 	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
557 	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
558 	return (adv_is_chip_halted(adv));
559 }
560 
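/*
 * Probe for external LRAM with a write/verify test of a queue address
 * (queue 241) that should only be backed by the larger external RAM.
 */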
561 int
562 adv_test_external_lram(struct adv_softc* adv)
563 {
564 	u_int16_t	q_addr;
565 	u_int16_t	saved_value;
566 	int		success;
567 
568 	success = 0;
569 
570 	q_addr = ADV_QNO_TO_QADDR(241);
571 	saved_value = adv_read_lram_16(adv, q_addr);
572 	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
573 		success = 1;
574 		adv_write_lram_16(adv, q_addr, saved_value);
575 	}
576 	return (success);
577 }
578 
579 
580 int
581 adv_init_lram_and_mcode(struct adv_softc *adv)
582 {
583 	u_int32_t	retval;
584 
585 	adv_disable_interrupt(adv);
586 
587 	adv_init_lram(adv);
588 
589 	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
590 				    adv_mcode_size);
591 	if (retval != adv_mcode_chksum) {
592 		kprintf("adv%d: Microcode download failed checksum!\n",
593 		       adv->unit);
594 		return (1);
595 	}
596 
597 	if (adv_init_microcode_var(adv) != 0)
598 		return (1);
599 
600 	adv_enable_interrupt(adv);
601 	return (0);
602 }
603 
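/*
 * Decode the board's IRQ setting from the low config register.  The
 * encoding differs between VL and ISA boards; invalid VL encodings
 * are reported as 0.
 */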
604 u_int8_t
605 adv_get_chip_irq(struct adv_softc *adv)
606 {
607 	u_int16_t	cfg_lsw;
608 	u_int8_t	chip_irq;
609 
610 	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
611 
612 	if ((adv->type & ADV_VL) != 0) {
613 		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
614 		if ((chip_irq == 0) ||
615 		    (chip_irq == 4) ||
616 		    (chip_irq == 7)) {
617 			return (0);
618 		}
619 		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
620 	}
621 	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
622 	if (chip_irq == 3)
623 		chip_irq += 2;
624 	return (chip_irq + ADV_MIN_IRQ_NO);
625 }
626 
627 u_int8_t
628 adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
629 {
630 	u_int16_t	cfg_lsw;
631 
632 	if ((adv->type & ADV_VL) != 0) {
633 		if (irq_no != 0) {
634 			if ((irq_no < ADV_MIN_IRQ_NO)
635 			 || (irq_no > ADV_MAX_IRQ_NO)) {
636 				irq_no = 0;
637 			} else {
638 				irq_no -= ADV_MIN_IRQ_NO - 1;
639 			}
640 		}
641 		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
642 		cfg_lsw |= 0x0010;
643 		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
644 		adv_toggle_irq_act(adv);
645 
646 		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
647 		cfg_lsw |= (irq_no & 0x07) << 2;
648 		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
649 		adv_toggle_irq_act(adv);
650 	} else if ((adv->type & ADV_ISA) != 0) {
651 		if (irq_no == 15)
652 			irq_no -= 2;
653 		irq_no -= ADV_MIN_IRQ_NO;
654 		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
655 		cfg_lsw |= (irq_no & 0x03) << 2;
656 		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
657 	}
658 	return (adv_get_chip_irq(adv));
659 }
660 
661 void
662 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
663 {
664 	u_int16_t cfg_lsw;
665 
666 	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
667 	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
668 		return;
669 	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
670 	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
671 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
672 }
673 
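/*
 * Prepare a SCSI request for the microcode: set up any pending sync
 * negotiation message, apply the relevant bug workarounds, and queue
 * the request if enough microcode queues are free.  Returns 0 on
 * success, 1 if the request could not be queued.
 */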
674 int
675 adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
676 		       u_int32_t datalen)
677 {
678 	struct		adv_target_transinfo* tinfo;
679 	u_int32_t	*p_data_addr;
680 	u_int32_t	*p_data_bcount;
681 	int		disable_syn_offset_one_fix;
682 	int		retval;
683 	u_int		n_q_required;
684 	u_int32_t	addr;
685 	u_int8_t	sg_entry_cnt;
686 	u_int8_t	target_ix;
687 	u_int8_t	sg_entry_cnt_minus_one;
688 	u_int8_t	tid_no;
689 
690 	scsiq->q1.q_no = 0;
691 	retval = 1;  /* Default to error case */
692 	target_ix = scsiq->q2.target_ix;
693 	tid_no = ADV_TIX_TO_TID(target_ix);
694 	tinfo = &adv->tinfo[tid_no];
695 
696 	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
697 		/* Renegotiate if appropriate. */
698 		adv_set_syncrate(adv, /*struct cam_path */NULL,
699 				 tid_no, /*period*/0, /*offset*/0,
700 				 ADV_TRANS_CUR);
701 		if (tinfo->current.period != tinfo->goal.period) {
702 			adv_msgout_sdtr(adv, tinfo->goal.period,
703 					tinfo->goal.offset);
704 			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
705 		}
706 	}
707 
708 	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
709 		sg_entry_cnt = scsiq->sg_head->entry_cnt;
710 		sg_entry_cnt_minus_one = sg_entry_cnt - 1;
711 
712 #ifdef DIAGNOSTIC
713 		if (sg_entry_cnt <= 1)
714 			panic("adv_execute_scsi_queue: Queue "
715 			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);
716 
717 		if (sg_entry_cnt > ADV_MAX_SG_LIST)
718 			panic("adv_execute_scsi_queue: "
719 			      "Queue with too many segs.");
720 
721 		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
722 			int i;
723 
724 			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
725 				addr = scsiq->sg_head->sg_list[i].addr +
726 				       scsiq->sg_head->sg_list[i].bytes;
727 
728 				if ((addr & 0x0003) != 0)
729 					panic("adv_execute_scsi_queue: SG "
730 					      "with odd address or byte count");
731 			}
732 		}
733 #endif
734 		p_data_addr =
735 		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
736 		p_data_bcount =
737 		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;
738 
739 		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
740 		scsiq->sg_head->queue_cnt = n_q_required - 1;
741 	} else {
742 		p_data_addr = &scsiq->q1.data_addr;
743 		p_data_bcount = &scsiq->q1.data_cnt;
744 		n_q_required = 1;
745 	}
746 
747 	disable_syn_offset_one_fix = FALSE;
748 
749 	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
750 	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {
751 
752 		if (datalen != 0) {
753 			if (datalen < 512) {
754 				disable_syn_offset_one_fix = TRUE;
755 			} else {
756 				if (scsiq->cdbptr[0] == INQUIRY
757 				 || scsiq->cdbptr[0] == REQUEST_SENSE
758 				 || scsiq->cdbptr[0] == READ_CAPACITY
759 				 || scsiq->cdbptr[0] == MODE_SELECT_6
760 				 || scsiq->cdbptr[0] == MODE_SENSE_6
761 				 || scsiq->cdbptr[0] == MODE_SENSE_10
762 				 || scsiq->cdbptr[0] == MODE_SELECT_10
763 				 || scsiq->cdbptr[0] == READ_TOC) {
764 					disable_syn_offset_one_fix = TRUE;
765 				}
766 			}
767 		}
768 	}
769 
770 	if (disable_syn_offset_one_fix) {
771 		scsiq->q2.tag_code &=
772 		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
773 		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
774 				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
775 	}
776 
777 	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
778 	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
779 		u_int8_t extra_bytes;
780 
781 		addr = *p_data_addr + *p_data_bcount;
782 		extra_bytes = addr & 0x0003;
783 		if (extra_bytes != 0
784 		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
785 		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
786 			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
787 			scsiq->q1.extra_bytes = extra_bytes;
788 			*p_data_bcount -= extra_bytes;
789 		}
790 	}
791 
792 	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
793 	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
794 		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);
795 
796 	return (retval);
797 }
798 
799 
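/*
 * Copy the completion information for the request at q_addr out of
 * LRAM.  Returns the number of SG list queues that were chained to
 * the request.
 */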
800 u_int8_t
801 adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
802 		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
803 {
804 	u_int16_t val;
805 	u_int8_t  sg_queue_cnt;
806 
807 	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
808 		       (u_int16_t *)scsiq,
809 		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);
810 
811 #if BYTE_ORDER == BIG_ENDIAN
812 	adv_adj_endian_qdone_info(scsiq);
813 #endif
814 
815 	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
816 	scsiq->q_status = val & 0xFF;
817 	scsiq->q_no = (val >> 8) & 0xFF;
818 
819 	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
820 	scsiq->cntl = val & 0xFF;
821 	sg_queue_cnt = (val >> 8) & 0xFF;
822 
823 	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
824 	scsiq->sense_len = val & 0xFF;
825 	scsiq->extra_bytes = (val >> 8) & 0xFF;
826 
827 	/*
828 	 * Due to a bug in accessing LRAM on the 940UA, the residual
829 	 * is split into separate high and low 16bit quantities.
830 	 */
831 	scsiq->remain_bytes =
832 	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
833 	scsiq->remain_bytes |=
834 	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;
835 
836 	/*
837 	 * XXX Is this just a safeguard or will the counter really
838 	 * have bogus upper bits?
839 	 */
840 	scsiq->remain_bytes &= max_dma_count;
841 
842 	return (sg_queue_cnt);
843 }
844 
845 int
846 adv_start_chip(struct adv_softc *adv)
847 {
848 	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
849 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
850 		return (0);
851 	return (1);
852 }
853 
854 int
855 adv_stop_execution(struct adv_softc *adv)
856 {
857 	int count;
858 
859 	count = 0;
860 	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
861 		adv_write_lram_8(adv, ADV_STOP_CODE_B,
862 				 ADV_STOP_REQ_RISC_STOP);
863 		do {
864 			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
865 				ADV_STOP_ACK_RISC_STOP) {
866 				return (1);
867 			}
868 			DELAY(1000);
869 		} while (count++ < 20);
870 	}
871 	return (0);
872 }
873 
874 int
875 adv_is_chip_halted(struct adv_softc *adv)
876 {
877 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
878 		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
879 			return (1);
880 		}
881 	}
882 	return (0);
883 }
884 
885 /*
886  * XXX The numeric constants and the loops in this routine
887  * need to be documented.
888  */
889 void
890 adv_ack_interrupt(struct adv_softc *adv)
891 {
892 	u_int8_t	host_flag;
893 	u_int8_t	risc_flag;
894 	int		loop;
895 
896 	loop = 0;
897 	do {
898 		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
899 		if (loop++ > 0x7FFF) {
900 			break;
901 		}
902 	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);
903 
904 	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
905 	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
906 			 host_flag | ADV_HOST_FLAG_ACK_INT);
907 
908 	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
909 	loop = 0;
910 	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
911 		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
912 		if (loop++ > 3) {
913 			break;
914 		}
915 	}
916 
917 	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
918 }
919 
920 /*
921  * Handle all conditions that may halt the chip while it
922  * waits for us to intervene.
923  */
924 void
925 adv_isr_chip_halted(struct adv_softc *adv)
926 {
927 	u_int16_t	  int_halt_code;
928 	u_int16_t	  halt_q_addr;
929 	target_bit_vector target_mask;
930 	target_bit_vector scsi_busy;
931 	u_int8_t	  halt_qp;
932 	u_int8_t	  target_ix;
933 	u_int8_t	  q_cntl;
934 	u_int8_t	  tid_no;
935 
936 	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
937 	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
938 	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
939 	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
940 	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
941 	tid_no = ADV_TIX_TO_TID(target_ix);
942 	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
943 	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
944 		/*
945 		 * Temporarily disable the async fix by removing
946 		 * this target from the list of affected targets,
947 		 * setting our async rate, and then putting it
948 		 * back into the mask.
949 		 */
950 		adv->fix_asyn_xfer &= ~target_mask;
951 		adv_set_syncrate(adv, /*struct cam_path */NULL,
952 				 tid_no, /*period*/0, /*offset*/0,
953 				 ADV_TRANS_ACTIVE);
954 		adv->fix_asyn_xfer |= target_mask;
955 	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
956 		adv_set_syncrate(adv, /*struct cam_path */NULL,
957 				 tid_no, /*period*/0, /*offset*/0,
958 				 ADV_TRANS_ACTIVE);
959 	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
960 		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
961 				     target_mask, tid_no);
962 	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
963 		struct	  adv_target_transinfo* tinfo;
964 		union	  ccb *ccb;
965 		u_int32_t cinfo_index;
966 		u_int8_t  tag_code;
967 		u_int8_t  q_status;
968 
969 		tinfo = &adv->tinfo[tid_no];
970 		q_cntl |= QC_REQ_SENSE;
971 
972 		/* Renegotiate if appropriate. */
973 		adv_set_syncrate(adv, /*struct cam_path */NULL,
974 				 tid_no, /*period*/0, /*offset*/0,
975 				 ADV_TRANS_CUR);
976 		if (tinfo->current.period != tinfo->goal.period) {
977 			adv_msgout_sdtr(adv, tinfo->goal.period,
978 					tinfo->goal.offset);
979 			q_cntl |= QC_MSG_OUT;
980 		}
981 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
982 
983 		/* Don't tag request sense commands */
984 		tag_code = adv_read_lram_8(adv,
985 					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
986 		tag_code &=
987 		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
988 
989 		if ((adv->fix_asyn_xfer & target_mask) != 0
990 		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
991 			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
992 				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
993 		}
994 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
995 				 tag_code);
996 		q_status = adv_read_lram_8(adv,
997 					   halt_q_addr + ADV_SCSIQ_B_STATUS);
998 		q_status |= (QS_READY | QS_BUSY);
999 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
1000 				 q_status);
1001 		/*
1002 		 * Freeze the devq until we can handle the sense condition.
1003 		 */
1004 		cinfo_index =
1005 		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1006 		ccb = adv->ccb_infos[cinfo_index].ccb;
1007 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1008 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
1009 		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1010 			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1011 			      /*queued_only*/TRUE);
1012 		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1013 		scsi_busy &= ~target_mask;
1014 		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1015 		/*
1016 		 * Ensure we have enough time to actually
1017 		 * retrieve the sense.
1018 		 */
1019 		callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
1020 	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
1021 		struct	ext_msg out_msg;
1022 
1023 		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1024 				       (u_int16_t *) &out_msg,
1025 				       sizeof(out_msg)/2);
1026 
1027 		if ((out_msg.msg_type == MSG_EXTENDED)
1028 		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
1029 		 && (out_msg.msg_req == MSG_EXT_SDTR)) {
1030 
1031 			/* Revert to Async */
1032 			adv_set_syncrate(adv, /*struct cam_path */NULL,
1033 					 tid_no, /*period*/0, /*offset*/0,
1034 					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1035 		}
1036 		q_cntl &= ~QC_MSG_OUT;
1037 		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1038 	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
1039 		u_int8_t scsi_status;
1040 		union ccb *ccb;
1041 		u_int32_t cinfo_index;
1042 
1043 		scsi_status = adv_read_lram_8(adv, halt_q_addr
1044 					      + ADV_SCSIQ_SCSI_STATUS);
1045 		cinfo_index =
1046 		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1047 		ccb = adv->ccb_infos[cinfo_index].ccb;
1048 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
1049 		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
1050 		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
1051 		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
1052 			      /*ccb*/NULL, CAM_REQUEUE_REQ,
1053 			      /*queued_only*/TRUE);
1054 		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
1055 		scsi_busy &= ~target_mask;
1056 		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
1057 	} else {
1058 		kprintf("Unhandled Halt Code %x\n", int_halt_code);
1059 	}
1060 	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1061 }
1062 
1063 void
1064 adv_sdtr_to_period_offset(struct adv_softc *adv,
1065 			  u_int8_t sync_data, u_int8_t *period,
1066 			  u_int8_t *offset, int tid)
1067 {
1068 	if ((adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)) != 0
1069 	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1070 		*period = *offset = 0;
1071 	} else {
1072 		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1073 		*offset = sync_data & 0xF;
1074 	}
1075 }
1076 
1077 void
1078 adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
1079 		 u_int tid, u_int period, u_int offset, u_int type)
1080 {
1081 	struct adv_target_transinfo* tinfo;
1082 	u_int old_period;
1083 	u_int old_offset;
1084 	u_int8_t sdtr_data;
1085 
1086 	tinfo = &adv->tinfo[tid];
1087 
1088 	/* Filter our input */
1089 	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
1090 					      &offset, tid);
1091 
1092 	old_period = tinfo->current.period;
1093 	old_offset = tinfo->current.offset;
1094 
1095 	if ((type & ADV_TRANS_CUR) != 0
1096 	 && ((old_period != period || old_offset != offset)
1097 	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
1098 		int halted;
1099 
1100 		crit_enter();
1101 		halted = adv_is_chip_halted(adv);
1102 		if (halted == 0)
1103 			/* Must halt the chip first */
1104 			adv_host_req_chip_halt(adv);
1105 
1106 		/* Update current hardware settings */
1107 		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);
1108 
1109 		/*
1110 		 * If a target can run in sync mode, we don't need
1111 		 * to check it for sync problems.
1112 		 */
1113 		if (offset != 0)
1114 			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);
1115 
1116 		if (halted == 0)
1117 			/* Start the chip again */
1118 			adv_start_chip(adv);
1119 
1120 		crit_exit();
1121 		tinfo->current.period = period;
1122 		tinfo->current.offset = offset;
1123 
1124 		if (path != NULL) {
1125 			/*
1126 			 * Tell the SCSI layer about the
1127 			 * new transfer parameters.
1128 			 */
1129 			struct	ccb_trans_settings neg;
1130 			struct	ccb_trans_settings_spi *spi;
1131 			memset(&neg, 0, sizeof(neg));
1132 			spi = &neg.xport_specific.spi;
1133 
1134 			neg.protocol = PROTO_SCSI;
1135 			neg.protocol_version = SCSI_REV_2;
1136 			neg.transport = XPORT_SPI;
1137 			neg.transport_version = 2;
1138 
1139 			spi->sync_offset = offset;
1140 			spi->sync_period = period;
1141 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1142 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1143 			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
1144 			xpt_async(AC_TRANSFER_NEG, path, &neg);
1145 		}
1146 	}
1147 
1148 	if ((type & ADV_TRANS_GOAL) != 0) {
1149 		tinfo->goal.period = period;
1150 		tinfo->goal.offset = offset;
1151 	}
1152 
1153 	if ((type & ADV_TRANS_USER) != 0) {
1154 		tinfo->user.period = period;
1155 		tinfo->user.offset = offset;
1156 	}
1157 }
1158 
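/*
 * Clamp the requested period and offset to values the chip supports
 * and convert them to the chip's SDTR data format: the period table
 * index in the high nibble and the REQ/ACK offset in the low nibble.
 * Requests we cannot satisfy fall back to async.
 */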
1159 u_int8_t
1160 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1161 			  u_int *offset, int tid)
1162 {
1163 	u_int i;
1164 	u_int dummy_offset;
1165 	u_int dummy_period;
1166 
1167 	if (offset == NULL) {
1168 		dummy_offset = 0;
1169 		offset = &dummy_offset;
1170 	}
1171 
1172 	if (period == NULL) {
1173 		dummy_period = 0;
1174 		period = &dummy_period;
1175 	}
1176 
1177 	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1178 	if (*period != 0 && *offset != 0) {
1179 		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1180 			if (*period <= adv->sdtr_period_tbl[i]) {
1181 				/*
1182 				 * When responding to a target that requests
1183 				 * sync, the requested rate may fall between
1184 				 * two rates that we can output, but still be
1185 				 * a rate that we can receive.  Because of this,
1186 				 * we want to respond to the target with
1187 				 * the same rate that it sent to us even
1188 				 * if the period we use to send data to it
1189 				 * is lower.  Only lower the response period
1190 				 * if we must.
1191 				 */
1192 				if (i == 0 /* Our maximum rate */)
1193 					*period = adv->sdtr_period_tbl[0];
1194 				return ((i << 4) | *offset);
1195 			}
1196 		}
1197 	}
1198 
1199 	/* Must go async */
1200 	*period = 0;
1201 	*offset = 0;
1202 	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1203 		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1204 	return (0);
1205 }
1206 
1207 /* Internal Routines */
1208 
1209 static void
1210 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1211 		       u_int16_t *buffer, int count)
1212 {
1213 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1214 	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
1215 }
1216 
1217 static void
1218 adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1219 			u_int16_t *buffer, int count)
1220 {
1221 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1222 	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
1223 }
1224 
1225 static void
1226 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
1227 		 u_int16_t set_value, int count)
1228 {
1229 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1230 	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
1231 			      set_value, count);
1232 }
1233 
1234 static u_int32_t
1235 adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1236 {
1237 	u_int32_t	sum;
1238 	int		i;
1239 
1240 	sum = 0;
1241 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1242 	for (i = 0; i < count; i++)
1243 		sum += ADV_INW(adv, ADV_LRAM_DATA);
1244 	return (sum);
1245 }
1246 
1247 static int
1248 adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1249 			     u_int16_t value)
1250 {
1251 	int	retval;
1252 
1253 	retval = 0;
1254 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1255 	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1256 	DELAY(10000);
1257 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1258 	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1259 		retval = 1;
1260 	return (retval);
1261 }
1262 
1263 static u_int32_t
1264 adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
1265 {
1266 	u_int16_t           val_low, val_high;
1267 
1268 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1269 
1270 #if BYTE_ORDER == BIG_ENDIAN
1271 	val_high = ADV_INW(adv, ADV_LRAM_DATA);
1272 	val_low = ADV_INW(adv, ADV_LRAM_DATA);
1273 #else
1274 	val_low = ADV_INW(adv, ADV_LRAM_DATA);
1275 	val_high = ADV_INW(adv, ADV_LRAM_DATA);
1276 #endif
1277 
1278 	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
1279 }
1280 
1281 static void
1282 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
1283 {
1284 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1285 
1286 #if BYTE_ORDER == BIG_ENDIAN
1287 	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1288 	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1289 #else
1290 	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1291 	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1292 #endif
1293 }
1294 
1295 static void
1296 adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
1297 			u_int32_t *buffer, int count)
1298 {
1299 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1300 	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
1301 }
1302 
1303 static u_int16_t
1304 adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
1305 {
1306 	u_int16_t read_wval;
1307 	u_int8_t  cmd_reg;
1308 
1309 	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1310 	DELAY(1000);
1311 	cmd_reg = addr | ADV_EEPROM_CMD_READ;
1312 	adv_write_eeprom_cmd_reg(adv, cmd_reg);
1313 	DELAY(1000);
1314 	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
1315 	DELAY(1000);
1316 	return (read_wval);
1317 }
1318 
1319 static u_int16_t
1320 adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
1321 {
1322 	u_int16_t	read_value;
1323 
1324 	read_value = adv_read_eeprom_16(adv, addr);
1325 	if (read_value != value) {
1326 		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
1327 		DELAY(1000);
1328 
1329 		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
1330 		DELAY(1000);
1331 
1332 		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
1333 		DELAY(20 * 1000);
1334 
1335 		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1336 		DELAY(1000);
1337 		read_value = adv_read_eeprom_16(adv, addr);
1338 	}
1339 	return (read_value);
1340 }
1341 
1342 static int
1343 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1344 {
1345 	u_int8_t read_back;
1346 	int	 retry;
1347 
1348 	retry = 0;
1349 	while (1) {
1350 		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1351 		DELAY(1000);
1352 		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1353 		if (read_back == cmd_reg) {
1354 			return (1);
1355 		}
1356 		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1357 			return (0);
1358 		}
1359 	}
1360 }
1361 
1362 static int
1363 adv_set_eeprom_config_once(struct adv_softc *adv,
1364 			   struct adv_eeprom_config *eeprom_config)
1365 {
1366 	int		n_error;
1367 	u_int16_t	*wbuf;
1368 	u_int16_t	sum;
1369 	u_int8_t	s_addr;
1370 	u_int8_t	cfg_beg;
1371 	u_int8_t	cfg_end;
1372 
1373 	wbuf = (u_int16_t *)eeprom_config;
1374 	n_error = 0;
1375 	sum = 0;
1376 	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
1377 		sum += *wbuf;
1378 		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
1379 			n_error++;
1380 		}
1381 	}
1382 	if (adv->type & ADV_VL) {
1383 		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
1384 		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
1385 	} else {
1386 		cfg_beg = ADV_EEPROM_CFG_BEG;
1387 		cfg_end = ADV_EEPROM_MAX_ADDR;
1388 	}
1389 
1390 	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
1391 		sum += *wbuf;
1392 		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
1393 			n_error++;
1394 		}
1395 	}
1396 	*wbuf = sum;
1397 	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
1398 		n_error++;
1399 	}
1400 	wbuf = (u_int16_t *)eeprom_config;
1401 	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
1402 		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1403 			n_error++;
1404 		}
1405 	}
1406 	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
1407 		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
1408 			n_error++;
1409 		}
1410 	}
1411 	return (n_error);
1412 }
1413 
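/*
 * Copy the microcode image into LRAM and record its size and checksum
 * where the microcode expects them.  Returns the checksum computed
 * from LRAM so the caller can verify the download.
 */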
1414 static u_int32_t
1415 adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
1416 		   u_int16_t *mcode_buf, u_int16_t mcode_size)
1417 {
1418 	u_int32_t chksum;
1419 	u_int16_t mcode_lram_size;
1420 	u_int16_t mcode_chksum;
1421 
1422 	mcode_lram_size = mcode_size >> 1;
1423 	/* XXX Why zero the memory just before you write the whole thing?? */
1424 	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
1425 	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);
1426 
1427 	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
1428 	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
1429 						   ((mcode_size - s_addr
1430 						     - ADV_CODE_SEC_BEG) >> 1));
1431 	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
1432 	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
1433 	return (chksum);
1434 }
1435 
1436 static void
1437 adv_reinit_lram(struct adv_softc *adv)
{
1438 	adv_init_lram(adv);
1439 	adv_init_qlink_var(adv);
1440 }
1441 
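/*
 * Clear the LRAM queue region and link the request queue blocks into
 * the free list structure the microcode expects, including the extra
 * housekeeping queue blocks past max_openings.
 */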
1442 static void
1443 adv_init_lram(struct adv_softc *adv)
1444 {
1445 	u_int8_t  i;
1446 	u_int16_t s_addr;
1447 
1448 	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
1449 			 (((adv->max_openings + 2 + 1) * 64) >> 1));
1450 
1451 	i = ADV_MIN_ACTIVE_QNO;
1452 	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;
1453 
1454 	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1455 	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
1456 	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1457 	i++;
1458 	s_addr += ADV_QBLK_SIZE;
1459 	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
1460 		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
1461 		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
1462 		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1463 	}
1464 
1465 	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
1466 	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
1467 	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
1468 	i++;
1469 	s_addr += ADV_QBLK_SIZE;
1470 
1471 	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
1472 		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
1473 		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
1474 		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
1475 	}
1476 }
1477 
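/*
 * Initialize the variables shared between host and microcode: start
 * all targets async, set up the queue list variables, disconnect
 * enables, host SCSI ID, and overrun buffer, and finally point the
 * chip's program counter at the microcode entry point.
 */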
1478 static int
1479 adv_init_microcode_var(struct adv_softc *adv)
1480 {
1481 	int	 i;
1482 
1483 	for (i = 0; i <= ADV_MAX_TID; i++) {
1484 
1485 		/* Start out async all around */
1486 		adv_set_syncrate(adv, /*path*/NULL,
1487 				 i, 0, 0,
1488 				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
1489 	}
1490 
1491 	adv_init_qlink_var(adv);
1492 
1493 	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
1494 	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);
1495 
1496 	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);
1497 
1498 	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);
1499 
1500 	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1501 	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
1502 		kprintf("adv%d: Unable to set program counter. Aborting.\n",
1503 		       adv->unit);
1504 		return (1);
1505 	}
1506 	return (0);
1507 }
1508 
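/*
 * Reset the microcode's queue bookkeeping: free and done list heads,
 * busy and disconnected queue heads, error/halt codes, and the words
 * at the start of the queue address region.
 */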
1509 static void
1510 adv_init_qlink_var(struct adv_softc *adv)
1511 {
1512 	int	  i;
1513 	u_int16_t lram_addr;
1514 
1515 	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
1516 	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);
1517 
1518 	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
1519 	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);
1520 
1521 	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
1522 			 (u_int8_t)((int) adv->max_openings + 1));
1523 	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
1524 			 (u_int8_t)((int) adv->max_openings + 2));
1525 
1526 	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);
1527 
1528 	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
1529 	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1530 	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
1531 	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
1532 	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
1533 	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);
1534 
1535 	lram_addr = ADV_QADR_BEG;
1536 	for (i = 0; i < 32; i++, lram_addr += 2)
1537 		adv_write_lram_16(adv, lram_addr, 0);
1538 }
1539 
1540 static void
1541 adv_disable_interrupt(struct adv_softc *adv)
1542 {
1543 	u_int16_t cfg;
1544 
1545 	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1546 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1547 }
1548 
1549 static void
1550 adv_enable_interrupt(struct adv_softc *adv)
1551 {
1552 	u_int16_t cfg;
1553 
1554 	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1555 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1556 }
1557 
1558 static void
1559 adv_toggle_irq_act(struct adv_softc *adv)
1560 {
1561 	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
1562 	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
1563 }
1564 
1565 void
1566 adv_start_execution(struct adv_softc *adv)
1567 {
1568 	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
1569 		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
1570 	}
1571 }
1572 
1573 int
1574 adv_stop_chip(struct adv_softc *adv)
1575 {
1576 	u_int8_t cc_val;
1577 
1578 	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1579 		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1580 	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1581 	adv_set_chip_ih(adv, ADV_INS_HALT);
1582 	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1583 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1584 		return (0);
1585 	}
1586 	return (1);
1587 }
1588 
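/*
 * Ask the RISC to halt by posting a stop code and busy-waiting for
 * the halted status.  Returns non-zero if the chip is halted.
 */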
1589 static int
1590 adv_host_req_chip_halt(struct adv_softc *adv)
1591 {
1592 	int	 count;
1593 	u_int8_t saved_stop_code;
1594 
1595 	if (adv_is_chip_halted(adv))
1596 		return (1);
1597 
1598 	count = 0;
1599 	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
1600 	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
1601 			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
1602 	while (adv_is_chip_halted(adv) == 0
1603 	    && count++ < 2000)
1604 		;
1605 
1606 	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
1607 	return (count < 2000);
1608 }
1609 
1610 static void
1611 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
1612 {
1613 	adv_set_bank(adv, 1);
1614 	ADV_OUTW(adv, ADV_REG_IH, ins_code);
1615 	adv_set_bank(adv, 0);
1616 }
1617 
1618 #if 0 /* UNUSED */
1619 static u_int8_t
1620 adv_get_chip_scsi_ctrl(struct adv_softc *adv)
1621 {
1622 	u_int8_t scsi_ctrl;
1623 
1624 	adv_set_bank(adv, 1);
1625 	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
1626 	adv_set_bank(adv, 0);
1627 	return (scsi_ctrl);
1628 }
1629 #endif
1630 
1631 /*
1632  * XXX Looks like more padding issues in this routine as well.
1633  *     There has to be a way to turn this into an insw.
1634  */
1635 static void
1636 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1637 	       u_int16_t *inbuf, int words)
1638 {
1639 	int	i;
1640 
1641 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1642 	for (i = 0; i < words; i++, inbuf++) {
1643 		if (i == 5) {
1644 			continue;
1645 		}
1646 		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1647 	}
1648 }
1649 
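/*
 * Return the number of microcode queues available beyond the
 * reserved ADV_MIN_FREE_Q, or 0 (recording the shortfall in
 * openings_needed) when n_qs queues cannot be satisfied.
 */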
1650 static u_int
1651 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1652 {
1653 	u_int	  cur_used_qs;
1654 	u_int	  cur_free_qs;
1655 
1656 	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1657 
1658 	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1659 		cur_free_qs = adv->max_openings - cur_used_qs;
1660 		return (cur_free_qs);
1661 	}
1662 	adv->openings_needed = n_qs;
1663 	return (0);
1664 }
1665 
1666 static u_int8_t
1667 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1668 		      u_int8_t n_free_q)
1669 {
1670 	int i;
1671 
1672 	for (i = 0; i < n_free_q; i++) {
1673 		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1674 		if (free_q_head == ADV_QLINK_END)
1675 			break;
1676 	}
1677 	return (free_q_head);
1678 }
1679 
1680 static u_int8_t
1681 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1682 {
1683 	u_int16_t	q_addr;
1684 	u_int8_t	next_qp;
1685 	u_int8_t	q_status;
1686 
1687 	next_qp = ADV_QLINK_END;
1688 	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1689 	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);
1690 
1691 	if ((q_status & QS_READY) == 0)
1692 		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1693 
1694 	return (next_qp);
1695 }
1696 
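/*
 * Allocate the required number of queues from the free list and, if
 * they are available, fill them in and hand the request to the
 * microcode.  Returns 0 on success, 1 if the free list is too short.
 */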
1697 static int
1698 adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1699 		    u_int8_t n_q_required)
1700 {
1701 	u_int8_t	free_q_head;
1702 	u_int8_t	next_qp;
1703 	int		retval;
1704 
1705 	retval = 1;
1706 	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1707 	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1708 	    != ADV_QLINK_END) {
1709 		scsiq->q1.q_no = free_q_head;
1710 
1711 		/*
1712 		 * Now that we know our Q number, point our sense
1713 		 * buffer pointer at a bus dma mapped area into which
1714 		 * we can DMA the data.
1715 		 */
1716 		scsiq->q1.sense_addr = adv->sense_physbase
1717 		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1718 		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1719 		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1720 		adv->cur_active += n_q_required;
1721 		retval = 0;
1722 	}
1723 	return (retval);
1724 }
1725 
1726 
1727 static void
1728 adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1729 			    u_int q_no)
1730 {
1731 	u_int8_t	sg_list_dwords;
1732 	u_int8_t	sg_index, i;
1733 	u_int8_t	sg_entry_cnt;
1734 	u_int8_t	next_qp;
1735 	u_int16_t	q_addr;
1736 	struct		adv_sg_head *sg_head;
1737 	struct		adv_sg_list_q scsi_sg_q;
1738 
1739 	sg_head = scsiq->sg_head;
1740 
1741 	if (sg_head) {
1742 		sg_entry_cnt = sg_head->entry_cnt - 1;
1743 #ifdef DIAGNOSTIC
1744 		if (sg_entry_cnt == 0)
1745 			panic("adv_put_ready_sg_list_queue: ScsiQ with "
1746 			      "a SG list but only one element");
1747 		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
1748 			panic("adv_put_ready_sg_list_queue: ScsiQ with "
1749 			      "a SG list but QC_SG_HEAD not set");
1750 #endif
1751 		q_addr = ADV_QNO_TO_QADDR(q_no);
1752 		sg_index = 1;
1753 		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
1754 		scsi_sg_q.sg_head_qp = q_no;
1755 		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
1756 		for (i = 0; i < sg_head->queue_cnt; i++) {
1757 			u_int8_t segs_this_q;
1758 
1759 			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
1760 				segs_this_q = ADV_SG_LIST_PER_Q;
1761 			else {
1762 				/* This will be the last segment then */
1763 				segs_this_q = sg_entry_cnt;
1764 				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
1765 			}
1766 			scsi_sg_q.seq_no = i + 1;
1767 			sg_list_dwords = segs_this_q << 1;
1768 			if (i == 0) {
1769 				scsi_sg_q.sg_list_cnt = segs_this_q;
1770 				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
1771 			} else {
1772 				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
1773 				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
1774 			}
1775 			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1776 			scsi_sg_q.q_no = next_qp;
1777 			q_addr = ADV_QNO_TO_QADDR(next_qp);
1778 
1779 			adv_write_lram_16_multi(adv,
1780 						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
1781 						(u_int16_t *)&scsi_sg_q,
1782 						sizeof(scsi_sg_q) >> 1);
1783 			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
1784 						(u_int32_t *)&sg_head->sg_list[sg_index],
1785 						sg_list_dwords);
1786 			sg_entry_cnt -= segs_this_q;
1787 			sg_index += ADV_SG_LIST_PER_Q;
1788 		}
1789 	}
1790 	adv_put_ready_queue(adv, scsiq, q_no);
1791 }
1792 
1793 static void
1794 adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1795 		    u_int q_no)
1796 {
1797 	struct		adv_target_transinfo* tinfo;
1798 	u_int		q_addr;
1799 	u_int		tid_no;
1800 
1801 	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
1802 	tinfo = &adv->tinfo[tid_no];
1803 	if ((tinfo->current.period != tinfo->goal.period)
1804 	 || (tinfo->current.offset != tinfo->goal.offset)) {
1805 
1806 		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
1807 		scsiq->q1.cntl |= QC_MSG_OUT;
1808 	}
1809 	q_addr = ADV_QNO_TO_QADDR(q_no);
1810 
1811 	scsiq->q1.status = QS_FREE;
1812 
1813 	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
1814 				(u_int16_t *)scsiq->cdbptr,
1815 				scsiq->q2.cdb_len >> 1);
1816 
1817 #if BYTE_ORDER == BIG_ENDIAN
1818 	adv_adj_scsiq_endian(scsiq);
1819 #endif
1820 
1821 	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
1822 		      (u_int16_t *) &scsiq->q1.cntl,
1823 		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);
1824 
1825 #if CC_WRITE_IO_COUNT
1826 	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
1827 			  adv->req_count);
1828 #endif
1829 
1830 #if CC_CLEAR_DMA_REMAIN
1831 
1832 	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
1833 	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
1834 #endif
1835 
1836 	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
1837 			  (scsiq->q1.q_no << 8) | QS_READY);
1838 }
1839 
1840 static void
1841 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
1842 	      u_int16_t *buffer, int words)
1843 {
1844 	int	i;
1845 
1846 	/*
1847 	 * XXX This routine makes *gross* assumptions
1848 	 * about padding in the data structures.
1849 	 * Either the data structures should have explicit
1850 	 * padding members added, or they should have padding
1851 	 * turned off via compiler attributes depending on
1852 	 * which yields better overall performance.  My hunch
1853 	 * would be that turning off padding would be the
1854 	 * faster approach as an outsw is much faster than
1855 	 * this crude loop and accessing un-aligned data
1856 	 * members isn't *that* expensive.  The other choice
1857 	 * would be to modify the ASC script so that
1858 	 * the adv_scsiq_1 structure can be re-arranged so
1859 	 * padding isn't required.
1860 	 */
1861 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1862 	for (i = 0; i < words; i++, buffer++) {
1863 		if (i == 2 || i == 10) {
1864 			continue;
1865 		}
1866 		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
1867 	}
1868 }
1869 
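/*
 * Respond to an extended message-in that halted the microcode.  SDTR
 * messages are range checked and either accepted or answered with a
 * counter-proposal; WDTR is answered with a zero width (these boards
 * are narrow); anything else is rejected.
 */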
1870 static void
1871 adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
1872 		     u_int8_t q_cntl, target_bit_vector target_mask,
1873 		     int tid_no)
1874 {
1875 	struct	ext_msg ext_msg;
1876 
1877 	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
1878 			       sizeof(ext_msg) >> 1);
1879 	if ((ext_msg.msg_type == MSG_EXTENDED)
1880 	 && (ext_msg.msg_req == MSG_EXT_SDTR)
1881 	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
1882 		union	  ccb *ccb;
1883 		struct	  adv_target_transinfo* tinfo;
1884 		u_int32_t cinfo_index;
1885 		u_int	 period;
1886 		u_int	 offset;
1887 		int	 sdtr_accept;
1888 		u_int8_t orig_offset;
1889 
1890 		cinfo_index =
1891 		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
1892 		ccb = adv->ccb_infos[cinfo_index].ccb;
1893 		tinfo = &adv->tinfo[tid_no];
1894 		sdtr_accept = TRUE;
1895 
1896 		orig_offset = ext_msg.req_ack_offset;
1897 		if (ext_msg.xfer_period < tinfo->goal.period) {
1898 			sdtr_accept = FALSE;
1899 			ext_msg.xfer_period = tinfo->goal.period;
1900 		}
1901 
1902 		/* Perform range checking */
1903 		period = ext_msg.xfer_period;
1904 		offset = ext_msg.req_ack_offset;
1905 		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
1906 		ext_msg.xfer_period = period;
1907 		ext_msg.req_ack_offset = offset;
1908 
1909 		/* Record our current sync settings */
1910 		adv_set_syncrate(adv, ccb->ccb_h.path,
1911 				 tid_no, ext_msg.xfer_period,
1912 				 ext_msg.req_ack_offset,
1913 				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
1914 
1915 		/* Offset too high or large period forced async */
1916 		if (orig_offset != ext_msg.req_ack_offset)
1917 			sdtr_accept = FALSE;
1918 
1919 		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
1920 			/* Valid response to our requested negotiation */
1921 			q_cntl &= ~QC_MSG_OUT;
1922 		} else {
1923 			/* Must Respond */
1924 			q_cntl |= QC_MSG_OUT;
1925 			adv_msgout_sdtr(adv, ext_msg.xfer_period,
1926 					ext_msg.req_ack_offset);
1927 		}
1928 
1929 	} else if (ext_msg.msg_type == MSG_EXTENDED
1930 		&& ext_msg.msg_req == MSG_EXT_WDTR
1931 		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {
1932 
1933 		ext_msg.wdtr_width = 0;
1934 		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1935 					(u_int16_t *)&ext_msg,
1936 					sizeof(ext_msg) >> 1);
1937 		q_cntl |= QC_MSG_OUT;
1938 	} else {
1939 
1940 		ext_msg.msg_type = MSG_MESSAGE_REJECT;
1941 		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1942 					(u_int16_t *)&ext_msg,
1943 					sizeof(ext_msg) >> 1);
1944 		q_cntl |= QC_MSG_OUT;
1945 	}
1946 	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
1947 }
1948 
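/*
 * Place an SDTR message in the LRAM message-out buffer for the
 * microcode to send.
 */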
1949 static void
1950 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1951 		u_int8_t sdtr_offset)
1952 {
1953 	struct	 ext_msg sdtr_buf;
1954 
1955 	sdtr_buf.msg_type = MSG_EXTENDED;
1956 	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1957 	sdtr_buf.msg_req = MSG_EXT_SDTR;
1958 	sdtr_buf.xfer_period = sdtr_period;
1959 	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1960 	sdtr_buf.req_ack_offset = sdtr_offset;
1961 	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1962 				(u_int16_t *) &sdtr_buf,
1963 				sizeof(sdtr_buf) / 2);
1964 }
1965 
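/*
 * Walk the microcode queues and mark any request matching the given
 * target/lun (and, if given, a specific ccb) as aborted, recording
 * the supplied status in the CCB.  Returns the number of requests
 * aborted.
 */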
1966 int
1967 adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
1968 	      u_int32_t status, int queued_only)
1969 {
1970 	u_int16_t q_addr;
1971 	u_int8_t  q_no;
1972 	struct adv_q_done_info scsiq_buf;
1973 	struct adv_q_done_info *scsiq;
1974 	u_int8_t  target_ix;
1975 	int	  count;
1976 
1977 	scsiq = &scsiq_buf;
1978 	target_ix = ADV_TIDLUN_TO_IX(target, lun);
1979 	count = 0;
1980 	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
1981 		struct adv_ccb_info *ccb_info;
1982 		q_addr = ADV_QNO_TO_QADDR(q_no);
1983 
1984 		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
1985 		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
1986 		if (((scsiq->q_status & QS_READY) != 0)
1987 		 && ((scsiq->q_status & QS_ABORTED) == 0)
1988 		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
1989 		 && (scsiq->d2.target_ix == target_ix)
1990 		 && (queued_only == 0
1991 		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
1992 		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
1993 			union ccb *aborted_ccb;
1994 			struct adv_ccb_info *cinfo;
1995 
1996 			scsiq->q_status |= QS_ABORTED;
1997 			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
1998 					 scsiq->q_status);
1999 			aborted_ccb = ccb_info->ccb;
2000 			/* Don't clobber earlier error codes */
2001 			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
2002 			  == CAM_REQ_INPROG)
2003 				aborted_ccb->ccb_h.status |= status;
2004 			cinfo = (struct adv_ccb_info *)
2005 			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
2006 			cinfo->state |= ACCB_ABORT_QUEUED;
2007 			count++;
2008 		}
2009 	}
2010 	return (count);
2011 }
2012 
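/*
 * Reset the SCSI bus, reinitialize LRAM and the per-target transfer
 * settings, terminate all pending CCBs with CAM_SCSI_BUS_RESET, and
 * restart the chip.  Returns the number of CCBs terminated.
 */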
2013 int
2014 adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
2015 {
2016 	int count;
2017 	int i;
2018 	union ccb *ccb;
2019 
2020 	i = 200;
2021 	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
2022 	    && i--)
2023 		DELAY(1000);
2024 	adv_reset_chip(adv, initiate_bus_reset);
2025 	adv_reinit_lram(adv);
2026 	for (i = 0; i <= ADV_MAX_TID; i++)
2027 		adv_set_syncrate(adv, NULL, i, /*period*/0,
2028 				 /*offset*/0, ADV_TRANS_CUR);
2029 	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
2030 
2031 	/* Tell the XPT layer that a bus reset occurred */
2032 	if (adv->path != NULL)
2033 		xpt_async(AC_BUS_RESET, adv->path, NULL);
2034 
2035 	count = 0;
2036 	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
2037 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
2038 			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2039 		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
2040 		count++;
2041 	}
2042 
2043 	adv_start_chip(adv);
2044 	return (count);
2045 }
2046 
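/*
 * Write the synchronous transfer register for a single target:
 * select the target's ID in register bank 1, write the SDTR data in
 * bank 0, and restore the previously selected ID.
 */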
2047 static void
2048 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
2049 {
2050 	int orig_id;
2051 
2052 	adv_set_bank(adv, 1);
2053 	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
2054 	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
2055 	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
2056 		adv_set_bank(adv, 0);
2057 		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
2058 	}
2059 	adv_set_bank(adv, 1);
2060 	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
2061 	adv_set_bank(adv, 0);
2062 }
2063