/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
 * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.6 2005/06/03 16:57:12 eirikn Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/thread2.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_da.h>
#include <bus/cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "advansys.h"
#include "advmcode.h"

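/*
 * Per-device quirk table.  Entries are matched against a device's
 * INQUIRY data in advasync() when the XPT reports AC_FOUND_DEVICE.
 * More specific entries must precede the catch-all at the end; the
 * final wildcard entry is expected to match anything, so a failed
 * lookup is treated as a programming error.
 */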
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

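/*
 * Layout of an extended SCSI message (SDTR/WDTR/MDP) as copied to
 * and from the microcode message buffers at ADVV_MSGOUT_BEG and
 * ADVV_MSGIN_BEG.  The accessor macros below hide the union.
 */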
struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp.mdp[3]
#define	mdp_b2		u_ext_msg.mdp.mdp[2]
#define	mdp_b1		u_ext_msg.mdp.mdp[1]
#define	mdp_b0		u_ext_msg.mdp.mdp[0]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);


static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

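/*
 * Number of microcode queues needed to describe a transaction:
 * one queue for the request itself plus enough additional queues
 * to hold the S/G list at ADV_SG_LIST_PER_Q elements apiece, i.e.
 * 1 + howmany(sgcount - 1, ADV_SG_LIST_PER_Q).
 */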
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}

static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);


/* Exported functions first */

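/*
 * Async event callback registered with the XPT.  On AC_FOUND_DEVICE
 * we look the device up in adv_quirk_table and (re)apply the async
 * transfer fix as its quirks dictate; on AC_LOST_DEVICE we revert
 * the target to async transfers so the next device found at that
 * ID starts from a clean slate.
 */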
295 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
296 {
297 	struct adv_softc *adv;
298 
299 	adv = (struct adv_softc *)callback_arg;
300 	switch (code) {
301 	case AC_FOUND_DEVICE:
302 	{
303 		struct ccb_getdev *cgd;
304 		target_bit_vector target_mask;
305 		int num_entries;
306         	caddr_t match;
307 		struct adv_quirk_entry *entry;
308 		struct adv_target_transinfo* tinfo;
309 
310 		cgd = (struct ccb_getdev *)arg;
311 
312 		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
313 
314 		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
315 		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
316 				       (caddr_t)adv_quirk_table,
317 				       num_entries, sizeof(*adv_quirk_table),
318 				       scsi_inquiry_match);
319 
320 		if (match == NULL)
321 			panic("advasync: device didn't match wildcard entry!!");
322 
323 		entry = (struct adv_quirk_entry *)match;
324 
325 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
326 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
327 				adv->fix_asyn_xfer_always |= target_mask;
328 			else
329 				adv->fix_asyn_xfer_always &= ~target_mask;
330 			/*
331 			 * We start out life with all bits set and clear them
332 			 * after we've determined that the fix isn't necessary.
333 			 * It may well be that we've already cleared a target
334 			 * before the full inquiry session completes, so don't
335 			 * gratuitously set a target bit even if it has this
336 			 * quirk.  But, if the quirk exonerates a device, clear
337 			 * the bit now.
338 			 */
339 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
340 				adv->fix_asyn_xfer &= ~target_mask;
341 		}
342 		/*
343 		 * Reset our sync settings now that we've determined
344 		 * what quirks are in effect for the device.
345 		 */
346 		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
347 		adv_set_syncrate(adv, cgd->ccb_h.path,
348 				 cgd->ccb_h.target_id,
349 				 tinfo->current.period,
350 				 tinfo->current.offset,
351 				 ADV_TRANS_CUR);
352 		break;
353 	}
354 	case AC_LOST_DEVICE:
355 	{
356 		u_int target_mask;
357 
358 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
359 			target_mask = 0x01 << xpt_path_target_id(path);
360 			adv->fix_asyn_xfer |= target_mask;
361 		}
362 
363 		/*
364 		 * Revert to async transfers
365 		 * for the next device.
366 		 */
367 		adv_set_syncrate(adv, /*path*/NULL,
368 				 xpt_path_target_id(path),
369 				 /*period*/0,
370 				 /*offset*/0,
371 				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
372 	}
373 	default:
374 		break;
375 	}
376 }
377 
void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}


u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

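/*
 * Select the SDTR period table appropriate for this controller;
 * Ultra capable chips use a larger table with faster entries.
 */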
void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

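/*
 * Read the EEPROM image into *eeprom_config and return the running
 * 16bit sum of the config words.  The checksum word itself is read
 * into the final buffer slot but deliberately left out of the sum,
 * so the caller can compare the two to validate the contents.
 */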
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

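/*
 * Reset the chip (and optionally the SCSI bus), leaving the chip
 * halted.  Returns non-zero if the chip ends up halted as expected.
 */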
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}


int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

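/*
 * Decode the IRQ setting from the config LSW.  VL cards keep a three
 * bit encoding in bits 2-4 (values 0, 4, and 7 are invalid); ISA
 * cards use a two bit encoding in bits 2-3.  Both map the result
 * into the ADV_MIN_IRQ_NO..ADV_MAX_IRQ_NO range, skipping one value
 * in the middle of the range (IRQ 13 when ADV_MIN_IRQ_NO is 10).
 */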
u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

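/*
 * Build and post a single SCSI request to the microcode.  This
 * handles SDTR renegotiation for REQUEST SENSE commands, sizing of
 * the S/G queue chain, the "asyn transfer" and "extra bytes" bug
 * workarounds, and finally hands the request to adv_send_scsi_queue()
 * if enough free queues are available.  Returns 0 on success and
 * non-zero if the request could not be posted.
 */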
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}


u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

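/*
 * Convert an SDTR data byte, as programmed into the chip, back into
 * a period/offset pair.  The special PCI rev A/B fixup value decodes
 * as async.
 */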
void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

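/*
 * Update the transfer settings for a target.  "type" selects which
 * of the current, goal, and user settings to modify.  Changing the
 * current settings requires briefly halting the chip to update its
 * SDTR register and, when a path is supplied, notifying the XPT of
 * the new parameters.
 */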
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /* Changes in asyn fix settings */) {
		int halted;

		crit_enter();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		crit_exit();
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

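/*
 * Copy the microcode image into LRAM and return the 32bit sum of
 * the words written, which the caller checks against
 * adv_mcode_chksum.  The 16bit checksum and size that the microcode
 * itself expects are stored in their LRAM variables as well.
 */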
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

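/*
 * Initialize the variables the microcode shares with the host:
 * start every target async, set up the queue list variables, and
 * program the disconnect mask, host SCSI ID, and overrun buffer.
 * Fails only if the program counter cannot be set to the microcode
 * entry point.
 */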
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

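/*
 * Claim the free queues needed for this request and fill them in.
 * On success the request's sense buffer is pointed at the bus dma
 * mapped area reserved for its queue number and the free list head
 * is advanced past the queues consumed.  Returns 0 on success and
 * non-zero if the free list ran out.
 */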
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}


static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

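/*
 * Respond to an extended message from a target.  SDTR messages are
 * range checked and either accepted, if they answer a negotiation
 * we initiated, or answered with our counter-proposal; WDTR messages
 * are answered with a zero transfer width, since we never go wide;
 * anything else is rejected outright.
 */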
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}

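/*
 * Walk the microcode queues looking for requests that match the
 * given target/lun (and ccb, if one is supplied) and mark them
 * aborted.  "queued_only" restricts the search to requests that
 * have not yet started on the bus.  Returns the number of queues
 * aborted.
 */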
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

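/*
 * Reset the SCSI bus and/or chip, reinitialize LRAM and the
 * per-target transfer settings, notify the XPT, and complete all
 * pending CCBs with CAM_SCSI_BUS_RESET.  Returns the number of
 * CCBs terminated.
 */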
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2063