xref: /dragonfly/sys/dev/disk/advansys/advlib.c (revision 9a92bb4c)
1 /*
2  * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3  *
4  * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
32  * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.10 2008/02/10 00:01:02 pavalos Exp $
33  */
34 /*
35  * Ported from:
36  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
37  *
38  * Copyright (c) 1995-1996 Advanced System Products, Inc.
39  * All Rights Reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that redistributions of source
43  * code retain the above copyright notice and this comment without
44  * modification.
45  */
46 
47 #include <sys/param.h>
48 #include <sys/kernel.h>
49 #include <sys/systm.h>
50 #include <sys/thread2.h>
51 #include <sys/bus.h>
52 #include <sys/rman.h>
53 
54 #include <machine/clock.h>
55 
56 #include <bus/cam/cam.h>
57 #include <bus/cam/cam_ccb.h>
58 #include <bus/cam/cam_sim.h>
59 #include <bus/cam/cam_xpt_sim.h>
60 
61 #include <bus/cam/scsi/scsi_all.h>
62 #include <bus/cam/scsi/scsi_message.h>
63 #include <bus/cam/scsi/scsi_da.h>
64 #include <bus/cam/scsi/scsi_cd.h>
65 
66 #include <vm/vm.h>
67 #include <vm/vm_param.h>
68 #include <vm/pmap.h>
69 
70 #include "advansys.h"
71 #include "advmcode.h"
72 
/*
 * Per-device quirk table entry.  A device's inquiry data is matched
 * against inq_pat to decide which async-transfer workarounds apply.
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* inquiry data to match */
	u_int8_t quirks;			/* ADV_QUIRK_* flags below */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};
79 
/*
 * Quirk table searched first-match by cam_quirkmatch() in advasync().
 * The final wildcard entry supplies the default quirks, so a match is
 * always found (advasync() panics otherwise).
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
	}
};
124 
/*
 * Allowable synchronous transfer periods in ns for non-Ultra chips.
 * Indexed by the 4 bit period code taken from the chip's sync data
 * (see adv_sdtr_to_period_offset()).
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
139 
/*
 * Allowable synchronous transfer periods in ns for Ultra chips
 * (selected in adv_lib_init() when ADV_ULTRA is set).
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
159 
/*
 * Layout of an extended SCSI message as exchanged with the microcode's
 * message buffers (SDTR, WDTR, or modify-data-pointer payloads).
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of the extended message body */
	u_int8_t msg_req;	/* extended message code, e.g. MSG_EXT_SDTR */
	union {
		struct {
			u_int8_t sdtr_xfer_period;	/* SDTR period factor */
			u_int8_t sdtr_req_ack_offset;	/* SDTR REQ/ACK offset */
		} sdtr;
		struct {
       			u_int8_t wdtr_width;		/* WDTR width byte */
		} wdtr;
		struct {
			u_int8_t mdp[4];	/* modify-data-pointer bytes */
		} mdp;
	} u_ext_msg;
	u_int8_t res;		/* reserved/padding */
};

/* Shorthand accessors for the union members above. */
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * NOTE(review): the mdp_b* macros expand to u_ext_msg.mdp_b3..mdp_b0,
 * but struct ext_msg only declares u_ext_msg.mdp.mdp[4].  They appear
 * unused in this file; any new use would fail to compile — verify
 * before relying on them.
 */
#define	mdp_b3		u_ext_msg.mdp_b3
#define	mdp_b2		u_ext_msg.mdp_b2
#define	mdp_b1		u_ext_msg.mdp_b1
#define	mdp_b0		u_ext_msg.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
192 
193 /* LRAM routines */
194 static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
195 					u_int16_t *buffer, int count);
196 static void	 adv_write_lram_16_multi(struct adv_softc *adv,
197 					 u_int16_t s_addr, u_int16_t *buffer,
198 					 int count);
199 static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200 				  u_int16_t set_value, int count);
201 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
202 				  int count);
203 
204 static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
205 					      u_int16_t addr, u_int16_t value);
206 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
207 
208 
209 static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
210 				   u_int32_t value);
211 static void	 adv_write_lram_32_multi(struct adv_softc *adv,
212 					 u_int16_t s_addr, u_int32_t *buffer,
213 					 int count);
214 
215 /* EEPROM routines */
216 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
217 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
218 				     u_int16_t value);
219 static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
220 					  u_int8_t cmd_reg);
221 static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
222 					    struct adv_eeprom_config *eeconfig);
223 
224 /* Initialization */
225 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
226 				    u_int16_t *mcode_buf, u_int16_t mcode_size);
227 
228 static void	 adv_reinit_lram(struct adv_softc *adv);
229 static void	 adv_init_lram(struct adv_softc *adv);
230 static int	 adv_init_microcode_var(struct adv_softc *adv);
231 static void	 adv_init_qlink_var(struct adv_softc *adv);
232 
233 /* Interrupts */
234 static void	 adv_disable_interrupt(struct adv_softc *adv);
235 static void	 adv_enable_interrupt(struct adv_softc *adv);
236 static void	 adv_toggle_irq_act(struct adv_softc *adv);
237 
238 /* Chip Control */
239 static int	 adv_host_req_chip_halt(struct adv_softc *adv);
240 static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
241 #if UNUSED
242 static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
243 #endif
244 
245 /* Queue handling and execution */
246 static __inline int
247 		 adv_sgcount_to_qcount(int sgcount);
248 
249 static __inline int
250 adv_sgcount_to_qcount(int sgcount)
251 {
252 	int	n_sg_list_qs;
253 
254 	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
255 	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
256 		n_sg_list_qs++;
257 	return (n_sg_list_qs + 1);
258 }
259 
260 static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
261 				u_int16_t *inbuf, int words);
262 static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
263 static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
264 				       u_int8_t free_q_head, u_int8_t n_free_q);
265 static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
266 				      u_int8_t free_q_head);
267 static int	 adv_send_scsi_queue(struct adv_softc *adv,
268 				     struct adv_scsi_q *scsiq,
269 				     u_int8_t n_q_required);
270 static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
271 					     struct adv_scsi_q *scsiq,
272 					     u_int q_no);
273 static void	 adv_put_ready_queue(struct adv_softc *adv,
274 				     struct adv_scsi_q *scsiq, u_int q_no);
275 static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
276 			       u_int16_t *buffer, int words);
277 
278 /* Messages */
279 static void	 adv_handle_extmsg_in(struct adv_softc *adv,
280 				      u_int16_t halt_q_addr, u_int8_t q_cntl,
281 				      target_bit_vector target_id,
282 				      int tid);
283 static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
284 				 u_int8_t sdtr_offset);
285 static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
286 					u_int8_t sdtr_data);
287 
288 
289 /* Exported functions first */
290 
291 void
292 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
293 {
294 	struct adv_softc *adv;
295 
296 	adv = (struct adv_softc *)callback_arg;
297 	switch (code) {
298 	case AC_FOUND_DEVICE:
299 	{
300 		struct ccb_getdev *cgd;
301 		target_bit_vector target_mask;
302 		int num_entries;
303         	caddr_t match;
304 		struct adv_quirk_entry *entry;
305 		struct adv_target_transinfo* tinfo;
306 
307 		cgd = (struct ccb_getdev *)arg;
308 
309 		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
310 
311 		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
312 		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
313 				       (caddr_t)adv_quirk_table,
314 				       num_entries, sizeof(*adv_quirk_table),
315 				       scsi_inquiry_match);
316 
317 		if (match == NULL)
318 			panic("advasync: device didn't match wildcard entry!!");
319 
320 		entry = (struct adv_quirk_entry *)match;
321 
322 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
323 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
324 				adv->fix_asyn_xfer_always |= target_mask;
325 			else
326 				adv->fix_asyn_xfer_always &= ~target_mask;
327 			/*
328 			 * We start out life with all bits set and clear them
329 			 * after we've determined that the fix isn't necessary.
330 			 * It may well be that we've already cleared a target
331 			 * before the full inquiry session completes, so don't
332 			 * gratuitously set a target bit even if it has this
333 			 * quirk.  But, if the quirk exonerates a device, clear
334 			 * the bit now.
335 			 */
336 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
337 				adv->fix_asyn_xfer &= ~target_mask;
338 		}
339 		/*
340 		 * Reset our sync settings now that we've determined
341 		 * what quirks are in effect for the device.
342 		 */
343 		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
344 		adv_set_syncrate(adv, cgd->ccb_h.path,
345 				 cgd->ccb_h.target_id,
346 				 tinfo->current.period,
347 				 tinfo->current.offset,
348 				 ADV_TRANS_CUR);
349 		break;
350 	}
351 	case AC_LOST_DEVICE:
352 	{
353 		u_int target_mask;
354 
355 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
356 			target_mask = 0x01 << xpt_path_target_id(path);
357 			adv->fix_asyn_xfer |= target_mask;
358 		}
359 
360 		/*
361 		 * Revert to async transfers
362 		 * for the next device.
363 		 */
364 		adv_set_syncrate(adv, /*path*/NULL,
365 				 xpt_path_target_id(path),
366 				 /*period*/0,
367 				 /*offset*/0,
368 				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
369 	}
370 	default:
371 		break;
372 	}
373 }
374 
375 void
376 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
377 {
378 	u_int8_t control;
379 
380 	/*
381 	 * Start out with the bank reset to 0
382 	 */
383 	control = ADV_INB(adv, ADV_CHIP_CTRL)
384 		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
385 			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
386 			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
387 	if (bank == 1) {
388 		control |= ADV_CC_BANK_ONE;
389 	} else if (bank == 2) {
390 		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
391 	}
392 	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
393 }
394 
395 u_int8_t
396 adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
397 {
398 	u_int8_t   byte_data;
399 	u_int16_t  word_data;
400 
401 	/*
402 	 * LRAM is accessed on 16bit boundaries.
403 	 */
404 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
405 	word_data = ADV_INW(adv, ADV_LRAM_DATA);
406 	if (addr & 1) {
407 #if BYTE_ORDER == BIG_ENDIAN
408 		byte_data = (u_int8_t)(word_data & 0xFF);
409 #else
410 		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
411 #endif
412 	} else {
413 #if BYTE_ORDER == BIG_ENDIAN
414 		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
415 #else
416 		byte_data = (u_int8_t)(word_data & 0xFF);
417 #endif
418 	}
419 	return (byte_data);
420 }
421 
422 void
423 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
424 {
425 	u_int16_t word_data;
426 
427 	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
428 	if (addr & 1) {
429 		word_data &= 0x00FF;
430 		word_data |= (((u_int8_t)value << 8) & 0xFF00);
431 	} else {
432 		word_data &= 0xFF00;
433 		word_data |= ((u_int8_t)value & 0x00FF);
434 	}
435 	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
436 }
437 
438 
439 u_int16_t
440 adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
441 {
442 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
443 	return (ADV_INW(adv, ADV_LRAM_DATA));
444 }
445 
/*
 * Write one 16 bit word to LRAM: latch the address, then write the
 * data register.
 */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
452 
453 /*
454  * Determine if there is a board at "iobase" by looking
455  * for the AdvanSys signatures.  Return 1 if a board is
456  * found, 0 otherwise.
457  */
458 int
459 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
460 {
461 	u_int16_t signature;
462 
463 	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
464 		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
465 		if ((signature == ADV_1000_ID0W)
466 		 || (signature == ADV_1000_ID0W_FIX))
467 			return (1);
468 	}
469 	return (0);
470 }
471 
472 void
473 adv_lib_init(struct adv_softc *adv)
474 {
475 	if ((adv->type & ADV_ULTRA) != 0) {
476 		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
477 		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
478 	} else {
479 		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
480 		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
481 	}
482 }
483 
484 u_int16_t
485 adv_get_eeprom_config(struct adv_softc *adv, struct
486 		      adv_eeprom_config  *eeprom_config)
487 {
488 	u_int16_t	sum;
489 	u_int16_t	*wbuf;
490 	u_int8_t	cfg_beg;
491 	u_int8_t	cfg_end;
492 	u_int8_t	s_addr;
493 
494 	wbuf = (u_int16_t *)eeprom_config;
495 	sum = 0;
496 
497 	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
498 		*wbuf = adv_read_eeprom_16(adv, s_addr);
499 		sum += *wbuf;
500 	}
501 
502 	if (adv->type & ADV_VL) {
503 		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
504 		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
505 	} else {
506 		cfg_beg = ADV_EEPROM_CFG_BEG;
507 		cfg_end = ADV_EEPROM_MAX_ADDR;
508 	}
509 
510 	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
511 		*wbuf = adv_read_eeprom_16(adv, s_addr);
512 		sum += *wbuf;
513 #if ADV_DEBUG_EEPROM
514 		kprintf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
515 #endif
516 	}
517 	*wbuf = adv_read_eeprom_16(adv, s_addr);
518 	return (sum);
519 }
520 
521 int
522 adv_set_eeprom_config(struct adv_softc *adv,
523 		      struct adv_eeprom_config *eeprom_config)
524 {
525 	int	retry;
526 
527 	retry = 0;
528 	while (1) {
529 		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
530 			break;
531 		}
532 		if (++retry > ADV_EEPROM_MAX_RETRY) {
533 			break;
534 		}
535 	}
536 	return (retry > ADV_EEPROM_MAX_RETRY);
537 }
538 
/*
 * Reset the chip (and optionally the SCSI bus), leaving the RISC
 * halted.  Returns non-zero if the chip verifies as halted afterwards.
 * The register write/delay ordering below is deliberate — do not
 * reorder.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	/* Assert chip reset, and the SCSI bus reset line if requested. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	/* Drop the SCSI reset line while keeping the chip in reset. */
	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	/* Release chip reset, leaving the RISC halted. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	/* Extra settle time after a bus reset. */
	if (reset_bus)
		DELAY(200 * 1000);

	/* Clear any latched SCSI reset interrupt status. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
561 
562 int
563 adv_test_external_lram(struct adv_softc* adv)
564 {
565 	u_int16_t	q_addr;
566 	u_int16_t	saved_value;
567 	int		success;
568 
569 	success = 0;
570 
571 	q_addr = ADV_QNO_TO_QADDR(241);
572 	saved_value = adv_read_lram_16(adv, q_addr);
573 	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
574 		success = 1;
575 		adv_write_lram_16(adv, q_addr, saved_value);
576 	}
577 	return (success);
578 }
579 
580 
581 int
582 adv_init_lram_and_mcode(struct adv_softc *adv)
583 {
584 	u_int32_t	retval;
585 
586 	adv_disable_interrupt(adv);
587 
588 	adv_init_lram(adv);
589 
590 	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
591 				    adv_mcode_size);
592 	if (retval != adv_mcode_chksum) {
593 		kprintf("adv%d: Microcode download failed checksum!\n",
594 		       adv->unit);
595 		return (1);
596 	}
597 
598 	if (adv_init_microcode_var(adv) != 0)
599 		return (1);
600 
601 	adv_enable_interrupt(adv);
602 	return (0);
603 }
604 
605 u_int8_t
606 adv_get_chip_irq(struct adv_softc *adv)
607 {
608 	u_int16_t	cfg_lsw;
609 	u_int8_t	chip_irq;
610 
611 	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
612 
613 	if ((adv->type & ADV_VL) != 0) {
614 		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
615 		if ((chip_irq == 0) ||
616 		    (chip_irq == 4) ||
617 		    (chip_irq == 7)) {
618 			return (0);
619 		}
620 		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
621 	}
622 	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
623 	if (chip_irq == 3)
624 		chip_irq += 2;
625 	return (chip_irq + ADV_MIN_IRQ_NO);
626 }
627 
/*
 * Program the board's IRQ selection into the config register and
 * return the IRQ actually in effect (re-read via adv_get_chip_irq()).
 * The VL path performs two write/toggle passes; keep that ordering.
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		/*
		 * Map the requested IRQ into the VL 3 bit encoding;
		 * out-of-range requests select "no IRQ" (code 0).
		 */
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		/* Second pass writes the actual 3 bit IRQ code. */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* ISA: IRQ 15 maps down by two, then a 2 bit code. */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
661 
662 void
663 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
664 {
665 	u_int16_t cfg_lsw;
666 
667 	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
668 	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
669 		return;
670     	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
671 	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
672 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
673 }
674 
/*
 * Prepare a SCSI request for the microcode and submit it when enough
 * free queues are available (or unconditionally for urgent requests).
 * Returns the result of adv_send_scsi_queue(), or 1 if the request
 * was not submitted.
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	/*
	 * Scatter/gather requests reference the last S/G element for
	 * the extra-bytes workaround below and need multiple queues.
	 */
	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			/*
			 * All but the last S/G element must end on a
			 * 4-byte boundary on these bus types.
			 */
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		/* Single-segment request: one queue suffices. */
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	/*
	 * Decide whether the "offset one" async fix must be disabled
	 * for this command: small transfers, and a set of commands
	 * known to move short data, opt out.
	 */
	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Tagging is incompatible with the disabled-fix path. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * Workaround for reads whose transfer ends unaligned: trim the
	 * trailing bytes from the last element and flag them so they
	 * can be handled separately.
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Submit only if enough queues are free, or if urgent. */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
799 
800 
/*
 * Copy a completed request's done-queue information out of LRAM into
 * *scsiq.  Returns the S/G queue count byte read from the queue
 * header.  max_dma_count is used to mask the residual byte count.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 portions of the done info. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each LRAM word packs two byte-sized fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
845 
846 int
847 adv_start_chip(struct adv_softc *adv)
848 {
849 	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
850 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
851 		return (0);
852 	return (1);
853 }
854 
855 int
856 adv_stop_execution(struct adv_softc *adv)
857 {
858 	int count;
859 
860 	count = 0;
861 	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
862 		adv_write_lram_8(adv, ADV_STOP_CODE_B,
863 				 ADV_STOP_REQ_RISC_STOP);
864 		do {
865 			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
866 				ADV_STOP_ACK_RISC_STOP) {
867 				return (1);
868 			}
869 			DELAY(1000);
870 		} while (count++ < 20);
871 	}
872 	return (0);
873 }
874 
875 int
876 adv_is_chip_halted(struct adv_softc *adv)
877 {
878 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
879 		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
880 			return (1);
881 		}
882 	}
883 	return (0);
884 }
885 
/*
 * Acknowledge a chip interrupt.
 *
 * XXX The numeric constants and the loops in this routine
 * need to be documented.  The 0x7FFF and 3 iteration bounds appear to
 * be simple timeouts — confirm against the vendor documentation.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait for the RISC to drop its interrupt-generation flag. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Flag the acknowledgement to the microcode. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	/* Clear the interrupt, retrying while it stays pending. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the original host flag state. */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
920 
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.  The halt code in ADVV_HALTCODE_W selects the
 * condition; the halted queue's address, target, and control byte are
 * read up front and used by most branches.  The halt code is cleared
 * on exit, resuming the microcode.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	/* Gather the halt code and the state of the halted queue. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		/* Re-apply the async rate with the fix mask intact. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/* Incoming extended message (SDTR/WDTR/MDP). */
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		/*
		 * CHECK CONDITION: convert the halted queue into an
		 * untagged request-sense, freeze the device queue, and
		 * requeue everything else for this target.
		 */
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		/* Mark the target not busy so the sense can be sent. */
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		/* Target rejected our SDTR message. */
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		/*
		 * Target reported QUEUE FULL: freeze the devq, record
		 * the status, and requeue the target's pending work.
		 */
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		kprintf("Unhandled Halt Code %x\n", int_halt_code);
	}
	/* Clearing the halt code lets the microcode resume. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
1063 
1064 void
1065 adv_sdtr_to_period_offset(struct adv_softc *adv,
1066 			  u_int8_t sync_data, u_int8_t *period,
1067 			  u_int8_t *offset, int tid)
1068 {
1069 	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1070 	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1071 		*period = *offset = 0;
1072 	} else {
1073 		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1074 		*offset = sync_data & 0xF;
1075 	}
1076 }
1077 
/*
 * Update the synchronous transfer parameters for target "tid".
 *
 * path   - CAM path used to notify the transport layer of the change
 *          (may be NULL to skip the notification).
 * period - requested sync period; 0 requests async.
 * offset - requested REQ/ACK offset; 0 requests async.
 * type   - mask of ADV_TRANS_CUR/GOAL/USER selecting which copies of
 *          the per-target transfer info to update.
 *
 * When the active (CUR) settings change, the chip is briefly halted so
 * the new SDTR register value can be programmed safely.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		crit_enter();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		crit_exit();
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;
			memset(&neg, 0, sizeof (neg));
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;

			neg.protocol = PROTO_SCSI;
			neg.protocol_version = SCSI_REV_2;
			neg.transport = XPORT_SPI;
			neg.transport_version = 2;

			spi->sync_offset = offset;
			spi->sync_period = period;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1159 
1160 u_int8_t
1161 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1162 			  u_int *offset, int tid)
1163 {
1164 	u_int i;
1165 	u_int dummy_offset;
1166 	u_int dummy_period;
1167 
1168 	if (offset == NULL) {
1169 		dummy_offset = 0;
1170 		offset = &dummy_offset;
1171 	}
1172 
1173 	if (period == NULL) {
1174 		dummy_period = 0;
1175 		period = &dummy_period;
1176 	}
1177 
1178 	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1179 	if (*period != 0 && *offset != 0) {
1180 		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1181 			if (*period <= adv->sdtr_period_tbl[i]) {
1182 				/*
1183 				 * When responding to a target that requests
1184 				 * sync, the requested  rate may fall between
1185 				 * two rates that we can output, but still be
1186 				 * a rate that we can receive.  Because of this,
1187 				 * we want to respond to the target with
1188 				 * the same rate that it sent to us even
1189 				 * if the period we use to send data to it
1190 				 * is lower.  Only lower the response period
1191 				 * if we must.
1192 				 */
1193 				if (i == 0 /* Our maximum rate */)
1194 					*period = adv->sdtr_period_tbl[0];
1195 				return ((i << 4) | *offset);
1196 			}
1197 		}
1198 	}
1199 
1200 	/* Must go async */
1201 	*period = 0;
1202 	*offset = 0;
1203 	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1204 		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1205 	return (0);
1206 }
1207 
1208 /* Internal Routines */
1209 
/*
 * Read "count" 16-bit words from controller local RAM starting at
 * LRAM address s_addr into buffer.  A single address-register write
 * suffices; the data port advances through LRAM on each access (the
 * same pattern is relied upon throughout this file).
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1217 
/*
 * Write "count" 16-bit words from buffer into controller local RAM
 * starting at LRAM address s_addr, streaming through the
 * auto-advancing data port.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1225 
/*
 * Fill "count" consecutive 16-bit LRAM words starting at s_addr with
 * set_value (a memset-like helper for local RAM).
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1234 
1235 static u_int32_t
1236 adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1237 {
1238 	u_int32_t	sum;
1239 	int		i;
1240 
1241 	sum = 0;
1242 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1243 	for (i = 0; i < count; i++)
1244 		sum += ADV_INW(adv, ADV_LRAM_DATA);
1245 	return (sum);
1246 }
1247 
1248 static int
1249 adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1250 			     u_int16_t value)
1251 {
1252 	int	retval;
1253 
1254 	retval = 0;
1255 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1256 	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1257 	DELAY(10000);
1258 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1259 	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1260 		retval = 1;
1261 	return (retval);
1262 }
1263 
/*
 * Read a 32-bit value from LRAM as two consecutive 16-bit words.
 * The word read order depends on the host byte order so that the
 * reassembled value matches what adv_write_lram_32() stored.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1281 
/*
 * Write a 32-bit value to LRAM as two consecutive 16-bit words, in a
 * host-byte-order-dependent order mirroring adv_read_lram_32().
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1295 
/*
 * Write "count" 32-bit values from buffer into LRAM starting at
 * s_addr.  The transfer is done as count * 2 16-bit words through the
 * streaming data port.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1303 
/*
 * Read one 16-bit word from the serial EEPROM at word address addr.
 * Writes are disabled first, then a read command is issued and the
 * result fetched from the data register.  The 1ms delays give the
 * EEPROM time to complete each step.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1319 
/*
 * Write a 16-bit word to the serial EEPROM at word address addr,
 * skipping the write entirely if the EEPROM already holds that value
 * (EEPROM cells have limited write endurance).  Returns the value read
 * back after the write, so the caller can compare it against "value"
 * to detect a failed update.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		/* EEPROM write cycles are slow; allow 20ms to complete. */
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1342 
1343 static int
1344 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1345 {
1346 	u_int8_t read_back;
1347 	int	 retry;
1348 
1349 	retry = 0;
1350 	while (1) {
1351 		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1352 		DELAY(1000);
1353 		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1354 		if (read_back == cmd_reg) {
1355 			return (1);
1356 		}
1357 		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1358 			return (0);
1359 		}
1360 	}
1361 }
1362 
/*
 * Burn an entire EEPROM configuration image in one pass:
 *
 *   1. Write the first two words of the config structure to EEPROM
 *      addresses 0 and 1, accumulating a running checksum.
 *   2. Write the bus-type-dependent config region (cfg_beg..cfg_end-1).
 *   3. Store the accumulated checksum in the structure and at the
 *      final EEPROM address.
 *   4. Read everything back and compare against the in-memory image.
 *
 * Returns the number of words that failed to write or verify (0 on
 * complete success).  The caller-supplied eeprom_config is treated as
 * a flat array of 16-bit words.
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* Phase 1: the two leading words. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL-bus cards use a different config window than ISA cards. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/* Phase 2: the main config region (checksum word excluded). */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Phase 3: record the checksum in-memory and in EEPROM. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Phase 4: verify by re-reading every word just written. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1414 
/*
 * Download the microcode image into controller LRAM at s_addr.
 * The code-section checksum and total image size are stored in the
 * microcode variable area for the firmware's own consistency checks.
 * Returns the 32-bit sum of all LRAM words written so the caller can
 * validate the transfer.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	/* mcode_size is in bytes; LRAM transfers are in 16-bit words. */
	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	/* Checksum only the code section, starting at ADV_CODE_SEC_BEG. */
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1436 
/*
 * Re-initialize controller LRAM after a reset: rebuild the queue
 * blocks and reset the microcode queue-linkage variables.
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	/* Brace moved to its own line for consistency with the file style. */
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1442 
/*
 * Build the doubly linked list of per-command queue blocks in LRAM.
 * Blocks ADV_MIN_ACTIVE_QNO..max_openings form a circular free list
 * via their FWD/BWD byte fields; the extra blocks past max_openings
 * (up to max_openings + 3) are linked to themselves.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Clear the whole queue area (64 bytes per block). */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First block: backward link wraps to the last block. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle blocks link to their immediate neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last block: forward link terminates the list. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Spare blocks beyond max_openings point at themselves. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1478 
/*
 * Initialize the microcode's variable area after a firmware download:
 * force all targets to async, reset queue-linkage state, publish the
 * disconnect-enable mask, host SCSI id, and overrun buffer, then point
 * the chip's program counter at the microcode entry point.
 * Returns 0 on success, 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	/* The microcode expects the host id as a bitmask, not a number. */
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	/* Set the entry point and verify the write took effect. */
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		kprintf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1509 
/*
 * Reset the microcode variables that track the ready/done/busy queue
 * lists to their empty-state defaults, clear the error/halt/stop
 * bookkeeping bytes, and zero the first 32 words of the queue area.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* The blocks past max_openings serve as the busy/disc1 heads. */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1540 
1541 static void
1542 adv_disable_interrupt(struct adv_softc *adv)
1543 {
1544 	u_int16_t cfg;
1545 
1546 	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1547 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1548 }
1549 
1550 static void
1551 adv_enable_interrupt(struct adv_softc *adv)
1552 {
1553 	u_int16_t cfg;
1554 
1555 	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1556 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1557 }
1558 
/*
 * Pulse the IRQ-active bit in the chip status register (set then
 * clear) to acknowledge/retrigger the interrupt line.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1565 
/*
 * Clear any pending stop code so the RISC resumes execution.
 *
 * NOTE(review): this routine uses ADV_STOP_CODE_B while every other
 * routine in this file (adv_host_req_chip_halt, adv_init_qlink_var)
 * uses ADVV_STOP_CODE_B for the stop-code LRAM variable -- verify in
 * the header that both names refer to the same address.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1573 
1574 int
1575 adv_stop_chip(struct adv_softc *adv)
1576 {
1577 	u_int8_t cc_val;
1578 
1579 	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1580 		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1581 	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1582 	adv_set_chip_ih(adv, ADV_INS_HALT);
1583 	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1584 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1585 		return (0);
1586 	}
1587 	return (1);
1588 }
1589 
/*
 * Ask the running microcode to halt the RISC by posting a stop code,
 * then spin (up to 2000 polls) waiting for it to comply.  The previous
 * stop code is restored afterwards.  Returns non-zero if the chip
 * halted in time, 0 on timeout.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
1610 
/*
 * Load an instruction code into the chip's IH register, which lives in
 * register bank 1; the bank is restored to 0 afterwards.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1618 
#if UNUSED
/*
 * Read the SCSI control register from register bank 1, restoring bank
 * 0 afterwards.  Compiled only when UNUSED evaluates non-zero; kept
 * for debugging.
 * NOTE(review): "#if UNUSED" relies on an undefined macro evaluating
 * to 0 -- "#ifdef" would state the intent more directly.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1631 
1632 /*
1633  * XXX Looks like more padding issues in this routine as well.
1634  *     There has to be a way to turn this into an insw.
1635  */
1636 static void
1637 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1638 	       u_int16_t *inbuf, int words)
1639 {
1640 	int	i;
1641 
1642 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1643 	for (i = 0; i < words; i++, inbuf++) {
1644 		if (i == 5) {
1645 			continue;
1646 		}
1647 		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1648 	}
1649 }
1650 
1651 static u_int
1652 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1653 {
1654 	u_int	  cur_used_qs;
1655 	u_int	  cur_free_qs;
1656 
1657 	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1658 
1659 	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1660 		cur_free_qs = adv->max_openings - cur_used_qs;
1661 		return (cur_free_qs);
1662 	}
1663 	adv->openings_needed = n_qs;
1664 	return (0);
1665 }
1666 
1667 static u_int8_t
1668 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1669 		      u_int8_t n_free_q)
1670 {
1671 	int i;
1672 
1673 	for (i = 0; i < n_free_q; i++) {
1674 		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1675 		if (free_q_head == ADV_QLINK_END)
1676 			break;
1677 	}
1678 	return (free_q_head);
1679 }
1680 
1681 static u_int8_t
1682 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1683 {
1684 	u_int16_t	q_addr;
1685 	u_int8_t	next_qp;
1686 	u_int8_t	q_status;
1687 
1688 	next_qp = ADV_QLINK_END;
1689 	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1690 	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1691 
1692 	if ((q_status & QS_READY) == 0)
1693 		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1694 
1695 	return (next_qp);
1696 }
1697 
/*
 * Reserve n_q_required queue blocks from the free list and, if enough
 * are available, hand the command off to the microcode.  Returns 0 on
 * success, 1 if the free list could not supply enough queues (the
 * caller must retry later).
 */
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		/* Commit: advance the free list and account for the queues. */
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
1730 
1731 
/*
 * Distribute a command's scatter/gather list across the chain of
 * queue blocks reserved for it, then mark the head queue ready via
 * adv_put_ready_queue().  Each continuation block receives an
 * adv_sg_list_q header followed by up to ADV_SG_LIST_PER_Q SG
 * elements; the final block is tagged with QCSG_SG_XFER_END.
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* The first SG element travels in the head queue itself. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG element is two 32-bit words (addr, len). */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Follow the forward link to the next reserved block. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1797 
/*
 * Finalize a command's head queue block: start an SDTR negotiation
 * message if the target's goal transfer settings differ from the
 * current ones, copy the CDB and SCSI_Q structures into LRAM, and
 * finally flip the queue to QS_READY so the microcode picks it up.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	/* Negotiation pending?  Queue an SDTR message-out first. */
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/*
	 * A single 16-bit write updates both the status byte (QS_READY)
	 * and the adjacent queue-number byte.  This must be last: it
	 * releases the queue to the microcode.
	 */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1844 
/*
 * Copy a SCSI_Q image from host memory into LRAM one 16-bit word at a
 * time, skipping buffer word indices 2 and 10 (compiler padding holes
 * in the source structures, per the XXX below), so the words land
 * packed in consecutive LRAM locations.
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1874 
/*
 * Handle an extended message received from a target (chip halted with
 * the message in the microcode's message-in buffer):
 *
 *  - SDTR: range-check the proposal, record the resulting sync
 *    settings, and either accept it (if it answers our own SDTR) or
 *    send back a counter-proposal.
 *  - WDTR: respond with a zero width (this chip is narrow-only).
 *  - Anything else: respond with MESSAGE REJECT.
 *
 * The (possibly updated) q_cntl is written back so the microcode knows
 * whether a message-out phase is required.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Don't accept a period faster than our goal. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
                	sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* Narrow controller: answer WDTR with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        } else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
        }
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1953 
/*
 * Build an SDTR extended message and place it in the microcode's
 * message-out buffer for transmission on the next message-out phase.
 *
 * NOTE(review): only the SDTR members of sdtr_buf are initialized;
 * any remaining bytes of struct ext_msg are copied to LRAM as stack
 * garbage.  Presumably the microcode sends only msg_len bytes --
 * verify, or zero the structure first.
 */
static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	/* Clamp the offset to what the chip supports. */
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
1970 
/*
 * Mark queued commands for target/lun as aborted in LRAM.
 *
 * ccb         - abort only this CCB if non-NULL, otherwise every
 *               matching command.
 * status      - CAM status to apply to CCBs still in progress.
 * queued_only - if set, skip commands already disconnected, busy, or
 *               done (i.e. only abort those the chip has not started).
 *
 * Returns the number of commands marked.  The actual completion is
 * deferred: the CCB is flagged ACCB_ABORT_QUEUED for later processing.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	/* Scan every queue block for matching, ready, head-of-SG queues. */
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2017 
/*
 * Reset the SCSI bus and/or chip and bring the controller back to a
 * runnable state: wait out any reset already in progress, reset the
 * chip, rebuild LRAM, force all targets async, notify the XPT layer,
 * and complete every pending CCB as aborted.  Returns the number of
 * CCBs that were terminated.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	/* Give any in-progress bus reset up to 200ms to clear. */
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	/* Transfer negotiations do not survive a bus reset. */
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2051 
/*
 * Program the per-target SDTR data register for target "tid".  The
 * chip exposes one sync-offset register whose target is selected via
 * the bank-1 host-SCSIID register; the original selection is saved
 * (via ffs on the id bitmask) and restored afterwards.
 *
 * NOTE(review): the routine writes the raw tid to ADV_HOST_SCSIID but
 * compares the read-back against (1 << tid) -- the register apparently
 * latches a bitmask; confirm against the chip documentation.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

    	adv_set_bank(adv, 1);
    	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
    	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
    	adv_set_bank(adv, 1);
    	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2068