/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
 * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.4 2003/08/07 21:16:50 dillon Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_da.h>
#include <bus/cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "advansys.h"
#include "advmcode.h"

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

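/*
 * For reference: struct ext_msg overlays a SCSI extended message as it
 * appears on the wire.  msg_type is MSG_EXTENDED, msg_len counts the
 * bytes that follow msg_len itself, and msg_req selects SDTR, WDTR, or
 * modify-data-pointer.  The macros below are shorthand for the nested
 * union members.
 */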
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/* Modify-data-pointer bytes are sent most significant (b3) first */
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
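/*
 * Note: SDTR data bytes in this driver encode a period table index in
 * the high nibble and the REQ/ACK offset in the low nibble (see
 * adv_period_offset_to_sdtr()), so 0x41 selects the fifth entry of the
 * period table with an offset of 1.
 */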

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);


static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}
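/*
 * Worked example, assuming ADV_SG_LIST_PER_Q is 7: a 15 entry SG list
 * needs (15 - 1) / 7 = 2 SG list queues with no remainder, so the
 * routine returns 3 (two SG list queues plus the head queue).
 */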

static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);


/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}


u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}
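/*
 * A probe routine would map the candidate I/O range and test it before
 * touching any other registers, along the lines of this sketch:
 *
 *	if (adv_find_signature(tag, bsh) == 0)
 *		return (ENXIO);
 *
 * The actual probe logic lives in the bus front ends.
 */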

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
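/*
 * Note that the returned sum covers only the words before the checksum
 * location; the checksum word itself is read into the final word of
 * *eeprom_config.  A caller can therefore validate the EEPROM with a
 * comparison along the lines of (assuming the last u_int16_t in the
 * structure is named chksum):
 *
 *	if (adv_get_eeprom_config(adv, &eeconfig) != eeconfig.chksum)
 *		(fall back to a default configuration)
 */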

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc* adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}


int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}


u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
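/*
 * Example of the residual reconstruction above: if the microcode left
 * 0x0001 in the alternate DC1 word and 0x2000 in the low word, the
 * residual is (0x0001 << 16) | 0x2000 = 0x12000 (73728) bytes before
 * being clipped against max_dma_count.
 */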

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		untimeout(adv_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, (caddr_t)ccb, 5 * hz);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}
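/*
 * Example with the non-Ultra table above: a target requesting a period
 * of 55 with an offset of 8 falls between table entries 50 (index 4)
 * and 60 (index 5), so the loop stops at index 5 and returns 0x58
 * while leaving *period at 55; the period is only raised when the
 * request is faster than our fastest rate (index 0).
 */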

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
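/*
 * The loops above build a doubly linked list of ADV_QBLK_SIZE byte
 * queue blocks in LRAM: queues 1 through max_openings form the free
 * list proper, while the three trailing blocks (max_openings + 1
 * through max_openings + 3) are self linked sentinels;
 * adv_init_qlink_var() points the busy and disconnect queue heads at
 * the first two of them.
 */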

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}
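/*
 * Each call consumes one queue block: a block whose status lacks
 * QS_READY is free, and its forward link names the next candidate;
 * otherwise ADV_QLINK_END is returned and the walk performed by
 * adv_alloc_free_queues() above stops short.
 */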

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}


static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}
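/*
 * The message built here is the standard five byte SDTR sequence as
 * it will appear on the wire:
 *
 *	0x01 (MSG_EXTENDED), 0x03 (MSG_EXT_SDTR_LEN),
 *	0x01 (MSG_EXT_SDTR), sdtr_period, sdtr_offset
 *
 * The remaining bytes of struct ext_msg are unused for SDTR.
 */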

int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2065