xref: /dragonfly/sys/dev/disk/advansys/advlib.c (revision 1465342b)
1 /*
2  * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3  *
4  * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
32  * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.9 2007/12/23 07:00:55 pavalos Exp $
33  */
34 /*
35  * Ported from:
36  * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
37  *
38  * Copyright (c) 1995-1996 Advanced System Products, Inc.
39  * All Rights Reserved.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that redistributions of source
43  * code retain the above copyright notice and this comment without
44  * modification.
45  */
46 
47 #include <sys/param.h>
48 #include <sys/kernel.h>
49 #include <sys/systm.h>
50 #include <sys/thread2.h>
51 #include <sys/bus.h>
52 #include <sys/rman.h>
53 
54 #include <machine/clock.h>
55 
56 #include <bus/cam/cam.h>
57 #include <bus/cam/cam_ccb.h>
58 #include <bus/cam/cam_sim.h>
59 #include <bus/cam/cam_xpt_sim.h>
60 
61 #include <bus/cam/scsi/scsi_all.h>
62 #include <bus/cam/scsi/scsi_message.h>
63 #include <bus/cam/scsi/scsi_da.h>
64 #include <bus/cam/scsi/scsi_cd.h>
65 
66 #include <vm/vm.h>
67 #include <vm/vm_param.h>
68 #include <vm/pmap.h>
69 
70 #include "advansys.h"
71 #include "advmcode.h"
72 
/*
 * Maps a SCSI INQUIRY match pattern to the per-device quirk flags
 * consulted by advasync() when a device is discovered.
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* pattern handed to cam_quirkmatch() */
	u_int8_t quirks;			/* OR of the ADV_QUIRK_* flags below */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01	/* always apply the async-transfer fix */
#define ADV_QUIRK_FIX_ASYN_XFER		0x02	/* device needs the async-transfer fix */
};
79 
/*
 * Quirk table searched first-match by advasync() via cam_quirkmatch().
 * Entries with quirks == 0 exonerate the device from the async-transfer
 * fix; the final wildcard entry must match anything (advasync() panics
 * if no entry matches).
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
                },
                ADV_QUIRK_FIX_ASYN_XFER,
	}
};
124 
/*
 * Allowable periods in ns
 *
 * Indexed by the 4-bit period code carried in the SDTR sync data
 * (see adv_sdtr_to_period_offset()).  Table for non-Ultra chips.
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
139 
/*
 * Allowable periods in ns for Ultra-capable chips (ADV_ULTRA);
 * selected by adv_lib_init().  Same indexing scheme as
 * adv_sdtr_period_tbl, but with 16 entries.
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
159 
/*
 * In-core image of a SCSI extended message (SDTR/WDTR/MDP) as read
 * from or written to the microcode message buffers in LRAM with
 * adv_read_lram_16_multi()/adv_write_lram_16_multi().
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of the extended message body */
	u_int8_t msg_req;	/* extended message code (e.g. MSG_EXT_SDTR) */
	union {
		struct {
			u_int8_t sdtr_xfer_period;	/* SDTR period factor */
			u_int8_t sdtr_req_ack_offset;	/* SDTR REQ/ACK offset */
		} sdtr;
		struct {
       			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;		/* pad byte so the struct is an even size */
};

/* Shorthand accessors for the union members above. */
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * NOTE(review): the mdp_b* macros below expand to u_ext_msg.mdp_b3 etc.,
 * but struct ext_msg only declares u_ext_msg.mdp.mdp[4].  Any use of
 * these macros would fail to compile; they appear to be unused leftovers
 * from the Linux port and are kept only for reference.
 */
#define	mdp_b3		u_ext_msg.mdp_b3
#define	mdp_b2		u_ext_msg.mdp_b2
#define	mdp_b1		u_ext_msg.mdp_b1
#define	mdp_b0		u_ext_msg.mdp_b0

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
192 
193 /* LRAM routines */
194 static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
195 					u_int16_t *buffer, int count);
196 static void	 adv_write_lram_16_multi(struct adv_softc *adv,
197 					 u_int16_t s_addr, u_int16_t *buffer,
198 					 int count);
199 static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200 				  u_int16_t set_value, int count);
201 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
202 				  int count);
203 
204 static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
205 					      u_int16_t addr, u_int16_t value);
206 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
207 
208 
209 static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
210 				   u_int32_t value);
211 static void	 adv_write_lram_32_multi(struct adv_softc *adv,
212 					 u_int16_t s_addr, u_int32_t *buffer,
213 					 int count);
214 
215 /* EEPROM routines */
216 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
217 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
218 				     u_int16_t value);
219 static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
220 					  u_int8_t cmd_reg);
221 static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
222 					    struct adv_eeprom_config *eeconfig);
223 
224 /* Initialization */
225 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
226 				    u_int16_t *mcode_buf, u_int16_t mcode_size);
227 
228 static void	 adv_reinit_lram(struct adv_softc *adv);
229 static void	 adv_init_lram(struct adv_softc *adv);
230 static int	 adv_init_microcode_var(struct adv_softc *adv);
231 static void	 adv_init_qlink_var(struct adv_softc *adv);
232 
233 /* Interrupts */
234 static void	 adv_disable_interrupt(struct adv_softc *adv);
235 static void	 adv_enable_interrupt(struct adv_softc *adv);
236 static void	 adv_toggle_irq_act(struct adv_softc *adv);
237 
238 /* Chip Control */
239 static int	 adv_host_req_chip_halt(struct adv_softc *adv);
240 static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
241 #if UNUSED
242 static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
243 #endif
244 
245 /* Queue handling and execution */
246 static __inline int
247 		 adv_sgcount_to_qcount(int sgcount);
248 
249 static __inline int
250 adv_sgcount_to_qcount(int sgcount)
251 {
252 	int	n_sg_list_qs;
253 
254 	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
255 	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
256 		n_sg_list_qs++;
257 	return (n_sg_list_qs + 1);
258 }
259 
260 static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
261 				u_int16_t *inbuf, int words);
262 static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
263 static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
264 				       u_int8_t free_q_head, u_int8_t n_free_q);
265 static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
266 				      u_int8_t free_q_head);
267 static int	 adv_send_scsi_queue(struct adv_softc *adv,
268 				     struct adv_scsi_q *scsiq,
269 				     u_int8_t n_q_required);
270 static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
271 					     struct adv_scsi_q *scsiq,
272 					     u_int q_no);
273 static void	 adv_put_ready_queue(struct adv_softc *adv,
274 				     struct adv_scsi_q *scsiq, u_int q_no);
275 static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
276 			       u_int16_t *buffer, int words);
277 
278 /* Messages */
279 static void	 adv_handle_extmsg_in(struct adv_softc *adv,
280 				      u_int16_t halt_q_addr, u_int8_t q_cntl,
281 				      target_bit_vector target_id,
282 				      int tid);
283 static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
284 				 u_int8_t sdtr_offset);
285 static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
286 					u_int8_t sdtr_data);
287 
288 
289 /* Exported functions first */
290 
291 void
292 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
293 {
294 	struct adv_softc *adv;
295 
296 	adv = (struct adv_softc *)callback_arg;
297 	switch (code) {
298 	case AC_FOUND_DEVICE:
299 	{
300 		struct ccb_getdev *cgd;
301 		target_bit_vector target_mask;
302 		int num_entries;
303         	caddr_t match;
304 		struct adv_quirk_entry *entry;
305 		struct adv_target_transinfo* tinfo;
306 
307 		cgd = (struct ccb_getdev *)arg;
308 
309 		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
310 
311 		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
312 		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
313 				       (caddr_t)adv_quirk_table,
314 				       num_entries, sizeof(*adv_quirk_table),
315 				       scsi_inquiry_match);
316 
317 		if (match == NULL)
318 			panic("advasync: device didn't match wildcard entry!!");
319 
320 		entry = (struct adv_quirk_entry *)match;
321 
322 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
323 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
324 				adv->fix_asyn_xfer_always |= target_mask;
325 			else
326 				adv->fix_asyn_xfer_always &= ~target_mask;
327 			/*
328 			 * We start out life with all bits set and clear them
329 			 * after we've determined that the fix isn't necessary.
330 			 * It may well be that we've already cleared a target
331 			 * before the full inquiry session completes, so don't
332 			 * gratuitously set a target bit even if it has this
333 			 * quirk.  But, if the quirk exonerates a device, clear
334 			 * the bit now.
335 			 */
336 			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
337 				adv->fix_asyn_xfer &= ~target_mask;
338 		}
339 		/*
340 		 * Reset our sync settings now that we've determined
341 		 * what quirks are in effect for the device.
342 		 */
343 		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
344 		adv_set_syncrate(adv, cgd->ccb_h.path,
345 				 cgd->ccb_h.target_id,
346 				 tinfo->current.period,
347 				 tinfo->current.offset,
348 				 ADV_TRANS_CUR);
349 		break;
350 	}
351 	case AC_LOST_DEVICE:
352 	{
353 		u_int target_mask;
354 
355 		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
356 			target_mask = 0x01 << xpt_path_target_id(path);
357 			adv->fix_asyn_xfer |= target_mask;
358 		}
359 
360 		/*
361 		 * Revert to async transfers
362 		 * for the next device.
363 		 */
364 		adv_set_syncrate(adv, /*path*/NULL,
365 				 xpt_path_target_id(path),
366 				 /*period*/0,
367 				 /*offset*/0,
368 				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
369 	}
370 	default:
371 		break;
372 	}
373 }
374 
375 void
376 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
377 {
378 	u_int8_t control;
379 
380 	/*
381 	 * Start out with the bank reset to 0
382 	 */
383 	control = ADV_INB(adv, ADV_CHIP_CTRL)
384 		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
385 			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
386 			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
387 	if (bank == 1) {
388 		control |= ADV_CC_BANK_ONE;
389 	} else if (bank == 2) {
390 		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
391 	}
392 	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
393 }
394 
395 u_int8_t
396 adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
397 {
398 	u_int8_t   byte_data;
399 	u_int16_t  word_data;
400 
401 	/*
402 	 * LRAM is accessed on 16bit boundaries.
403 	 */
404 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
405 	word_data = ADV_INW(adv, ADV_LRAM_DATA);
406 	if (addr & 1) {
407 #if BYTE_ORDER == BIG_ENDIAN
408 		byte_data = (u_int8_t)(word_data & 0xFF);
409 #else
410 		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
411 #endif
412 	} else {
413 #if BYTE_ORDER == BIG_ENDIAN
414 		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
415 #else
416 		byte_data = (u_int8_t)(word_data & 0xFF);
417 #endif
418 	}
419 	return (byte_data);
420 }
421 
422 void
423 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
424 {
425 	u_int16_t word_data;
426 
427 	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
428 	if (addr & 1) {
429 		word_data &= 0x00FF;
430 		word_data |= (((u_int8_t)value << 8) & 0xFF00);
431 	} else {
432 		word_data &= 0xFF00;
433 		word_data |= ((u_int8_t)value & 0x00FF);
434 	}
435 	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
436 }
437 
438 
/*
 * Read a 16-bit word from LRAM: latch the address, then read the
 * data port.  The two accesses must occur in this order.
 */
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}
445 
/*
 * Write a 16-bit word to LRAM: latch the address, then write the
 * data port.  The two accesses must occur in this order.
 */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
452 
453 /*
454  * Determine if there is a board at "iobase" by looking
455  * for the AdvanSys signatures.  Return 1 if a board is
456  * found, 0 otherwise.
457  */
458 int
459 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
460 {
461 	u_int16_t signature;
462 
463 	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
464 		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
465 		if ((signature == ADV_1000_ID0W)
466 		 || (signature == ADV_1000_ID0W_FIX))
467 			return (1);
468 	}
469 	return (0);
470 }
471 
472 void
473 adv_lib_init(struct adv_softc *adv)
474 {
475 	if ((adv->type & ADV_ULTRA) != 0) {
476 		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
477 		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
478 	} else {
479 		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
480 		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
481 	}
482 }
483 
/*
 * Read the adapter's EEPROM configuration into *eeprom_config and
 * return the 16-bit sum of the checksummed words.  The first two
 * words and the [cfg_beg, cfg_end-1] range are included in the sum;
 * the final word at cfg_end (read last) is not — presumably it holds
 * the stored checksum itself, so the caller can compare.  TODO confirm
 * against the EEPROM layout in advansys.h.
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	/* Fill the config struct word-by-word as we read. */
	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	/* Words 0 and 1 are always present and checksummed. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	/* The config region's bounds differ on VL-bus adapters. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		kprintf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	/* Final word (at cfg_end) is stored but excluded from the sum. */
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
520 
521 int
522 adv_set_eeprom_config(struct adv_softc *adv,
523 		      struct adv_eeprom_config *eeprom_config)
524 {
525 	int	retry;
526 
527 	retry = 0;
528 	while (1) {
529 		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
530 			break;
531 		}
532 		if (++retry > ADV_EEPROM_MAX_RETRY) {
533 			break;
534 		}
535 	}
536 	return (retry > ADV_EEPROM_MAX_RETRY);
537 }
538 
/*
 * Reset the controller chip and optionally the SCSI bus.  Returns
 * non-zero if the chip ends up halted (success).  The register write
 * ordering and the DELAY()s are part of the hardware reset protocol;
 * do not reorder.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	/* Assert chip reset (and bus reset if requested) while halted. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	/* Prime the instruction-hold register before releasing reset. */
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	/* Drop CHIP_RESET, keep the chip halted. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);	/* allow the bus reset to settle */

	/* Clear any latched SCSI-reset interrupt and the status word. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
561 
562 int
563 adv_test_external_lram(struct adv_softc* adv)
564 {
565 	u_int16_t	q_addr;
566 	u_int16_t	saved_value;
567 	int		success;
568 
569 	success = 0;
570 
571 	q_addr = ADV_QNO_TO_QADDR(241);
572 	saved_value = adv_read_lram_16(adv, q_addr);
573 	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
574 		success = 1;
575 		adv_write_lram_16(adv, q_addr, saved_value);
576 	}
577 	return (success);
578 }
579 
580 
/*
 * Initialize LRAM, download the microcode, and set up the microcode
 * variables.  Interrupts are disabled for the duration and re-enabled
 * only on success.  Returns 0 on success, 1 on failure (checksum
 * mismatch or microcode-variable init failure).
 */
int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	/* adv_load_microcode() returns the checksum of what it wrote. */
	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		kprintf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}
604 
605 u_int8_t
606 adv_get_chip_irq(struct adv_softc *adv)
607 {
608 	u_int16_t	cfg_lsw;
609 	u_int8_t	chip_irq;
610 
611 	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
612 
613 	if ((adv->type & ADV_VL) != 0) {
614 		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
615 		if ((chip_irq == 0) ||
616 		    (chip_irq == 4) ||
617 		    (chip_irq == 7)) {
618 			return (0);
619 		}
620 		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
621 	}
622 	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
623 	if (chip_irq == 3)
624 		chip_irq += 2;
625 	return (chip_irq + ADV_MIN_IRQ_NO);
626 }
627 
/*
 * Program the adapter's IRQ selection and return the IRQ actually in
 * effect (re-read via adv_get_chip_irq()).  The mask constants below
 * carve the IRQ field out of the config register; the surrounding
 * toggle dance appears required by the VL hardware — the exact
 * meaning of the 0x0010 bit is undocumented here (NOTE(review):
 * verify against the AdvanSys register reference).
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		/* Map the requested IRQ to the 3-bit VL encoding. */
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;	/* out of range: disable */
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/* 0xFFE3 clears bits 4-2 (the IRQ field). */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		/* 0xFFE0 also clears bits 1-0 before setting the code. */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* ISA uses a 2-bit field; IRQ 15 shares a code with 13. */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		/* 0xFFF3 clears bits 3-2 (the ISA IRQ field). */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
661 
662 void
663 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
664 {
665 	u_int16_t cfg_lsw;
666 
667 	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
668 	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
669 		return;
670     	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
671 	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
672 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
673 }
674 
/*
 * Prepare and submit a SCSI request to the microcode queues.
 *
 * Applies several pre-submission fixups: SDTR renegotiation before
 * REQUEST SENSE, the "async use sync" small-transfer workaround, and
 * the not-DWB odd-byte-count workaround.  Returns 0 if the request
 * was queued, 1 if there were not enough free microcode queues (the
 * caller must retry later); QC_URGENT requests bypass the free-queue
 * check.
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		/* Scatter/gather request: compute how many queues it needs. */
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			/* Non-PCI chips require dword-aligned SG ends. */
			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* The workarounds below adjust the final SG element. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		/* Single-segment request fits in one queue. */
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	/*
	 * The offset-1 async fix must be suppressed for small transfers
	 * and for commands whose transfer length may be less than
	 * requested (INQUIRY, MODE SENSE, etc.).
	 */
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Tagging and disconnection are incompatible with the fix. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	/*
	 * Not-DWB workaround: shave trailing bytes off READs whose end
	 * address is not dword aligned; the microcode transfers them
	 * separately (ADV_TAG_FLAG_EXTRA_BYTES).
	 */
	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	/* Submit only if enough queues are free (or the request is urgent). */
	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
799 
800 
/*
 * Copy the completion information for the queue at q_addr out of
 * LRAM into *scsiq.  Returns the number of SG-list queues that were
 * chained to this request so the caller can free them too.
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 portion of the done info. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each LRAM word below packs two byte-wide fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
845 
846 int
847 adv_start_chip(struct adv_softc *adv)
848 {
849 	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
850 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
851 		return (0);
852 	return (1);
853 }
854 
855 int
856 adv_stop_execution(struct adv_softc *adv)
857 {
858 	int count;
859 
860 	count = 0;
861 	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
862 		adv_write_lram_8(adv, ADV_STOP_CODE_B,
863 				 ADV_STOP_REQ_RISC_STOP);
864 		do {
865 			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
866 				ADV_STOP_ACK_RISC_STOP) {
867 				return (1);
868 			}
869 			DELAY(1000);
870 		} while (count++ < 20);
871 	}
872 	return (0);
873 }
874 
875 int
876 adv_is_chip_halted(struct adv_softc *adv)
877 {
878 	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
879 		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
880 			return (1);
881 		}
882 	}
883 	return (0);
884 }
885 
/*
 * Acknowledge a chip interrupt.  First wait for the microcode to
 * drop its "generating interrupt" flag (bounded by 0x7FFF polls),
 * then write the interrupt-ack with ADV_HOST_FLAG_ACK_INT set in
 * the host flag, re-issuing the ack up to 4 times while the pending
 * bit persists.  The loop bounds are empirical safety limits — the
 * exact required counts are not documented here (NOTE(review):
 * inherited XXX from the original driver).
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/* Wait for the RISC to finish raising the interrupt. */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the microcode we are acknowledging; restored below. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
920 
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 *
 * The microcode halts itself and posts a halt code in ADVV_HALTCODE_W
 * when it needs host help: async-fix toggling, extended message
 * processing, CHECK CONDITION setup, SDTR rejection, or QUEUE FULL.
 * The halt code is cleared at the end to let the microcode resume.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	/* Identify the halt reason and the queue that triggered it. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		/* Re-apply the async rate with the fix mask intact. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/* Target sent an extended message (SDTR/WDTR/...). */
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		/*
		 * CHECK CONDITION: convert the halted queue into an
		 * automatic REQUEST SENSE and freeze the device queue
		 * until the sense data arrives.
		 */
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		/* Mark the queue ready to run again as the sense request. */
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		/* Requeue any other commands queued to this target. */
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		/* Target rejected our outgoing message. */
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		/* Target reported QUEUE FULL: freeze and requeue. */
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		kprintf("Unhandled Halt Code %x\n", int_halt_code);
	}
	/* Clear the halt code so the microcode resumes. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
1063 
1064 void
1065 adv_sdtr_to_period_offset(struct adv_softc *adv,
1066 			  u_int8_t sync_data, u_int8_t *period,
1067 			  u_int8_t *offset, int tid)
1068 {
1069 	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1070 	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1071 		*period = *offset = 0;
1072 	} else {
1073 		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1074 		*offset = sync_data & 0xF;
1075 	}
1076 }
1077 
/*
 * Program new synchronous transfer parameters (period/offset) for the
 * target 'tid'.  'type' is a mask of ADV_TRANS_CUR/GOAL/USER selecting
 * which of the per-target transfer-info records to update.  When the
 * active settings change, the chip is halted (if running), the SDTR
 * hardware register is updated, and, if 'path' is non-NULL, the CAM
 * layer is told about the new parameters via an AC_TRANSFER_NEG async
 * event.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		crit_enter();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		crit_exit();
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;
			memset(&neg, 0, sizeof (neg));
#ifdef	CAM_NEW_TRAN_CODE
			/*
			 * NOTE(review): this declaration follows the
			 * memset() statement above, which is fine for
			 * C99+ but would not compile as strict C89.
			 */
			struct ccb_trans_settings_spi *spi =
			    &neg.xport_specific.spi;

			neg.protocol = PROTO_SCSI;
			neg.protocol_version = SCSI_REV_2;
			neg.transport = XPORT_SPI;
			neg.transport_version = 2;

			spi->sync_offset = offset;
			spi->sync_period = period;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
#else
			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
#endif
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	/* Goal and user records are simple copies; no hardware access. */
	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1166 
1167 u_int8_t
1168 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1169 			  u_int *offset, int tid)
1170 {
1171 	u_int i;
1172 	u_int dummy_offset;
1173 	u_int dummy_period;
1174 
1175 	if (offset == NULL) {
1176 		dummy_offset = 0;
1177 		offset = &dummy_offset;
1178 	}
1179 
1180 	if (period == NULL) {
1181 		dummy_period = 0;
1182 		period = &dummy_period;
1183 	}
1184 
1185 	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1186 	if (*period != 0 && *offset != 0) {
1187 		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1188 			if (*period <= adv->sdtr_period_tbl[i]) {
1189 				/*
1190 				 * When responding to a target that requests
1191 				 * sync, the requested  rate may fall between
1192 				 * two rates that we can output, but still be
1193 				 * a rate that we can receive.  Because of this,
1194 				 * we want to respond to the target with
1195 				 * the same rate that it sent to us even
1196 				 * if the period we use to send data to it
1197 				 * is lower.  Only lower the response period
1198 				 * if we must.
1199 				 */
1200 				if (i == 0 /* Our maximum rate */)
1201 					*period = adv->sdtr_period_tbl[0];
1202 				return ((i << 4) | *offset);
1203 			}
1204 		}
1205 	}
1206 
1207 	/* Must go async */
1208 	*period = 0;
1209 	*offset = 0;
1210 	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1211 		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1212 	return (0);
1213 }
1214 
1215 /* Internal Routines */
1216 
/*
 * Read 'count' 16-bit words of chip local RAM starting at 's_addr'
 * into 'buffer'.  The LRAM data port auto-increments, so the address
 * register is loaded only once.
 */
static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}
1224 
/*
 * Write 'count' 16-bit words from 'buffer' into chip local RAM
 * starting at 's_addr', using the auto-incrementing data port.
 */
static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}
1232 
/*
 * Fill 'count' 16-bit words of local RAM, starting at 's_addr', with
 * the constant 'set_value'.
 */
static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}
1241 
1242 static u_int32_t
1243 adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1244 {
1245 	u_int32_t	sum;
1246 	int		i;
1247 
1248 	sum = 0;
1249 	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1250 	for (i = 0; i < count; i++)
1251 		sum += ADV_INW(adv, ADV_LRAM_DATA);
1252 	return (sum);
1253 }
1254 
1255 static int
1256 adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1257 			     u_int16_t value)
1258 {
1259 	int	retval;
1260 
1261 	retval = 0;
1262 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1263 	ADV_OUTW(adv, ADV_LRAM_DATA, value);
1264 	DELAY(10000);
1265 	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1266 	if (value != ADV_INW(adv, ADV_LRAM_DATA))
1267 		retval = 1;
1268 	return (retval);
1269 }
1270 
/*
 * Read a 32-bit value from local RAM at 'addr'.  The chip hands back
 * two 16-bit words via the auto-incrementing data port; the order in
 * which they form the high and low halves depends on host byte order.
 */
static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}
1288 
/*
 * Write the 32-bit 'value' to local RAM at 'addr' as two 16-bit words,
 * ordered to match what adv_read_lram_32() expects for this host's
 * byte order.
 */
static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}
1302 
/*
 * Write 'count' 32-bit values from 'buffer' into local RAM starting
 * at 's_addr'.  The transfer is performed as count * 2 16-bit words
 * in buffer (i.e. host memory) order.
 */
static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}
1310 
/*
 * Read the 16-bit EEPROM word at 'addr'.  Writes are disabled first,
 * then a READ command for the address is issued.  The 1ms delays give
 * the serial EEPROM time to complete each step.
 */
static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}
1326 
/*
 * Write 'value' to the EEPROM word at 'addr', but only if it differs
 * from what is already stored (avoids needless EEPROM write cycles).
 * Returns the word as re-read from the EEPROM so the caller can
 * verify that the write took effect.
 */
static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		/* Latch the data, then issue the write command. */
		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		/* EEPROM write cycles are slow: allow 20ms. */
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}
1349 
1350 static int
1351 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1352 {
1353 	u_int8_t read_back;
1354 	int	 retry;
1355 
1356 	retry = 0;
1357 	while (1) {
1358 		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1359 		DELAY(1000);
1360 		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1361 		if (read_back == cmd_reg) {
1362 			return (1);
1363 		}
1364 		if (retry++ > ADV_EEPROM_MAX_RETRY) {
1365 			return (0);
1366 		}
1367 	}
1368 }
1369 
/*
 * Write 'eeprom_config' out to the controller's EEPROM and verify it.
 * The first two words and the bus-type dependent config region are
 * written while a running checksum is accumulated; the checksum is
 * then stored in the word following the config region.  Finally every
 * word (checksum included) is read back and compared.  Returns the
 * number of write/verify mismatches (0 on success).
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;	/* walks eeprom_config as 16-bit words */
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* Words 0 and 1 are always written. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* The config region's EEPROM location depends on the bus type. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/* Write the config region, excluding the checksum slot. */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Store the checksum in the final word (s_addr == cfg_end here). */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Verification pass: re-read every word and compare. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1421 
/*
 * Load the 'mcode_size'-byte microcode image in 'mcode_buf' into
 * local RAM starting at 's_addr'.  A 16-bit checksum of the code
 * section and the image size are stored in LRAM for the microcode's
 * own use.  Returns the 32-bit sum of the words actually written.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	/* Image size in 16-bit words. */
	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	/* Sum what actually landed in LRAM, not the source buffer. */
	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1443 
/*
 * Re-initialize the controller's local RAM: rebuild the queue blocks
 * and reset the microcode's queue-linkage variables.  Used to restore
 * a pristine LRAM state (e.g. after a bus reset).
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	/*
	 * Style fix: the opening brace was on the definition line,
	 * unlike every other function in this file.
	 */
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1449 
/*
 * Lay out the queue blocks in controller local RAM.  Queues
 * ADV_MIN_ACTIVE_QNO through max_openings are chained into a doubly
 * linked list (first queue's backward link wraps to the last, last
 * queue's forward link is ADV_QLINK_END); the few blocks beyond
 * max_openings are written with self-referential links.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	/* Zero the whole queue-block area first. */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First queue: forward to next, backward wraps to the last. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD,	i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle queues link to their immediate neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last queue terminates the forward chain. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing blocks are self-linked. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1485 
/*
 * Initialize the microcode's run-time variables in LRAM and set the
 * RISC program counter to the microcode entry point.  Returns 0 on
 * success, 1 if the program counter could not be set.
 */
static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	/* Publish host settings to the microcode. */
	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	/* Overrun buffer physical address and size. */
	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	/* Point the RISC at the microcode entry point and verify. */
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		kprintf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}
1516 
/*
 * Reset the queue-linkage variables shared with the microcode:
 * free/done list heads and tails, the busy and disconnect queue
 * heads, error/halt/stop codes, and the 32-word area at the start of
 * the queue address region.
 */
static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	/* Busy/disconnect heads live just past the normal queues. */
	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	/* Clear all error/flag bytes. */
	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	/* Zero the first 32 words of the queue address region. */
	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}
1547 
1548 static void
1549 adv_disable_interrupt(struct adv_softc *adv)
1550 {
1551 	u_int16_t cfg;
1552 
1553 	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1554 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1555 }
1556 
1557 static void
1558 adv_enable_interrupt(struct adv_softc *adv)
1559 {
1560 	u_int16_t cfg;
1561 
1562 	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1563 	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1564 }
1565 
/*
 * Strobe the IRQ ACT bit in the chip status register: assert it,
 * then write zero.
 */
static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}
1572 
/*
 * Let the microcode resume by clearing a non-zero stop code.
 *
 * NOTE(review): this uses ADV_STOP_CODE_B while every other stop-code
 * access in this file uses ADVV_STOP_CODE_B -- confirm both macros
 * name the same LRAM address.
 */
void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}
1580 
/*
 * Halt the RISC processor: clear the single-step/test/diag control
 * bits, assert HALT in the chip control register, and issue the HALT
 * and RFLAG_WTM instruction codes.  Returns 1 if the chip reports
 * halted afterwards, 0 otherwise.
 */
int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}
1596 
/*
 * Ask the running microcode to halt the RISC by setting a halt
 * request in the stop code, then poll (up to 2000 iterations) for the
 * chip to report halted.  The previous stop code is restored before
 * returning.  Returns non-zero if the chip is halted (including when
 * it was already halted on entry), 0 on timeout.
 */
static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	/* Busy-wait for the microcode to acknowledge the halt request. */
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}
1617 
/*
 * Write 'ins_code' to the instruction-hold register, which lives in
 * register bank 1.  Bank 0 is restored before returning.
 */
static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}
1625 
1626 #if UNUSED
/*
 * Return the SCSI control register value (register bank 1); bank 0 is
 * restored before returning.  Currently compiled out -- see the
 * surrounding #if UNUSED.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
1637 #endif
1638 
1639 /*
1640  * XXX Looks like more padding issues in this routine as well.
1641  *     There has to be a way to turn this into an insw.
1642  */
/*
 * Copy 'words' 16-bit words of queue information from LRAM at
 * 's_addr' into 'inbuf'.  Word index 5 is skipped entirely -- neither
 * read from the auto-incrementing data port nor stored -- to account
 * for padding in the destination structure (see the XXX note above).
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}
1657 
1658 static u_int
1659 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1660 {
1661 	u_int	  cur_used_qs;
1662 	u_int	  cur_free_qs;
1663 
1664 	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1665 
1666 	if ((cur_used_qs + n_qs) <= adv->max_openings) {
1667 		cur_free_qs = adv->max_openings - cur_used_qs;
1668 		return (cur_free_qs);
1669 	}
1670 	adv->openings_needed = n_qs;
1671 	return (0);
1672 }
1673 
1674 static u_int8_t
1675 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1676 		      u_int8_t n_free_q)
1677 {
1678 	int i;
1679 
1680 	for (i = 0; i < n_free_q; i++) {
1681 		free_q_head = adv_alloc_free_queue(adv, free_q_head);
1682 		if (free_q_head == ADV_QLINK_END)
1683 			break;
1684 	}
1685 	return (free_q_head);
1686 }
1687 
1688 static u_int8_t
1689 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1690 {
1691 	u_int16_t	q_addr;
1692 	u_int8_t	next_qp;
1693 	u_int8_t	q_status;
1694 
1695 	next_qp = ADV_QLINK_END;
1696 	q_addr = ADV_QNO_TO_QADDR(free_q_head);
1697 	q_status = adv_read_lram_8(adv,	q_addr + ADV_SCSIQ_B_STATUS);
1698 
1699 	if ((q_status & QS_READY) == 0)
1700 		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1701 
1702 	return (next_qp);
1703 }
1704 
1705 static int
1706 adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
1707 		    u_int8_t n_q_required)
1708 {
1709 	u_int8_t	free_q_head;
1710 	u_int8_t	next_qp;
1711 	u_int8_t	tid_no;
1712 	u_int8_t	target_ix;
1713 	int		retval;
1714 
1715 	retval = 1;
1716 	target_ix = scsiq->q2.target_ix;
1717 	tid_no = ADV_TIX_TO_TID(target_ix);
1718 	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
1719 	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
1720 	    != ADV_QLINK_END) {
1721 		scsiq->q1.q_no = free_q_head;
1722 
1723 		/*
1724 		 * Now that we know our Q number, point our sense
1725 		 * buffer pointer to a bus dma mapped area where
1726 		 * we can dma the data to.
1727 		 */
1728 		scsiq->q1.sense_addr = adv->sense_physbase
1729 		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
1730 		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
1731 		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
1732 		adv->cur_active += n_q_required;
1733 		retval = 0;
1734 	}
1735 	return (retval);
1736 }
1737 
1738 
/*
 * Stage the scatter/gather list (if any) attached to 'scsiq' into the
 * chained SG continuation queues in LRAM, then hand the request queue
 * itself to the microcode via adv_put_ready_queue().  Each
 * continuation queue holds up to ADV_SG_LIST_PER_Q elements; element
 * 0 is not copied here (sg_index starts at 1 -- presumably it travels
 * in the main request queue, see adv_put_ready_queue()).
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* Entries beyond the first, which stays with the request. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each SG element is two 32-bit dwords. */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Chase the free-list link to the next LRAM queue. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			/* Copy the SG queue header, then its element list. */
			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1804 
/*
 * Finish staging 'scsiq' into LRAM queue 'q_no' and mark it ready for
 * the microcode.  If the target's current sync settings differ from
 * the goal, an SDTR message-out is queued and QC_MSG_OUT is set so
 * negotiation happens with this command.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	/* Copy the CDB into the queue block. */
	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	/* Copy q1/q2, skipping structure padding (see adv_put_scsiq). */
	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Writing QS_READY last makes the queue live for the microcode. */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1851 
/*
 * Copy 'words' 16-bit words of a SCSI request structure into LRAM at
 * 's_addr', skipping word indices 2 and 10, which correspond to
 * compiler padding in the host structure (see the XXX note below).
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1881 
/*
 * Process an extended message received from the target while the chip
 * is halted on the queue at 'halt_q_addr'.  SDTR requests are range
 * checked and either accepted (completing a negotiation we started)
 * or answered with our counter-offer; WDTR requests are answered with
 * a zero-width response; anything else gets MESSAGE REJECT.  The
 * (possibly updated) queue control byte is written back to the halted
 * queue.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	/* Fetch the incoming message from the microcode's buffer. */
	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Never agree to a period faster than our goal. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period,  &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only do narrow transfers: respond with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		/* Unknown extended message: reject it. */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1960 
1961 static void
1962 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1963 		u_int8_t sdtr_offset)
1964 {
1965 	struct	 ext_msg sdtr_buf;
1966 
1967 	sdtr_buf.msg_type = MSG_EXTENDED;
1968 	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1969 	sdtr_buf.msg_req = MSG_EXT_SDTR;
1970 	sdtr_buf.xfer_period = sdtr_period;
1971 	sdtr_offset &= ADV_SYN_MAX_OFFSET;
1972 	sdtr_buf.req_ack_offset = sdtr_offset;
1973 	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1974 				(u_int16_t *) &sdtr_buf,
1975 				sizeof(sdtr_buf) / 2);
1976 }
1977 
/*
 * Walk every queue block in LRAM and mark for abort those matching
 * 'target'/'lun' (and 'ccb', when non-NULL).  With 'queued_only' set,
 * queues already active on the bus (disconnected, busy, or done) are
 * left alone.  Matching queues are flagged QS_ABORTED in LRAM, their
 * CCBs receive 'status' (unless an earlier error was already
 * recorded), and ACCB_ABORT_QUEUED is noted in the CCB info.  Returns
 * the number of queues aborted.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		/* Match: ready, not already aborted, not an SG queue. */
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2024 
/*
 * Reset the SCSI bus (optionally initiating the reset ourselves),
 * reinitialize the chip's LRAM state, force every target back to
 * async transfers, notify CAM of the reset, and terminate all pending
 * CCBs as aborted-by-host.  Returns the number of CCBs completed.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	/* Wait (up to ~200ms) for any in-progress reset to clear. */
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	/* Negotiations are void after a reset: drop everyone to async. */
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	/* Complete every pending CCB as aborted by the host. */
	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2058 
2059 static void
2060 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
2061 {
2062 	int orig_id;
2063 
2064     	adv_set_bank(adv, 1);
2065     	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
2066     	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
2067 	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
2068 		adv_set_bank(adv, 0);
2069 		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
2070 	}
2071     	adv_set_bank(adv, 1);
2072     	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
2073 	adv_set_bank(adv, 0);
2074 }
2075