/*
 * Low level routines for the Advanced Systems Inc. SCSI controller chips
 *
 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
 * $DragonFly: src/sys/dev/disk/advansys/advlib.c,v 1.5 2004/09/17 03:39:38 joerg Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1996 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
#include <bus/cam/scsi/scsi_da.h>
#include <bus/cam/scsi/scsi_cd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include "advansys.h"
#include "advmcode.h"

struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01
#define ADV_QUIRK_FIX_ASYN_XFER		0x02
};

static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
		  T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
		  "TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
		  T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		{
		  T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  "*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};

/*
 * Allowable periods in ns
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};

static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};

struct ext_msg {
	u_int8_t msg_type;
	u_int8_t msg_len;
	u_int8_t msg_req;
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
#define	mdp_b3		u_ext_msg.mdp.mdp[3]
#define	mdp_b2		u_ext_msg.mdp.mdp[2]
#define	mdp_b1		u_ext_msg.mdp.mdp[1]
#define	mdp_b0		u_ext_msg.mdp.mdp[0]

/*
 * Some of the early PCI adapters have problems with
 * async transfers.  Instead use an offset of 1.
 */
#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
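/*
 * The SDTR data byte used here encodes a period-table index in the
 * high nibble and a REQ/ACK offset in the low nibble (see
 * adv_sdtr_to_period_offset()), so 0x41 selects entry 4 of the
 * period table with an offset of 1.
 */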

/* LRAM routines */
static void	 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
					u_int16_t *buffer, int count);
static void	 adv_write_lram_16_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int16_t *buffer,
					 int count);
static void	 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  u_int16_t set_value, int count);
static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
				  int count);

static int	 adv_write_and_verify_lram_16(struct adv_softc *adv,
					      u_int16_t addr, u_int16_t value);
static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);


static void	 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
				   u_int32_t value);
static void	 adv_write_lram_32_multi(struct adv_softc *adv,
					 u_int16_t s_addr, u_int32_t *buffer,
					 int count);

/* EEPROM routines */
static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
				     u_int16_t value);
static int	 adv_write_eeprom_cmd_reg(struct adv_softc *adv,
					  u_int8_t cmd_reg);
static int	 adv_set_eeprom_config_once(struct adv_softc *adv,
					    struct adv_eeprom_config *eeconfig);

/* Initialization */
static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
				    u_int16_t *mcode_buf, u_int16_t mcode_size);

static void	 adv_reinit_lram(struct adv_softc *adv);
static void	 adv_init_lram(struct adv_softc *adv);
static int	 adv_init_microcode_var(struct adv_softc *adv);
static void	 adv_init_qlink_var(struct adv_softc *adv);

/* Interrupts */
static void	 adv_disable_interrupt(struct adv_softc *adv);
static void	 adv_enable_interrupt(struct adv_softc *adv);
static void	 adv_toggle_irq_act(struct adv_softc *adv);

/* Chip Control */
static int	 adv_host_req_chip_halt(struct adv_softc *adv);
static void	 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
#if UNUSED
static u_int8_t  adv_get_chip_scsi_ctrl(struct adv_softc *adv);
#endif

/* Queue handling and execution */
static __inline int
		 adv_sgcount_to_qcount(int sgcount);

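/*
 * A request's first SG element is carried by the head queue; the
 * remaining elements are packed ADV_SG_LIST_PER_Q at a time into
 * chained SG-list queues, so the total queue count is
 * 1 + ceil((sgcount - 1) / ADV_SG_LIST_PER_Q).  For example, if
 * ADV_SG_LIST_PER_Q were 7, a 15 element SG list would need
 * 1 + ceil(14 / 7) = 3 queues.
 */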
static __inline int
adv_sgcount_to_qcount(int sgcount)
{
	int	n_sg_list_qs;

	n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
	if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
		n_sg_list_qs++;
	return (n_sg_list_qs + 1);
}

static void	 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
				u_int16_t *inbuf, int words);
static u_int	 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
static u_int8_t  adv_alloc_free_queues(struct adv_softc *adv,
				       u_int8_t free_q_head, u_int8_t n_free_q);
static u_int8_t  adv_alloc_free_queue(struct adv_softc *adv,
				      u_int8_t free_q_head);
static int	 adv_send_scsi_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq,
				     u_int8_t n_q_required);
static void	 adv_put_ready_sg_list_queue(struct adv_softc *adv,
					     struct adv_scsi_q *scsiq,
					     u_int q_no);
static void	 adv_put_ready_queue(struct adv_softc *adv,
				     struct adv_scsi_q *scsiq, u_int q_no);
static void	 adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
			       u_int16_t *buffer, int words);

/* Messages */
static void	 adv_handle_extmsg_in(struct adv_softc *adv,
				      u_int16_t halt_q_addr, u_int8_t q_cntl,
				      target_bit_vector target_id,
				      int tid);
static void	 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
				 u_int8_t sdtr_offset);
static void	 adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
					u_int8_t sdtr_data);


/* Exported functions first */

void
advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct adv_softc *adv;

	adv = (struct adv_softc *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		target_bit_vector target_mask;
		int num_entries;
		caddr_t match;
		struct adv_quirk_entry *entry;
		struct adv_target_transinfo* tinfo;

		cgd = (struct ccb_getdev *)arg;

		target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);

		num_entries = sizeof(adv_quirk_table)/sizeof(*adv_quirk_table);
		match = cam_quirkmatch((caddr_t)&cgd->inq_data,
				       (caddr_t)adv_quirk_table,
				       num_entries, sizeof(*adv_quirk_table),
				       scsi_inquiry_match);

		if (match == NULL)
			panic("advasync: device didn't match wildcard entry!!");

		entry = (struct adv_quirk_entry *)match;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
				adv->fix_asyn_xfer_always |= target_mask;
			else
				adv->fix_asyn_xfer_always &= ~target_mask;
			/*
			 * We start out life with all bits set and clear them
			 * after we've determined that the fix isn't necessary.
			 * It may well be that we've already cleared a target
			 * before the full inquiry session completes, so don't
			 * gratuitously set a target bit even if it has this
			 * quirk.  But, if the quirk exonerates a device, clear
			 * the bit now.
			 */
			if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
				adv->fix_asyn_xfer &= ~target_mask;
		}
		/*
		 * Reset our sync settings now that we've determined
		 * what quirks are in effect for the device.
		 */
		tinfo = &adv->tinfo[cgd->ccb_h.target_id];
		adv_set_syncrate(adv, cgd->ccb_h.path,
				 cgd->ccb_h.target_id,
				 tinfo->current.period,
				 tinfo->current.offset,
				 ADV_TRANS_CUR);
		break;
	}
	case AC_LOST_DEVICE:
	{
		u_int target_mask;

		if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
			target_mask = 0x01 << xpt_path_target_id(path);
			adv->fix_asyn_xfer |= target_mask;
		}

		/*
		 * Revert to async transfers
		 * for the next device.
		 */
		adv_set_syncrate(adv, /*path*/NULL,
				 xpt_path_target_id(path),
				 /*period*/0,
				 /*offset*/0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
		break;
	}
	default:
		break;
	}
}

void
adv_set_bank(struct adv_softc *adv, u_int8_t bank)
{
	u_int8_t control;

	/*
	 * Start out with the bank reset to 0
	 */
	control = ADV_INB(adv, ADV_CHIP_CTRL)
		  &  (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
			| ADV_CC_DIAG | ADV_CC_SCSI_RESET
			| ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
	if (bank == 1) {
		control |= ADV_CC_BANK_ONE;
	} else if (bank == 2) {
		control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
	}
	ADV_OUTB(adv, ADV_CHIP_CTRL, control);
}

u_int8_t
adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
{
	u_int8_t   byte_data;
	u_int16_t  word_data;

	/*
	 * LRAM is accessed on 16bit boundaries.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
	word_data = ADV_INW(adv, ADV_LRAM_DATA);
	if (addr & 1) {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)(word_data & 0xFF);
#else
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#endif
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
#else
		byte_data = (u_int8_t)(word_data & 0xFF);
#endif
	}
	return (byte_data);
}

void
adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
{
	u_int16_t word_data;

	word_data = adv_read_lram_16(adv, addr & 0xFFFE);
	if (addr & 1) {
		word_data &= 0x00FF;
		word_data |= (((u_int8_t)value << 8) & 0xFF00);
	} else {
		word_data &= 0xFF00;
		word_data |= ((u_int8_t)value & 0x00FF);
	}
	adv_write_lram_16(adv, addr & 0xFFFE, word_data);
}


u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}

void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}

/*
 * Determine if there is a board at "iobase" by looking
 * for the AdvanSys signatures.  Return 1 if a board is
 * found, 0 otherwise.
 */
int
adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
{
	u_int16_t signature;

	if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
		signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
		if ((signature == ADV_1000_ID0W)
		 || (signature == ADV_1000_ID0W_FIX))
			return (1);
	}
	return (0);
}

void
adv_lib_init(struct adv_softc *adv)
{
	if ((adv->type & ADV_ULTRA) != 0) {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
	} else {
		adv->sdtr_period_tbl = adv_sdtr_period_tbl;
		adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
	}
}

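/*
 * Read the board's EEPROM configuration into *eeprom_config and
 * return the 16-bit sum of the words read.  The final word fetched
 * (at the address past the config area) appears to be the checksum
 * stored in the EEPROM itself, presumably for the caller to compare
 * against the returned sum.
 */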
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#if ADV_DEBUG_EEPROM
		printf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}

int
adv_set_eeprom_config(struct adv_softc *adv,
		      struct adv_eeprom_config *eeprom_config)
{
	int	retry;

	retry = 0;
	while (1) {
		if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
			break;
		}
		if (++retry > ADV_EEPROM_MAX_RETRY) {
			break;
		}
	}
	return (retry > ADV_EEPROM_MAX_RETRY);
}

int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		DELAY(200 * 1000);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}

int
adv_test_external_lram(struct adv_softc *adv)
{
	u_int16_t	q_addr;
	u_int16_t	saved_value;
	int		success;

	success = 0;

	q_addr = ADV_QNO_TO_QADDR(241);
	saved_value = adv_read_lram_16(adv, q_addr);
	if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
		success = 1;
		adv_write_lram_16(adv, q_addr, saved_value);
	}
	return (success);
}


int
adv_init_lram_and_mcode(struct adv_softc *adv)
{
	u_int32_t	retval;

	adv_disable_interrupt(adv);

	adv_init_lram(adv);

	retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
				    adv_mcode_size);
	if (retval != adv_mcode_chksum) {
		printf("adv%d: Microcode download failed checksum!\n",
		       adv->unit);
		return (1);
	}

	if (adv_init_microcode_var(adv) != 0)
		return (1);

	adv_enable_interrupt(adv);
	return (0);
}

u_int8_t
adv_get_chip_irq(struct adv_softc *adv)
{
	u_int16_t	cfg_lsw;
	u_int8_t	chip_irq;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((adv->type & ADV_VL) != 0) {
		chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
		if ((chip_irq == 0) ||
		    (chip_irq == 4) ||
		    (chip_irq == 7)) {
			return (0);
		}
		return (chip_irq + (ADV_MIN_IRQ_NO - 1));
	}
	chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
	if (chip_irq == 3)
		chip_irq += 2;
	return (chip_irq + ADV_MIN_IRQ_NO);
}

u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t	cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}

void
adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
{
	u_int16_t cfg_lsw;

	cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
	if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
		return;
	cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
	cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
}

int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

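	/*
	 * The offset-one workaround is disabled for short transfers and
	 * for commands (inquiry, sense, mode pages, read TOC, etc.),
	 * presumably because their returned data length can differ from
	 * the amount requested.
	 */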
	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}


u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}

int
adv_start_chip(struct adv_softc *adv)
{
	ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
		return (0);
	return (1);
}

int
adv_stop_execution(struct adv_softc *adv)
{
	int count;

	count = 0;
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B,
				 ADV_STOP_REQ_RISC_STOP);
		do {
			if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
				ADV_STOP_ACK_RISC_STOP) {
				return (1);
			}
			DELAY(1000);
		} while (count++ < 20);
	}
	return (0);
}

int
adv_is_chip_halted(struct adv_softc *adv)
{
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
		if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
			return (1);
		}
	}
	return (0);
}

/*
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}

/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		struct	  adv_target_transinfo* tinfo;
		union	  ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t  tag_code;
		u_int8_t  q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(&ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		printf("Unhandled Halt Code %x\n", int_halt_code);
	}
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}

void
adv_sdtr_to_period_offset(struct adv_softc *adv,
			  u_int8_t sync_data, u_int8_t *period,
			  u_int8_t *offset, int tid)
{
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
	 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
		*period = *offset = 0;
	} else {
		*period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
		*offset = sync_data & 0xF;
	}
}

void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int s;
		int halted;

		s = splcam();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		splx(s);
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct	ccb_trans_settings neg;

			neg.sync_period = period;
			neg.sync_offset = offset;
			neg.valid = CCB_TRANS_SYNC_RATE_VALID
				  | CCB_TRANS_SYNC_OFFSET_VALID;
			xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, &neg);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}

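/*
 * Convert a period/offset pair into the chip's SDTR data byte: the
 * index of the first table period >= the requested period goes in
 * the high nibble, the (clamped) offset in the low nibble.  On a
 * non-ultra board, for instance, a 60ns period with an offset of 8
 * matches index 5 in adv_sdtr_period_tbl and yields 0x58.
 */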
u_int8_t
adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
			  u_int *offset, int tid)
{
	u_int i;
	u_int dummy_offset;
	u_int dummy_period;

	if (offset == NULL) {
		dummy_offset = 0;
		offset = &dummy_offset;
	}

	if (period == NULL) {
		dummy_period = 0;
		period = &dummy_period;
	}

	*offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
	if (*period != 0 && *offset != 0) {
		for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
			if (*period <= adv->sdtr_period_tbl[i]) {
				/*
				 * When responding to a target that requests
				 * sync, the requested rate may fall between
				 * two rates that we can output, but still be
				 * a rate that we can receive.  Because of this,
				 * we want to respond to the target with
				 * the same rate that it sent to us even
				 * if the period we use to send data to it
				 * is lower.  Only lower the response period
				 * if we must.
				 */
				if (i == 0 /* Our maximum rate */)
					*period = adv->sdtr_period_tbl[0];
				return ((i << 4) | *offset);
			}
		}
	}

	/* Must go async */
	*period = 0;
	*offset = 0;
	if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
		return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
	return (0);
}

/* Internal Routines */

static void
adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
		       u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int16_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
}

static void
adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
		 u_int16_t set_value, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
			      set_value, count);
}

static u_int32_t
adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
{
	u_int32_t	sum;
	int		i;

	sum = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < count; i++)
		sum += ADV_INW(adv, ADV_LRAM_DATA);
	return (sum);
}

static int
adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
			     u_int16_t value)
{
	int	retval;

	retval = 0;
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
	DELAY(10000);
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	if (value != ADV_INW(adv, ADV_LRAM_DATA))
		retval = 1;
	return (retval);
}

static u_int32_t
adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
{
	u_int16_t           val_low, val_high;

	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
#else
	val_low = ADV_INW(adv, ADV_LRAM_DATA);
	val_high = ADV_INW(adv, ADV_LRAM_DATA);
#endif

	return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
}

static void
adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);

#if BYTE_ORDER == BIG_ENDIAN
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
#else
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
	ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
#endif
}

static void
adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
			u_int32_t *buffer, int count)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
}

static u_int16_t
adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
{
	u_int16_t read_wval;
	u_int8_t  cmd_reg;

	adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
	DELAY(1000);
	cmd_reg = addr | ADV_EEPROM_CMD_READ;
	adv_write_eeprom_cmd_reg(adv, cmd_reg);
	DELAY(1000);
	read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
	DELAY(1000);
	return (read_wval);
}

static u_int16_t
adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
{
	u_int16_t	read_value;

	read_value = adv_read_eeprom_16(adv, addr);
	if (read_value != value) {
		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
		DELAY(1000);

		ADV_OUTW(adv, ADV_EEPROM_DATA, value);
		DELAY(1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
		DELAY(20 * 1000);

		adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
		DELAY(1000);
		read_value = adv_read_eeprom_16(adv, addr);
	}
	return (read_value);
}

static int
adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
{
	u_int8_t read_back;
	int	 retry;

	retry = 0;
	while (1) {
		ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
		DELAY(1000);
		read_back = ADV_INB(adv, ADV_EEPROM_CMD);
		if (read_back == cmd_reg) {
			return (1);
		}
		if (retry++ > ADV_EEPROM_MAX_RETRY) {
			return (0);
		}
	}
}

static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int		n_error;
	u_int16_t	*wbuf;
	u_int16_t	sum;
	u_int8_t	s_addr;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}

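/*
 * Copy the microcode image into LRAM.  Two checksums are involved:
 * the returned value is the 16-bit word sum of the whole image
 * (compared against adv_mcode_chksum by the caller), while the sum
 * of the code section alone is stored at ADVV_MCODE_CHKSUM_W,
 * presumably so the microcode can verify itself at run time.
 */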
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}

static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}

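/*
 * Lay out the queue blocks in LRAM as a doubly linked list: queue
 * numbers ADV_MIN_ACTIVE_QNO through max_openings are chained via
 * their FWD/BWD bytes, while the extra blocks past the end, which
 * serve as list heads (e.g. for the busy and disconnected queues,
 * see adv_init_qlink_var()), are linked to themselves.
 */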
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t  i;
	u_int16_t s_addr;

	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}

static int
adv_init_microcode_var(struct adv_softc *adv)
{
	int	 i;

	for (i = 0; i <= ADV_MAX_TID; i++) {

		/* Start out async all around */
		adv_set_syncrate(adv, /*path*/NULL,
				 i, 0, 0,
				 ADV_TRANS_GOAL|ADV_TRANS_CUR);
	}

	adv_init_qlink_var(adv);

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);

	adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);

	adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);

	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Aborting.\n",
		       adv->unit);
		return (1);
	}
	return (0);
}

static void
adv_init_qlink_var(struct adv_softc *adv)
{
	int	  i;
	u_int16_t lram_addr;

	adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
	adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);

	adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 1));
	adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
			 (u_int8_t)((int) adv->max_openings + 2));

	adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);

	adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
	adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
	adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
	adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);

	lram_addr = ADV_QADR_BEG;
	for (i = 0; i < 32; i++, lram_addr += 2)
		adv_write_lram_16(adv, lram_addr, 0);
}

static void
adv_disable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_enable_interrupt(struct adv_softc *adv)
{
	u_int16_t cfg;

	cfg = ADV_INW(adv, ADV_CONFIG_LSW);
	ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
}

static void
adv_toggle_irq_act(struct adv_softc *adv)
{
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
}

void
adv_start_execution(struct adv_softc *adv)
{
	if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
		adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
	}
}

int
adv_stop_chip(struct adv_softc *adv)
{
	u_int8_t cc_val;

	cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
		 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
	ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
	adv_set_chip_ih(adv, ADV_INS_HALT);
	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
		return (0);
	}
	return (1);
}

static int
adv_host_req_chip_halt(struct adv_softc *adv)
{
	int	 count;
	u_int8_t saved_stop_code;

	if (adv_is_chip_halted(adv))
		return (1);

	count = 0;
	saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
	adv_write_lram_8(adv, ADVV_STOP_CODE_B,
			 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
	while (adv_is_chip_halted(adv) == 0
	    && count++ < 2000)
		;

	adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
	return (count < 2000);
}

static void
adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
{
	adv_set_bank(adv, 1);
	ADV_OUTW(adv, ADV_REG_IH, ins_code);
	adv_set_bank(adv, 0);
}

#if UNUSED
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif

/*
 * XXX Looks like more padding issues in this routine as well.
 *     There has to be a way to turn this into an insw.
 */
static void
adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
	       u_int16_t *inbuf, int words)
{
	int	i;

	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, inbuf++) {
		if (i == 5) {
			continue;
		}
		*inbuf = ADV_INW(adv, ADV_LRAM_DATA);
	}
}

static u_int
adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
{
	u_int	  cur_used_qs;
	u_int	  cur_free_qs;

	cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;

	if ((cur_used_qs + n_qs) <= adv->max_openings) {
		cur_free_qs = adv->max_openings - cur_used_qs;
		return (cur_free_qs);
	}
	adv->openings_needed = n_qs;
	return (0);
}

static u_int8_t
adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
		      u_int8_t n_free_q)
{
	int i;

	for (i = 0; i < n_free_q; i++) {
		free_q_head = adv_alloc_free_queue(adv, free_q_head);
		if (free_q_head == ADV_QLINK_END)
			break;
	}
	return (free_q_head);
}

static u_int8_t
adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
{
	u_int16_t	q_addr;
	u_int8_t	next_qp;
	u_int8_t	q_status;

	next_qp = ADV_QLINK_END;
	q_addr = ADV_QNO_TO_QADDR(free_q_head);
	q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);

	if ((q_status & QS_READY) == 0)
		next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);

	return (next_qp);
}

static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t	free_q_head;
	u_int8_t	next_qp;
	u_int8_t	tid_no;
	u_int8_t	target_ix;
	int		retval;

	retval = 1;
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}


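/*
 * Copy a ready SCSIQ into LRAM, spreading its SG list across the
 * chained queue blocks: each block after the head receives an
 * adv_sg_list_q header plus up to ADV_SG_LIST_PER_Q segments
 * (starting from sg_list[1]; the first segment is described by the
 * head queue itself), with QCSG_SG_XFER_END marking the last block.
 */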
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t	sg_list_dwords;
	u_int8_t	sg_index, i;
	u_int8_t	sg_entry_cnt;
	u_int8_t	next_qp;
	u_int16_t	q_addr;
	struct		adv_sg_head *sg_head;
	struct		adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}

static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct		adv_target_transinfo* tinfo;
	u_int		q_addr;
	u_int		tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}

static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int	i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}

static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct	ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union	  ccb *ccb;
		struct	  adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int	 period;
		u_int	 offset;
		int	 sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}

static void
adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
		u_int8_t sdtr_offset)
{
	struct	 ext_msg sdtr_buf;

	sdtr_buf.msg_type = MSG_EXTENDED;
	sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
	sdtr_buf.msg_req = MSG_EXT_SDTR;
	sdtr_buf.xfer_period = sdtr_period;
	sdtr_offset &= ADV_SYN_MAX_OFFSET;
	sdtr_buf.req_ack_offset = sdtr_offset;
	adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				(u_int16_t *) &sdtr_buf,
				sizeof(sdtr_buf) / 2);
}

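/*
 * Walk every queue block and mark matching, still-queued requests
 * aborted.  Only the head queue of each request is examined; SG
 * continuation blocks (QCSG_SG_XFER_LIST) are skipped.
 */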
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t  q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t  target_ix;
	int	  count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			  == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}

int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occurred */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}

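/*
 * Program the per-target SDTR data register.  The bank 1 HOST_SCSIID
 * register selects which target's synchronous settings the bank 0
 * SYN_OFFSET register refers to, so temporarily point it at "tid",
 * write the data, and restore the original selection.
 */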
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2063