1 /*
2 * Low level routines for the Advanced Systems Inc. SCSI controllers chips
3 *
4 * Copyright (c) 1996-1997, 1999-2000 Justin Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: src/sys/dev/advansys/advlib.c,v 1.15.2.1 2000/04/14 13:32:49 nyan Exp $
32 */
33 /*
34 * Ported from:
35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
36 *
37 * Copyright (c) 1995-1996 Advanced System Products, Inc.
38 * All Rights Reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that redistributions of source
42 * code retain the above copyright notice and this comment without
43 * modification.
44 */
45
46 #include <sys/param.h>
47 #include <sys/kernel.h>
48 #include <sys/systm.h>
49 #include <sys/thread2.h>
50 #include <sys/bus.h>
51 #include <sys/rman.h>
52
53 #include <machine/clock.h>
54
55 #include <bus/cam/cam.h>
56 #include <bus/cam/cam_ccb.h>
57 #include <bus/cam/cam_sim.h>
58 #include <bus/cam/cam_xpt_sim.h>
59
60 #include <bus/cam/scsi/scsi_all.h>
61 #include <bus/cam/scsi/scsi_message.h>
62 #include <bus/cam/scsi/scsi_da.h>
63 #include <bus/cam/scsi/scsi_cd.h>
64 #include <bus/cam/cam_xpt_periph.h>
65
66 #include <vm/vm.h>
67 #include <vm/vm_param.h>
68 #include <vm/pmap.h>
69
70 #include "advansys.h"
71 #include "advmcode.h"
72
/*
 * Table entry matching an inquiry pattern to the per-device quirk
 * flags used by the async-transfer workaround logic in advasync().
 */
struct adv_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* device match pattern */
	u_int8_t quirks;			/* OR of ADV_QUIRK_* flags */
#define ADV_QUIRK_FIX_ASYN_XFER_ALWAYS	0x01	/* always apply async fix */
#define ADV_QUIRK_FIX_ASYN_XFER		0x02	/* apply async fix */
};
79
/*
 * Quirk table consulted on AC_FOUND_DEVICE.  Matched first-to-last by
 * cam_quirkmatch(); the final wildcard entry guarantees a match, which
 * advasync() relies on (it panics if no entry matches).
 */
static struct adv_quirk_entry adv_quirk_table[] =
{
	{
		/* HP CD-ROMs always need the async transfer fix. */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "HP", "*", "*" },
		ADV_QUIRK_FIX_ASYN_XFER_ALWAYS|ADV_QUIRK_FIX_ASYN_XFER
	},
	{
		{ T_CDROM, SIP_MEDIA_REMOVABLE, "NEC", "CD-ROM DRIVE", "*" },
		0
	},
	{
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE,
			"TANDBERG", " TDC 36", "*"
		},
		0
	},
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "*", "*" },
		0
	},
	{
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		{
			T_SCANNER, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"*", "*", "*"
		},
		0
	},
	{
		/* Default quirk entry */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		ADV_QUIRK_FIX_ASYN_XFER,
	}
};
124
125 /*
126 * Allowable periods in ns
127 */
/*
 * Synchronous transfer period table for non-Ultra chips, indexed by
 * the upper nibble of the SDTR data byte (see
 * adv_sdtr_to_period_offset()).  NOTE(review): values appear to be in
 * SCSI period units of 4ns rather than raw ns -- confirm against the
 * "Allowable periods in ns" comment above, which may be stale.
 */
static u_int8_t adv_sdtr_period_tbl[] =
{
	25,
	30,
	35,
	40,
	50,
	60,
	70,
	85
};
139
/*
 * Synchronous transfer period table used instead of the one above when
 * the chip is an Ultra part (selected in adv_lib_init()).  Sixteen
 * entries, indexed by the full upper nibble of the SDTR data byte.
 */
static u_int8_t adv_sdtr_period_tbl_ultra[] =
{
	12,
	19,
	25,
	32,
	38,
	44,
	50,
	57,
	63,
	69,
	75,
	82,
	88,
	94,
	100,
	107
};
159
/*
 * In-memory image of a SCSI extended message (SDTR, WDTR, or Modify
 * Data Pointer) as exchanged with the microcode's LRAM message
 * buffers (e.g. ADVV_MSGOUT_BEG in adv_isr_chip_halted()).
 */
struct ext_msg {
	u_int8_t msg_type;	/* MSG_EXTENDED */
	u_int8_t msg_len;	/* length of extended message body */
	u_int8_t msg_req;	/* extended message code (MSG_EXT_SDTR...) */
	union {
		struct {
			u_int8_t sdtr_xfer_period;
			u_int8_t sdtr_req_ack_offset;
		} sdtr;
		struct {
			u_int8_t wdtr_width;
		} wdtr;
		struct {
			u_int8_t mdp[4];
		} mdp;
	} u_ext_msg;
	u_int8_t res;
};

/* Shorthand accessors for the message-specific union members. */
#define	xfer_period	u_ext_msg.sdtr.sdtr_xfer_period
#define	req_ack_offset	u_ext_msg.sdtr.sdtr_req_ack_offset
#define	wdtr_width	u_ext_msg.wdtr.wdtr_width
/*
 * These previously expanded to the nonexistent fields u_ext_msg.mdp_bN
 * and could never have compiled if used.  Map them onto the actual
 * mdp byte array; the modify-data-pointer argument is transmitted most
 * significant byte (b3) first, so b3 is mdp[0].
 */
#define	mdp_b3		u_ext_msg.mdp.mdp[0]
#define	mdp_b2		u_ext_msg.mdp.mdp[1]
#define	mdp_b1		u_ext_msg.mdp.mdp[2]
#define	mdp_b0		u_ext_msg.mdp.mdp[3]
186
187 /*
188 * Some of the early PCI adapters have problems with
189 * async transfers. Instead use an offset of 1.
190 */
191 #define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
192
193 /* LRAM routines */
194 static void adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
195 u_int16_t *buffer, int count);
196 static void adv_write_lram_16_multi(struct adv_softc *adv,
197 u_int16_t s_addr, u_int16_t *buffer,
198 int count);
199 static void adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
200 u_int16_t set_value, int count);
201 static u_int32_t adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr,
202 int count);
203
204 static int adv_write_and_verify_lram_16(struct adv_softc *adv,
205 u_int16_t addr, u_int16_t value);
206 static u_int32_t adv_read_lram_32(struct adv_softc *adv, u_int16_t addr);
207
208
209 static void adv_write_lram_32(struct adv_softc *adv, u_int16_t addr,
210 u_int32_t value);
211 static void adv_write_lram_32_multi(struct adv_softc *adv,
212 u_int16_t s_addr, u_int32_t *buffer,
213 int count);
214
215 /* EEPROM routines */
216 static u_int16_t adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr);
217 static u_int16_t adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr,
218 u_int16_t value);
219 static int adv_write_eeprom_cmd_reg(struct adv_softc *adv,
220 u_int8_t cmd_reg);
221 static int adv_set_eeprom_config_once(struct adv_softc *adv,
222 struct adv_eeprom_config *eeconfig);
223
224 /* Initialization */
225 static u_int32_t adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
226 u_int16_t *mcode_buf, u_int16_t mcode_size);
227
228 static void adv_reinit_lram(struct adv_softc *adv);
229 static void adv_init_lram(struct adv_softc *adv);
230 static int adv_init_microcode_var(struct adv_softc *adv);
231 static void adv_init_qlink_var(struct adv_softc *adv);
232
233 /* Interrupts */
234 static void adv_disable_interrupt(struct adv_softc *adv);
235 static void adv_enable_interrupt(struct adv_softc *adv);
236 static void adv_toggle_irq_act(struct adv_softc *adv);
237
238 /* Chip Control */
239 static int adv_host_req_chip_halt(struct adv_softc *adv);
240 static void adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code);
241 #if 0 /* UNUSED */
242 static u_int8_t adv_get_chip_scsi_ctrl(struct adv_softc *adv);
243 #endif
244
245 /* Queue handling and execution */
246 static __inline int
247 adv_sgcount_to_qcount(int sgcount);
248
249 static __inline int
adv_sgcount_to_qcount(int sgcount)250 adv_sgcount_to_qcount(int sgcount)
251 {
252 int n_sg_list_qs;
253
254 n_sg_list_qs = ((sgcount - 1) / ADV_SG_LIST_PER_Q);
255 if (((sgcount - 1) % ADV_SG_LIST_PER_Q) != 0)
256 n_sg_list_qs++;
257 return (n_sg_list_qs + 1);
258 }
259
260 static void adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
261 u_int16_t *inbuf, int words);
262 static u_int adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs);
263 static u_int8_t adv_alloc_free_queues(struct adv_softc *adv,
264 u_int8_t free_q_head, u_int8_t n_free_q);
265 static u_int8_t adv_alloc_free_queue(struct adv_softc *adv,
266 u_int8_t free_q_head);
267 static int adv_send_scsi_queue(struct adv_softc *adv,
268 struct adv_scsi_q *scsiq,
269 u_int8_t n_q_required);
270 static void adv_put_ready_sg_list_queue(struct adv_softc *adv,
271 struct adv_scsi_q *scsiq,
272 u_int q_no);
273 static void adv_put_ready_queue(struct adv_softc *adv,
274 struct adv_scsi_q *scsiq, u_int q_no);
275 static void adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
276 u_int16_t *buffer, int words);
277
278 /* Messages */
279 static void adv_handle_extmsg_in(struct adv_softc *adv,
280 u_int16_t halt_q_addr, u_int8_t q_cntl,
281 target_bit_vector target_id,
282 int tid);
283 static void adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
284 u_int8_t sdtr_offset);
285 static void adv_set_sdtr_reg_at_id(struct adv_softc *adv, int id,
286 u_int8_t sdtr_data);
287
288
289 /* Exported functions first */
290
291 void
advasync(void * callback_arg,u_int32_t code,struct cam_path * path,void * arg)292 advasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
293 {
294 struct adv_softc *adv;
295
296 adv = (struct adv_softc *)callback_arg;
297 switch (code) {
298 case AC_FOUND_DEVICE:
299 {
300 struct ccb_getdev *cgd;
301 target_bit_vector target_mask;
302 int num_entries;
303 caddr_t match;
304 struct adv_quirk_entry *entry;
305 struct adv_target_transinfo* tinfo;
306
307 cgd = (struct ccb_getdev *)arg;
308
309 target_mask = ADV_TID_TO_TARGET_MASK(cgd->ccb_h.target_id);
310
311 num_entries = NELEM(adv_quirk_table);
312 match = cam_quirkmatch((caddr_t)&cgd->inq_data,
313 (caddr_t)adv_quirk_table,
314 num_entries, sizeof(*adv_quirk_table),
315 scsi_inquiry_match);
316
317 if (match == NULL)
318 panic("advasync: device didn't match wildcard entry!!");
319
320 entry = (struct adv_quirk_entry *)match;
321
322 if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
323 if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER_ALWAYS)!=0)
324 adv->fix_asyn_xfer_always |= target_mask;
325 else
326 adv->fix_asyn_xfer_always &= ~target_mask;
327 /*
328 * We start out life with all bits set and clear them
329 * after we've determined that the fix isn't necessary.
330 * It may well be that we've already cleared a target
331 * before the full inquiry session completes, so don't
332 * gratuitously set a target bit even if it has this
333 * quirk. But, if the quirk exonerates a device, clear
334 * the bit now.
335 */
336 if ((entry->quirks & ADV_QUIRK_FIX_ASYN_XFER) == 0)
337 adv->fix_asyn_xfer &= ~target_mask;
338 }
339 /*
340 * Reset our sync settings now that we've determined
341 * what quirks are in effect for the device.
342 */
343 tinfo = &adv->tinfo[cgd->ccb_h.target_id];
344 adv_set_syncrate(adv, cgd->ccb_h.path,
345 cgd->ccb_h.target_id,
346 tinfo->current.period,
347 tinfo->current.offset,
348 ADV_TRANS_CUR);
349 break;
350 }
351 case AC_LOST_DEVICE:
352 {
353 u_int target_mask;
354
355 if (adv->bug_fix_control & ADV_BUG_FIX_ASYN_USE_SYN) {
356 target_mask = 0x01 << xpt_path_target_id(path);
357 adv->fix_asyn_xfer |= target_mask;
358 }
359
360 /*
361 * Revert to async transfers
362 * for the next device.
363 */
364 adv_set_syncrate(adv, /*path*/NULL,
365 xpt_path_target_id(path),
366 /*period*/0,
367 /*offset*/0,
368 ADV_TRANS_GOAL|ADV_TRANS_CUR);
369 }
370 default:
371 break;
372 }
373 }
374
375 void
adv_set_bank(struct adv_softc * adv,u_int8_t bank)376 adv_set_bank(struct adv_softc *adv, u_int8_t bank)
377 {
378 u_int8_t control;
379
380 /*
381 * Start out with the bank reset to 0
382 */
383 control = ADV_INB(adv, ADV_CHIP_CTRL)
384 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST
385 | ADV_CC_DIAG | ADV_CC_SCSI_RESET
386 | ADV_CC_CHIP_RESET | ADV_CC_BANK_ONE));
387 if (bank == 1) {
388 control |= ADV_CC_BANK_ONE;
389 } else if (bank == 2) {
390 control |= ADV_CC_DIAG | ADV_CC_BANK_ONE;
391 }
392 ADV_OUTB(adv, ADV_CHIP_CTRL, control);
393 }
394
395 u_int8_t
adv_read_lram_8(struct adv_softc * adv,u_int16_t addr)396 adv_read_lram_8(struct adv_softc *adv, u_int16_t addr)
397 {
398 u_int8_t byte_data;
399 u_int16_t word_data;
400
401 /*
402 * LRAM is accessed on 16bit boundaries.
403 */
404 ADV_OUTW(adv, ADV_LRAM_ADDR, addr & 0xFFFE);
405 word_data = ADV_INW(adv, ADV_LRAM_DATA);
406 if (addr & 1) {
407 #if BYTE_ORDER == BIG_ENDIAN
408 byte_data = (u_int8_t)(word_data & 0xFF);
409 #else
410 byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
411 #endif
412 } else {
413 #if BYTE_ORDER == BIG_ENDIAN
414 byte_data = (u_int8_t)((word_data >> 8) & 0xFF);
415 #else
416 byte_data = (u_int8_t)(word_data & 0xFF);
417 #endif
418 }
419 return (byte_data);
420 }
421
422 void
adv_write_lram_8(struct adv_softc * adv,u_int16_t addr,u_int8_t value)423 adv_write_lram_8(struct adv_softc *adv, u_int16_t addr, u_int8_t value)
424 {
425 u_int16_t word_data;
426
427 word_data = adv_read_lram_16(adv, addr & 0xFFFE);
428 if (addr & 1) {
429 word_data &= 0x00FF;
430 word_data |= (((u_int8_t)value << 8) & 0xFF00);
431 } else {
432 word_data &= 0xFF00;
433 word_data |= ((u_int8_t)value & 0x00FF);
434 }
435 adv_write_lram_16(adv, addr & 0xFFFE, word_data);
436 }
437
438
/*
 * Read one 16-bit word from chip local RAM.  Callers are expected to
 * pass word-aligned addresses (byte access goes through
 * adv_read_lram_8()).
 */
u_int16_t
adv_read_lram_16(struct adv_softc *adv, u_int16_t addr)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	return (ADV_INW(adv, ADV_LRAM_DATA));
}
445
/*
 * Write one 16-bit word to chip local RAM at the given (word-aligned)
 * address.
 */
void
adv_write_lram_16(struct adv_softc *adv, u_int16_t addr, u_int16_t value)
{
	ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
	ADV_OUTW(adv, ADV_LRAM_DATA, value);
}
452
453 /*
454 * Determine if there is a board at "iobase" by looking
455 * for the AdvanSys signatures. Return 1 if a board is
456 * found, 0 otherwise.
457 */
458 int
adv_find_signature(bus_space_tag_t tag,bus_space_handle_t bsh)459 adv_find_signature(bus_space_tag_t tag, bus_space_handle_t bsh)
460 {
461 u_int16_t signature;
462
463 if (bus_space_read_1(tag, bsh, ADV_SIGNATURE_BYTE) == ADV_1000_ID1B) {
464 signature = bus_space_read_2(tag, bsh, ADV_SIGNATURE_WORD);
465 if ((signature == ADV_1000_ID0W)
466 || (signature == ADV_1000_ID0W_FIX))
467 return (1);
468 }
469 return (0);
470 }
471
472 void
adv_lib_init(struct adv_softc * adv)473 adv_lib_init(struct adv_softc *adv)
474 {
475 if ((adv->type & ADV_ULTRA) != 0) {
476 adv->sdtr_period_tbl = adv_sdtr_period_tbl_ultra;
477 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl_ultra);
478 } else {
479 adv->sdtr_period_tbl = adv_sdtr_period_tbl;
480 adv->sdtr_period_tbl_size = sizeof(adv_sdtr_period_tbl);
481 }
482 }
483
/*
 * Read the board's EEPROM configuration into *eeprom_config and
 * return the 16-bit sum of all configuration words.  The final EEPROM
 * word (the stored checksum) is read into the buffer but deliberately
 * excluded from the returned sum so the caller can compare the two.
 */
u_int16_t
adv_get_eeprom_config(struct adv_softc *adv, struct
		      adv_eeprom_config  *eeprom_config)
{
	u_int16_t	sum;
	u_int16_t	*wbuf;
	u_int8_t	cfg_beg;
	u_int8_t	cfg_end;
	u_int8_t	s_addr;

	wbuf = (u_int16_t *)eeprom_config;
	sum = 0;

	/* The first two words are laid out identically on all boards. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
	}

	/* VL boards use a different config window within the EEPROM. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/* Sum every config word up to, but not including, the last one. */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		*wbuf = adv_read_eeprom_16(adv, s_addr);
		sum += *wbuf;
#ifdef ADV_DEBUG_EEPROM
		kprintf("Addr 0x%x: 0x%04x\n", s_addr, *wbuf);
#endif
	}
	/* Stored checksum word: captured in the buffer, not in the sum. */
	*wbuf = adv_read_eeprom_16(adv, s_addr);
	return (sum);
}
520
521 int
adv_set_eeprom_config(struct adv_softc * adv,struct adv_eeprom_config * eeprom_config)522 adv_set_eeprom_config(struct adv_softc *adv,
523 struct adv_eeprom_config *eeprom_config)
524 {
525 int retry;
526
527 retry = 0;
528 while (1) {
529 if (adv_set_eeprom_config_once(adv, eeprom_config) == 0) {
530 break;
531 }
532 if (++retry > ADV_EEPROM_MAX_RETRY) {
533 break;
534 }
535 }
536 return (retry > ADV_EEPROM_MAX_RETRY);
537 }
538
/*
 * Reset the controller chip, optionally asserting SCSI bus reset as
 * well, and leave the chip halted.  Returns non-zero if the chip is
 * verified halted afterwards.  The ordering of register writes and
 * delays below follows the vendor reset sequence; do not reorder.
 */
int
adv_reset_chip(struct adv_softc *adv, int reset_bus)
{
	adv_stop_chip(adv);
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT
				     | (reset_bus ? ADV_CC_SCSI_RESET : 0));
	/* NOTE(review): 60us appears to be the required reset pulse width. */
	DELAY(60);

	adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
	adv_set_chip_ih(adv, ADV_INS_HALT);

	/* Drop SCSI reset while keeping chip reset + halt asserted. */
	if (reset_bus)
		ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_CHIP_RESET | ADV_CC_HALT);

	/* Release chip reset, remain halted. */
	ADV_OUTB(adv, ADV_CHIP_CTRL, ADV_CC_HALT);
	if (reset_bus)
		/* Allow the bus to settle after a SCSI reset (200ms). */
		DELAY(200 * 1000);

	/* Clear any latched SCSI-reset interrupt condition. */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_CLR_SCSI_RESET_INT);
	ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
	return (adv_is_chip_halted(adv));
}
561
562 int
adv_test_external_lram(struct adv_softc * adv)563 adv_test_external_lram(struct adv_softc* adv)
564 {
565 u_int16_t q_addr;
566 u_int16_t saved_value;
567 int success;
568
569 success = 0;
570
571 q_addr = ADV_QNO_TO_QADDR(241);
572 saved_value = adv_read_lram_16(adv, q_addr);
573 if (adv_write_and_verify_lram_16(adv, q_addr, 0x55AA) == 0) {
574 success = 1;
575 adv_write_lram_16(adv, q_addr, saved_value);
576 }
577 return (success);
578 }
579
580
581 int
adv_init_lram_and_mcode(struct adv_softc * adv)582 adv_init_lram_and_mcode(struct adv_softc *adv)
583 {
584 u_int32_t retval;
585
586 adv_disable_interrupt(adv);
587
588 adv_init_lram(adv);
589
590 retval = adv_load_microcode(adv, 0, (u_int16_t *)adv_mcode,
591 adv_mcode_size);
592 if (retval != adv_mcode_chksum) {
593 kprintf("adv%d: Microcode download failed checksum!\n",
594 adv->unit);
595 return (1);
596 }
597
598 if (adv_init_microcode_var(adv) != 0)
599 return (1);
600
601 adv_enable_interrupt(adv);
602 return (0);
603 }
604
605 u_int8_t
adv_get_chip_irq(struct adv_softc * adv)606 adv_get_chip_irq(struct adv_softc *adv)
607 {
608 u_int16_t cfg_lsw;
609 u_int8_t chip_irq;
610
611 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
612
613 if ((adv->type & ADV_VL) != 0) {
614 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x07));
615 if ((chip_irq == 0) ||
616 (chip_irq == 4) ||
617 (chip_irq == 7)) {
618 return (0);
619 }
620 return (chip_irq + (ADV_MIN_IRQ_NO - 1));
621 }
622 chip_irq = (u_int8_t)(((cfg_lsw >> 2) & 0x03));
623 if (chip_irq == 3)
624 chip_irq += 2;
625 return (chip_irq + ADV_MIN_IRQ_NO);
626 }
627
/*
 * Program the chip's IRQ selection in the configuration register and
 * return the IRQ actually decoded back from the hardware.  Out-of-range
 * requests on VL boards are coerced to 0 (IRQ disabled).
 */
u_int8_t
adv_set_chip_irq(struct adv_softc *adv, u_int8_t irq_no)
{
	u_int16_t cfg_lsw;

	if ((adv->type & ADV_VL) != 0) {
		/* Translate the IRQ number into the 3-bit field encoding. */
		if (irq_no != 0) {
			if ((irq_no < ADV_MIN_IRQ_NO)
			 || (irq_no > ADV_MAX_IRQ_NO)) {
				irq_no = 0;
			} else {
				irq_no -= ADV_MIN_IRQ_NO - 1;
			}
		}
		/*
		 * NOTE(review): 0xFFE3 clears the IRQ field plus bit 4;
		 * 0x0010 then sets bit 4 -- presumably an IRQ-update
		 * enable strobe.  Each write is followed by an IRQ-act
		 * toggle to latch it.  Confirm against chip docs.
		 */
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE3;
		cfg_lsw |= 0x0010;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);

		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFE0;
		cfg_lsw |= (irq_no & 0x07) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
		adv_toggle_irq_act(adv);
	} else if ((adv->type & ADV_ISA) != 0) {
		/* ISA: IRQ 15 encodes as if it were 13 (gap in numbering). */
		if (irq_no == 15)
			irq_no -= 2;
		irq_no -= ADV_MIN_IRQ_NO;
		cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW) & 0xFFF3;
		cfg_lsw |= (irq_no & 0x03) << 2;
		ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
	}
	return (adv_get_chip_irq(adv));
}
661
662 void
adv_set_chip_scsiid(struct adv_softc * adv,int new_id)663 adv_set_chip_scsiid(struct adv_softc *adv, int new_id)
664 {
665 u_int16_t cfg_lsw;
666
667 cfg_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
668 if (ADV_CONFIG_SCSIID(cfg_lsw) == new_id)
669 return;
670 cfg_lsw &= ~ADV_CFG_LSW_SCSIID;
671 cfg_lsw |= (new_id & ADV_MAX_TID) << ADV_CFG_LSW_SCSIID_SHIFT;
672 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg_lsw);
673 }
674
/*
 * Prepare a SCSI request and hand it to the microcode.
 *
 * Handles: sync renegotiation piggy-backed on REQUEST SENSE, sizing
 * the number of microcode queue entries from the S/G list, the
 * "async transfer" workaround for broken devices, and the odd-address
 * DMA workaround (ADV_BUG_FIX_IF_NOT_DWB).  The request is only posted
 * when enough free queues exist, unless it is marked QC_URGENT.
 *
 * Returns 0 when the request was accepted, non-zero otherwise.
 */
int
adv_execute_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		       u_int32_t datalen)
{
	struct		adv_target_transinfo* tinfo;
	u_int32_t	*p_data_addr;
	u_int32_t	*p_data_bcount;
	int		disable_syn_offset_one_fix;
	int		retval;
	u_int		n_q_required;
	u_int32_t	addr;
	u_int8_t	sg_entry_cnt;
	u_int8_t	target_ix;
	u_int8_t	sg_entry_cnt_minus_one;
	u_int8_t	tid_no;

	scsiq->q1.q_no = 0;
	retval = 1;  /* Default to error case */
	target_ix = scsiq->q2.target_ix;
	tid_no = ADV_TIX_TO_TID(target_ix);
	tinfo = &adv->tinfo[tid_no];

	if (scsiq->cdbptr[0] == REQUEST_SENSE) {
		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			scsiq->q1.cntl |= (QC_MSG_OUT | QC_URGENT);
		}
	}

	if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) {
		/* Scatter/gather request: size the queue requirement. */
		sg_entry_cnt = scsiq->sg_head->entry_cnt;
		sg_entry_cnt_minus_one = sg_entry_cnt - 1;

#ifdef DIAGNOSTIC
		if (sg_entry_cnt <= 1)
			panic("adv_execute_scsi_queue: Queue "
			      "with QC_SG_HEAD set but %d segs.", sg_entry_cnt);

		if (sg_entry_cnt > ADV_MAX_SG_LIST)
			panic("adv_execute_scsi_queue: "
			      "Queue with too many segs.");

		/* Bus-master ISA/VL/EISA parts require aligned segments. */
		if ((adv->type & (ADV_ISA | ADV_VL | ADV_EISA)) != 0) {
			int i;

			for (i = 0; i < sg_entry_cnt_minus_one; i++) {
				addr = scsiq->sg_head->sg_list[i].addr +
				       scsiq->sg_head->sg_list[i].bytes;

				if ((addr & 0x0003) != 0)
					panic("adv_execute_scsi_queue: SG "
					      "with odd address or byte count");
			}
		}
#endif
		/* Workarounds below adjust the final segment in place. */
		p_data_addr =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].addr;
		p_data_bcount =
		    &scsiq->sg_head->sg_list[sg_entry_cnt_minus_one].bytes;

		n_q_required = adv_sgcount_to_qcount(sg_entry_cnt);
		scsiq->sg_head->queue_cnt = n_q_required - 1;
	} else {
		/* Single contiguous buffer. */
		p_data_addr = &scsiq->q1.data_addr;
		p_data_bcount = &scsiq->q1.data_cnt;
		n_q_required = 1;
	}

	disable_syn_offset_one_fix = FALSE;

	if ((adv->fix_asyn_xfer & scsiq->q1.target_id) != 0
	 && (adv->fix_asyn_xfer_always & scsiq->q1.target_id) == 0) {

		/*
		 * The async fix must be suppressed for small transfers
		 * and for commands whose transfer length is not under
		 * device control.
		 */
		if (datalen != 0) {
			if (datalen < 512) {
				disable_syn_offset_one_fix = TRUE;
			} else {
				if (scsiq->cdbptr[0] == INQUIRY
				 || scsiq->cdbptr[0] == REQUEST_SENSE
				 || scsiq->cdbptr[0] == READ_CAPACITY
				 || scsiq->cdbptr[0] == MODE_SELECT_6
				 || scsiq->cdbptr[0] == MODE_SENSE_6
				 || scsiq->cdbptr[0] == MODE_SENSE_10
				 || scsiq->cdbptr[0] == MODE_SELECT_10
				 || scsiq->cdbptr[0] == READ_TOC) {
					disable_syn_offset_one_fix = TRUE;
				}
			}
		}
	}

	if (disable_syn_offset_one_fix) {
		/* Untag the command and tell the microcode to skip the fix. */
		scsiq->q2.tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);
		scsiq->q2.tag_code |= (ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX
				     | ADV_TAG_FLAG_DISABLE_DISCONNECT);
	}

	if ((adv->bug_fix_control & ADV_BUG_FIX_IF_NOT_DWB) != 0
	 && (scsiq->cdbptr[0] == READ_10 || scsiq->cdbptr[0] == READ_6)) {
		u_int8_t extra_bytes;

		/*
		 * Transfers ending on an odd address trip a DMA bug;
		 * shave the trailing bytes off here and let the
		 * microcode transfer them separately (EXTRA_BYTES).
		 */
		addr = *p_data_addr + *p_data_bcount;
		extra_bytes = addr & 0x0003;
		if (extra_bytes != 0
		 && ((scsiq->q1.cntl & QC_SG_HEAD) != 0
		  || (scsiq->q1.data_cnt & 0x01FF) == 0)) {
			scsiq->q2.tag_code |= ADV_TAG_FLAG_EXTRA_BYTES;
			scsiq->q1.extra_bytes = extra_bytes;
			*p_data_bcount -= extra_bytes;
		}
	}

	if ((adv_get_num_free_queues(adv, n_q_required) >= n_q_required)
	 || ((scsiq->q1.cntl & QC_URGENT) != 0))
		retval = adv_send_scsi_queue(adv, scsiq, n_q_required);

	return (retval);
}
799
800
/*
 * Copy a completed request's status out of the microcode's done queue
 * in LRAM into *scsiq.  max_dma_count is used to mask the residual
 * byte count.  Returns the number of S/G continuation queues that the
 * request occupied (so the caller can free them).
 */
u_int8_t
adv_copy_lram_doneq(struct adv_softc *adv, u_int16_t q_addr,
		    struct adv_q_done_info *scsiq, u_int32_t max_dma_count)
{
	u_int16_t val;
	u_int8_t  sg_queue_cnt;

	/* Bulk-copy the d2/d3 done-info region of the queue entry. */
	adv_get_q_info(adv, q_addr + ADV_SCSIQ_DONE_INFO_BEG,
		       (u_int16_t *)scsiq,
		       (sizeof(scsiq->d2) + sizeof(scsiq->d3)) / 2);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_endian_qdone_info(scsiq);
#endif

	/* Each LRAM word below packs two byte-wide fields. */
	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS);
	scsiq->q_status = val & 0xFF;
	scsiq->q_no = (val >> 8) & 0XFF;

	val = adv_read_lram_16(adv, q_addr + ADV_SCSIQ_B_CNTL);
	scsiq->cntl = val & 0xFF;
	sg_queue_cnt = (val >> 8) & 0xFF;

	val = adv_read_lram_16(adv,q_addr + ADV_SCSIQ_B_SENSE_LEN);
	scsiq->sense_len = val & 0xFF;
	scsiq->extra_bytes = (val >> 8) & 0xFF;

	/*
	 * Due to a bug in accessing LRAM on the 940UA, the residual
	 * is split into separate high and low 16bit quantities.
	 */
	scsiq->remain_bytes =
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT);
	scsiq->remain_bytes |=
	    adv_read_lram_16(adv, q_addr + ADV_SCSIQ_W_ALT_DC1) << 16;

	/*
	 * XXX Is this just a safeguard or will the counter really
	 * have bogus upper bits?
	 */
	scsiq->remain_bytes &= max_dma_count;

	return (sg_queue_cnt);
}
845
846 int
adv_start_chip(struct adv_softc * adv)847 adv_start_chip(struct adv_softc *adv)
848 {
849 ADV_OUTB(adv, ADV_CHIP_CTRL, 0);
850 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0)
851 return (0);
852 return (1);
853 }
854
855 int
adv_stop_execution(struct adv_softc * adv)856 adv_stop_execution(struct adv_softc *adv)
857 {
858 int count;
859
860 count = 0;
861 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) == 0) {
862 adv_write_lram_8(adv, ADV_STOP_CODE_B,
863 ADV_STOP_REQ_RISC_STOP);
864 do {
865 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) &
866 ADV_STOP_ACK_RISC_STOP) {
867 return (1);
868 }
869 DELAY(1000);
870 } while (count++ < 20);
871 }
872 return (0);
873 }
874
875 int
adv_is_chip_halted(struct adv_softc * adv)876 adv_is_chip_halted(struct adv_softc *adv)
877 {
878 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) != 0) {
879 if ((ADV_INB(adv, ADV_CHIP_CTRL) & ADV_CC_HALT) != 0) {
880 return (1);
881 }
882 }
883 return (0);
884 }
885
886 /*
887 * XXX The numeric constants and the loops in this routine
888 * need to be documented.
889 */
/*
 * Acknowledge an interrupt from the chip/microcode.
 *
 * XXX The numeric constants and the loops in this routine
 * need to be documented.
 */
void
adv_ack_interrupt(struct adv_softc *adv)
{
	u_int8_t	host_flag;
	u_int8_t	risc_flag;
	int		loop;

	/*
	 * Wait for the microcode to finish raising its interrupt
	 * (GEN_INT clear).  NOTE(review): 0x7FFF is a bail-out bound,
	 * presumably sized to the worst-case microcode latency.
	 */
	loop = 0;
	do {
		risc_flag = adv_read_lram_8(adv, ADVV_RISC_FLAG_B);
		if (loop++ > 0x7FFF) {
			break;
		}
	} while ((risc_flag & ADV_RISC_FLAG_GEN_INT) != 0);

	/* Tell the microcode we are acking while we touch the chip. */
	host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
			 host_flag | ADV_HOST_FLAG_ACK_INT);

	/*
	 * Ack the interrupt; retry a few times in case the pending
	 * bit does not clear on the first write.
	 */
	ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
	loop = 0;
	while (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_INT_PENDING) {
		ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_INT_ACK);
		if (loop++ > 3) {
			break;
		}
	}

	/* Restore the host flag byte (drops ACK_INT). */
	adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
920
921 /*
922 * Handle all conditions that may halt the chip waiting
923 * for us to intervene.
924 */
/*
 * Handle all conditions that may halt the chip waiting
 * for us to intervene.
 *
 * The microcode halts with a reason code in ADVV_HALTCODE_W and the
 * halting queue number in ADVV_CURCDB_B; each branch below services
 * one reason and the halt code is cleared at the end so the microcode
 * can resume.
 */
void
adv_isr_chip_halted(struct adv_softc *adv)
{
	u_int16_t	  int_halt_code;
	u_int16_t	  halt_q_addr;
	target_bit_vector target_mask;
	target_bit_vector scsi_busy;
	u_int8_t	  halt_qp;
	u_int8_t	  target_ix;
	u_int8_t	  q_cntl;
	u_int8_t	  tid_no;

	/* Gather the halt reason and the queue that triggered it. */
	int_halt_code = adv_read_lram_16(adv, ADVV_HALTCODE_W);
	halt_qp = adv_read_lram_8(adv, ADVV_CURCDB_B);
	halt_q_addr = ADV_QNO_TO_QADDR(halt_qp);
	target_ix = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TARGET_IX);
	q_cntl = adv_read_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL);
	tid_no = ADV_TIX_TO_TID(target_ix);
	target_mask = ADV_TID_TO_TARGET_MASK(tid_no);
	if (int_halt_code == ADV_HALT_DISABLE_ASYN_USE_SYN_FIX) {
		/*
		 * Temporarily disable the async fix by removing
		 * this target from the list of affected targets,
		 * setting our async rate, and then putting us
		 * back into the mask.
		 */
		adv->fix_asyn_xfer &= ~target_mask;
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
		adv->fix_asyn_xfer |= target_mask;
	} else if (int_halt_code == ADV_HALT_ENABLE_ASYN_USE_SYN_FIX) {
		/* Re-apply the async-fix rate for this target. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_ACTIVE);
	} else if (int_halt_code == ADV_HALT_EXTMSG_IN) {
		/* Target sent an extended message; parse and respond. */
		adv_handle_extmsg_in(adv, halt_q_addr, q_cntl,
				     target_mask, tid_no);
	} else if (int_halt_code == ADV_HALT_CHK_CONDITION) {
		/*
		 * CHECK CONDITION: convert the halting queue into an
		 * (untagged) autosense request and requeue everything
		 * else for this device until the sense data arrives.
		 */
		struct	adv_target_transinfo* tinfo;
		union	ccb *ccb;
		u_int32_t cinfo_index;
		u_int8_t tag_code;
		u_int8_t q_status;

		tinfo = &adv->tinfo[tid_no];
		q_cntl |= QC_REQ_SENSE;

		/* Renegotiate if appropriate. */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 tid_no, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		if (tinfo->current.period != tinfo->goal.period) {
			adv_msgout_sdtr(adv, tinfo->goal.period,
					tinfo->goal.offset);
			q_cntl |= QC_MSG_OUT;
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);

		/* Don't tag request sense commands */
		tag_code = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_TAG_CODE);
		tag_code &=
		    ~(MSG_SIMPLE_Q_TAG|MSG_HEAD_OF_Q_TAG|MSG_ORDERED_Q_TAG);

		/* The sense transfer is small; apply the async fix flags. */
		if ((adv->fix_asyn_xfer & target_mask) != 0
		 && (adv->fix_asyn_xfer_always & target_mask) == 0) {
			tag_code |= (ADV_TAG_FLAG_DISABLE_DISCONNECT
				 | ADV_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX);
		}
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_TAG_CODE,
				 tag_code);
		/* Mark the queue ready so the microcode re-executes it. */
		q_status = adv_read_lram_8(adv,
					   halt_q_addr + ADV_SCSIQ_B_STATUS);
		q_status |= (QS_READY | QS_BUSY);
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_STATUS,
				 q_status);
		/*
		 * Freeze the devq until we can handle the sense condition.
		 */
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
		/*
		 * Ensure we have enough time to actually
		 * retrieve the sense.
		 */
		callout_reset(ccb->ccb_h.timeout_ch, 5 * hz, adv_timeout, ccb);
	} else if (int_halt_code == ADV_HALT_SDTR_REJECTED) {
		/* Target rejected our SDTR message out. */
		struct	ext_msg out_msg;

		adv_read_lram_16_multi(adv, ADVV_MSGOUT_BEG,
				       (u_int16_t *) &out_msg,
				       sizeof(out_msg)/2);

		if ((out_msg.msg_type == MSG_EXTENDED)
		 && (out_msg.msg_len == MSG_EXT_SDTR_LEN)
		 && (out_msg.msg_req == MSG_EXT_SDTR)) {

			/* Revert to Async */
			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 tid_no, /*period*/0, /*offset*/0,
					 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);
		}
		q_cntl &= ~QC_MSG_OUT;
		adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
	} else if (int_halt_code == ADV_HALT_SS_QUEUE_FULL) {
		/*
		 * Device reported QUEUE FULL; requeue its outstanding
		 * commands and report the condition up through CAM.
		 */
		u_int8_t scsi_status;
		union ccb *ccb;
		u_int32_t cinfo_index;

		scsi_status = adv_read_lram_8(adv, halt_q_addr
					      + ADV_SCSIQ_SCSI_STATUS);
		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN|CAM_SCSI_STATUS_ERROR;
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
		adv_abort_ccb(adv, tid_no, ADV_TIX_TO_LUN(target_ix),
			      /*ccb*/NULL, CAM_REQUEUE_REQ,
			      /*queued_only*/TRUE);
		scsi_busy = adv_read_lram_8(adv, ADVV_SCSIBUSY_B);
		scsi_busy &= ~target_mask;
		adv_write_lram_8(adv, ADVV_SCSIBUSY_B, scsi_busy);
	} else {
		kprintf("Unhandled Halt Code %x\n", int_halt_code);
	}
	/* Clear the halt code so the microcode resumes. */
	adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
}
1063
1064 void
adv_sdtr_to_period_offset(struct adv_softc * adv,u_int8_t sync_data,u_int8_t * period,u_int8_t * offset,int tid)1065 adv_sdtr_to_period_offset(struct adv_softc *adv,
1066 u_int8_t sync_data, u_int8_t *period,
1067 u_int8_t *offset, int tid)
1068 {
1069 if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid)
1070 && (sync_data == ASYN_SDTR_DATA_FIX_PCI_REV_AB)) {
1071 *period = *offset = 0;
1072 } else {
1073 *period = adv->sdtr_period_tbl[((sync_data >> 4) & 0xF)];
1074 *offset = sync_data & 0xF;
1075 }
1076 }
1077
/*
 * Program a new synchronous transfer rate for target "tid".
 *
 * period/offset are first clamped to chip-supported values via
 * adv_period_offset_to_sdtr().  "type" is a mask of ADV_TRANS_CUR /
 * ADV_TRANS_GOAL / ADV_TRANS_USER selecting which cached settings to
 * update.  ADV_TRANS_CUR additionally updates the hardware (halting
 * the chip around the register write if it was running) and, when a
 * CAM path is supplied, posts an AC_TRANSFER_NEG async event so the
 * transport layer learns the new parameters.
 */
void
adv_set_syncrate(struct adv_softc *adv, struct cam_path *path,
		 u_int tid, u_int period, u_int offset, u_int type)
{
	struct adv_target_transinfo* tinfo;
	u_int old_period;
	u_int old_offset;
	u_int8_t sdtr_data;

	tinfo = &adv->tinfo[tid];

	/* Filter our input */
	sdtr_data = adv_period_offset_to_sdtr(adv, &period,
					      &offset, tid);

	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;

	if ((type & ADV_TRANS_CUR) != 0
	 && ((old_period != period || old_offset != offset)
	  || period == 0 || offset == 0) /*Changes in asyn fix settings*/) {
		int halted;

		crit_enter();
		halted = adv_is_chip_halted(adv);
		if (halted == 0)
			/* Must halt the chip first */
			adv_host_req_chip_halt(adv);

		/* Update current hardware settings */
		adv_set_sdtr_reg_at_id(adv, tid, sdtr_data);

		/*
		 * If a target can run in sync mode, we don't need
		 * to check it for sync problems.
		 */
		if (offset != 0)
			adv->fix_asyn_xfer &= ~ADV_TID_TO_TARGET_MASK(tid);

		if (halted == 0)
			/* Start the chip again */
			adv_start_chip(adv);

		crit_exit();
		tinfo->current.period = period;
		tinfo->current.offset = offset;

		if (path != NULL) {
			/*
			 * Tell the SCSI layer about the
			 * new transfer parameters.
			 */
			struct ccb_trans_settings *neg;
			struct ccb_trans_settings_spi *spi;

			/* Temporary CCB, freed after the async call. */
			neg = &xpt_alloc_ccb()->cts;
			spi = &neg->xport_specific.spi;

			neg->protocol = PROTO_SCSI;
			neg->protocol_version = SCSI_REV_2;
			neg->transport = XPORT_SPI;
			neg->transport_version = 2;

			spi->sync_offset = offset;
			spi->sync_period = period;
			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			xpt_setup_ccb(&neg->ccb_h, path, /*priority*/1);
			xpt_async(AC_TRANSFER_NEG, path, neg);
			xpt_free_ccb(&neg->ccb_h);
		}
	}

	if ((type & ADV_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
	}

	if ((type & ADV_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
	}
}
1161
1162 u_int8_t
adv_period_offset_to_sdtr(struct adv_softc * adv,u_int * period,u_int * offset,int tid)1163 adv_period_offset_to_sdtr(struct adv_softc *adv, u_int *period,
1164 u_int *offset, int tid)
1165 {
1166 u_int i;
1167 u_int dummy_offset;
1168 u_int dummy_period;
1169
1170 if (offset == NULL) {
1171 dummy_offset = 0;
1172 offset = &dummy_offset;
1173 }
1174
1175 if (period == NULL) {
1176 dummy_period = 0;
1177 period = &dummy_period;
1178 }
1179
1180 *offset = MIN(ADV_SYN_MAX_OFFSET, *offset);
1181 if (*period != 0 && *offset != 0) {
1182 for (i = 0; i < adv->sdtr_period_tbl_size; i++) {
1183 if (*period <= adv->sdtr_period_tbl[i]) {
1184 /*
1185 * When responding to a target that requests
1186 * sync, the requested rate may fall between
1187 * two rates that we can output, but still be
1188 * a rate that we can receive. Because of this,
1189 * we want to respond to the target with
1190 * the same rate that it sent to us even
1191 * if the period we use to send data to it
1192 * is lower. Only lower the response period
1193 * if we must.
1194 */
1195 if (i == 0 /* Our maximum rate */)
1196 *period = adv->sdtr_period_tbl[0];
1197 return ((i << 4) | *offset);
1198 }
1199 }
1200 }
1201
1202 /* Must go async */
1203 *period = 0;
1204 *offset = 0;
1205 if (adv->fix_asyn_xfer & ADV_TID_TO_TARGET_MASK(tid))
1206 return (ASYN_SDTR_DATA_FIX_PCI_REV_AB);
1207 return (0);
1208 }
1209
1210 /* Internal Routines */
1211
1212 static void
adv_read_lram_16_multi(struct adv_softc * adv,u_int16_t s_addr,u_int16_t * buffer,int count)1213 adv_read_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1214 u_int16_t *buffer, int count)
1215 {
1216 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1217 ADV_INSW(adv, ADV_LRAM_DATA, buffer, count);
1218 }
1219
1220 static void
adv_write_lram_16_multi(struct adv_softc * adv,u_int16_t s_addr,u_int16_t * buffer,int count)1221 adv_write_lram_16_multi(struct adv_softc *adv, u_int16_t s_addr,
1222 u_int16_t *buffer, int count)
1223 {
1224 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1225 ADV_OUTSW(adv, ADV_LRAM_DATA, buffer, count);
1226 }
1227
1228 static void
adv_mset_lram_16(struct adv_softc * adv,u_int16_t s_addr,u_int16_t set_value,int count)1229 adv_mset_lram_16(struct adv_softc *adv, u_int16_t s_addr,
1230 u_int16_t set_value, int count)
1231 {
1232 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1233 bus_space_set_multi_2(adv->tag, adv->bsh, ADV_LRAM_DATA,
1234 set_value, count);
1235 }
1236
1237 static u_int32_t
adv_msum_lram_16(struct adv_softc * adv,u_int16_t s_addr,int count)1238 adv_msum_lram_16(struct adv_softc *adv, u_int16_t s_addr, int count)
1239 {
1240 u_int32_t sum;
1241 int i;
1242
1243 sum = 0;
1244 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1245 for (i = 0; i < count; i++)
1246 sum += ADV_INW(adv, ADV_LRAM_DATA);
1247 return (sum);
1248 }
1249
1250 static int
adv_write_and_verify_lram_16(struct adv_softc * adv,u_int16_t addr,u_int16_t value)1251 adv_write_and_verify_lram_16(struct adv_softc *adv, u_int16_t addr,
1252 u_int16_t value)
1253 {
1254 int retval;
1255
1256 retval = 0;
1257 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1258 ADV_OUTW(adv, ADV_LRAM_DATA, value);
1259 DELAY(10000);
1260 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1261 if (value != ADV_INW(adv, ADV_LRAM_DATA))
1262 retval = 1;
1263 return (retval);
1264 }
1265
1266 static u_int32_t
adv_read_lram_32(struct adv_softc * adv,u_int16_t addr)1267 adv_read_lram_32(struct adv_softc *adv, u_int16_t addr)
1268 {
1269 u_int16_t val_low, val_high;
1270
1271 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1272
1273 #if BYTE_ORDER == BIG_ENDIAN
1274 val_high = ADV_INW(adv, ADV_LRAM_DATA);
1275 val_low = ADV_INW(adv, ADV_LRAM_DATA);
1276 #else
1277 val_low = ADV_INW(adv, ADV_LRAM_DATA);
1278 val_high = ADV_INW(adv, ADV_LRAM_DATA);
1279 #endif
1280
1281 return (((u_int32_t)val_high << 16) | (u_int32_t)val_low);
1282 }
1283
1284 static void
adv_write_lram_32(struct adv_softc * adv,u_int16_t addr,u_int32_t value)1285 adv_write_lram_32(struct adv_softc *adv, u_int16_t addr, u_int32_t value)
1286 {
1287 ADV_OUTW(adv, ADV_LRAM_ADDR, addr);
1288
1289 #if BYTE_ORDER == BIG_ENDIAN
1290 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1291 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1292 #else
1293 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)(value & 0xFFFF));
1294 ADV_OUTW(adv, ADV_LRAM_DATA, (u_int16_t)((value >> 16) & 0xFFFF));
1295 #endif
1296 }
1297
1298 static void
adv_write_lram_32_multi(struct adv_softc * adv,u_int16_t s_addr,u_int32_t * buffer,int count)1299 adv_write_lram_32_multi(struct adv_softc *adv, u_int16_t s_addr,
1300 u_int32_t *buffer, int count)
1301 {
1302 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1303 ADV_OUTSW(adv, ADV_LRAM_DATA, (u_int16_t *)buffer, count * 2);
1304 }
1305
1306 static u_int16_t
adv_read_eeprom_16(struct adv_softc * adv,u_int8_t addr)1307 adv_read_eeprom_16(struct adv_softc *adv, u_int8_t addr)
1308 {
1309 u_int16_t read_wval;
1310 u_int8_t cmd_reg;
1311
1312 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1313 DELAY(1000);
1314 cmd_reg = addr | ADV_EEPROM_CMD_READ;
1315 adv_write_eeprom_cmd_reg(adv, cmd_reg);
1316 DELAY(1000);
1317 read_wval = ADV_INW(adv, ADV_EEPROM_DATA);
1318 DELAY(1000);
1319 return (read_wval);
1320 }
1321
1322 static u_int16_t
adv_write_eeprom_16(struct adv_softc * adv,u_int8_t addr,u_int16_t value)1323 adv_write_eeprom_16(struct adv_softc *adv, u_int8_t addr, u_int16_t value)
1324 {
1325 u_int16_t read_value;
1326
1327 read_value = adv_read_eeprom_16(adv, addr);
1328 if (read_value != value) {
1329 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_ENABLE);
1330 DELAY(1000);
1331
1332 ADV_OUTW(adv, ADV_EEPROM_DATA, value);
1333 DELAY(1000);
1334
1335 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE | addr);
1336 DELAY(20 * 1000);
1337
1338 adv_write_eeprom_cmd_reg(adv, ADV_EEPROM_CMD_WRITE_DISABLE);
1339 DELAY(1000);
1340 read_value = adv_read_eeprom_16(adv, addr);
1341 }
1342 return (read_value);
1343 }
1344
1345 static int
adv_write_eeprom_cmd_reg(struct adv_softc * adv,u_int8_t cmd_reg)1346 adv_write_eeprom_cmd_reg(struct adv_softc *adv, u_int8_t cmd_reg)
1347 {
1348 u_int8_t read_back;
1349 int retry;
1350
1351 retry = 0;
1352 while (1) {
1353 ADV_OUTB(adv, ADV_EEPROM_CMD, cmd_reg);
1354 DELAY(1000);
1355 read_back = ADV_INB(adv, ADV_EEPROM_CMD);
1356 if (read_back == cmd_reg) {
1357 return (1);
1358 }
1359 if (retry++ > ADV_EEPROM_MAX_RETRY) {
1360 return (0);
1361 }
1362 }
1363 }
1364
/*
 * Write a full EEPROM configuration image and verify it.
 *
 * Words 0 and 1 of the image are written first, then the board's
 * config region (whose EEPROM address range depends on VL vs. other
 * bus types), with a running checksum accumulated over everything
 * written.  The checksum is stored both in the in-memory image and
 * in the word following the config region.  Finally the whole image,
 * checksum included, is read back and compared.  Returns the number
 * of words that failed write-verify or read-back (0 == success).
 */
static int
adv_set_eeprom_config_once(struct adv_softc *adv,
			   struct adv_eeprom_config *eeprom_config)
{
	int n_error;
	u_int16_t *wbuf;
	u_int16_t sum;
	u_int8_t s_addr;
	u_int8_t cfg_beg;
	u_int8_t cfg_end;

	wbuf = (u_int16_t *)eeprom_config;
	n_error = 0;
	sum = 0;
	/* Words 0 and 1 live at fixed addresses on all boards. */
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* VL boards use a different config window in the EEPROM. */
	if (adv->type & ADV_VL) {
		cfg_beg = ADV_EEPROM_CFG_BEG_VL;
		cfg_end = ADV_EEPROM_MAX_ADDR_VL;
	} else {
		cfg_beg = ADV_EEPROM_CFG_BEG;
		cfg_end = ADV_EEPROM_MAX_ADDR;
	}

	/*
	 * Write the config region.  wbuf continues from word 2 of the
	 * image while s_addr tracks the EEPROM address; the loop stops
	 * one short of cfg_end so the checksum can go there.
	 */
	for (s_addr = cfg_beg; s_addr <= (cfg_end - 1); s_addr++, wbuf++) {
		sum += *wbuf;
		if (*wbuf != adv_write_eeprom_16(adv, s_addr, *wbuf)) {
			n_error++;
		}
	}
	/* Store the checksum in the image and in the EEPROM. */
	*wbuf = sum;
	if (sum != adv_write_eeprom_16(adv, s_addr, sum)) {
		n_error++;
	}
	/* Read everything back (checksum word included) and compare. */
	wbuf = (u_int16_t *)eeprom_config;
	for (s_addr = 0; s_addr < 2; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	for (s_addr = cfg_beg; s_addr <= cfg_end; s_addr++, wbuf++) {
		if (*wbuf != adv_read_eeprom_16(adv, s_addr)) {
			n_error++;
		}
	}
	return (n_error);
}
1416
/*
 * Download mcode_size bytes of RISC microcode into LRAM at s_addr.
 *
 * Returns the 32-bit sum of the words written (the caller compares
 * this against the expected image checksum).  Also computes the
 * 16-bit checksum over the code section only and stores it, together
 * with the code size, in the LRAM variables the microcode expects.
 */
static u_int32_t
adv_load_microcode(struct adv_softc *adv, u_int16_t s_addr,
		   u_int16_t *mcode_buf, u_int16_t mcode_size)
{
	u_int32_t chksum;
	u_int16_t mcode_lram_size;
	u_int16_t mcode_chksum;

	mcode_lram_size = mcode_size >> 1;	/* bytes -> 16-bit words */
	/* XXX Why zero the memory just before you write the whole thing?? */
	adv_mset_lram_16(adv, s_addr, 0, mcode_lram_size);
	adv_write_lram_16_multi(adv, s_addr, mcode_buf, mcode_lram_size);

	/* Whole-image sum, returned for verification by the caller. */
	chksum = adv_msum_lram_16(adv, s_addr, mcode_lram_size);
	/* Separate checksum covering only the code section. */
	mcode_chksum = (u_int16_t)adv_msum_lram_16(adv, ADV_CODE_SEC_BEG,
						   ((mcode_size - s_addr
						     - ADV_CODE_SEC_BEG) >> 1));
	adv_write_lram_16(adv, ADVV_MCODE_CHKSUM_W, mcode_chksum);
	adv_write_lram_16(adv, ADVV_MCODE_SIZE_W, mcode_size);
	return (chksum);
}
1438
/*
 * Rebuild the LRAM queue blocks and the microcode's queue-link
 * variables (e.g. after a bus reset).
 */
static void
adv_reinit_lram(struct adv_softc *adv)
{
	adv_init_lram(adv);
	adv_init_qlink_var(adv);
}
1444
/*
 * Initialize the LRAM queue-block area.
 *
 * The whole area (max_openings + 3 blocks of 64 bytes) is zeroed,
 * then queues ADV_MIN_ACTIVE_QNO..max_openings are chained into a
 * list via their forward/backward link bytes, terminated forward by
 * ADV_QLINK_END.  The blocks past max_openings are linked to
 * themselves; adv_init_qlink_var() points the busy and disconnect
 * queue heads at them.
 */
static void
adv_init_lram(struct adv_softc *adv)
{
	u_int8_t i;
	u_int16_t s_addr;

	/* Zero every queue block (each block is 64 bytes). */
	adv_mset_lram_16(adv, ADV_QADR_BEG, 0,
			 (((adv->max_openings + 2 + 1) * 64) >> 1));

	i = ADV_MIN_ACTIVE_QNO;
	s_addr = ADV_QADR_BEG + ADV_QBLK_SIZE;

	/* First block: backward link wraps to the last queue. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	i++;
	s_addr += ADV_QBLK_SIZE;
	/* Middle blocks link to their immediate neighbors. */
	for (; i < adv->max_openings; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i + 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i - 1);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}

	/* Last queue block: forward link is the end-of-list marker. */
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, ADV_QLINK_END);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, adv->max_openings - 1);
	adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, adv->max_openings);
	i++;
	s_addr += ADV_QBLK_SIZE;

	/* Trailing blocks are self-linked. */
	for (; i <= adv->max_openings + 3; i++, s_addr += ADV_QBLK_SIZE) {
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_FWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_BWD, i);
		adv_write_lram_8(adv, s_addr + ADV_SCSIQ_B_QNO, i);
	}
}
1480
1481 static int
adv_init_microcode_var(struct adv_softc * adv)1482 adv_init_microcode_var(struct adv_softc *adv)
1483 {
1484 int i;
1485
1486 for (i = 0; i <= ADV_MAX_TID; i++) {
1487
1488 /* Start out async all around */
1489 adv_set_syncrate(adv, /*path*/NULL,
1490 i, 0, 0,
1491 ADV_TRANS_GOAL|ADV_TRANS_CUR);
1492 }
1493
1494 adv_init_qlink_var(adv);
1495
1496 adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
1497 adv_write_lram_8(adv, ADVV_HOSTSCSI_ID_B, 0x01 << adv->scsi_id);
1498
1499 adv_write_lram_32(adv, ADVV_OVERRUN_PADDR_D, adv->overrun_physbase);
1500
1501 adv_write_lram_32(adv, ADVV_OVERRUN_BSIZE_D, ADV_OVERRUN_BSIZE);
1502
1503 ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
1504 if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
1505 kprintf("adv%d: Unable to set program counter. Aborting.\n",
1506 adv->unit);
1507 return (1);
1508 }
1509 return (0);
1510 }
1511
1512 static void
adv_init_qlink_var(struct adv_softc * adv)1513 adv_init_qlink_var(struct adv_softc *adv)
1514 {
1515 int i;
1516 u_int16_t lram_addr;
1517
1518 adv_write_lram_8(adv, ADVV_NEXTRDY_B, 1);
1519 adv_write_lram_8(adv, ADVV_DONENEXT_B, adv->max_openings);
1520
1521 adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, 1);
1522 adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, adv->max_openings);
1523
1524 adv_write_lram_8(adv, ADVV_BUSY_QHEAD_B,
1525 (u_int8_t)((int) adv->max_openings + 1));
1526 adv_write_lram_8(adv, ADVV_DISC1_QHEAD_B,
1527 (u_int8_t)((int) adv->max_openings + 2));
1528
1529 adv_write_lram_8(adv, ADVV_TOTAL_READY_Q_B, adv->max_openings);
1530
1531 adv_write_lram_16(adv, ADVV_ASCDVC_ERR_CODE_W, 0);
1532 adv_write_lram_16(adv, ADVV_HALTCODE_W, 0);
1533 adv_write_lram_8(adv, ADVV_STOP_CODE_B, 0);
1534 adv_write_lram_8(adv, ADVV_SCSIBUSY_B, 0);
1535 adv_write_lram_8(adv, ADVV_WTM_FLAG_B, 0);
1536 adv_write_lram_8(adv, ADVV_Q_DONE_IN_PROGRESS_B, 0);
1537
1538 lram_addr = ADV_QADR_BEG;
1539 for (i = 0; i < 32; i++, lram_addr += 2)
1540 adv_write_lram_16(adv, lram_addr, 0);
1541 }
1542
1543 static void
adv_disable_interrupt(struct adv_softc * adv)1544 adv_disable_interrupt(struct adv_softc *adv)
1545 {
1546 u_int16_t cfg;
1547
1548 cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1549 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg & ~ADV_CFG_LSW_HOST_INT_ON);
1550 }
1551
1552 static void
adv_enable_interrupt(struct adv_softc * adv)1553 adv_enable_interrupt(struct adv_softc *adv)
1554 {
1555 u_int16_t cfg;
1556
1557 cfg = ADV_INW(adv, ADV_CONFIG_LSW);
1558 ADV_OUTW(adv, ADV_CONFIG_LSW, cfg | ADV_CFG_LSW_HOST_INT_ON);
1559 }
1560
1561 static void
adv_toggle_irq_act(struct adv_softc * adv)1562 adv_toggle_irq_act(struct adv_softc *adv)
1563 {
1564 ADV_OUTW(adv, ADV_CHIP_STATUS, ADV_CIW_IRQ_ACT);
1565 ADV_OUTW(adv, ADV_CHIP_STATUS, 0);
1566 }
1567
1568 void
adv_start_execution(struct adv_softc * adv)1569 adv_start_execution(struct adv_softc *adv)
1570 {
1571 if (adv_read_lram_8(adv, ADV_STOP_CODE_B) != 0) {
1572 adv_write_lram_8(adv, ADV_STOP_CODE_B, 0);
1573 }
1574 }
1575
1576 int
adv_stop_chip(struct adv_softc * adv)1577 adv_stop_chip(struct adv_softc *adv)
1578 {
1579 u_int8_t cc_val;
1580
1581 cc_val = ADV_INB(adv, ADV_CHIP_CTRL)
1582 & (~(ADV_CC_SINGLE_STEP | ADV_CC_TEST | ADV_CC_DIAG));
1583 ADV_OUTB(adv, ADV_CHIP_CTRL, cc_val | ADV_CC_HALT);
1584 adv_set_chip_ih(adv, ADV_INS_HALT);
1585 adv_set_chip_ih(adv, ADV_INS_RFLAG_WTM);
1586 if ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_HALTED) == 0) {
1587 return (0);
1588 }
1589 return (1);
1590 }
1591
1592 static int
adv_host_req_chip_halt(struct adv_softc * adv)1593 adv_host_req_chip_halt(struct adv_softc *adv)
1594 {
1595 int count;
1596 u_int8_t saved_stop_code;
1597
1598 if (adv_is_chip_halted(adv))
1599 return (1);
1600
1601 count = 0;
1602 saved_stop_code = adv_read_lram_8(adv, ADVV_STOP_CODE_B);
1603 adv_write_lram_8(adv, ADVV_STOP_CODE_B,
1604 ADV_STOP_HOST_REQ_RISC_HALT | ADV_STOP_REQ_RISC_STOP);
1605 while (adv_is_chip_halted(adv) == 0
1606 && count++ < 2000)
1607 ;
1608
1609 adv_write_lram_8(adv, ADVV_STOP_CODE_B, saved_stop_code);
1610 return (count < 2000);
1611 }
1612
1613 static void
adv_set_chip_ih(struct adv_softc * adv,u_int16_t ins_code)1614 adv_set_chip_ih(struct adv_softc *adv, u_int16_t ins_code)
1615 {
1616 adv_set_bank(adv, 1);
1617 ADV_OUTW(adv, ADV_REG_IH, ins_code);
1618 adv_set_bank(adv, 0);
1619 }
1620
#if 0 /* UNUSED */
/*
 * Read the bank-1 SCSI control register.  Compiled out; kept for
 * reference/debugging.
 */
static u_int8_t
adv_get_chip_scsi_ctrl(struct adv_softc *adv)
{
	u_int8_t scsi_ctrl;

	adv_set_bank(adv, 1);
	scsi_ctrl = ADV_INB(adv, ADV_REG_SC);
	adv_set_bank(adv, 0);
	return (scsi_ctrl);
}
#endif
1633
1634 /*
1635 * XXX Looks like more padding issues in this routine as well.
1636 * There has to be a way to turn this into an insw.
1637 */
1638 static void
adv_get_q_info(struct adv_softc * adv,u_int16_t s_addr,u_int16_t * inbuf,int words)1639 adv_get_q_info(struct adv_softc *adv, u_int16_t s_addr,
1640 u_int16_t *inbuf, int words)
1641 {
1642 int i;
1643
1644 ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
1645 for (i = 0; i < words; i++, inbuf++) {
1646 if (i == 5) {
1647 continue;
1648 }
1649 *inbuf = ADV_INW(adv, ADV_LRAM_DATA);
1650 }
1651 }
1652
1653 static u_int
adv_get_num_free_queues(struct adv_softc * adv,u_int8_t n_qs)1654 adv_get_num_free_queues(struct adv_softc *adv, u_int8_t n_qs)
1655 {
1656 u_int cur_used_qs;
1657 u_int cur_free_qs;
1658
1659 cur_used_qs = adv->cur_active + ADV_MIN_FREE_Q;
1660
1661 if ((cur_used_qs + n_qs) <= adv->max_openings) {
1662 cur_free_qs = adv->max_openings - cur_used_qs;
1663 return (cur_free_qs);
1664 }
1665 adv->openings_needed = n_qs;
1666 return (0);
1667 }
1668
1669 static u_int8_t
adv_alloc_free_queues(struct adv_softc * adv,u_int8_t free_q_head,u_int8_t n_free_q)1670 adv_alloc_free_queues(struct adv_softc *adv, u_int8_t free_q_head,
1671 u_int8_t n_free_q)
1672 {
1673 int i;
1674
1675 for (i = 0; i < n_free_q; i++) {
1676 free_q_head = adv_alloc_free_queue(adv, free_q_head);
1677 if (free_q_head == ADV_QLINK_END)
1678 break;
1679 }
1680 return (free_q_head);
1681 }
1682
1683 static u_int8_t
adv_alloc_free_queue(struct adv_softc * adv,u_int8_t free_q_head)1684 adv_alloc_free_queue(struct adv_softc *adv, u_int8_t free_q_head)
1685 {
1686 u_int16_t q_addr;
1687 u_int8_t next_qp;
1688 u_int8_t q_status;
1689
1690 next_qp = ADV_QLINK_END;
1691 q_addr = ADV_QNO_TO_QADDR(free_q_head);
1692 q_status = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS);
1693
1694 if ((q_status & QS_READY) == 0)
1695 next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
1696
1697 return (next_qp);
1698 }
1699
/*
 * Reserve n_q_required queue blocks from the free list and hand the
 * request to the microcode.  Returns 0 on success, 1 if not enough
 * free queues were available (the free list head is left untouched
 * in that case).
 */
static int
adv_send_scsi_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int8_t n_q_required)
{
	u_int8_t free_q_head;
	u_int8_t next_qp;
	int retval;

	retval = 1;
	free_q_head = adv_read_lram_16(adv, ADVV_FREE_Q_HEAD_W) & 0xFF;
	if ((next_qp = adv_alloc_free_queues(adv, free_q_head, n_q_required))
	    != ADV_QLINK_END) {
		scsiq->q1.q_no = free_q_head;

		/*
		 * Now that we know our Q number, point our sense
		 * buffer pointer to a bus dma mapped area where
		 * we can dma the data to.
		 */
		scsiq->q1.sense_addr = adv->sense_physbase
		    + ((free_q_head - 1) * sizeof(struct scsi_sense_data));
		adv_put_ready_sg_list_queue(adv, scsiq, free_q_head);
		/* Advance the free list past the blocks we consumed. */
		adv_write_lram_16(adv, ADVV_FREE_Q_HEAD_W, next_qp);
		adv->cur_active += n_q_required;
		retval = 0;
	}
	return (retval);
}
1728
1729
/*
 * Stage a request's scatter/gather list into chained queue blocks.
 *
 * S/G element 0 travels with the primary request block; the
 * remaining entries are split across sg_head->queue_cnt additional
 * blocks (at most ADV_SG_LIST_PER_Q entries each), located by
 * following each block's forward link.  The primary request itself
 * is then made ready via adv_put_ready_queue().
 */
static void
adv_put_ready_sg_list_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
			    u_int q_no)
{
	u_int8_t sg_list_dwords;
	u_int8_t sg_index, i;
	u_int8_t sg_entry_cnt;
	u_int8_t next_qp;
	u_int16_t q_addr;
	struct adv_sg_head *sg_head;
	struct adv_sg_list_q scsi_sg_q;

	sg_head = scsiq->sg_head;

	if (sg_head) {
		/* Element 0 is carried by the request block itself. */
		sg_entry_cnt = sg_head->entry_cnt - 1;
#ifdef DIAGNOSTIC
		if (sg_entry_cnt == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but only one element");
		if ((scsiq->q1.cntl & QC_SG_HEAD) == 0)
			panic("adv_put_ready_sg_list_queue: ScsiQ with "
			      "a SG list but QC_SG_HEAD not set");
#endif
		q_addr = ADV_QNO_TO_QADDR(q_no);
		sg_index = 1;
		scsiq->q1.sg_queue_cnt = sg_head->queue_cnt;
		scsi_sg_q.sg_head_qp = q_no;
		scsi_sg_q.cntl = QCSG_SG_XFER_LIST;
		for (i = 0; i < sg_head->queue_cnt; i++) {
			u_int8_t segs_this_q;

			if (sg_entry_cnt > ADV_SG_LIST_PER_Q)
				segs_this_q = ADV_SG_LIST_PER_Q;
			else {
				/* This will be the last segment then */
				segs_this_q = sg_entry_cnt;
				scsi_sg_q.cntl |= QCSG_SG_XFER_END;
			}
			scsi_sg_q.seq_no = i + 1;
			/* Each S/G entry occupies two 32-bit dwords. */
			sg_list_dwords = segs_this_q << 1;
			if (i == 0) {
				scsi_sg_q.sg_list_cnt = segs_this_q;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q;
			} else {
				scsi_sg_q.sg_list_cnt = segs_this_q - 1;
				scsi_sg_q.sg_cur_list_cnt = segs_this_q - 1;
			}
			/* Hop to the next block via the forward link. */
			next_qp = adv_read_lram_8(adv, q_addr + ADV_SCSIQ_B_FWD);
			scsi_sg_q.q_no = next_qp;
			q_addr = ADV_QNO_TO_QADDR(next_qp);

			adv_write_lram_16_multi(adv,
						q_addr + ADV_SCSIQ_SGHD_CPY_BEG,
						(u_int16_t *)&scsi_sg_q,
						sizeof(scsi_sg_q) >> 1);
			adv_write_lram_32_multi(adv, q_addr + ADV_SGQ_LIST_BEG,
						(u_int32_t *)&sg_head->sg_list[sg_index],
						sg_list_dwords);
			sg_entry_cnt -= segs_this_q;
			sg_index += ADV_SG_LIST_PER_Q;
		}
	}
	adv_put_ready_queue(adv, scsiq, q_no);
}
1795
/*
 * Finish building a request's primary queue block and hand it to the
 * microcode.  If the current sync settings differ from our goal for
 * this target, an SDTR message-out is staged first and QC_MSG_OUT
 * set so the negotiation rides along with this command.  The final
 * QS_READY status write is what publishes the queue to the RISC, so
 * it must remain last.
 */
static void
adv_put_ready_queue(struct adv_softc *adv, struct adv_scsi_q *scsiq,
		    u_int q_no)
{
	struct adv_target_transinfo* tinfo;
	u_int q_addr;
	u_int tid_no;

	tid_no = ADV_TIX_TO_TID(scsiq->q2.target_ix);
	tinfo = &adv->tinfo[tid_no];
	if ((tinfo->current.period != tinfo->goal.period)
	 || (tinfo->current.offset != tinfo->goal.offset)) {

		/* Stage an SDTR message to renegotiate the rate. */
		adv_msgout_sdtr(adv, tinfo->goal.period, tinfo->goal.offset);
		scsiq->q1.cntl |= QC_MSG_OUT;
	}
	q_addr = ADV_QNO_TO_QADDR(q_no);

	scsiq->q1.status = QS_FREE;

	/* Copy the CDB into the queue block. */
	adv_write_lram_16_multi(adv, q_addr + ADV_SCSIQ_CDB_BEG,
				(u_int16_t *)scsiq->cdbptr,
				scsiq->q2.cdb_len >> 1);

#if BYTE_ORDER == BIG_ENDIAN
	adv_adj_scsiq_endian(scsiq);
#endif

	/* Copy q1/q2 starting at q1.cntl (one word shy of both). */
	adv_put_scsiq(adv, q_addr + ADV_SCSIQ_CPY_BEG,
		      (u_int16_t *) &scsiq->q1.cntl,
		      ((sizeof(scsiq->q1) + sizeof(scsiq->q2)) / 2) - 1);

#if defined(CC_WRITE_IO_COUNT) && CC_WRITE_IO_COUNT
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_W_REQ_COUNT,
			  adv->req_count);
#endif

#if defined(CC_CLEAR_DMA_REMAIN) && CC_CLEAR_DMA_REMAIN

	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_ADDR, 0);
	adv_write_lram_32(adv, q_addr + ADV_SCSIQ_DW_REMAIN_XFER_CNT, 0);
#endif

	/* Publish: q_no in the high byte, QS_READY in the status byte. */
	adv_write_lram_16(adv, q_addr + ADV_SCSIQ_B_STATUS,
			  (scsiq->q1.q_no << 8) | QS_READY);
}
1842
/*
 * Copy "words" 16-bit words of the request structure into LRAM at
 * s_addr.  Source word indices 2 and 10 are skipped entirely (no
 * data-port write occurs, so the LRAM address does not advance for
 * them) -- presumably these are alignment padding in adv_scsiq_1/2;
 * see the XXX below.
 */
static void
adv_put_scsiq(struct adv_softc *adv, u_int16_t s_addr,
	      u_int16_t *buffer, int words)
{
	int i;

	/*
	 * XXX This routine makes *gross* assumptions
	 * about padding in the data structures.
	 * Either the data structures should have explicit
	 * padding members added, or they should have padding
	 * turned off via compiler attributes depending on
	 * which yields better overall performance.  My hunch
	 * would be that turning off padding would be the
	 * faster approach as an outsw is much faster than
	 * this crude loop and accessing un-aligned data
	 * members isn't *that* expensive.  The other choice
	 * would be to modify the ASC script so that the
	 * the adv_scsiq_1 structure can be re-arranged so
	 * padding isn't required.
	 */
	ADV_OUTW(adv, ADV_LRAM_ADDR, s_addr);
	for (i = 0; i < words; i++, buffer++) {
		if (i == 2 || i == 10) {
			continue;
		}
		ADV_OUTW(adv, ADV_LRAM_DATA, *buffer);
	}
}
1872
/*
 * Process an extended message received from a target (chip halted).
 *
 * SDTR: range-check the proposal, record the resulting rate via
 * adv_set_syncrate(), and either accept it silently (when it is a
 * valid response to a negotiation we initiated) or stage a
 * counter-proposal message-out.
 * WDTR: respond with a transfer width of 0 (narrow only).
 * Anything else: respond with MESSAGE REJECT.
 * The possibly-updated q_cntl is written back to the halted queue's
 * control byte in all cases.
 */
static void
adv_handle_extmsg_in(struct adv_softc *adv, u_int16_t halt_q_addr,
		     u_int8_t q_cntl, target_bit_vector target_mask,
		     int tid_no)
{
	struct ext_msg ext_msg;

	adv_read_lram_16_multi(adv, ADVV_MSGIN_BEG, (u_int16_t *) &ext_msg,
			       sizeof(ext_msg) >> 1);
	if ((ext_msg.msg_type == MSG_EXTENDED)
	 && (ext_msg.msg_req == MSG_EXT_SDTR)
	 && (ext_msg.msg_len == MSG_EXT_SDTR_LEN)) {
		union ccb *ccb;
		struct adv_target_transinfo* tinfo;
		u_int32_t cinfo_index;
		u_int period;
		u_int offset;
		int sdtr_accept;
		u_int8_t orig_offset;

		cinfo_index =
		    adv_read_lram_32(adv, halt_q_addr + ADV_SCSIQ_D_CINFO_IDX);
		ccb = adv->ccb_infos[cinfo_index].ccb;
		tinfo = &adv->tinfo[tid_no];
		sdtr_accept = TRUE;

		orig_offset = ext_msg.req_ack_offset;
		/* Never accept a period faster than our goal. */
		if (ext_msg.xfer_period < tinfo->goal.period) {
			sdtr_accept = FALSE;
			ext_msg.xfer_period = tinfo->goal.period;
		}

		/* Perform range checking */
		period = ext_msg.xfer_period;
		offset = ext_msg.req_ack_offset;
		adv_period_offset_to_sdtr(adv, &period, &offset, tid_no);
		ext_msg.xfer_period = period;
		ext_msg.req_ack_offset = offset;

		/* Record our current sync settings */
		adv_set_syncrate(adv, ccb->ccb_h.path,
				 tid_no, ext_msg.xfer_period,
				 ext_msg.req_ack_offset,
				 ADV_TRANS_GOAL|ADV_TRANS_ACTIVE);

		/* Offset too high or large period forced async */
		if (orig_offset != ext_msg.req_ack_offset)
			sdtr_accept = FALSE;

		if (sdtr_accept && (q_cntl & QC_MSG_OUT)) {
			/* Valid response to our requested negotiation */
			q_cntl &= ~QC_MSG_OUT;
		} else {
			/* Must Respond */
			q_cntl |= QC_MSG_OUT;
			adv_msgout_sdtr(adv, ext_msg.xfer_period,
					ext_msg.req_ack_offset);
		}

	} else if (ext_msg.msg_type == MSG_EXTENDED
		&& ext_msg.msg_req == MSG_EXT_WDTR
		&& ext_msg.msg_len == MSG_EXT_WDTR_LEN) {

		/* We only do narrow transfers; answer with width 0. */
		ext_msg.wdtr_width = 0;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	} else {

		/* Unknown extended message: reject it. */
		ext_msg.msg_type = MSG_MESSAGE_REJECT;
		adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
					(u_int16_t *)&ext_msg,
					sizeof(ext_msg) >> 1);
		q_cntl |= QC_MSG_OUT;
	}
	adv_write_lram_8(adv, halt_q_addr + ADV_SCSIQ_B_CNTL, q_cntl);
}
1951
1952 static void
adv_msgout_sdtr(struct adv_softc * adv,u_int8_t sdtr_period,u_int8_t sdtr_offset)1953 adv_msgout_sdtr(struct adv_softc *adv, u_int8_t sdtr_period,
1954 u_int8_t sdtr_offset)
1955 {
1956 struct ext_msg sdtr_buf;
1957
1958 sdtr_buf.msg_type = MSG_EXTENDED;
1959 sdtr_buf.msg_len = MSG_EXT_SDTR_LEN;
1960 sdtr_buf.msg_req = MSG_EXT_SDTR;
1961 sdtr_buf.xfer_period = sdtr_period;
1962 sdtr_offset &= ADV_SYN_MAX_OFFSET;
1963 sdtr_buf.req_ack_offset = sdtr_offset;
1964 adv_write_lram_16_multi(adv, ADVV_MSGOUT_BEG,
1965 (u_int16_t *) &sdtr_buf,
1966 sizeof(sdtr_buf) / 2);
1967 }
1968
/*
 * Abort in-controller requests addressed to target/lun (and, when
 * "ccb" is non-NULL, only the one matching that CCB).
 *
 * Every queue block is scanned; matching active requests are marked
 * QS_ABORTED, the supplied CAM "status" is recorded in the CCB if no
 * error had been set yet, and the ccb_info is flagged so completion
 * processing knows an abort was queued.  When queued_only is set,
 * requests already disconnected, busy, or done are left alone.
 * Returns the number of requests marked.
 */
int
adv_abort_ccb(struct adv_softc *adv, int target, int lun, union ccb *ccb,
	      u_int32_t status, int queued_only)
{
	u_int16_t q_addr;
	u_int8_t q_no;
	struct adv_q_done_info scsiq_buf;
	struct adv_q_done_info *scsiq;
	u_int8_t target_ix;
	int count;

	scsiq = &scsiq_buf;
	target_ix = ADV_TIDLUN_TO_IX(target, lun);
	count = 0;
	for (q_no = ADV_MIN_ACTIVE_QNO; q_no <= adv->max_openings; q_no++) {
		struct adv_ccb_info *ccb_info;
		q_addr = ADV_QNO_TO_QADDR(q_no);

		adv_copy_lram_doneq(adv, q_addr, scsiq, adv->max_dma_count);
		ccb_info = &adv->ccb_infos[scsiq->d2.ccb_index];
		/* Only touch ready, not-yet-aborted request heads. */
		if (((scsiq->q_status & QS_READY) != 0)
		 && ((scsiq->q_status & QS_ABORTED) == 0)
		 && ((scsiq->cntl & QCSG_SG_XFER_LIST) == 0)
		 && (scsiq->d2.target_ix == target_ix)
		 && (queued_only == 0
		  || !(scsiq->q_status & (QS_DISC1|QS_DISC2|QS_BUSY|QS_DONE)))
		 && (ccb == NULL || (ccb == ccb_info->ccb))) {
			union ccb *aborted_ccb;
			struct adv_ccb_info *cinfo;

			scsiq->q_status |= QS_ABORTED;
			adv_write_lram_8(adv, q_addr + ADV_SCSIQ_B_STATUS,
					 scsiq->q_status);
			aborted_ccb = ccb_info->ccb;
			/* Don't clobber earlier error codes */
			if ((aborted_ccb->ccb_h.status & CAM_STATUS_MASK)
			 == CAM_REQ_INPROG)
				aborted_ccb->ccb_h.status |= status;
			cinfo = (struct adv_ccb_info *)
			    aborted_ccb->ccb_h.ccb_cinfo_ptr;
			cinfo->state |= ACCB_ABORT_QUEUED;
			count++;
		}
	}
	return (count);
}
2015
/*
 * Reset the controller and, optionally, the SCSI bus, then fail back
 * all pending work.
 *
 * Waits up to ~200ms for any reset already in progress to clear,
 * resets the chip, rebuilds LRAM state, reverts every target to
 * async, notifies CAM of the bus reset, completes every pending CCB
 * with CAM_SCSI_BUS_RESET, and restarts the chip.  Returns the
 * number of CCBs completed.
 */
int
adv_reset_bus(struct adv_softc *adv, int initiate_bus_reset)
{
	int count;
	int i;
	union ccb *ccb;

	/* Give any in-progress reset up to 200 * 1ms to finish. */
	i = 200;
	while ((ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_SCSI_RESET_ACTIVE) != 0
	    && i--)
		DELAY(1000);
	adv_reset_chip(adv, initiate_bus_reset);
	adv_reinit_lram(adv);
	/* All negotiated rates are invalid after a reset. */
	for (i = 0; i <= ADV_MAX_TID; i++)
		adv_set_syncrate(adv, NULL, i, /*period*/0,
				 /*offset*/0, ADV_TRANS_CUR);
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);

	/* Tell the XPT layer that a bus reset occured */
	if (adv->path != NULL)
		xpt_async(AC_BUS_RESET, adv->path, NULL);

	/* Complete every pending request as reset-terminated. */
	count = 0;
	while ((ccb = (union ccb *)LIST_FIRST(&adv->pending_ccbs)) != NULL) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
		adv_done(adv, ccb, QD_ABORTED_BY_HOST, 0, 0, 0);
		count++;
	}

	adv_start_chip(adv);
	return (count);
}
2049
/*
 * Write an SDTR data byte into the per-target sync register.
 *
 * The bank-0 sync offset register applies to whichever target is
 * selected in the bank-1 host SCSI ID register, so the current ID is
 * saved, the target selected, the value written, and the original ID
 * restored.  NOTE(review): "tid" is written to the ID register but
 * the read-back is compared against (1 << tid), implying the
 * register latches a bit mask -- confirm against chip documentation.
 */
static void
adv_set_sdtr_reg_at_id(struct adv_softc *adv, int tid, u_int8_t sdtr_data)
{
	int orig_id;

	adv_set_bank(adv, 1);
	orig_id = ffs(ADV_INB(adv, ADV_HOST_SCSIID)) - 1;
	ADV_OUTB(adv, ADV_HOST_SCSIID, tid);
	if (ADV_INB(adv, ADV_HOST_SCSIID) == (0x01 << tid)) {
		adv_set_bank(adv, 0);
		ADV_OUTB(adv, ADV_SYN_OFFSET, sdtr_data);
	}
	adv_set_bank(adv, 1);
	ADV_OUTB(adv, ADV_HOST_SCSIID, orig_id);
	adv_set_bank(adv, 0);
}
2066