/*	$OpenBSD: qlw.c,v 1.30 2015/09/17 17:59:15 miod Exp $ */

/*
 * Copyright (c) 2011 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
 * Copyright (c) 2014 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>
#include <sys/queue.h>

#include <machine/atomic.h>
#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/qlwreg.h>
#include <dev/ic/qlwvar.h>

#ifndef SMALL_KERNEL
#define QLW_DEBUG
#endif

#ifdef QLW_DEBUG
#define DPRINTF(m, f...) do { if ((qlwdebug & (m)) == (m)) printf(f); } \
    while (0)
#define QLW_D_MBOX		0x01
#define QLW_D_INTR		0x02
#define QLW_D_PORT		0x04
#define QLW_D_IO		0x08
#define QLW_D_IOCB		0x10
int qlwdebug = QLW_D_PORT | QLW_D_INTR | QLW_D_MBOX;
#else
#define DPRINTF(m, f...)
#endif

struct cfdriver qlw_cd = {
	NULL,
	"qlw",
	DV_DULL
};

void		qlw_scsi_cmd(struct scsi_xfer *);
int		qlw_scsi_probe(struct scsi_link *);

u_int16_t	qlw_read(struct qlw_softc *, bus_size_t);
void		qlw_write(struct qlw_softc *, bus_size_t, u_int16_t);
void		qlw_host_cmd(struct qlw_softc *sc, u_int16_t);

int		qlw_mbox(struct qlw_softc *, int, int);
void		qlw_mbox_putaddr(u_int16_t *, struct qlw_dmamem *);
u_int16_t	qlw_read_mbox(struct qlw_softc *, int);
void		qlw_write_mbox(struct qlw_softc *, int, u_int16_t);

int		qlw_config_bus(struct qlw_softc *, int);
int		qlw_config_target(struct qlw_softc *, int, int);
void		qlw_update_bus(struct qlw_softc *, int);
void		qlw_update_target(struct qlw_softc *, int, int);
void		qlw_update_task(void *);

void		qlw_handle_intr(struct qlw_softc *, u_int16_t, u_int16_t);
void		qlw_set_ints(struct qlw_softc *, int);
int		qlw_read_isr(struct qlw_softc *, u_int16_t *, u_int16_t *);
void		qlw_clear_isr(struct qlw_softc *, u_int16_t);

void		qlw_update(struct qlw_softc *, int);
void		qlw_put_marker(struct qlw_softc *, int, void *);
void		qlw_put_cmd(struct qlw_softc *, void *, struct scsi_xfer *,
		    struct qlw_ccb *);
void		qlw_put_cont(struct qlw_softc *, void *, struct scsi_xfer *,
		    struct qlw_ccb *, int);
struct qlw_ccb *qlw_handle_resp(struct qlw_softc *, u_int16_t);
void		qlw_get_header(struct qlw_softc *, struct qlw_iocb_hdr *,
		    int *, int *);
void		qlw_put_header(struct qlw_softc *, struct qlw_iocb_hdr *,
		    int, int);
void		qlw_put_data_seg(struct qlw_softc *, struct qlw_iocb_seg *,
		    bus_dmamap_t, int);

int		qlw_softreset(struct qlw_softc *);
void		qlw_dma_burst_enable(struct qlw_softc *);

int		qlw_async(struct qlw_softc *, u_int16_t);

int		qlw_load_firmware_words(struct qlw_softc *, const u_int16_t *,
		    u_int16_t);
int		qlw_load_firmware(struct qlw_softc *);
int		qlw_read_nvram(struct qlw_softc *);
void		qlw_parse_nvram_1040(struct qlw_softc *, int);
void		qlw_parse_nvram_1080(struct qlw_softc *, int);
void		qlw_init_defaults(struct qlw_softc *, int);

struct qlw_dmamem *qlw_dmamem_alloc(struct qlw_softc *, size_t);
void		qlw_dmamem_free(struct qlw_softc *, struct qlw_dmamem *);

int		qlw_alloc_ccbs(struct qlw_softc *);
void		qlw_free_ccbs(struct qlw_softc *);
void		*qlw_get_ccb(void *);
void		qlw_put_ccb(void *, void *);

#ifdef QLW_DEBUG
void		qlw_dump_iocb(struct qlw_softc *, void *, int);
void		qlw_dump_iocb_segs(struct qlw_softc *, void *, int);
#else
#define qlw_dump_iocb(sc, h, fl)	do { /* nothing */ } while (0)
#define qlw_dump_iocb_segs(sc, h, fl)	do { /* nothing */ } while (0)
#endif

static inline int
qlw_xs_bus(struct qlw_softc *sc, struct scsi_xfer *xs)
{
	return ((xs->sc_link->scsibus == sc->sc_link[0].scsibus) ? 0 : 1);
}

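/*
 * The ISP1000 sits on SBus and expects queue entries in big-endian
 * byte order, while the PCI parts expect little-endian.  These
 * helpers pick the right conversion; see the longer comment above
 * qlw_get_header() for how the IOCB header fields are affected.
 */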
static inline u_int16_t
qlw_swap16(struct qlw_softc *sc, u_int16_t value)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000)
		return htobe16(value);
	else
		return htole16(value);
}

static inline u_int32_t
qlw_swap32(struct qlw_softc *sc, u_int32_t value)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000)
		return htobe32(value);
	else
		return htole32(value);
}

static inline u_int16_t
qlw_queue_read(struct qlw_softc *sc, bus_size_t offset)
{
	return qlw_read(sc, sc->sc_mbox_base + offset);
}

static inline void
qlw_queue_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	qlw_write(sc, sc->sc_mbox_base + offset, value);
}

struct scsi_adapter qlw_switch = {
	qlw_scsi_cmd,
	scsi_minphys,
	qlw_scsi_probe,
	NULL,	/* scsi_free */
	NULL	/* ioctl */
};

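/*
 * Attach sequence: release/reset the RISC processor, load defaults
 * and NVRAM settings, optionally download firmware, then configure
 * the firmware through a series of mailbox commands before setting
 * up the request/response rings and attaching a scsibus per bus.
 */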
int
qlw_attach(struct qlw_softc *sc)
{
	struct scsibus_attach_args saa;
	void (*parse_nvram)(struct qlw_softc *, int);
	int reset_delay;
	int bus;

	task_set(&sc->sc_update_task, qlw_update_task, sc);

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		sc->sc_nvram_size = 0;
		break;
	case QLW_GEN_ISP1040:
		sc->sc_nvram_size = 128;
		sc->sc_nvram_minversion = 2;
		parse_nvram = qlw_parse_nvram_1040;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_nvram_size = 256;
		sc->sc_nvram_minversion = 1;
		parse_nvram = qlw_parse_nvram_1080;
		break;

	default:
		printf("unknown isp type\n");
		return (ENXIO);
	}

	/*
	 * After reset, mbox registers 1-3 should contain the string
	 * "ISP   " (0x4953 "IS", 0x5020 "P ", 0x2020 "  ").
	 */
	if (qlw_read_mbox(sc, 1) != 0x4953 ||
	    qlw_read_mbox(sc, 2) != 0x5020 ||
	    qlw_read_mbox(sc, 3) != 0x2020) {
		/* try releasing the risc processor */
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}

	qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
	if (qlw_softreset(sc) != 0) {
		printf("softreset failed\n");
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_init_defaults(sc, bus);

	if (qlw_read_nvram(sc) == 0) {
		for (bus = 0; bus < sc->sc_numbusses; bus++)
			parse_nvram(sc, bus);
	}

#ifndef ISP_NOFIRMWARE
	if (sc->sc_firmware && qlw_load_firmware(sc)) {
		printf("firmware load failed\n");
		return (ENXIO);
	}
#endif

	/* execute firmware */
	sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE;
	sc->sc_mbox[1] = QLW_CODE_ORG;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	delay(250000);		/* from isp(4) */

	sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE;
	if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN,
	    QLW_MBOX_ABOUT_FIRMWARE_OUT)) {
		printf("ISP not talking after firmware exec: %x\n",
		    sc->sc_mbox[0]);
		return (ENXIO);
	}
	/* The ISP1000 firmware we use doesn't return a version number. */
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) {
		sc->sc_mbox[1] = 1;
		sc->sc_mbox[2] = 37;
		sc->sc_mbox[3] = 0;
		sc->sc_mbox[6] = 0;
	}
	printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc),
	    sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);

	/* work out how many ccbs to allocate */
	sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS;
	if (qlw_mbox(sc, 0x0001, 0x0007)) {
		printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}
	sc->sc_maxrequests = sc->sc_mbox[2];
	if (sc->sc_maxrequests > 512)
		sc->sc_maxrequests = 512;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests)
			sc->sc_max_queue_depth[bus] = sc->sc_maxrequests;
	}

	/*
	 * On some 1020/1040 variants the response queue is limited to
	 * 256 entries.  We don't really need all that many anyway.
	 */
	sc->sc_maxresponses = sc->sc_maxrequests / 2;
	if (sc->sc_maxresponses < 64)
		sc->sc_maxresponses = 64;

	/* We may need up to 3 request entries per SCSI command. */
	sc->sc_maxccbs = sc->sc_maxrequests / 3;

	/* Allegedly the FIFO is busted on the 1040A. */
	if (sc->sc_isp_type == QLW_ISP1040A)
		sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK;
	qlw_write(sc, QLW_CFG1, sc->sc_isp_config);

	if (sc->sc_isp_config & QLW_BURST_ENABLE)
		qlw_dma_burst_enable(sc);

	sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES;
	sc->sc_mbox[1] = 0;
	if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY)
		sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY;
	if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE;
	sc->sc_mbox[1] = sc->sc_clock;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT;
	sc->sc_mbox[1] = sc->sc_retry_count[0];
	sc->sc_mbox[2] = sc->sc_retry_delay[0];
	sc->sc_mbox[6] = sc->sc_retry_count[1];
	sc->sc_mbox[7] = sc->sc_retry_delay[1];
	if (qlw_mbox(sc, 0x00c7, 0x0001)) {
		printf("couldn't set retry count: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP;
	sc->sc_mbox[1] = sc->sc_async_data_setup[0];
	sc->sc_mbox[2] = sc->sc_async_data_setup[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION;
	sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5;
	sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4;
	sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5;
	sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4;
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set active negation: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT;
	sc->sc_mbox[1] = sc->sc_tag_age_limit[0];
	sc->sc_mbox[2] = sc->sc_tag_age_limit[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT;
	sc->sc_mbox[1] = sc->sc_selection_timeout[0];
	sc->sc_mbox[2] = sc->sc_selection_timeout[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (qlw_config_bus(sc, bus))
			return (ENXIO);
	}

	if (qlw_alloc_ccbs(sc)) {
		/* error already printed */
		return (ENOMEM);
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxrequests;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests);
	sc->sc_mbox[4] = 0;
	if (qlw_mbox(sc, 0x00df, 0x0001)) {
		printf("couldn't init request queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxresponses;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses);
	sc->sc_mbox[5] = 0;
	if (qlw_mbox(sc, 0x00ef, 0x0001)) {
		printf("couldn't init response queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	reset_delay = 0;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_mbox[0] = QLW_MBOX_BUS_RESET;
		sc->sc_mbox[1] = sc->sc_reset_delay[bus];
		sc->sc_mbox[2] = bus;
		if (qlw_mbox(sc, 0x0007, 0x0001)) {
			printf("couldn't reset bus: %x\n", sc->sc_mbox[0]);
			goto free_ccbs;
		}
		sc->sc_marker_required[bus] = 1;
		sc->sc_update_required[bus] = 0xffff;

		if (sc->sc_reset_delay[bus] > reset_delay)
			reset_delay = sc->sc_reset_delay[bus];
	}

	/* wait for the busses to settle */
	delay(reset_delay * 1000000);

	/* we should be good to go now, attach scsibus */
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_link[bus].adapter = &qlw_switch;
		sc->sc_link[bus].adapter_softc = sc;
		sc->sc_link[bus].adapter_target = sc->sc_initiator[bus];
		sc->sc_link[bus].adapter_buswidth = QLW_MAX_TARGETS;
		sc->sc_link[bus].openings = sc->sc_max_queue_depth[bus];
		sc->sc_link[bus].pool = &sc->sc_iopool;

		memset(&saa, 0, sizeof(saa));
		saa.saa_sc_link = &sc->sc_link[bus];

		/* config_found() returns the scsibus attached to us */
		sc->sc_scsibus[bus] = (struct scsibus_softc *)
		    config_found(&sc->sc_dev, &saa, scsiprint);

		qlw_update_bus(sc, bus);
	}

	sc->sc_running = 1;
	return (0);

free_ccbs:
	qlw_free_ccbs(sc);
	return (ENXIO);
}

int
qlw_detach(struct qlw_softc *sc, int flags)
{
	return (0);
}

int
qlw_config_bus(struct qlw_softc *sc, int bus)
{
	int target, err;

	sc->sc_mbox[0] = QLW_MBOX_SET_INITIATOR_ID;
	sc->sc_mbox[1] = (bus << 7) | sc->sc_initiator[bus];

	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set initiator id: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		err = qlw_config_target(sc, bus, target);
		if (err)
			return (err);
	}

	return (0);
}

int
qlw_config_target(struct qlw_softc *sc, int bus, int target)
{
	int lun;

	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] &= QLW_TARGET_SAFE;
	sc->sc_mbox[2] |= QLW_TARGET_NARROW | QLW_TARGET_ASYNC;
	sc->sc_mbox[3] = 0;

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return (ENXIO);
		}
	}

	return (0);
}

void
qlw_update_bus(struct qlw_softc *sc, int bus)
{
	int target;

	for (target = 0; target < QLW_MAX_TARGETS; target++)
		qlw_update_target(sc, bus, target);
}

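/*
 * Renegotiation is deferred: the interrupt path sets bits in
 * sc_update_required and schedules sc_update_task, and the task
 * pushes the new target parameters to the firmware via mailbox
 * commands from process context.
 */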
void
qlw_update_target(struct qlw_softc *sc, int bus, int target)
{
	struct scsi_link *link;
	int lun;

	if ((sc->sc_update_required[bus] & (1 << target)) == 0)
		return;
	atomic_clearbits_int(&sc->sc_update_required[bus], (1 << target));

	link = scsi_get_link(sc->sc_scsibus[bus], target, 0);
	if (link == NULL)
		return;

	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] |= QLW_TARGET_RENEG;
	sc->sc_mbox[2] &= ~QLW_TARGET_QFRZ;
	if (link->quirks & SDEV_NOSYNC)
		sc->sc_mbox[2] &= ~QLW_TARGET_SYNC;
	if (link->quirks & SDEV_NOWIDE)
		sc->sc_mbox[2] &= ~QLW_TARGET_WIDE;
	if (link->quirks & SDEV_NOTAGS)
		sc->sc_mbox[2] &= ~QLW_TARGET_TAGS;

	sc->sc_mbox[3] = sc->sc_target[bus][target].qt_sync_period;
	sc->sc_mbox[3] |= (sc->sc_target[bus][target].qt_sync_offset << 8);

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return;
	}

	/* XXX do PPR detection */

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return;
		}
	}
}

void
qlw_update_task(void *xsc)
{
	struct qlw_softc *sc = xsc;
	int bus;

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_update_bus(sc, bus);
}

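/*
 * Handle one entry from the response queue.  Returns the ccb for a
 * completed command so the caller can pass it to scsi_done(), or
 * NULL if the entry didn't complete anything.
 */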
struct qlw_ccb *
qlw_handle_resp(struct qlw_softc *sc, u_int16_t id)
{
	struct qlw_ccb *ccb;
	struct qlw_iocb_hdr *hdr;
	struct qlw_iocb_status *status;
	struct scsi_xfer *xs;
	u_int32_t handle;
	int entry_type;
	int flags;
	int bus;

	ccb = NULL;
	hdr = QLW_DMA_KVA(sc->sc_responses) + (id * QLW_QUEUE_ENTRY_SIZE);

	bus_dmamap_sync(sc->sc_dmat,
	    QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);

	qlw_get_header(sc, hdr, &entry_type, &flags);
	switch (entry_type) {
	case QLW_IOCB_STATUS:
		status = (struct qlw_iocb_status *)hdr;
		handle = qlw_swap32(sc, status->handle);
		if (handle >= sc->sc_maxccbs) {
			panic("bad completed command handle: %d (>= %d)",
			    handle, sc->sc_maxccbs);
		}

		ccb = &sc->sc_ccbs[handle];
		xs = ccb->ccb_xs;
		if (xs == NULL) {
			DPRINTF(QLW_D_INTR, "%s: got status for inactive"
			    " ccb %d\n", DEVNAME(sc), handle);
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			ccb = NULL;
			break;
		}
		if (xs->io != ccb) {
			panic("completed command handle doesn't match xs "
			    "(handle %d, ccb %p, xs->io %p)", handle, ccb,
			    xs->io);
		}

		if (xs->datalen > 0) {
			bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		}

		bus = qlw_xs_bus(sc, xs);
		xs->status = qlw_swap16(sc, status->scsi_status);
		switch (qlw_swap16(sc, status->completion)) {
		case QLW_IOCB_STATUS_COMPLETE:
			if (qlw_swap16(sc, status->scsi_status) &
			    QLW_SCSI_STATUS_SENSE_VALID) {
				memcpy(&xs->sense, status->sense_data,
				    sizeof(xs->sense));
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_NOERROR;
			}
			xs->resid = 0;
			break;

		case QLW_IOCB_STATUS_INCOMPLETE:
			if (flags & QLW_STATE_GOT_TARGET) {
				xs->error = XS_DRIVER_STUFFUP;
			} else {
				xs->error = XS_SELTIMEOUT;
			}
			break;

		case QLW_IOCB_STATUS_DMA_ERROR:
			DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc));
			/* set resid apparently? */
			break;

		case QLW_IOCB_STATUS_RESET:
			DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n",
			    DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_RESET;
			break;

		case QLW_IOCB_STATUS_ABORTED:
			DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QLW_IOCB_STATUS_TIMEOUT:
			DPRINTF(QLW_D_INTR, "%s: command timed out\n",
			    DEVNAME(sc));
			xs->error = XS_TIMEOUT;
			break;

		case QLW_IOCB_STATUS_DATA_OVERRUN:
		case QLW_IOCB_STATUS_DATA_UNDERRUN:
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_QUEUE_FULL:
			DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc));
			xs->error = XS_BUSY;
			break;

		case QLW_IOCB_STATUS_WIDE_FAILED:
			DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOWIDE;
			atomic_setbits_int(&sc->sc_update_required[bus],
			    1 << xs->sc_link->target);
			task_add(systq, &sc->sc_update_task);
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_SYNCXFER_FAILED:
			DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOSYNC;
			atomic_setbits_int(&sc->sc_update_required[bus],
			    1 << xs->sc_link->target);
			task_add(systq, &sc->sc_update_task);
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		default:
			DPRINTF(QLW_D_INTR, "%s: unexpected completion"
			    " status %x\n", DEVNAME(sc),
			    qlw_swap16(sc, status->completion));
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n",
		    DEVNAME(sc), entry_type);
		qlw_dump_iocb(sc, hdr, QLW_D_INTR);
		break;
	}

	return (ccb);
}

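/*
 * Interrupts are classified by qlw_read_isr() into async events,
 * response queue updates, and mailbox completions; dispatch each
 * type and ack the interrupt.
 */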
void
qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info)
{
	int i;
	u_int16_t rspin;
	struct qlw_ccb *ccb;

	switch (isr) {
	case QLW_INT_TYPE_ASYNC:
		qlw_async(sc, info);
		qlw_clear_isr(sc, isr);
		break;

	case QLW_INT_TYPE_IO:
		qlw_clear_isr(sc, isr);
		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		if (rspin == sc->sc_last_resp_id) {
			/* seems to happen a lot on 2200s when mbox commands
			 * complete but it doesn't want to give us the register
			 * semaphore, or something.
			 *
			 * if we're waiting on a mailbox command, don't ack
			 * the interrupt yet.
			 */
			if (sc->sc_mbox_pending) {
				DPRINTF(QLW_D_MBOX, "%s: ignoring premature"
				    " mbox int\n", DEVNAME(sc));
				return;
			}

			break;
		}

		if (sc->sc_responses == NULL)
			break;

		DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n",
		    DEVNAME(sc), sc->sc_last_resp_id, rspin);

		do {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);
			if (ccb)
				scsi_done(ccb->ccb_xs);

			sc->sc_last_resp_id++;
			sc->sc_last_resp_id %= sc->sc_maxresponses;
		} while (sc->sc_last_resp_id != rspin);

		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
		break;

	case QLW_INT_TYPE_MBOX:
		if (sc->sc_mbox_pending) {
			if (info == QLW_MBOX_COMPLETE) {
				for (i = 1; i < nitems(sc->sc_mbox); i++) {
					sc->sc_mbox[i] = qlw_read_mbox(sc, i);
				}
			} else {
				sc->sc_mbox[0] = info;
			}
			wakeup(sc->sc_mbox);
		} else {
			DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:"
			    " %x\n", DEVNAME(sc), info);
		}
		qlw_clear_isr(sc, isr);
		break;

	default:
		/* maybe log something? */
		break;
	}
}

int
qlw_intr(void *xsc)
{
	struct qlw_softc *sc = xsc;
	u_int16_t isr;
	u_int16_t info;

	if (qlw_read_isr(sc, &isr, &info) == 0)
		return (0);

	qlw_handle_intr(sc, isr, info);
	return (1);
}

int
qlw_scsi_probe(struct scsi_link *link)
{
	if (link->lun >= QLW_MAX_LUNS)
		return (EINVAL);

	return (0);
}

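/*
 * Queue a SCSI command: load the data map, write a sync marker
 * first if a bus reset invalidated negotiation state, then a
 * type-0 command IOCB followed by continuation IOCBs for any data
 * segments that didn't fit.  For SCSI_POLL commands we spin on the
 * response queue ourselves instead of returning.
 */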
void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct qlw_softc	*sc = link->adapter_softc;
	struct qlw_ccb		*ccb;
	struct qlw_iocb_req0	*iocb;
	struct qlw_ccb_list	list;
	u_int16_t		req, rspin;
	int			offset, error, done;
	bus_dmamap_t		dmap;
	int			bus;
	int			seg;

	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n",
		    DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}

u_int16_t
qlw_read(struct qlw_softc *sc, bus_size_t offset)
{
	u_int16_t v;

	v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (v);
}

void
qlw_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

u_int16_t
qlw_read_mbox(struct qlw_softc *sc, int mbox)
{
	/* could range-check mboxes according to chip type? */
	return (qlw_read(sc, sc->sc_mbox_base + (mbox * 2)));
}

void
qlw_write_mbox(struct qlw_softc *sc, int mbox, u_int16_t value)
{
	qlw_write(sc, sc->sc_mbox_base + (mbox * 2), value);
}

void
qlw_host_cmd(struct qlw_softc *sc, u_int16_t cmd)
{
	qlw_write(sc, sc->sc_host_cmd_ctrl, cmd << QLW_HOST_CMD_SHIFT);
}

#define MBOX_COMMAND_TIMEOUT	4000

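/*
 * Issue a mailbox command.  maskin/maskout are bitmasks of the
 * mailbox registers to write before the command and to read back
 * after it completes.  Before the scsibus attaches we poll for the
 * completion ourselves; afterwards we sleep and let qlw_intr() fill
 * in sc_mbox and wake us.  Returns 0 on QLW_MBOX_COMPLETE.
 */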
int
qlw_mbox(struct qlw_softc *sc, int maskin, int maskout)
{
	int i;
	int result = 0;
	int rv;

	sc->sc_mbox_pending = 1;
	for (i = 0; i < nitems(sc->sc_mbox); i++) {
		if (maskin & (1 << i)) {
			qlw_write_mbox(sc, i, sc->sc_mbox[i]);
		}
	}
	qlw_host_cmd(sc, QLW_HOST_CMD_SET_HOST_INT);

	if (sc->sc_running == 0) {
		for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) {
			u_int16_t isr, info;

			delay(100);

			if (qlw_read_isr(sc, &isr, &info) == 0)
				continue;

			switch (isr) {
			case QLW_INT_TYPE_MBOX:
				result = info;
				break;

			default:
				qlw_handle_intr(sc, isr, info);
				break;
			}
		}
	} else {
		tsleep(sc->sc_mbox, PRIBIO, "qlw_mbox", 0);
		result = sc->sc_mbox[0];
	}

	switch (result) {
	case QLW_MBOX_COMPLETE:
		for (i = 1; i < nitems(sc->sc_mbox); i++) {
			sc->sc_mbox[i] = (maskout & (1 << i)) ?
			    qlw_read_mbox(sc, i) : 0;
		}
		rv = 0;
		break;

	case 0:
		/* timed out; do something? */
		DPRINTF(QLW_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
		rv = 1;
		break;

	default:
		sc->sc_mbox[0] = result;
		rv = result;
		break;
	}

	qlw_clear_isr(sc, QLW_INT_TYPE_MBOX);
	sc->sc_mbox_pending = 0;
	return (rv);
}

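/*
 * Queue base addresses are passed 16 bits at a time: mailboxes 2/3
 * take the high and low halves of the low 32 bits, and mailboxes
 * 6/7 the halves of the upper 32 bits of a 64-bit DMA address.
 */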
void
qlw_mbox_putaddr(u_int16_t *mbox, struct qlw_dmamem *mem)
{
	mbox[2] = (QLW_DMA_DVA(mem) >> 16) & 0xffff;
	mbox[3] = (QLW_DMA_DVA(mem) >> 0) & 0xffff;
	mbox[6] = (QLW_DMA_DVA(mem) >> 48) & 0xffff;
	mbox[7] = (QLW_DMA_DVA(mem) >> 32) & 0xffff;
}

void
qlw_set_ints(struct qlw_softc *sc, int enabled)
{
	u_int16_t v = enabled ? (QLW_INT_REQ | QLW_RISC_INT_REQ) : 0;

	qlw_write(sc, QLW_INT_CTRL, v);
}

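/*
 * Classify a pending interrupt.  If the firmware holds the
 * semaphore, mailbox 0 carries either a mailbox completion status
 * or an async event code; otherwise a raised interrupt means the
 * response queue moved.  Returns 0 if no interrupt is pending.
 */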
int
qlw_read_isr(struct qlw_softc *sc, u_int16_t *isr, u_int16_t *info)
{
	u_int16_t int_status;

	if (qlw_read(sc, QLW_SEMA) & QLW_SEMA_LOCK) {
		*info = qlw_read_mbox(sc, 0);
		if (*info & QLW_MBOX_HAS_STATUS)
			*isr = QLW_INT_TYPE_MBOX;
		else
			*isr = QLW_INT_TYPE_ASYNC;
	} else {
		int_status = qlw_read(sc, QLW_INT_STATUS);
		if ((int_status & (QLW_INT_REQ | QLW_RISC_INT_REQ)) == 0)
			return (0);

		*isr = QLW_INT_TYPE_IO;
	}

	return (1);
}

void
qlw_clear_isr(struct qlw_softc *sc, u_int16_t isr)
{
	qlw_host_cmd(sc, QLW_HOST_CMD_CLR_RISC_INT);
	switch (isr) {
	case QLW_INT_TYPE_MBOX:
	case QLW_INT_TYPE_ASYNC:
		qlw_write(sc, QLW_SEMA, 0);
		break;
	default:
		break;
	}
}

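/*
 * Reset the chip: knock down interrupts, pulse the soft reset bit,
 * reset and release the RISC processor, zero the queue pointers,
 * and finish with a NOP mailbox command to check the firmware is
 * responding.
 */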
int
qlw_softreset(struct qlw_softc *sc)
{
	int i;

	qlw_set_ints(sc, 0);

	/* reset */
	qlw_write(sc, QLW_INT_CTRL, QLW_RESET);
	delay(100);
	/* clear data and control dma engines? */

	/* wait for soft reset to clear */
	for (i = 0; i < 1000; i++) {
		if ((qlw_read(sc, QLW_INT_CTRL) & QLW_RESET) == 0)
			break;

		delay(100);
	}

	if (i == 1000) {
		DPRINTF(QLW_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc));
		qlw_set_ints(sc, 0);
		return (ENXIO);
	}

	qlw_write(sc, QLW_CFG1, 0);

	/* reset risc processor */
	qlw_host_cmd(sc, QLW_HOST_CMD_RESET);
	delay(100);
	qlw_write(sc, QLW_SEMA, 0);
	qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);

	/* reset queue pointers */
	qlw_queue_write(sc, QLW_REQ_IN, 0);
	qlw_queue_write(sc, QLW_REQ_OUT, 0);
	qlw_queue_write(sc, QLW_RESP_IN, 0);
	qlw_queue_write(sc, QLW_RESP_OUT, 0);

	qlw_set_ints(sc, 1);
	qlw_host_cmd(sc, QLW_HOST_CMD_BIOS);

	/* do a basic mailbox operation to check we're alive */
	sc->sc_mbox[0] = QLW_MBOX_NOP;
	if (qlw_mbox(sc, 0x0001, 0x0001)) {
		DPRINTF(QLW_D_INTR, "%s: ISP not responding after reset\n",
		    DEVNAME(sc));
		return (ENXIO);
	}

	return (0);
}

void
qlw_dma_burst_enable(struct qlw_softc *sc)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 ||
	    sc->sc_isp_gen == QLW_GEN_ISP1040) {
		qlw_write(sc, QLW_CDMA_CFG,
		    qlw_read(sc, QLW_CDMA_CFG) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG,
		    qlw_read(sc, QLW_DDMA_CFG) | QLW_DMA_BURST_ENABLE);
	} else {
		qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) | QLW_DMA_BANK);
		qlw_write(sc, QLW_CDMA_CFG_1080,
		    qlw_read(sc, QLW_CDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG_1080,
		    qlw_read(sc, QLW_DDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) & ~QLW_DMA_BANK);
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}
}

void
qlw_update(struct qlw_softc *sc, int task)
{
	/* do things */
}

int
qlw_async(struct qlw_softc *sc, u_int16_t info)
{
	int bus;

	switch (info) {
	case QLW_ASYNC_BUS_RESET:
		DPRINTF(QLW_D_PORT, "%s: bus reset\n", DEVNAME(sc));
		bus = qlw_read_mbox(sc, 6);
		sc->sc_marker_required[bus] = 1;
		break;

#if 0
	case QLW_ASYNC_SYSTEM_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_REQ_XFER_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_RSP_XFER_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;
#endif

	case QLW_ASYNC_SCSI_CMD_COMPLETE:
		/* shouldn't happen, we disable fast posting */
		break;

	case QLW_ASYNC_CTIO_COMPLETE:
		/* definitely shouldn't happen, we don't do target mode */
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unknown async %x\n", DEVNAME(sc),
		    info);
		break;
	}
	return (1);
}

#ifdef QLW_DEBUG
void
qlw_dump_iocb(struct qlw_softc *sc, void *buf, int flags)
{
	u_int8_t *iocb = buf;
	int l;
	int b;

	if ((qlwdebug & flags) == 0)
		return;

	printf("%s: iocb:\n", DEVNAME(sc));
	for (l = 0; l < 4; l++) {
		for (b = 0; b < 16; b++) {
			printf(" %2.2x", iocb[(l*16)+b]);
		}
		printf("\n");
	}
}

void
qlw_dump_iocb_segs(struct qlw_softc *sc, void *segs, int n)
{
	u_int8_t *buf = segs;
	int s, b;

	if ((qlwdebug & QLW_D_IOCB) == 0)
		return;

	printf("%s: iocb segs:\n", DEVNAME(sc));
	for (s = 0; s < n; s++) {
		for (b = 0; b < sizeof(struct qlw_iocb_seg); b++) {
			printf(" %2.2x", buf[(s*(sizeof(struct qlw_iocb_seg)))
			    + b]);
		}
		printf("\n");
	}
}
#endif

/*
 * The PCI bus is little-endian whereas SBus is big-endian.  This
 * leads to some differences in byte twisting of DMA transfers of
 * request and response queue entries.  Most fields can be treated as
 * 16-bit or 32-bit with the endianness of the bus, but the header
 * fields end up being swapped by the ISP1000's SBus interface.
 */

void
qlw_get_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int *type, int *flags)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		*type = hdr->entry_count;
		*flags = hdr->seqno;
	} else {
		*type = hdr->entry_type;
		*flags = hdr->flags;
	}
}

void
qlw_put_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int type, int count)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		hdr->entry_type = count;
		hdr->entry_count = type;
		hdr->seqno = 0;
		hdr->flags = 0;
	} else {
		hdr->entry_type = type;
		hdr->entry_count = count;
		hdr->seqno = 0;
		hdr->flags = 0;
	}
}

void
qlw_put_data_seg(struct qlw_softc *sc, struct qlw_iocb_seg *seg,
    bus_dmamap_t dmap, int num)
{
	seg->seg_addr = qlw_swap32(sc, dmap->dm_segs[num].ds_addr);
	seg->seg_len = qlw_swap32(sc, dmap->dm_segs[num].ds_len);
}

void
qlw_put_marker(struct qlw_softc *sc, int bus, void *buf)
{
	struct qlw_iocb_marker *marker = buf;

	qlw_put_header(sc, &marker->hdr, QLW_IOCB_MARKER, 1);

	/* could be more specific here; isp(4) isn't */
	marker->device = qlw_swap16(sc, (bus << 7) << 8);
	marker->modifier = qlw_swap16(sc, QLW_IOCB_MARKER_SYNC_ALL);
	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

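/*
 * Build a type-0 command IOCB.  The first QLW_IOCB_SEGS_PER_CMD
 * data segments live in the command entry itself; the entry count
 * in the header tells the firmware how many continuation entries
 * carry the rest.
 */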
void
qlw_put_cmd(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb)
{
	struct qlw_iocb_req0 *req = buf;
	int entry_count = 1;
	u_int16_t dir;
	int seg, nsegs;
	int seg_count;
	int timeout = 0;
	int bus, target, lun;

	if (xs->datalen == 0) {
		dir = QLW_IOCB_CMD_NO_DATA;
		seg_count = 1;
	} else {
		dir = xs->flags & SCSI_DATA_IN ? QLW_IOCB_CMD_READ_DATA :
		    QLW_IOCB_CMD_WRITE_DATA;
		seg_count = ccb->ccb_dmamap->dm_nsegs;
		nsegs = ccb->ccb_dmamap->dm_nsegs - QLW_IOCB_SEGS_PER_CMD;
		while (nsegs > 0) {
			entry_count++;
			nsegs -= QLW_IOCB_SEGS_PER_CONT;
		}
		for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
			if (seg >= QLW_IOCB_SEGS_PER_CMD)
				break;
			qlw_put_data_seg(sc, &req->segs[seg],
			    ccb->ccb_dmamap, seg);
		}
	}

	if (sc->sc_running && (xs->sc_link->quirks & SDEV_NOTAGS) == 0)
		dir |= QLW_IOCB_CMD_SIMPLE_QUEUE;

	qlw_put_header(sc, &req->hdr, QLW_IOCB_CMD_TYPE_0, entry_count);

	/*
	 * timeout is in seconds.  make sure it's at least 1 if a timeout
	 * was specified in xs
	 */
	if (xs->timeout != 0)
		timeout = MAX(1, xs->timeout/1000);

	req->flags = qlw_swap16(sc, dir);
	req->seg_count = qlw_swap16(sc, seg_count);
	req->timeout = qlw_swap16(sc, timeout);

	bus = qlw_xs_bus(sc, xs);
	target = xs->sc_link->target;
	lun = xs->sc_link->lun;
	req->device = qlw_swap16(sc, (((bus << 7) | target) << 8) | lun);

	memcpy(req->cdb, xs->cmd, xs->cmdlen);
	req->ccblen = qlw_swap16(sc, xs->cmdlen);

	req->handle = qlw_swap32(sc, ccb->ccb_id);

	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

void
qlw_put_cont(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb, int seg0)
{
	struct qlw_iocb_cont0 *cont = buf;
	int seg;

	qlw_put_header(sc, &cont->hdr, QLW_IOCB_CONT_TYPE_0, 1);

	for (seg = seg0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
		if ((seg - seg0) >= QLW_IOCB_SEGS_PER_CONT)
			break;
		qlw_put_data_seg(sc, &cont->segs[seg - seg0],
		    ccb->ccb_dmamap, seg);
	}
}

#ifndef ISP_NOFIRMWARE
int
qlw_load_firmware_words(struct qlw_softc *sc, const u_int16_t *src,
    u_int16_t dest)
{
	u_int16_t i;

	for (i = 0; i < src[3]; i++) {
		sc->sc_mbox[0] = QLW_MBOX_WRITE_RAM_WORD;
		sc->sc_mbox[1] = i + dest;
		sc->sc_mbox[2] = src[i];
		if (qlw_mbox(sc, 0x07, 0x01)) {
			printf("firmware load failed\n");
			return (1);
		}
	}

	sc->sc_mbox[0] = QLW_MBOX_VERIFY_CSUM;
	sc->sc_mbox[1] = dest;
	if (qlw_mbox(sc, 0x0003, 0x0003)) {
		printf("verification of chunk at %x failed: %x\n",
		    dest, sc->sc_mbox[1]);
		return (1);
	}

	return (0);
}

int
qlw_load_firmware(struct qlw_softc *sc)
{
	return qlw_load_firmware_words(sc, sc->sc_firmware, QLW_CODE_ORG);
}

#endif	/* !ISP_NOFIRMWARE */

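/*
 * The NVRAM is a serial EEPROM (93Cxx-style, apparently) accessed
 * by bit-banging chip select, clock and data through the QLW_NVRAM
 * register: clock out a read command and word address, then clock
 * in 16 data bits.  The byte-wise checksum of the whole part must
 * come out to zero.
 */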
int
qlw_read_nvram(struct qlw_softc *sc)
{
	u_int16_t data[sizeof(sc->sc_nvram) >> 1];
	u_int16_t req, cmd, val;
	u_int8_t csum;
	int i, bit;
	int reqcmd;
	int nbits;

	if (sc->sc_nvram_size == 0)
		return (1);

	if (sc->sc_nvram_size == 128) {
		reqcmd = (QLW_NVRAM_CMD_READ << 6);
		nbits = 8;
	} else {
		reqcmd = (QLW_NVRAM_CMD_READ << 8);
		nbits = 10;
	}

	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
	delay(10);
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL | QLW_NVRAM_CLOCK);
	delay(10);

	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		req = i | reqcmd;

		/* write each bit out through the nvram register */
		for (bit = nbits; bit >= 0; bit--) {
			cmd = QLW_NVRAM_CHIP_SEL;
			if ((req >> bit) & 1) {
				cmd |= QLW_NVRAM_DATA_OUT;
			}
			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd | QLW_NVRAM_CLOCK);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* read the result back */
		val = 0;
		for (bit = 0; bit < 16; bit++) {
			val <<= 1;
			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL |
			    QLW_NVRAM_CLOCK);
			delay(10);
			if (qlw_read(sc, QLW_NVRAM) & QLW_NVRAM_DATA_IN)
				val |= 1;
			delay(10);

			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		qlw_write(sc, QLW_NVRAM, 0);
		delay(10);
		qlw_read(sc, QLW_NVRAM);

		data[i] = letoh16(val);
	}

	csum = 0;
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		csum += data[i] & 0xff;
		csum += data[i] >> 8;
	}

	memcpy(&sc->sc_nvram, data, sizeof(sc->sc_nvram));
	/* id field should be 'ISP ', version should be high enough */
	if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
	    sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
	    sc->sc_nvram.nvram_version < sc->sc_nvram_minversion ||
	    (csum != 0)) {
		printf("%s: nvram corrupt\n", DEVNAME(sc));
		return (1);
	}
	return (0);
}

void
qlw_parse_nvram_1040(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1040 *nv = (struct qlw_nvram_1040 *)&sc->sc_nvram;
	int target;

	KASSERT(bus == 0);

	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[0] = (nv->config1 >> 4);

	sc->sc_retry_count[0] = nv->retry_count;
	sc->sc_retry_delay[0] = nv->retry_delay;
	sc->sc_reset_delay[0] = nv->reset_delay;
	sc->sc_tag_age_limit[0] = nv->tag_age_limit;
	sc->sc_selection_timeout[0] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[0] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[0] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[0] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[0] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[0][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

void
qlw_parse_nvram_1080(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1080 *nvram = (struct qlw_nvram_1080 *)&sc->sc_nvram;
	struct qlw_nvram_bus *nv = &nvram->bus[bus];
	int target;

	sc->sc_isp_config = nvram->isp_config;
	sc->sc_fw_features = nvram->fw_features;

	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[bus] = (nv->config1 & 0x0f);

	sc->sc_retry_count[bus] = nv->retry_count;
	sc->sc_retry_delay[bus] = nv->retry_delay;
	sc->sc_reset_delay[bus] = nv->reset_delay;
	sc->sc_selection_timeout[bus] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[bus] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[bus] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[bus] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[bus] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		if (sc->sc_isp_gen == QLW_GEN_ISP12160)
			qt->qt_sync_offset = nv->target[target].flags & 0x1f;
		else
			qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

void
qlw_init_defaults(struct qlw_softc *sc, int bus)
{
	int target;

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		break;
	case QLW_GEN_ISP1040:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_64;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_128;
		sc->sc_fw_features = QLW_FW_FEATURE_LVD_NOTIFY;
		break;
	}

	sc->sc_retry_count[bus] = 0;
	sc->sc_retry_delay[bus] = 0;
	sc->sc_reset_delay[bus] = 3;
	sc->sc_tag_age_limit[bus] = 8;
	sc->sc_selection_timeout[bus] = 250;
	sc->sc_max_queue_depth[bus] = 32;
	if (sc->sc_clock > 40)
		sc->sc_async_data_setup[bus] = 9;
	else
		sc->sc_async_data_setup[bus] = 6;
	sc->sc_req_ack_active_neg[bus] = 1;
	sc->sc_data_line_active_neg[bus] = 1;

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = QLW_TARGET_DEFAULT;
		qt->qt_exec_throttle = 16;
		qt->qt_sync_period = 10;
		qt->qt_sync_offset = 12;
	}
}

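/*
 * Allocate a single physically contiguous DMA buffer and keep the
 * map, segment, and kva together so qlw_dmamem_free() can unwind
 * it in one go.
 */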
struct qlw_dmamem *
qlw_dmamem_alloc(struct qlw_softc *sc, size_t size)
{
	struct qlw_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->qdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}

void
qlw_dmamem_free(struct qlw_softc *sc, struct qlw_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
	free(m, M_DEVBUF, sizeof(*m));
}

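/*
 * Allocate the ccb array, the request/response rings, and a dma map
 * per ccb, then seed the free list that backs sc_iopool so the
 * midlayer allocates commands from it.
 */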
int
qlw_alloc_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	mtx_init(&sc->sc_queue_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxccbs, sizeof(struct qlw_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = qlw_dmamem_alloc(sc, sc->sc_maxrequests *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	sc->sc_responses = qlw_dmamem_alloc(sc, sc->sc_maxresponses *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_responses == NULL) {
		printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
		goto free_req;
	}

	cmd = QLW_DMA_KVA(sc->sc_requests);
	memset(cmd, 0, QLW_QUEUE_ENTRY_SIZE * sc->sc_maxccbs);
	for (i = 0; i < sc->sc_maxccbs; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    QLW_MAX_SEGS, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;

		qlw_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, qlw_get_ccb, qlw_put_ccb);
	return (0);

free_maps:
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	qlw_dmamem_free(sc, sc->sc_responses);
free_req:
	qlw_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return (1);
}

void
qlw_free_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb		*ccb;

	scsi_iopool_destroy(&sc->sc_iopool);
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	qlw_dmamem_free(sc, sc->sc_responses);
	qlw_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF, 0);
}

void *
qlw_get_ccb(void *xsc)
{
	struct qlw_softc	*sc = xsc;
	struct qlw_ccb		*ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);
	return (ccb);
}

void
qlw_put_ccb(void *xsc, void *io)
{
	struct qlw_softc	*sc = xsc;
	struct qlw_ccb		*ccb = io;

	ccb->ccb_xs = NULL;
	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
1800