/*	$OpenBSD: qlw.c,v 1.48 2022/04/16 19:19:59 naddy Exp $ */

/*
 * Copyright (c) 2011 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
 * Copyright (c) 2014 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>
#include <sys/queue.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/qlwreg.h>
#include <dev/ic/qlwvar.h>

#ifndef SMALL_KERNEL
#ifndef QLW_DEBUG
#define QLW_DEBUG
#endif
#endif

#ifdef QLW_DEBUG
#define DPRINTF(m, f...) do { if ((qlwdebug & (m)) == (m)) printf(f); } \
    while (0)
#define QLW_D_MBOX		0x01
#define QLW_D_INTR		0x02
#define QLW_D_PORT		0x04
#define QLW_D_IO		0x08
#define QLW_D_IOCB		0x10
int qlwdebug = QLW_D_PORT | QLW_D_INTR | QLW_D_MBOX;
#else
#define DPRINTF(m, f...)
#endif
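
/*
 * Debug output is gated by the qlwdebug mask above: DPRINTF(QLW_D_MBOX, ...)
 * prints only when all of the QLW_D_MBOX bits are set in qlwdebug.
 */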

struct cfdriver qlw_cd = {
	NULL,
	"qlw",
	DV_DULL
};

void		qlw_scsi_cmd(struct scsi_xfer *);

u_int16_t	qlw_read(struct qlw_softc *, bus_size_t);
void		qlw_write(struct qlw_softc *, bus_size_t, u_int16_t);
void		qlw_host_cmd(struct qlw_softc *sc, u_int16_t);

int		qlw_mbox(struct qlw_softc *, int, int);
void		qlw_mbox_putaddr(u_int16_t *, struct qlw_dmamem *);
u_int16_t	qlw_read_mbox(struct qlw_softc *, int);
void		qlw_write_mbox(struct qlw_softc *, int, u_int16_t);

int		qlw_config_bus(struct qlw_softc *, int);
int		qlw_config_target(struct qlw_softc *, int, int);
void		qlw_update_bus(struct qlw_softc *, int);
void		qlw_update_target(struct qlw_softc *, int, int);
void		qlw_update_task(void *);

void		qlw_handle_intr(struct qlw_softc *, u_int16_t, u_int16_t);
void		qlw_set_ints(struct qlw_softc *, int);
int		qlw_read_isr(struct qlw_softc *, u_int16_t *, u_int16_t *);
void		qlw_clear_isr(struct qlw_softc *, u_int16_t);

void		qlw_update(struct qlw_softc *, int);
void		qlw_put_marker(struct qlw_softc *, int, void *);
void		qlw_put_cmd(struct qlw_softc *, void *, struct scsi_xfer *,
		    struct qlw_ccb *);
void		qlw_put_cont(struct qlw_softc *, void *, struct scsi_xfer *,
		    struct qlw_ccb *, int);
struct qlw_ccb	*qlw_handle_resp(struct qlw_softc *, u_int16_t);
void		qlw_get_header(struct qlw_softc *, struct qlw_iocb_hdr *,
		    int *, int *);
void		qlw_put_header(struct qlw_softc *, struct qlw_iocb_hdr *,
		    int, int);
void		qlw_put_data_seg(struct qlw_softc *, struct qlw_iocb_seg *,
		    bus_dmamap_t, int);

int		qlw_softreset(struct qlw_softc *);
void		qlw_dma_burst_enable(struct qlw_softc *);

int		qlw_async(struct qlw_softc *, u_int16_t);

int		qlw_load_firmware_words(struct qlw_softc *, const u_int16_t *,
		    u_int16_t);
int		qlw_load_firmware(struct qlw_softc *);
int		qlw_read_nvram(struct qlw_softc *);
void		qlw_parse_nvram_1040(struct qlw_softc *, int);
void		qlw_parse_nvram_1080(struct qlw_softc *, int);
void		qlw_init_defaults(struct qlw_softc *, int);

struct qlw_dmamem *qlw_dmamem_alloc(struct qlw_softc *, size_t);
void		qlw_dmamem_free(struct qlw_softc *, struct qlw_dmamem *);

int		qlw_alloc_ccbs(struct qlw_softc *);
void		qlw_free_ccbs(struct qlw_softc *);
void		*qlw_get_ccb(void *);
void		qlw_put_ccb(void *, void *);

#ifdef QLW_DEBUG
void		qlw_dump_iocb(struct qlw_softc *, void *, int);
void		qlw_dump_iocb_segs(struct qlw_softc *, void *, int);
#else
#define qlw_dump_iocb(sc, h, fl)	do { /* nothing */ } while (0)
#define qlw_dump_iocb_segs(sc, h, fl)	do { /* nothing */ } while (0)
#endif

static inline int
qlw_xs_bus(struct qlw_softc *sc, struct scsi_xfer *xs)
{
	/*
	 * sc_scsibus[0] == NULL -> bus 0 probing during config_found().
	 * sc_scsibus[0] == xs->sc_link->bus -> bus 0 normal operation.
	 * sc_scsibus[1] == NULL -> bus 1 probing during config_found().
	 * sc_scsibus[1] == xs->sc_link->bus -> bus 1 normal operation.
	 */
	if ((sc->sc_scsibus[0] == NULL) ||
	    (xs->sc_link->bus == sc->sc_scsibus[0]))
		return 0;
	else
		return 1;
}

static inline u_int16_t
qlw_swap16(struct qlw_softc *sc, u_int16_t value)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000)
		return htobe16(value);
	else
		return htole16(value);
}

static inline u_int32_t
qlw_swap32(struct qlw_softc *sc, u_int32_t value)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000)
		return htobe32(value);
	else
		return htole32(value);
}
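
/*
 * The byte order of queue entry fields depends on the bus the chip sits
 * on: the SBus-attached ISP1000 wants big-endian values, the PCI parts
 * little-endian.  See the longer discussion above qlw_get_header() below.
 */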

static inline u_int16_t
qlw_queue_read(struct qlw_softc *sc, bus_size_t offset)
{
	return qlw_read(sc, sc->sc_mbox_base + offset);
}

static inline void
qlw_queue_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	qlw_write(sc, sc->sc_mbox_base + offset, value);
}

const struct scsi_adapter qlw_switch = {
	qlw_scsi_cmd, NULL, NULL, NULL, NULL
};

int
qlw_attach(struct qlw_softc *sc)
{
	struct scsibus_attach_args saa;
	void (*parse_nvram)(struct qlw_softc *, int);
	int reset_delay;
	int bus;

	task_set(&sc->sc_update_task, qlw_update_task, sc);

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		sc->sc_nvram_size = 0;
		break;
	case QLW_GEN_ISP1040:
		sc->sc_nvram_size = 128;
		sc->sc_nvram_minversion = 2;
		parse_nvram = qlw_parse_nvram_1040;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_nvram_size = 256;
		sc->sc_nvram_minversion = 1;
		parse_nvram = qlw_parse_nvram_1080;
		break;

	default:
		printf("unknown isp type\n");
		return (ENXIO);
	}

	/* after reset, mbox registers 1-3 should contain the string "ISP " */
	if (qlw_read_mbox(sc, 1) != 0x4953 ||
	    qlw_read_mbox(sc, 2) != 0x5020 ||
	    qlw_read_mbox(sc, 3) != 0x2020) {
		/* try releasing the risc processor */
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}

	qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
	if (qlw_softreset(sc) != 0) {
		printf("softreset failed\n");
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_init_defaults(sc, bus);

	if (qlw_read_nvram(sc) == 0) {
		for (bus = 0; bus < sc->sc_numbusses; bus++)
			parse_nvram(sc, bus);
	}

#ifndef ISP_NOFIRMWARE
	if (sc->sc_firmware && qlw_load_firmware(sc)) {
		printf("firmware load failed\n");
		return (ENXIO);
	}
#endif

	/* execute firmware */
	sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE;
	sc->sc_mbox[1] = QLW_CODE_ORG;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	delay(250000);		/* from isp(4) */

	sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE;
	if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN,
	    QLW_MBOX_ABOUT_FIRMWARE_OUT)) {
		printf("ISP not talking after firmware exec: %x\n",
		    sc->sc_mbox[0]);
		return (ENXIO);
	}
	/* The ISP1000 firmware we use doesn't return a version number. */
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) {
		sc->sc_mbox[1] = 1;
		sc->sc_mbox[2] = 37;
		sc->sc_mbox[3] = 0;
		sc->sc_mbox[6] = 0;
	}
	printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc),
	    sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);

	/* work out how many ccbs to allocate */
	sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS;
	if (qlw_mbox(sc, 0x0001, 0x0007)) {
		printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}
	sc->sc_maxrequests = sc->sc_mbox[2];
	if (sc->sc_maxrequests > 512)
		sc->sc_maxrequests = 512;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests)
			sc->sc_max_queue_depth[bus] = sc->sc_maxrequests;
	}

	/*
	 * On some 1020/1040 variants the response queue is limited to
	 * 256 entries.  We don't really need all that many anyway.
	 */
	sc->sc_maxresponses = sc->sc_maxrequests / 2;
	if (sc->sc_maxresponses < 64)
		sc->sc_maxresponses = 64;

	/* We may need up to 3 request entries per SCSI command. */
	sc->sc_maxccbs = sc->sc_maxrequests / 3;
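	/*
	 * (One command IOCB carries QLW_IOCB_SEGS_PER_CMD data segments;
	 * extra segments spill into continuation IOCBs, built by
	 * qlw_put_cmd() and qlw_put_cont() below.)
	 */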

	/* Allegedly the FIFO is busted on the 1040A. */
	if (sc->sc_isp_type == QLW_ISP1040A)
		sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK;
	qlw_write(sc, QLW_CFG1, sc->sc_isp_config);

	if (sc->sc_isp_config & QLW_BURST_ENABLE)
		qlw_dma_burst_enable(sc);

	sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES;
	sc->sc_mbox[1] = 0;
	if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY)
		sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY;
	if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE;
	sc->sc_mbox[1] = sc->sc_clock;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT;
	sc->sc_mbox[1] = sc->sc_retry_count[0];
	sc->sc_mbox[2] = sc->sc_retry_delay[0];
	sc->sc_mbox[6] = sc->sc_retry_count[1];
	sc->sc_mbox[7] = sc->sc_retry_delay[1];
	if (qlw_mbox(sc, 0x00c7, 0x0001)) {
		printf("couldn't set retry count: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP;
	sc->sc_mbox[1] = sc->sc_async_data_setup[0];
	sc->sc_mbox[2] = sc->sc_async_data_setup[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION;
	sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5;
	sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4;
	sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5;
	sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4;
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set active negation: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT;
	sc->sc_mbox[1] = sc->sc_tag_age_limit[0];
	sc->sc_mbox[2] = sc->sc_tag_age_limit[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT;
	sc->sc_mbox[1] = sc->sc_selection_timeout[0];
	sc->sc_mbox[2] = sc->sc_selection_timeout[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (qlw_config_bus(sc, bus))
			return (ENXIO);
	}

	if (qlw_alloc_ccbs(sc)) {
		/* error already printed */
		return (ENOMEM);
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxrequests;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests);
	sc->sc_mbox[4] = 0;
	if (qlw_mbox(sc, 0x00df, 0x0001)) {
		printf("couldn't init request queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxresponses;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses);
	sc->sc_mbox[5] = 0;
	if (qlw_mbox(sc, 0x00ef, 0x0001)) {
		printf("couldn't init response queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	reset_delay = 0;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_mbox[0] = QLW_MBOX_BUS_RESET;
		sc->sc_mbox[1] = sc->sc_reset_delay[bus];
		sc->sc_mbox[2] = bus;
		if (qlw_mbox(sc, 0x0007, 0x0001)) {
			printf("couldn't reset bus: %x\n", sc->sc_mbox[0]);
			goto free_ccbs;
		}
		sc->sc_marker_required[bus] = 1;
		sc->sc_update_required[bus] = 0xffff;

		if (sc->sc_reset_delay[bus] > reset_delay)
			reset_delay = sc->sc_reset_delay[bus];
	}

	/* wait for the busses to settle */
	delay(reset_delay * 1000000);

	saa.saa_adapter = &qlw_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_buswidth = QLW_MAX_TARGETS;
	saa.saa_luns = QLW_MAX_LUNS;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		saa.saa_adapter_target = sc->sc_initiator[bus];
		saa.saa_openings = sc->sc_max_queue_depth[bus];

		sc->sc_scsibus[bus] = (struct scsibus_softc *)
		    config_found(&sc->sc_dev, &saa, scsiprint);

		qlw_update_bus(sc, bus);
	}

	sc->sc_running = 1;
	return (0);

free_ccbs:
	qlw_free_ccbs(sc);
	return (ENXIO);
}

int
qlw_detach(struct qlw_softc *sc, int flags)
{
	return (0);
}

int
qlw_config_bus(struct qlw_softc *sc, int bus)
{
	int target, err;

	sc->sc_mbox[0] = QLW_MBOX_SET_INITIATOR_ID;
	sc->sc_mbox[1] = (bus << 7) | sc->sc_initiator[bus];

	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set initiator id: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		err = qlw_config_target(sc, bus, target);
		if (err)
			return (err);
	}

	return (0);
}

int
qlw_config_target(struct qlw_softc *sc, int bus, int target)
{
	int lun;

	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] &= QLW_TARGET_SAFE;
	sc->sc_mbox[2] |= QLW_TARGET_NARROW | QLW_TARGET_ASYNC;
	sc->sc_mbox[3] = 0;

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return (ENXIO);
		}
	}

	return (0);
}

void
qlw_update_bus(struct qlw_softc *sc, int bus)
{
	int target;

	for (target = 0; target < QLW_MAX_TARGETS; target++)
		qlw_update_target(sc, bus, target);
}

void
qlw_update_target(struct qlw_softc *sc, int bus, int target)
{
	struct scsi_link *link;
	int lun;

	if ((sc->sc_update_required[bus] & (1 << target)) == 0)
		return;
	atomic_clearbits_int(&sc->sc_update_required[bus], (1 << target));

	link = scsi_get_link(sc->sc_scsibus[bus], target, 0);
	if (link == NULL)
		return;

	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] |= QLW_TARGET_RENEG;
	sc->sc_mbox[2] &= ~QLW_TARGET_QFRZ;
	if (link->quirks & SDEV_NOSYNC)
		sc->sc_mbox[2] &= ~QLW_TARGET_SYNC;
	if (link->quirks & SDEV_NOWIDE)
		sc->sc_mbox[2] &= ~QLW_TARGET_WIDE;
	if (link->quirks & SDEV_NOTAGS)
		sc->sc_mbox[2] &= ~QLW_TARGET_TAGS;

	sc->sc_mbox[3] = sc->sc_target[bus][target].qt_sync_period;
	sc->sc_mbox[3] |= (sc->sc_target[bus][target].qt_sync_offset << 8);

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return;
	}

	/* XXX do PPR detection */

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return;
		}
	}
}

void
qlw_update_task(void *xsc)
{
	struct qlw_softc *sc = xsc;
	int bus;

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_update_bus(sc, bus);
}

struct qlw_ccb *
qlw_handle_resp(struct qlw_softc *sc, u_int16_t id)
{
	struct qlw_ccb *ccb;
	struct qlw_iocb_hdr *hdr;
	struct qlw_iocb_status *status;
	struct scsi_xfer *xs;
	u_int32_t handle;
	int entry_type;
	int flags;
	int bus;

	ccb = NULL;
	hdr = QLW_DMA_KVA(sc->sc_responses) + (id * QLW_QUEUE_ENTRY_SIZE);

	bus_dmamap_sync(sc->sc_dmat,
	    QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);

	qlw_get_header(sc, hdr, &entry_type, &flags);
	switch (entry_type) {
	case QLW_IOCB_STATUS:
		status = (struct qlw_iocb_status *)hdr;
		handle = qlw_swap32(sc, status->handle);
		if (handle > sc->sc_maxccbs) {
			panic("bad completed command handle: %d (> %d)",
			    handle, sc->sc_maxccbs);
		}

		ccb = &sc->sc_ccbs[handle];
		xs = ccb->ccb_xs;
		if (xs == NULL) {
			DPRINTF(QLW_D_INTR, "%s: got status for inactive"
			    " ccb %d\n", DEVNAME(sc), handle);
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			ccb = NULL;
			break;
		}
		if (xs->io != ccb) {
			panic("completed command handle doesn't match xs "
			    "(handle %d, ccb %p, xs->io %p)", handle, ccb,
			    xs->io);
		}

		if (xs->datalen > 0) {
			bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		}

		bus = qlw_xs_bus(sc, xs);
		xs->status = qlw_swap16(sc, status->scsi_status);
		switch (qlw_swap16(sc, status->completion)) {
		case QLW_IOCB_STATUS_COMPLETE:
			if (qlw_swap16(sc, status->scsi_status) &
			    QLW_SCSI_STATUS_SENSE_VALID) {
				memcpy(&xs->sense, status->sense_data,
				    sizeof(xs->sense));
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_NOERROR;
			}
			xs->resid = 0;
			break;

		case QLW_IOCB_STATUS_INCOMPLETE:
			if (flags & QLW_STATE_GOT_TARGET) {
				xs->error = XS_DRIVER_STUFFUP;
			} else {
				xs->error = XS_SELTIMEOUT;
			}
			break;

		case QLW_IOCB_STATUS_DMA_ERROR:
			DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc));
			/* set resid apparently? */
			break;

		case QLW_IOCB_STATUS_RESET:
			DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n",
			    DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_RESET;
			break;

		case QLW_IOCB_STATUS_ABORTED:
			DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QLW_IOCB_STATUS_TIMEOUT:
			DPRINTF(QLW_D_INTR, "%s: command timed out\n",
			    DEVNAME(sc));
			xs->error = XS_TIMEOUT;
			break;

		case QLW_IOCB_STATUS_DATA_OVERRUN:
		case QLW_IOCB_STATUS_DATA_UNDERRUN:
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_QUEUE_FULL:
			DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc));
			xs->error = XS_BUSY;
			break;

		case QLW_IOCB_STATUS_WIDE_FAILED:
			DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOWIDE;
			atomic_setbits_int(&sc->sc_update_required[bus],
			    1 << xs->sc_link->target);
			task_add(systq, &sc->sc_update_task);
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_SYNCXFER_FAILED:
			DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOSYNC;
			atomic_setbits_int(&sc->sc_update_required[bus],
			    1 << xs->sc_link->target);
			task_add(systq, &sc->sc_update_task);
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		default:
			DPRINTF(QLW_D_INTR, "%s: unexpected completion"
			    " status %x\n", DEVNAME(sc),
			    qlw_swap16(sc, status->completion));
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n",
		    DEVNAME(sc), entry_type);
		qlw_dump_iocb(sc, hdr, QLW_D_INTR);
		break;
	}

	return (ccb);
}

void
qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info)
{
	int i;
	u_int16_t rspin;
	struct qlw_ccb *ccb;

	switch (isr) {
	case QLW_INT_TYPE_ASYNC:
		qlw_async(sc, info);
		qlw_clear_isr(sc, isr);
		break;

	case QLW_INT_TYPE_IO:
		qlw_clear_isr(sc, isr);
		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		if (rspin == sc->sc_last_resp_id) {
			/* seems to happen a lot on 2200s when mbox commands
			 * complete but it doesn't want to give us the register
			 * semaphore, or something.
			 *
			 * if we're waiting on a mailbox command, don't ack
			 * the interrupt yet.
			 */
			if (sc->sc_mbox_pending) {
				DPRINTF(QLW_D_MBOX, "%s: ignoring premature"
				    " mbox int\n", DEVNAME(sc));
				return;
			}

			break;
		}

		if (sc->sc_responses == NULL)
			break;

		DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n",
		    DEVNAME(sc), sc->sc_last_resp_id, rspin);

		do {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);
			if (ccb)
				scsi_done(ccb->ccb_xs);

			sc->sc_last_resp_id++;
			sc->sc_last_resp_id %= sc->sc_maxresponses;
		} while (sc->sc_last_resp_id != rspin);

		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
		break;

	case QLW_INT_TYPE_MBOX:
		if (sc->sc_mbox_pending) {
			if (info == QLW_MBOX_COMPLETE) {
				for (i = 1; i < nitems(sc->sc_mbox); i++) {
					sc->sc_mbox[i] = qlw_read_mbox(sc, i);
				}
			} else {
				sc->sc_mbox[0] = info;
			}
			wakeup(sc->sc_mbox);
		} else {
			DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:"
			    " %x\n", DEVNAME(sc), info);
		}
		qlw_clear_isr(sc, isr);
		break;

	default:
		/* maybe log something? */
		break;
	}
}

int
qlw_intr(void *xsc)
{
	struct qlw_softc *sc = xsc;
	u_int16_t isr;
	u_int16_t info;

	if (qlw_read_isr(sc, &isr, &info) == 0)
		return (0);

	qlw_handle_intr(sc, isr, info);
	return (1);
}

void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct qlw_softc *sc = link->bus->sb_adapter_softc;
	struct qlw_ccb *ccb;
	struct qlw_iocb_req0 *iocb;
	struct qlw_ccb_list list;
	u_int16_t req, rspin;
	int offset, error, done;
	bus_dmamap_t dmap;
	int bus;
	int seg;

	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n",
		    DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}

u_int16_t
qlw_read(struct qlw_softc *sc, bus_size_t offset)
{
	u_int16_t v;
	v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (v);
}

void
qlw_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

u_int16_t
qlw_read_mbox(struct qlw_softc *sc, int mbox)
{
	/* could range-check mboxes according to chip type? */
	return (qlw_read(sc, sc->sc_mbox_base + (mbox * 2)));
}

void
qlw_write_mbox(struct qlw_softc *sc, int mbox, u_int16_t value)
{
	qlw_write(sc, sc->sc_mbox_base + (mbox * 2), value);
}

void
qlw_host_cmd(struct qlw_softc *sc, u_int16_t cmd)
{
	qlw_write(sc, sc->sc_host_cmd_ctrl, cmd << QLW_HOST_CMD_SHIFT);
}

#define MBOX_COMMAND_TIMEOUT	4000

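/*
 * maskin/maskout select which sc_mbox[] registers are written before the
 * command is issued and read back once it completes; bit i corresponds to
 * mailbox register i.  For example, QLW_MBOX_SET_RETRY_COUNT above passes
 * maskin 0x00c7 to load mailboxes 0, 1, 2, 6, and 7.
 */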
int
qlw_mbox(struct qlw_softc *sc, int maskin, int maskout)
{
	int i;
	int result = 0;
	int rv;

	sc->sc_mbox_pending = 1;
	for (i = 0; i < nitems(sc->sc_mbox); i++) {
		if (maskin & (1 << i)) {
			qlw_write_mbox(sc, i, sc->sc_mbox[i]);
		}
	}
	qlw_host_cmd(sc, QLW_HOST_CMD_SET_HOST_INT);

	if (sc->sc_running == 0) {
		for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) {
			u_int16_t isr, info;

			delay(100);

			if (qlw_read_isr(sc, &isr, &info) == 0)
				continue;

			switch (isr) {
			case QLW_INT_TYPE_MBOX:
				result = info;
				break;

			default:
				qlw_handle_intr(sc, isr, info);
				break;
			}
		}
	} else {
		tsleep_nsec(sc->sc_mbox, PRIBIO, "qlw_mbox", INFSLP);
		result = sc->sc_mbox[0];
	}

	switch (result) {
	case QLW_MBOX_COMPLETE:
		for (i = 1; i < nitems(sc->sc_mbox); i++) {
			sc->sc_mbox[i] = (maskout & (1 << i)) ?
			    qlw_read_mbox(sc, i) : 0;
		}
		rv = 0;
		break;

	case 0:
		/* timed out; do something? */
		DPRINTF(QLW_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
		rv = 1;
		break;

	default:
		sc->sc_mbox[0] = result;
		rv = result;
		break;
	}

	qlw_clear_isr(sc, QLW_INT_TYPE_MBOX);
	sc->sc_mbox_pending = 0;
	return (rv);
}

void
qlw_mbox_putaddr(u_int16_t *mbox, struct qlw_dmamem *mem)
{
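	/*
	 * Spread the 64-bit DMA address of a queue across four mailbox
	 * registers: mbox 2/3 take bits 31:16/15:0, mbox 6/7 take bits
	 * 63:48/47:32.
	 */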
	mbox[2] = (QLW_DMA_DVA(mem) >> 16) & 0xffff;
	mbox[3] = (QLW_DMA_DVA(mem) >> 0) & 0xffff;
	mbox[6] = (QLW_DMA_DVA(mem) >> 48) & 0xffff;
	mbox[7] = (QLW_DMA_DVA(mem) >> 32) & 0xffff;
}

void
qlw_set_ints(struct qlw_softc *sc, int enabled)
{
	u_int16_t v = enabled ? (QLW_INT_REQ | QLW_RISC_INT_REQ) : 0;
	qlw_write(sc, QLW_INT_CTRL, v);
}

int
qlw_read_isr(struct qlw_softc *sc, u_int16_t *isr, u_int16_t *info)
{
	u_int16_t int_status;

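	/*
	 * The semaphore register distinguishes mailbox completions and
	 * async events from plain I/O completions; when it is locked,
	 * mailbox 0 holds either a status or an async event code.
	 */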
	if (qlw_read(sc, QLW_SEMA) & QLW_SEMA_LOCK) {
		*info = qlw_read_mbox(sc, 0);
		if (*info & QLW_MBOX_HAS_STATUS)
			*isr = QLW_INT_TYPE_MBOX;
		else
			*isr = QLW_INT_TYPE_ASYNC;
	} else {
		int_status = qlw_read(sc, QLW_INT_STATUS);
		if ((int_status & (QLW_INT_REQ | QLW_RISC_INT_REQ)) == 0)
			return (0);

		*isr = QLW_INT_TYPE_IO;
	}

	return (1);
}

void
qlw_clear_isr(struct qlw_softc *sc, u_int16_t isr)
{
	qlw_host_cmd(sc, QLW_HOST_CMD_CLR_RISC_INT);
	switch (isr) {
	case QLW_INT_TYPE_MBOX:
	case QLW_INT_TYPE_ASYNC:
		qlw_write(sc, QLW_SEMA, 0);
		break;
	default:
		break;
	}
}

int
qlw_softreset(struct qlw_softc *sc)
{
	int i;

	qlw_set_ints(sc, 0);

	/* reset */
	qlw_write(sc, QLW_INT_CTRL, QLW_RESET);
	delay(100);
	/* clear data and control dma engines? */

	/* wait for soft reset to clear */
	for (i = 0; i < 1000; i++) {
		if ((qlw_read(sc, QLW_INT_CTRL) & QLW_RESET) == 0)
			break;

		delay(100);
	}

	if (i == 1000) {
		DPRINTF(QLW_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc));
		qlw_set_ints(sc, 0);
		return (ENXIO);
	}

	qlw_write(sc, QLW_CFG1, 0);

	/* reset risc processor */
	qlw_host_cmd(sc, QLW_HOST_CMD_RESET);
	delay(100);
	qlw_write(sc, QLW_SEMA, 0);
	qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);

	/* reset queue pointers */
	qlw_queue_write(sc, QLW_REQ_IN, 0);
	qlw_queue_write(sc, QLW_REQ_OUT, 0);
	qlw_queue_write(sc, QLW_RESP_IN, 0);
	qlw_queue_write(sc, QLW_RESP_OUT, 0);

	qlw_set_ints(sc, 1);
	qlw_host_cmd(sc, QLW_HOST_CMD_BIOS);

	/* do a basic mailbox operation to check we're alive */
	sc->sc_mbox[0] = QLW_MBOX_NOP;
	if (qlw_mbox(sc, 0x0001, 0x0001)) {
		DPRINTF(QLW_D_INTR, "%s: ISP not responding after reset\n",
		    DEVNAME(sc));
		return (ENXIO);
	}

	return (0);
}

void
qlw_dma_burst_enable(struct qlw_softc *sc)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 ||
	    sc->sc_isp_gen == QLW_GEN_ISP1040) {
		qlw_write(sc, QLW_CDMA_CFG,
		    qlw_read(sc, QLW_CDMA_CFG) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG,
		    qlw_read(sc, QLW_DDMA_CFG) | QLW_DMA_BURST_ENABLE);
	} else {
		qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) | QLW_DMA_BANK);
		qlw_write(sc, QLW_CDMA_CFG_1080,
		    qlw_read(sc, QLW_CDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG_1080,
		    qlw_read(sc, QLW_DDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) & ~QLW_DMA_BANK);
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}
}

void
qlw_update(struct qlw_softc *sc, int task)
{
	/* do things */
}

int
qlw_async(struct qlw_softc *sc, u_int16_t info)
{
	int bus;

	switch (info) {
	case QLW_ASYNC_BUS_RESET:
		DPRINTF(QLW_D_PORT, "%s: bus reset\n", DEVNAME(sc));
		bus = qlw_read_mbox(sc, 6);
		sc->sc_marker_required[bus] = 1;
		break;

#if 0
	case QLW_ASYNC_SYSTEM_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_REQ_XFER_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_RSP_XFER_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;
#endif

	case QLW_ASYNC_SCSI_CMD_COMPLETE:
		/* shouldn't happen, we disable fast posting */
		break;

	case QLW_ASYNC_CTIO_COMPLETE:
		/* definitely shouldn't happen, we don't do target mode */
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unknown async %x\n", DEVNAME(sc),
		    info);
		break;
	}
	return (1);
}

#ifdef QLW_DEBUG
void
qlw_dump_iocb(struct qlw_softc *sc, void *buf, int flags)
{
	u_int8_t *iocb = buf;
	int l;
	int b;

	if ((qlwdebug & flags) == 0)
		return;

	printf("%s: iocb:\n", DEVNAME(sc));
	for (l = 0; l < 4; l++) {
		for (b = 0; b < 16; b++) {
			printf(" %2.2x", iocb[(l*16)+b]);
		}
		printf("\n");
	}
}

void
qlw_dump_iocb_segs(struct qlw_softc *sc, void *segs, int n)
{
	u_int8_t *buf = segs;
	int s, b;

	if ((qlwdebug & QLW_D_IOCB) == 0)
		return;

	printf("%s: iocb segs:\n", DEVNAME(sc));
	for (s = 0; s < n; s++) {
		for (b = 0; b < sizeof(struct qlw_iocb_seg); b++) {
			printf(" %2.2x", buf[(s*(sizeof(struct qlw_iocb_seg)))
			    + b]);
		}
		printf("\n");
	}
}
#endif

/*
 * The PCI bus is little-endian whereas SBus is big-endian.  This
 * leads to some differences in byte twisting of DMA transfers of
 * request and response queue entries.  Most fields can be treated as
 * 16-bit or 32-bit with the endianness of the bus, but the header
 * fields end up being swapped by the ISP1000's SBus interface.
 */
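
/*
 * On the ISP1000 the two bytes of each 16-bit header word trade places,
 * so entry_type/entry_count and seqno/flags arrive swapped; qlw_get_header()
 * and qlw_put_header() below compensate for this.
 */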

void
qlw_get_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int *type, int *flags)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		*type = hdr->entry_count;
		*flags = hdr->seqno;
	} else {
		*type = hdr->entry_type;
		*flags = hdr->flags;
	}
}

void
qlw_put_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int type, int count)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		hdr->entry_type = count;
		hdr->entry_count = type;
		hdr->seqno = 0;
		hdr->flags = 0;
	} else {
		hdr->entry_type = type;
		hdr->entry_count = count;
		hdr->seqno = 0;
		hdr->flags = 0;
	}
}

void
qlw_put_data_seg(struct qlw_softc *sc, struct qlw_iocb_seg *seg,
    bus_dmamap_t dmap, int num)
{
	seg->seg_addr = qlw_swap32(sc, dmap->dm_segs[num].ds_addr);
	seg->seg_len = qlw_swap32(sc, dmap->dm_segs[num].ds_len);
}

void
qlw_put_marker(struct qlw_softc *sc, int bus, void *buf)
{
	struct qlw_iocb_marker *marker = buf;

	qlw_put_header(sc, &marker->hdr, QLW_IOCB_MARKER, 1);

	/* could be more specific here; isp(4) isn't */
	marker->device = qlw_swap16(sc, (bus << 7) << 8);
	marker->modifier = qlw_swap16(sc, QLW_IOCB_MARKER_SYNC_ALL);
	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

void
qlw_put_cmd(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb)
{
	struct qlw_iocb_req0 *req = buf;
	int entry_count = 1;
	u_int16_t dir;
	int seg, nsegs;
	int seg_count;
	int timeout = 0;
	int bus, target, lun;

	if (xs->datalen == 0) {
		dir = QLW_IOCB_CMD_NO_DATA;
		seg_count = 1;
	} else {
		dir = xs->flags & SCSI_DATA_IN ? QLW_IOCB_CMD_READ_DATA :
		    QLW_IOCB_CMD_WRITE_DATA;
		seg_count = ccb->ccb_dmamap->dm_nsegs;
		nsegs = ccb->ccb_dmamap->dm_nsegs - QLW_IOCB_SEGS_PER_CMD;
		while (nsegs > 0) {
			entry_count++;
			nsegs -= QLW_IOCB_SEGS_PER_CONT;
		}
		for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
			if (seg >= QLW_IOCB_SEGS_PER_CMD)
				break;
			qlw_put_data_seg(sc, &req->segs[seg],
			    ccb->ccb_dmamap, seg);
		}
	}

	if (sc->sc_running && (xs->sc_link->quirks & SDEV_NOTAGS) == 0)
		dir |= QLW_IOCB_CMD_SIMPLE_QUEUE;

	qlw_put_header(sc, &req->hdr, QLW_IOCB_CMD_TYPE_0, entry_count);

	/*
	 * timeout is in seconds.  make sure it's at least 1 if a timeout
	 * was specified in xs
	 */
	if (xs->timeout != 0)
		timeout = MAX(1, xs->timeout/1000);

	req->flags = qlw_swap16(sc, dir);
	req->seg_count = qlw_swap16(sc, seg_count);
	req->timeout = qlw_swap16(sc, timeout);

	bus = qlw_xs_bus(sc, xs);
	target = xs->sc_link->target;
	lun = xs->sc_link->lun;
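	/* device: ((bus << 7) | target) in the high byte, lun in the low */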
	req->device = qlw_swap16(sc, (((bus << 7) | target) << 8) | lun);

	memcpy(req->cdb, &xs->cmd, xs->cmdlen);
	req->ccblen = qlw_swap16(sc, xs->cmdlen);

	req->handle = qlw_swap32(sc, ccb->ccb_id);

	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

void
qlw_put_cont(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb, int seg0)
{
	struct qlw_iocb_cont0 *cont = buf;
	int seg;

	qlw_put_header(sc, &cont->hdr, QLW_IOCB_CONT_TYPE_0, 1);

	for (seg = seg0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
		if ((seg - seg0) >= QLW_IOCB_SEGS_PER_CONT)
			break;
		qlw_put_data_seg(sc, &cont->segs[seg - seg0],
		    ccb->ccb_dmamap, seg);
	}
}

#ifndef ISP_NOFIRMWARE
int
qlw_load_firmware_words(struct qlw_softc *sc, const u_int16_t *src,
    u_int16_t dest)
{
	u_int16_t i;

	for (i = 0; i < src[3]; i++) {
		sc->sc_mbox[0] = QLW_MBOX_WRITE_RAM_WORD;
		sc->sc_mbox[1] = i + dest;
		sc->sc_mbox[2] = src[i];
		if (qlw_mbox(sc, 0x07, 0x01)) {
			printf("firmware load failed\n");
			return (1);
		}
	}

	sc->sc_mbox[0] = QLW_MBOX_VERIFY_CSUM;
	sc->sc_mbox[1] = dest;
	if (qlw_mbox(sc, 0x0003, 0x0003)) {
		printf("verification of chunk at %x failed: %x\n",
		    dest, sc->sc_mbox[1]);
		return (1);
	}

	return (0);
}

int
qlw_load_firmware(struct qlw_softc *sc)
{
	return qlw_load_firmware_words(sc, sc->sc_firmware, QLW_CODE_ORG);
}

#endif	/* !ISP_NOFIRMWARE */

int
qlw_read_nvram(struct qlw_softc *sc)
{
	u_int16_t data[sizeof(sc->sc_nvram) >> 1];
	u_int16_t req, cmd, val;
	u_int8_t csum;
	int i, bit;
	int reqcmd;
	int nbits;

	if (sc->sc_nvram_size == 0)
		return (1);

	if (sc->sc_nvram_size == 128) {
		reqcmd = (QLW_NVRAM_CMD_READ << 6);
		nbits = 8;
	} else {
		reqcmd = (QLW_NVRAM_CMD_READ << 8);
		nbits = 10;
	}

	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
	delay(10);
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL | QLW_NVRAM_CLOCK);
	delay(10);

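	/*
	 * Bit-bang the serial nvram chip: for each word, shift out the
	 * read opcode and address one bit at a time, then clock in the
	 * 16 data bits, toggling QLW_NVRAM_CLOCK around each bit.
	 */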
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		req = i | reqcmd;

		/* write each bit out through the nvram register */
		for (bit = nbits; bit >= 0; bit--) {
			cmd = QLW_NVRAM_CHIP_SEL;
			if ((req >> bit) & 1) {
				cmd |= QLW_NVRAM_DATA_OUT;
			}
			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd | QLW_NVRAM_CLOCK);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* read the result back */
		val = 0;
		for (bit = 0; bit < 16; bit++) {
			val <<= 1;
			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL |
			    QLW_NVRAM_CLOCK);
			delay(10);
			if (qlw_read(sc, QLW_NVRAM) & QLW_NVRAM_DATA_IN)
				val |= 1;
			delay(10);

			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		qlw_write(sc, QLW_NVRAM, 0);
		delay(10);
		qlw_read(sc, QLW_NVRAM);

		data[i] = letoh16(val);
	}

	csum = 0;
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		csum += data[i] & 0xff;
		csum += data[i] >> 8;
	}

	memcpy(&sc->sc_nvram, data, sizeof(sc->sc_nvram));
	/* id field should be 'ISP ', version should be high enough */
	if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
	    sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
	    sc->sc_nvram.nvram_version < sc->sc_nvram_minversion ||
	    (csum != 0)) {
		printf("%s: nvram corrupt\n", DEVNAME(sc));
		return (1);
	}
	return (0);
}

void
qlw_parse_nvram_1040(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1040 *nv = (struct qlw_nvram_1040 *)&sc->sc_nvram;
	int target;

	KASSERT(bus == 0);

	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[0] = (nv->config1 >> 4);

	sc->sc_retry_count[0] = nv->retry_count;
	sc->sc_retry_delay[0] = nv->retry_delay;
	sc->sc_reset_delay[0] = nv->reset_delay;
	sc->sc_tag_age_limit[0] = nv->tag_age_limit;
	sc->sc_selection_timeout[0] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[0] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[0] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[0] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[0] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[0][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

void
qlw_parse_nvram_1080(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1080 *nvram = (struct qlw_nvram_1080 *)&sc->sc_nvram;
	struct qlw_nvram_bus *nv = &nvram->bus[bus];
	int target;

	sc->sc_isp_config = nvram->isp_config;
	sc->sc_fw_features = nvram->fw_features;

	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[bus] = (nv->config1 & 0x0f);

	sc->sc_retry_count[bus] = nv->retry_count;
	sc->sc_retry_delay[bus] = nv->retry_delay;
	sc->sc_reset_delay[bus] = nv->reset_delay;
	sc->sc_selection_timeout[bus] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[bus] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[bus] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[bus] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[bus] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		if (sc->sc_isp_gen == QLW_GEN_ISP12160)
			qt->qt_sync_offset = nv->target[target].flags & 0x1f;
		else
			qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

void
qlw_init_defaults(struct qlw_softc *sc, int bus)
{
	int target;

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		break;
	case QLW_GEN_ISP1040:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_64;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_128;
		sc->sc_fw_features = QLW_FW_FEATURE_LVD_NOTIFY;
		break;
	}

	sc->sc_retry_count[bus] = 0;
	sc->sc_retry_delay[bus] = 0;
	sc->sc_reset_delay[bus] = 3;
	sc->sc_tag_age_limit[bus] = 8;
	sc->sc_selection_timeout[bus] = 250;
	sc->sc_max_queue_depth[bus] = 32;
	if (sc->sc_clock > 40)
		sc->sc_async_data_setup[bus] = 9;
	else
		sc->sc_async_data_setup[bus] = 6;
	sc->sc_req_ack_active_neg[bus] = 1;
	sc->sc_data_line_active_neg[bus] = 1;

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = QLW_TARGET_DEFAULT;
		qt->qt_exec_throttle = 16;
		qt->qt_sync_period = 10;
		qt->qt_sync_offset = 12;
	}
}

struct qlw_dmamem *
qlw_dmamem_alloc(struct qlw_softc *sc, size_t size)
{
	struct qlw_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->qdm_size = size;

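	/*
	 * Standard bus_dma(9) sequence: create the map, allocate DMA-safe
	 * memory, map it into the kernel, then load the map so both the
	 * kva and the device-visible address of the buffer are known.
	 */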
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}

void
qlw_dmamem_free(struct qlw_softc *sc, struct qlw_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
	free(m, M_DEVBUF, sizeof(*m));
}

int
qlw_alloc_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb *ccb;
	u_int8_t *cmd;
	int i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	mtx_init(&sc->sc_queue_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxccbs, sizeof(struct qlw_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = qlw_dmamem_alloc(sc, sc->sc_maxrequests *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	sc->sc_responses = qlw_dmamem_alloc(sc, sc->sc_maxresponses *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_responses == NULL) {
		printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
		goto free_req;
	}

	cmd = QLW_DMA_KVA(sc->sc_requests);
	memset(cmd, 0, QLW_QUEUE_ENTRY_SIZE * sc->sc_maxccbs);
	for (i = 0; i < sc->sc_maxccbs; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    QLW_MAX_SEGS, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;

		qlw_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, qlw_get_ccb, qlw_put_ccb);
	return (0);

free_maps:
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	qlw_dmamem_free(sc, sc->sc_responses);
free_req:
	qlw_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return (1);
}

void
qlw_free_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb *ccb;

	scsi_iopool_destroy(&sc->sc_iopool);
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	qlw_dmamem_free(sc, sc->sc_responses);
	qlw_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF, 0);
}

void *
qlw_get_ccb(void *xsc)
{
	struct qlw_softc *sc = xsc;
	struct qlw_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);
	return (ccb);
}

void
qlw_put_ccb(void *xsc, void *io)
{
	struct qlw_softc *sc = xsc;
	struct qlw_ccb *ccb = io;

	ccb->ccb_xs = NULL;
	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}