1 /* $OpenBSD: qle.c,v 1.61 2020/09/22 19:32:53 krw Exp $ */ 2 3 /* 4 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include "bio.h" 20 21 #include <sys/param.h> 22 #include <sys/systm.h> 23 #include <sys/atomic.h> 24 #include <sys/malloc.h> 25 #include <sys/device.h> 26 #include <sys/sensors.h> 27 #include <sys/rwlock.h> 28 #include <sys/task.h> 29 #include <sys/timeout.h> 30 31 #include <machine/bus.h> 32 33 #include <dev/pci/pcireg.h> 34 #include <dev/pci/pcivar.h> 35 #include <dev/pci/pcidevs.h> 36 37 #ifdef __sparc64__ 38 #include <dev/ofw/openfirm.h> 39 #endif 40 41 #include <scsi/scsi_all.h> 42 #include <scsi/scsiconf.h> 43 44 #include <dev/pci/qlereg.h> 45 46 #ifdef QLE_DEBUG 47 #define DPRINTF(m, f...) do { if ((qledebug & (m)) == (m)) printf(f); } \ 48 while (0) 49 #define QLE_D_MBOX 0x01 50 #define QLE_D_INTR 0x02 51 #define QLE_D_PORT 0x04 52 #define QLE_D_IO 0x08 53 #define QLE_D_IOCB 0x10 54 int qledebug = QLE_D_PORT; 55 #else 56 #define DPRINTF(m, f...) 57 #endif 58 59 #ifndef QLE_NOFIRMWARE 60 #include <dev/microcode/isp/asm_2400.h> 61 #include <dev/microcode/isp/asm_2500.h> 62 #endif 63 64 #define QLE_PCI_MEM_BAR 0x14 65 #define QLE_PCI_IO_BAR 0x10 66 67 68 #define QLE_DEFAULT_PORT_NAME 0x400000007F000003ULL /* from isp(4) */ 69 70 #define QLE_WAIT_FOR_LOOP 10 /* seconds */ 71 #define QLE_LOOP_SETTLE 200 /* ms */ 72 73 /* rounded up range of assignable handles */ 74 #define QLE_MAX_TARGETS 2048 75 76 /* maximum number of segments allowed for in a single io */ 77 #define QLE_MAX_SEGS 32 78 79 enum qle_isp_gen { 80 QLE_GEN_ISP24XX = 1, 81 QLE_GEN_ISP25XX 82 }; 83 84 enum qle_isp_type { 85 QLE_ISP2422 = 1, 86 QLE_ISP2432, 87 QLE_ISP2512, 88 QLE_ISP2522, 89 QLE_ISP2532 90 }; 91 92 /* port database things */ 93 #define QLE_SCRATCH_SIZE 0x1000 94 95 enum qle_port_disp { 96 QLE_PORT_DISP_NEW, 97 QLE_PORT_DISP_GONE, 98 QLE_PORT_DISP_SAME, 99 QLE_PORT_DISP_CHANGED, 100 QLE_PORT_DISP_MOVED, 101 QLE_PORT_DISP_DUP 102 }; 103 104 #define QLE_LOCATION_LOOP (1 << 24) 105 #define QLE_LOCATION_FABRIC (2 << 24) 106 #define QLE_LOCATION_LOOP_ID(l) (l | QLE_LOCATION_LOOP) 107 #define QLE_LOCATION_PORT_ID(p) (p | QLE_LOCATION_FABRIC) 108 109 struct qle_fc_port { 110 TAILQ_ENTRY(qle_fc_port) ports; 111 TAILQ_ENTRY(qle_fc_port) update; 112 113 u_int64_t node_name; 114 u_int64_t port_name; 115 u_int32_t location; /* port id or loop id */ 116 117 int flags; 118 #define QLE_PORT_FLAG_IS_TARGET 1 119 #define QLE_PORT_FLAG_NEEDS_LOGIN 2 120 121 u_int32_t portid; 122 u_int16_t loopid; 123 }; 124 125 126 /* request/response queue stuff */ 127 #define QLE_QUEUE_ENTRY_SIZE 64 128 129 struct qle_ccb { 130 struct qle_softc *ccb_sc; 131 int ccb_id; 132 struct scsi_xfer *ccb_xs; 133 134 bus_dmamap_t ccb_dmamap; 135 136 struct qle_iocb_seg 
*ccb_segs; 137 u_int64_t ccb_seg_offset; 138 139 SIMPLEQ_ENTRY(qle_ccb) ccb_link; 140 }; 141 142 SIMPLEQ_HEAD(qle_ccb_list, qle_ccb); 143 144 struct qle_dmamem { 145 bus_dmamap_t qdm_map; 146 bus_dma_segment_t qdm_seg; 147 size_t qdm_size; 148 caddr_t qdm_kva; 149 }; 150 #define QLE_DMA_MAP(_qdm) ((_qdm)->qdm_map) 151 #define QLE_DMA_LEN(_qdm) ((_qdm)->qdm_size) 152 #define QLE_DMA_DVA(_qdm) ((u_int64_t)(_qdm)->qdm_map->dm_segs[0].ds_addr) 153 #define QLE_DMA_KVA(_qdm) ((void *)(_qdm)->qdm_kva) 154 155 struct qle_softc { 156 struct device sc_dev; 157 158 pci_chipset_tag_t sc_pc; 159 pcitag_t sc_tag; 160 161 void *sc_ih; 162 bus_space_tag_t sc_iot; 163 bus_space_handle_t sc_ioh; 164 bus_size_t sc_ios; 165 bus_dma_tag_t sc_dmat; 166 167 struct scsibus_softc *sc_scsibus; 168 169 enum qle_isp_type sc_isp_type; 170 enum qle_isp_gen sc_isp_gen; 171 int sc_port; 172 173 bus_space_handle_t sc_mbox_ioh; 174 u_int16_t sc_mbox[QLE_MBOX_COUNT]; 175 int sc_mbox_pending; 176 struct mutex sc_mbox_mtx; 177 178 int sc_loop_up; 179 int sc_topology; 180 int sc_loop_id; 181 int sc_port_id; 182 int sc_loop_max_id; 183 u_int64_t sc_sns_port_name; 184 185 struct mutex sc_port_mtx; 186 TAILQ_HEAD(, qle_fc_port) sc_ports; 187 TAILQ_HEAD(, qle_fc_port) sc_ports_new; 188 TAILQ_HEAD(, qle_fc_port) sc_ports_gone; 189 TAILQ_HEAD(, qle_fc_port) sc_ports_found; 190 struct qle_fc_port *sc_targets[QLE_MAX_TARGETS]; 191 192 struct taskq *sc_update_taskq; 193 struct task sc_update_task; 194 struct timeout sc_update_timeout; 195 int sc_update; 196 int sc_update_tasks; 197 #define QLE_UPDATE_TASK_CLEAR_ALL 0x00000001 198 #define QLE_UPDATE_TASK_SOFTRESET 0x00000002 199 #define QLE_UPDATE_TASK_UPDATE_TOPO 0x00000004 200 #define QLE_UPDATE_TASK_GET_PORT_LIST 0x00000008 201 #define QLE_UPDATE_TASK_PORT_LIST 0x00000010 202 #define QLE_UPDATE_TASK_SCAN_FABRIC 0x00000020 203 #define QLE_UPDATE_TASK_SCANNING_FABRIC 0x00000040 204 #define QLE_UPDATE_TASK_FABRIC_LOGIN 0x00000080 205 #define QLE_UPDATE_TASK_FABRIC_RELOGIN 0x00000100 206 #define QLE_UPDATE_TASK_DETACH_TARGET 0x00000200 207 #define QLE_UPDATE_TASK_ATTACH_TARGET 0x00000400 208 209 int sc_maxcmds; 210 struct qle_dmamem *sc_requests; 211 struct qle_dmamem *sc_responses; 212 struct qle_dmamem *sc_segments; 213 struct qle_dmamem *sc_pri_requests; 214 struct qle_dmamem *sc_scratch; 215 struct qle_dmamem *sc_fcp_cmnds; 216 struct qle_ccb *sc_ccbs; 217 struct qle_ccb_list sc_ccb_free; 218 struct mutex sc_ccb_mtx; 219 struct mutex sc_queue_mtx; 220 struct scsi_iopool sc_iopool; 221 u_int32_t sc_next_req_id; 222 u_int32_t sc_last_resp_id; 223 int sc_marker_required; 224 int sc_fabric_pending; 225 u_int8_t sc_fabric_response[QLE_QUEUE_ENTRY_SIZE]; 226 227 struct qle_nvram sc_nvram; 228 int sc_nvram_valid; 229 }; 230 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname) 231 232 int qle_intr(void *); 233 234 int qle_match(struct device *, void *, void *); 235 void qle_attach(struct device *, struct device *, void *); 236 int qle_detach(struct device *, int); 237 238 struct cfattach qle_ca = { 239 sizeof(struct qle_softc), 240 qle_match, 241 qle_attach, 242 qle_detach 243 }; 244 245 struct cfdriver qle_cd = { 246 NULL, 247 "qle", 248 DV_DULL 249 }; 250 251 void qle_scsi_cmd(struct scsi_xfer *); 252 int qle_scsi_probe(struct scsi_link *); 253 254 255 struct scsi_adapter qle_switch = { 256 qle_scsi_cmd, NULL, qle_scsi_probe, NULL, NULL 257 }; 258 259 u_int32_t qle_read(struct qle_softc *, int); 260 void qle_write(struct qle_softc *, int, u_int32_t); 261 void qle_host_cmd(struct qle_softc *sc, 
u_int32_t); 262 263 int qle_mbox(struct qle_softc *, int); 264 int qle_ct_pass_through(struct qle_softc *sc, 265 u_int32_t port_handle, struct qle_dmamem *mem, 266 size_t req_size, size_t resp_size); 267 void qle_mbox_putaddr(u_int16_t *, struct qle_dmamem *); 268 u_int16_t qle_read_mbox(struct qle_softc *, int); 269 void qle_write_mbox(struct qle_softc *, int, u_int16_t); 270 271 void qle_handle_intr(struct qle_softc *, u_int16_t, u_int16_t); 272 void qle_set_ints(struct qle_softc *, int); 273 int qle_read_isr(struct qle_softc *, u_int16_t *, u_int16_t *); 274 void qle_clear_isr(struct qle_softc *, u_int16_t); 275 276 void qle_put_marker(struct qle_softc *, void *); 277 void qle_put_cmd(struct qle_softc *, void *, struct scsi_xfer *, 278 struct qle_ccb *, u_int32_t); 279 struct qle_ccb *qle_handle_resp(struct qle_softc *, u_int32_t); 280 void qle_sge(struct qle_iocb_seg *, u_int64_t, u_int32_t); 281 282 struct qle_fc_port *qle_next_fabric_port(struct qle_softc *, u_int32_t *, 283 u_int32_t *); 284 int qle_get_port_db(struct qle_softc *, u_int16_t, 285 struct qle_dmamem *); 286 int qle_get_port_name_list(struct qle_softc *sc, u_int32_t); 287 int qle_add_loop_port(struct qle_softc *, struct qle_fc_port *); 288 int qle_add_fabric_port(struct qle_softc *, struct qle_fc_port *); 289 int qle_add_logged_in_port(struct qle_softc *, u_int16_t, 290 u_int32_t); 291 int qle_classify_port(struct qle_softc *, u_int32_t, u_int64_t, 292 u_int64_t, struct qle_fc_port **); 293 int qle_get_loop_id(struct qle_softc *sc, int); 294 void qle_clear_port_lists(struct qle_softc *); 295 int qle_softreset(struct qle_softc *); 296 void qle_update_topology(struct qle_softc *); 297 int qle_update_fabric(struct qle_softc *); 298 int qle_fabric_plogx(struct qle_softc *, struct qle_fc_port *, int, 299 u_int32_t *); 300 int qle_fabric_plogi(struct qle_softc *, struct qle_fc_port *); 301 void qle_fabric_plogo(struct qle_softc *, struct qle_fc_port *); 302 303 void qle_update_start(struct qle_softc *, int); 304 void qle_update_defer(struct qle_softc *, int); 305 void qle_update_cancel(struct qle_softc *); 306 void qle_update_done(struct qle_softc *, int); 307 void qle_do_update(void *); 308 void qle_deferred_update(void *); 309 int qle_async(struct qle_softc *, u_int16_t); 310 311 int qle_load_fwchunk(struct qle_softc *, 312 struct qle_dmamem *, const u_int32_t *); 313 u_int32_t qle_read_ram_word(struct qle_softc *, u_int32_t); 314 int qle_verify_firmware(struct qle_softc *, u_int32_t); 315 int qle_load_firmware_chunks(struct qle_softc *, const u_int32_t *); 316 int qle_read_nvram(struct qle_softc *); 317 318 struct qle_dmamem *qle_dmamem_alloc(struct qle_softc *, size_t); 319 void qle_dmamem_free(struct qle_softc *, struct qle_dmamem *); 320 321 int qle_alloc_ccbs(struct qle_softc *); 322 void qle_free_ccbs(struct qle_softc *); 323 void *qle_get_ccb(void *); 324 void qle_put_ccb(void *, void *); 325 326 void qle_dump_stuff(struct qle_softc *, void *, int); 327 void qle_dump_iocb(struct qle_softc *, void *); 328 void qle_dump_iocb_segs(struct qle_softc *, void *, int); 329 330 static const struct pci_matchid qle_devices[] = { 331 { PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2422 }, 332 { PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2432 }, 333 { PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2512 }, 334 { PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2522 }, 335 { PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2532 }, 336 }; 337 338 int 339 qle_match(struct device *parent, void *match, void *aux) 340 { 341 return (pci_matchbyid(aux, qle_devices, 
nitems(qle_devices))); 342 } 343 344 void 345 qle_attach(struct device *parent, struct device *self, void *aux) 346 { 347 struct qle_softc *sc = (void *)self; 348 struct pci_attach_args *pa = aux; 349 pci_intr_handle_t ih; 350 const char *intrstr; 351 u_int32_t pcictl; 352 struct scsibus_attach_args saa; 353 struct qle_init_cb *icb; 354 bus_size_t mbox_base; 355 u_int32_t firmware_addr; 356 #ifndef QLE_NOFIRMWARE 357 const u_int32_t *firmware = NULL; 358 #endif 359 360 pcireg_t bars[] = { QLE_PCI_MEM_BAR, QLE_PCI_IO_BAR }; 361 pcireg_t memtype; 362 int r, i, rv, loop_up; 363 364 sc->sc_pc = pa->pa_pc; 365 sc->sc_tag = pa->pa_tag; 366 sc->sc_ih = NULL; 367 sc->sc_dmat = pa->pa_dmat; 368 sc->sc_ios = 0; 369 370 for (r = 0; r < nitems(bars); r++) { 371 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, bars[r]); 372 if (pci_mapreg_map(pa, bars[r], memtype, 0, 373 &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, 0) == 0) 374 break; 375 376 sc->sc_ios = 0; 377 } 378 if (sc->sc_ios == 0) { 379 printf(": unable to map registers\n"); 380 return; 381 } 382 383 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 384 printf(": unable to map interrupt\n"); 385 goto unmap; 386 } 387 intrstr = pci_intr_string(sc->sc_pc, ih); 388 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO, 389 qle_intr, sc, DEVNAME(sc)); 390 if (sc->sc_ih == NULL) { 391 printf(": unable to establish interrupt"); 392 if (intrstr != NULL) 393 printf(" at %s", intrstr); 394 printf("\n"); 395 goto deintr; 396 } 397 398 printf(": %s\n", intrstr); 399 400 pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 401 pcictl |= PCI_COMMAND_INVALIDATE_ENABLE | 402 PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE; 403 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, pcictl); 404 405 pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 406 pcictl &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT); 407 pcictl &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 408 pcictl |= (0x80 << PCI_LATTIMER_SHIFT); 409 pcictl |= (0x10 << PCI_CACHELINE_SHIFT); 410 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, pcictl); 411 412 pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG); 413 pcictl &= ~1; 414 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, pcictl); 415 416 switch (PCI_PRODUCT(pa->pa_id)) { 417 case PCI_PRODUCT_QLOGIC_ISP2422: 418 sc->sc_isp_type = QLE_ISP2422; 419 sc->sc_isp_gen = QLE_GEN_ISP24XX; 420 break; 421 case PCI_PRODUCT_QLOGIC_ISP2432: 422 sc->sc_isp_type = QLE_ISP2432; 423 sc->sc_isp_gen = QLE_GEN_ISP24XX; 424 break; 425 case PCI_PRODUCT_QLOGIC_ISP2512: 426 sc->sc_isp_type = QLE_ISP2512; 427 sc->sc_isp_gen = QLE_GEN_ISP25XX; 428 break; 429 case PCI_PRODUCT_QLOGIC_ISP2522: 430 sc->sc_isp_type = QLE_ISP2522; 431 sc->sc_isp_gen = QLE_GEN_ISP25XX; 432 break; 433 case PCI_PRODUCT_QLOGIC_ISP2532: 434 sc->sc_isp_type = QLE_ISP2532; 435 sc->sc_isp_gen = QLE_GEN_ISP25XX; 436 break; 437 438 default: 439 printf("unknown pci id %x", pa->pa_id); 440 goto deintr; 441 } 442 443 /* these are the same for 24xx and 25xx but may vary later */ 444 mbox_base = QLE_MBOX_BASE_24XX; 445 firmware_addr = QLE_2400_CODE_ORG; 446 447 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, mbox_base, 448 sizeof(sc->sc_mbox), &sc->sc_mbox_ioh) != 0) { 449 printf("%s: unable to map mbox registers\n", DEVNAME(sc)); 450 goto deintr; 451 } 452 453 sc->sc_port = pa->pa_function; 454 455 TAILQ_INIT(&sc->sc_ports); 456 TAILQ_INIT(&sc->sc_ports_new); 457 TAILQ_INIT(&sc->sc_ports_gone); 458 TAILQ_INIT(&sc->sc_ports_found); 459 
460 /* after reset, mbox regs 1 and 2 contain the string "ISP " */ 461 if (qle_read_mbox(sc, 1) != 0x4953 || 462 qle_read_mbox(sc, 2) != 0x5020) { 463 /* try releasing the risc processor */ 464 qle_host_cmd(sc, QLE_HOST_CMD_RELEASE); 465 } 466 467 qle_host_cmd(sc, QLE_HOST_CMD_PAUSE); 468 if (qle_softreset(sc) != 0) { 469 printf("softreset failed\n"); 470 goto deintr; 471 } 472 473 if (qle_read_nvram(sc) == 0) 474 sc->sc_nvram_valid = 1; 475 476 #ifdef QLE_NOFIRMWARE 477 if (qle_verify_firmware(sc, firmware_addr)) { 478 printf("%s: no firmware loaded\n", DEVNAME(sc)); 479 goto deintr; 480 } 481 #else 482 switch (sc->sc_isp_gen) { 483 case QLE_GEN_ISP24XX: 484 firmware = isp_2400_risc_code; 485 break; 486 case QLE_GEN_ISP25XX: 487 firmware = isp_2500_risc_code; 488 break; 489 default: 490 printf("%s: no firmware to load?\n", DEVNAME(sc)); 491 goto deintr; 492 } 493 if (qle_load_firmware_chunks(sc, firmware)) { 494 printf("%s: firmware load failed\n", DEVNAME(sc)); 495 goto deintr; 496 } 497 #endif 498 499 /* execute firmware */ 500 sc->sc_mbox[0] = QLE_MBOX_EXEC_FIRMWARE; 501 sc->sc_mbox[1] = firmware_addr >> 16; 502 sc->sc_mbox[2] = firmware_addr & 0xffff; 503 #ifdef QLE_NOFIRMWARE 504 sc->sc_mbox[3] = 1; 505 #else 506 sc->sc_mbox[3] = 0; 507 #endif 508 sc->sc_mbox[4] = 0; 509 if (qle_mbox(sc, 0x001f)) { 510 printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]); 511 goto deintr; 512 } 513 514 delay(250000); /* from isp(4) */ 515 516 sc->sc_mbox[0] = QLE_MBOX_ABOUT_FIRMWARE; 517 if (qle_mbox(sc, 0x0001)) { 518 printf("ISP not talking after firmware exec: %x\n", 519 sc->sc_mbox[0]); 520 goto deintr; 521 } 522 printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc), 523 sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]); 524 525 sc->sc_maxcmds = 4096; 526 527 /* reserve queue slots for markers and fabric ops */ 528 sc->sc_maxcmds -= 2; 529 530 if (qle_alloc_ccbs(sc)) { 531 /* error already printed */ 532 goto deintr; 533 } 534 sc->sc_scratch = qle_dmamem_alloc(sc, QLE_SCRATCH_SIZE); 535 if (sc->sc_scratch == NULL) { 536 printf("%s: unable to allocate scratch\n", DEVNAME(sc)); 537 goto free_ccbs; 538 } 539 540 /* build init buffer thing */ 541 icb = (struct qle_init_cb *)QLE_DMA_KVA(sc->sc_scratch); 542 memset(icb, 0, sizeof(*icb)); 543 icb->icb_version = QLE_ICB_VERSION; 544 if (sc->sc_nvram_valid) { 545 icb->icb_max_frame_len = sc->sc_nvram.frame_payload_size; 546 icb->icb_exec_throttle = sc->sc_nvram.execution_throttle; 547 icb->icb_hardaddr = sc->sc_nvram.hard_address; 548 icb->icb_portname = sc->sc_nvram.port_name; 549 icb->icb_nodename = sc->sc_nvram.node_name; 550 icb->icb_login_retry = sc->sc_nvram.login_retry; 551 icb->icb_login_timeout = sc->sc_nvram.login_timeout; 552 icb->icb_fwoptions1 = sc->sc_nvram.fwoptions1; 553 icb->icb_fwoptions2 = sc->sc_nvram.fwoptions2; 554 icb->icb_fwoptions3 = sc->sc_nvram.fwoptions3; 555 } else { 556 /* defaults copied from isp(4) */ 557 htolem16(&icb->icb_max_frame_len, 1024); 558 htolem16(&icb->icb_exec_throttle, 16); 559 icb->icb_portname = htobe64(QLE_DEFAULT_PORT_NAME); 560 icb->icb_nodename = 0; 561 icb->icb_login_retry = 3; 562 563 htolem32(&icb->icb_fwoptions1, QLE_ICB_FW1_FAIRNESS | 564 QLE_ICB_FW1_HARD_ADDR | QLE_ICB_FW1_FULL_DUPLEX); 565 htolem32(&icb->icb_fwoptions2, QLE_ICB_FW2_LOOP_PTP); 566 htolem32(&icb->icb_fwoptions3, QLE_ICB_FW3_FCP_RSP_24_0 | 567 QLE_ICB_FW3_AUTONEG); 568 } 569 570 icb->icb_exchange_count = 0; 571 572 icb->icb_req_out = 0; 573 icb->icb_resp_in = 0; 574 icb->icb_pri_req_out = 0; 575 
htolem16(&icb->icb_req_queue_len, sc->sc_maxcmds); 576 htolem16(&icb->icb_resp_queue_len, sc->sc_maxcmds); 577 htolem16(&icb->icb_pri_req_queue_len, 8); /* apparently the minimum */ 578 htolem32(&icb->icb_req_queue_addr_lo, 579 QLE_DMA_DVA(sc->sc_requests)); 580 htolem32(&icb->icb_req_queue_addr_hi, 581 QLE_DMA_DVA(sc->sc_requests) >> 32); 582 htolem32(&icb->icb_resp_queue_addr_lo, 583 QLE_DMA_DVA(sc->sc_responses)); 584 htolem32(&icb->icb_resp_queue_addr_hi, 585 QLE_DMA_DVA(sc->sc_responses) >> 32); 586 htolem32(&icb->icb_pri_req_queue_addr_lo, 587 QLE_DMA_DVA(sc->sc_pri_requests)); 588 htolem32(&icb->icb_pri_req_queue_addr_hi, 589 QLE_DMA_DVA(sc->sc_pri_requests) >> 32); 590 591 htolem16(&icb->icb_link_down_nos, 200); 592 icb->icb_int_delay = 0; 593 icb->icb_login_timeout = 0; 594 595 sc->sc_mbox[0] = QLE_MBOX_INIT_FIRMWARE; 596 sc->sc_mbox[4] = 0; 597 sc->sc_mbox[5] = 0; 598 qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch); 599 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0, 600 sizeof(*icb), BUS_DMASYNC_PREWRITE); 601 rv = qle_mbox(sc, 0x00fd); 602 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0, 603 sizeof(*icb), BUS_DMASYNC_POSTWRITE); 604 605 if (rv != 0) { 606 printf("%s: ISP firmware init failed: %x\n", DEVNAME(sc), 607 sc->sc_mbox[0]); 608 goto free_scratch; 609 } 610 611 /* enable some more notifications */ 612 sc->sc_mbox[0] = QLE_MBOX_SET_FIRMWARE_OPTIONS; 613 sc->sc_mbox[1] = QLE_FW_OPTION1_ASYNC_LIP_F8 | 614 QLE_FW_OPTION1_ASYNC_LIP_RESET | 615 QLE_FW_OPTION1_ASYNC_LIP_ERROR | 616 QLE_FW_OPTION1_ASYNC_LOGIN_RJT; 617 sc->sc_mbox[2] = 0; 618 sc->sc_mbox[3] = 0; 619 if (qle_mbox(sc, 0x000f)) { 620 printf("%s: setting firmware options failed: %x\n", 621 DEVNAME(sc), sc->sc_mbox[0]); 622 goto free_scratch; 623 } 624 625 sc->sc_update_taskq = taskq_create(DEVNAME(sc), 1, IPL_BIO, 0); 626 task_set(&sc->sc_update_task, qle_do_update, sc); 627 timeout_set(&sc->sc_update_timeout, qle_deferred_update, sc); 628 629 /* wait a bit for link to come up so we can scan and attach devices */ 630 for (i = 0; i < QLE_WAIT_FOR_LOOP * 1000; i++) { 631 u_int16_t isr, info; 632 633 if (sc->sc_loop_up) { 634 if (++loop_up == QLE_LOOP_SETTLE) 635 break; 636 } else 637 loop_up = 0; 638 639 delay(1000); 640 641 if (qle_read_isr(sc, &isr, &info) == 0) 642 continue; 643 644 qle_handle_intr(sc, isr, info); 645 646 } 647 648 if (sc->sc_loop_up) { 649 qle_do_update(sc); 650 } else { 651 DPRINTF(QLE_D_PORT, "%s: loop still down, giving up\n", 652 DEVNAME(sc)); 653 } 654 655 saa.saa_adapter = &qle_switch; 656 saa.saa_adapter_softc = sc; 657 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET; 658 saa.saa_adapter_buswidth = QLE_MAX_TARGETS; 659 saa.saa_luns = 8; 660 saa.saa_openings = sc->sc_maxcmds; 661 saa.saa_pool = &sc->sc_iopool; 662 if (sc->sc_nvram_valid) { 663 saa.saa_wwpn = betoh64(sc->sc_nvram.port_name); 664 saa.saa_wwnn = betoh64(sc->sc_nvram.node_name); 665 } else { 666 saa.saa_wwpn = QLE_DEFAULT_PORT_NAME; 667 saa.saa_wwnn = 0; 668 } 669 if (saa.saa_wwnn == 0) { 670 /* 671 * mask out the port number from the port name to get 672 * the node name. 
673 */ 674 saa.saa_wwnn = saa.saa_wwpn; 675 saa.saa_wwnn &= ~(0xfULL << 56); 676 } 677 saa.saa_quirks = saa.saa_flags = 0; 678 679 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, 680 &saa, scsiprint); 681 682 return; 683 684 free_scratch: 685 qle_dmamem_free(sc, sc->sc_scratch); 686 free_ccbs: 687 qle_free_ccbs(sc); 688 deintr: 689 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 690 sc->sc_ih = NULL; 691 unmap: 692 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 693 sc->sc_ios = 0; 694 } 695 696 int 697 qle_detach(struct device *self, int flags) 698 { 699 struct qle_softc *sc = (struct qle_softc *)self; 700 701 if (sc->sc_ih == NULL) { 702 /* we didnt attach properly, so nothing to detach */ 703 return (0); 704 } 705 706 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 707 sc->sc_ih = NULL; 708 709 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 710 sc->sc_ios = 0; 711 712 return (0); 713 } 714 715 int 716 qle_classify_port(struct qle_softc *sc, u_int32_t location, 717 u_int64_t port_name, u_int64_t node_name, struct qle_fc_port **prev) 718 { 719 struct qle_fc_port *port, *locmatch, *wwnmatch; 720 locmatch = NULL; 721 wwnmatch = NULL; 722 723 /* make sure we don't try to add a port or location twice */ 724 TAILQ_FOREACH(port, &sc->sc_ports_new, update) { 725 if ((port->port_name == port_name && 726 port->node_name == node_name) || 727 port->location == location) { 728 *prev = port; 729 return (QLE_PORT_DISP_DUP); 730 } 731 } 732 733 /* if we're attaching, everything is new */ 734 if (sc->sc_scsibus == NULL) { 735 *prev = NULL; 736 return (QLE_PORT_DISP_NEW); 737 } 738 739 TAILQ_FOREACH(port, &sc->sc_ports, ports) { 740 if (port->location == location) 741 locmatch = port; 742 743 if (port->port_name == port_name && 744 port->node_name == node_name) 745 wwnmatch = port; 746 } 747 748 if (locmatch == NULL && wwnmatch == NULL) { 749 *prev = NULL; 750 return (QLE_PORT_DISP_NEW); 751 } else if (locmatch == wwnmatch) { 752 *prev = locmatch; 753 return (QLE_PORT_DISP_SAME); 754 } else if (wwnmatch != NULL) { 755 *prev = wwnmatch; 756 return (QLE_PORT_DISP_MOVED); 757 } else { 758 *prev = locmatch; 759 return (QLE_PORT_DISP_CHANGED); 760 } 761 } 762 763 int 764 qle_get_loop_id(struct qle_softc *sc, int start) 765 { 766 int i, last; 767 768 i = QLE_MIN_HANDLE; 769 last = QLE_MAX_HANDLE; 770 if (i < start) 771 i = start; 772 773 for (; i <= last; i++) { 774 if (sc->sc_targets[i] == NULL) 775 return (i); 776 } 777 778 return (-1); 779 } 780 781 int 782 qle_get_port_db(struct qle_softc *sc, u_int16_t loopid, struct qle_dmamem *mem) 783 { 784 sc->sc_mbox[0] = QLE_MBOX_GET_PORT_DB; 785 sc->sc_mbox[1] = loopid; 786 qle_mbox_putaddr(sc->sc_mbox, mem); 787 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, 788 sizeof(struct qle_get_port_db), BUS_DMASYNC_PREREAD); 789 if (qle_mbox(sc, 0x00cf)) { 790 DPRINTF(QLE_D_PORT, "%s: get port db for %d failed: %x\n", 791 DEVNAME(sc), loopid, sc->sc_mbox[0]); 792 return (1); 793 } 794 795 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, 796 sizeof(struct qle_get_port_db), BUS_DMASYNC_POSTREAD); 797 return (0); 798 } 799 800 int 801 qle_get_port_name_list(struct qle_softc *sc, u_int32_t match) 802 { 803 struct qle_port_name_list *l; 804 struct qle_fc_port *port; 805 int i; 806 807 sc->sc_mbox[0] = QLE_MBOX_GET_PORT_NAME_LIST; 808 sc->sc_mbox[1] = 0; 809 sc->sc_mbox[8] = QLE_DMA_LEN(sc->sc_scratch); 810 sc->sc_mbox[9] = 0; 811 qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch); 812 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0, 813 
QLE_DMA_LEN(sc->sc_scratch), BUS_DMASYNC_PREREAD); 814 if (qle_mbox(sc, 0x03cf)) { 815 DPRINTF(QLE_D_PORT, "%s: get port name list failed: %x\n", 816 DEVNAME(sc), sc->sc_mbox[0]); 817 return (1); 818 } 819 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0, 820 sc->sc_mbox[1], BUS_DMASYNC_POSTREAD); 821 822 i = 0; 823 l = QLE_DMA_KVA(sc->sc_scratch); 824 mtx_enter(&sc->sc_port_mtx); 825 while (i * sizeof(*l) < sc->sc_mbox[1]) { 826 u_int16_t loopid; 827 u_int32_t loc; 828 829 loopid = lemtoh16(&l[i].loopid) & 0xfff; 830 /* skip special ports */ 831 switch (loopid) { 832 case QLE_F_PORT_HANDLE: 833 case QLE_SNS_HANDLE: 834 case QLE_FABRIC_CTRL_HANDLE: 835 case QLE_IP_BCAST_HANDLE: 836 loc = 0; 837 break; 838 default: 839 if (loopid <= sc->sc_loop_max_id) { 840 loc = QLE_LOCATION_LOOP_ID(loopid); 841 } else { 842 /* 843 * we don't have the port id here, so just 844 * indicate it's a fabric port. 845 */ 846 loc = QLE_LOCATION_FABRIC; 847 } 848 break; 849 } 850 851 if (match & loc) { 852 port = malloc(sizeof(*port), M_DEVBUF, M_ZERO | 853 M_NOWAIT); 854 if (port == NULL) { 855 printf("%s: failed to allocate port struct\n", 856 DEVNAME(sc)); 857 break; 858 } 859 port->location = loc; 860 port->loopid = loopid; 861 port->port_name = letoh64(l[i].port_name); 862 DPRINTF(QLE_D_PORT, "%s: loop id %d, port name %llx\n", 863 DEVNAME(sc), port->loopid, port->port_name); 864 TAILQ_INSERT_TAIL(&sc->sc_ports_found, port, update); 865 } 866 i++; 867 } 868 mtx_leave(&sc->sc_port_mtx); 869 870 return (0); 871 } 872 873 int 874 qle_add_loop_port(struct qle_softc *sc, struct qle_fc_port *port) 875 { 876 struct qle_get_port_db *pdb; 877 struct qle_fc_port *pport; 878 int disp; 879 880 if (qle_get_port_db(sc, port->loopid, sc->sc_scratch) != 0) { 881 return (1); 882 } 883 pdb = QLE_DMA_KVA(sc->sc_scratch); 884 885 if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE) 886 port->flags |= QLE_PORT_FLAG_IS_TARGET; 887 888 port->port_name = betoh64(pdb->port_name); 889 port->node_name = betoh64(pdb->node_name); 890 port->portid = (pdb->port_id[0] << 16) | (pdb->port_id[1] << 8) | 891 pdb->port_id[2]; 892 893 mtx_enter(&sc->sc_port_mtx); 894 disp = qle_classify_port(sc, port->location, port->port_name, 895 port->node_name, &pport); 896 switch (disp) { 897 case QLE_PORT_DISP_CHANGED: 898 case QLE_PORT_DISP_MOVED: 899 case QLE_PORT_DISP_NEW: 900 TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update); 901 sc->sc_targets[port->loopid] = port; 902 break; 903 case QLE_PORT_DISP_DUP: 904 free(port, M_DEVBUF, sizeof *port); 905 break; 906 case QLE_PORT_DISP_SAME: 907 TAILQ_REMOVE(&sc->sc_ports_gone, pport, update); 908 free(port, M_DEVBUF, sizeof *port); 909 break; 910 } 911 mtx_leave(&sc->sc_port_mtx); 912 913 switch (disp) { 914 case QLE_PORT_DISP_CHANGED: 915 case QLE_PORT_DISP_MOVED: 916 case QLE_PORT_DISP_NEW: 917 DPRINTF(QLE_D_PORT, "%s: %s %d; name %llx\n", 918 DEVNAME(sc), ISSET(port->flags, QLE_PORT_FLAG_IS_TARGET) ? 
919 "target" : "non-target", port->loopid, 920 betoh64(pdb->port_name)); 921 break; 922 default: 923 break; 924 } 925 return (0); 926 } 927 928 int 929 qle_add_fabric_port(struct qle_softc *sc, struct qle_fc_port *port) 930 { 931 struct qle_get_port_db *pdb; 932 933 if (qle_get_port_db(sc, port->loopid, sc->sc_scratch) != 0) { 934 free(port, M_DEVBUF, sizeof *port); 935 return (1); 936 } 937 pdb = QLE_DMA_KVA(sc->sc_scratch); 938 939 if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE) 940 port->flags |= QLE_PORT_FLAG_IS_TARGET; 941 942 /* 943 * if we only know about this port because qle_get_port_name_list 944 * returned it, we don't have its port id or node name, so fill 945 * those in and update its location. 946 */ 947 if (port->location == QLE_LOCATION_FABRIC) { 948 port->node_name = betoh64(pdb->node_name); 949 port->port_name = betoh64(pdb->port_name); 950 port->portid = (pdb->port_id[0] << 16) | 951 (pdb->port_id[1] << 8) | pdb->port_id[2]; 952 port->location = QLE_LOCATION_PORT_ID(port->portid); 953 } 954 955 mtx_enter(&sc->sc_port_mtx); 956 TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update); 957 sc->sc_targets[port->loopid] = port; 958 mtx_leave(&sc->sc_port_mtx); 959 960 DPRINTF(QLE_D_PORT, "%s: %s %d; name %llx\n", 961 DEVNAME(sc), ISSET(port->flags, QLE_PORT_FLAG_IS_TARGET) ? 962 "target" : "non-target", port->loopid, port->port_name); 963 return (0); 964 } 965 966 int 967 qle_add_logged_in_port(struct qle_softc *sc, u_int16_t loopid, 968 u_int32_t portid) 969 { 970 struct qle_fc_port *port; 971 struct qle_get_port_db *pdb; 972 u_int64_t node_name, port_name; 973 int flags, ret; 974 975 ret = qle_get_port_db(sc, loopid, sc->sc_scratch); 976 mtx_enter(&sc->sc_port_mtx); 977 if (ret != 0) { 978 /* put in a fake port to prevent use of this loop id */ 979 printf("%s: loop id %d used, but can't see what's using it\n", 980 DEVNAME(sc), loopid); 981 node_name = 0; 982 port_name = 0; 983 flags = 0; 984 } else { 985 pdb = QLE_DMA_KVA(sc->sc_scratch); 986 node_name = betoh64(pdb->node_name); 987 port_name = betoh64(pdb->port_name); 988 flags = 0; 989 if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE) 990 flags |= QLE_PORT_FLAG_IS_TARGET; 991 992 /* see if we've already found this port */ 993 TAILQ_FOREACH(port, &sc->sc_ports_found, update) { 994 if ((port->node_name == node_name) && 995 (port->port_name == port_name) && 996 (port->portid == portid)) { 997 mtx_leave(&sc->sc_port_mtx); 998 DPRINTF(QLE_D_PORT, "%s: already found port " 999 "%06x\n", DEVNAME(sc), portid); 1000 return (0); 1001 } 1002 } 1003 } 1004 1005 port = malloc(sizeof(*port), M_DEVBUF, M_ZERO | M_NOWAIT); 1006 if (port == NULL) { 1007 mtx_leave(&sc->sc_port_mtx); 1008 printf("%s: failed to allocate a port structure\n", 1009 DEVNAME(sc)); 1010 return (1); 1011 } 1012 port->location = QLE_LOCATION_PORT_ID(portid); 1013 port->port_name = port_name; 1014 port->node_name = node_name; 1015 port->loopid = loopid; 1016 port->portid = portid; 1017 port->flags = flags; 1018 1019 TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports); 1020 sc->sc_targets[port->loopid] = port; 1021 mtx_leave(&sc->sc_port_mtx); 1022 1023 DPRINTF(QLE_D_PORT, "%s: added logged in port %06x at %d\n", 1024 DEVNAME(sc), portid, loopid); 1025 return (0); 1026 } 1027 1028 struct qle_ccb * 1029 qle_handle_resp(struct qle_softc *sc, u_int32_t id) 1030 { 1031 struct qle_ccb *ccb; 1032 struct qle_iocb_status *status; 1033 struct qle_iocb_req6 *req; 1034 struct scsi_xfer *xs; 1035 u_int32_t handle; 1036 u_int16_t completion; 1037 u_int8_t *entry; 1038 
u_int8_t *data; 1039 1040 ccb = NULL; 1041 entry = QLE_DMA_KVA(sc->sc_responses) + (id * QLE_QUEUE_ENTRY_SIZE); 1042 1043 bus_dmamap_sync(sc->sc_dmat, 1044 QLE_DMA_MAP(sc->sc_responses), id * QLE_QUEUE_ENTRY_SIZE, 1045 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD); 1046 1047 qle_dump_iocb(sc, entry); 1048 switch(entry[0]) { 1049 case QLE_IOCB_STATUS: 1050 status = (struct qle_iocb_status *)entry; 1051 handle = status->handle; 1052 if (handle > sc->sc_maxcmds) { 1053 panic("bad completed command handle: %d (> %d)", 1054 handle, sc->sc_maxcmds); 1055 } 1056 1057 ccb = &sc->sc_ccbs[handle]; 1058 xs = ccb->ccb_xs; 1059 if (xs == NULL) { 1060 DPRINTF(QLE_D_IO, "%s: got status for inactive ccb %d\n", 1061 DEVNAME(sc), handle); 1062 ccb = NULL; 1063 break; 1064 } 1065 if (xs->io != ccb) { 1066 panic("completed command handle doesn't match xs " 1067 "(handle %d, ccb %p, xs->io %p)", handle, ccb, 1068 xs->io); 1069 } 1070 1071 if (xs->datalen > 0) { 1072 if (ccb->ccb_dmamap->dm_nsegs > 1073 QLE_IOCB_SEGS_PER_CMD) { 1074 bus_dmamap_sync(sc->sc_dmat, 1075 QLE_DMA_MAP(sc->sc_segments), 1076 ccb->ccb_seg_offset, 1077 sizeof(*ccb->ccb_segs) * 1078 ccb->ccb_dmamap->dm_nsegs + 1, 1079 BUS_DMASYNC_POSTWRITE); 1080 } 1081 1082 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 1083 ccb->ccb_dmamap->dm_mapsize, 1084 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD : 1085 BUS_DMASYNC_POSTWRITE); 1086 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); 1087 } 1088 1089 xs->status = lemtoh16(&status->scsi_status) & 0xff; 1090 xs->resid = 0; 1091 completion = lemtoh16(&status->completion); 1092 switch (completion) { 1093 case QLE_IOCB_STATUS_DATA_UNDERRUN: 1094 xs->resid = lemtoh32(&status->resid); 1095 /* FALLTHROUGH */ 1096 case QLE_IOCB_STATUS_DATA_OVERRUN: 1097 case QLE_IOCB_STATUS_COMPLETE: 1098 if (lemtoh16(&status->scsi_status) & 1099 QLE_SCSI_STATUS_SENSE_VALID) { 1100 u_int32_t *pp; 1101 int sr; 1102 data = status->data + 1103 lemtoh32(&status->fcp_rsp_len); 1104 sr = MIN(lemtoh32(&status->fcp_sense_len), 1105 sizeof(xs->sense)); 1106 memcpy(&xs->sense, data, sr); 1107 xs->error = XS_SENSE; 1108 pp = (u_int32_t *)&xs->sense; 1109 for (sr = 0; sr < sizeof(xs->sense)/4; sr++) { 1110 pp[sr] = swap32(pp[sr]); 1111 } 1112 } else { 1113 xs->error = XS_NOERROR; 1114 } 1115 break; 1116 1117 case QLE_IOCB_STATUS_DMA_ERROR: 1118 DPRINTF(QLE_D_IO, "%s: dma error\n", DEVNAME(sc)); 1119 /* set resid apparently? */ 1120 break; 1121 1122 case QLE_IOCB_STATUS_RESET: 1123 DPRINTF(QLE_D_IO, "%s: reset destroyed command\n", 1124 DEVNAME(sc)); 1125 sc->sc_marker_required = 1; 1126 xs->error = XS_RESET; 1127 break; 1128 1129 case QLE_IOCB_STATUS_ABORTED: 1130 DPRINTF(QLE_D_IO, "%s: aborted\n", DEVNAME(sc)); 1131 sc->sc_marker_required = 1; 1132 xs->error = XS_DRIVER_STUFFUP; 1133 break; 1134 1135 case QLE_IOCB_STATUS_TIMEOUT: 1136 DPRINTF(QLE_D_IO, "%s: command timed out\n", 1137 DEVNAME(sc)); 1138 xs->error = XS_TIMEOUT; 1139 break; 1140 1141 case QLE_IOCB_STATUS_QUEUE_FULL: 1142 DPRINTF(QLE_D_IO, "%s: queue full\n", DEVNAME(sc)); 1143 xs->error = XS_BUSY; 1144 break; 1145 1146 case QLE_IOCB_STATUS_PORT_UNAVAIL: 1147 case QLE_IOCB_STATUS_PORT_LOGGED_OUT: 1148 case QLE_IOCB_STATUS_PORT_CHANGED: 1149 DPRINTF(QLE_D_IO, "%s: dev gone\n", DEVNAME(sc)); 1150 xs->error = XS_SELTIMEOUT; 1151 /* mark port as needing relogin? 
*/ 1152 break; 1153 1154 default: 1155 DPRINTF(QLE_D_IO, "%s: unexpected completion status " 1156 "%x\n", DEVNAME(sc), status->completion); 1157 xs->error = XS_DRIVER_STUFFUP; 1158 break; 1159 } 1160 break; 1161 1162 case QLE_IOCB_STATUS_CONT: 1163 DPRINTF(QLE_D_IO, "%s: ignoring status continuation iocb\n", 1164 DEVNAME(sc)); 1165 break; 1166 1167 case QLE_IOCB_PLOGX: 1168 case QLE_IOCB_CT_PASSTHROUGH: 1169 if (sc->sc_fabric_pending) { 1170 qle_dump_iocb(sc, entry); 1171 memcpy(sc->sc_fabric_response, entry, 1172 QLE_QUEUE_ENTRY_SIZE); 1173 sc->sc_fabric_pending = 2; 1174 wakeup(sc->sc_scratch); 1175 } else { 1176 DPRINTF(QLE_D_IO, "%s: unexpected fabric response %x\n", 1177 DEVNAME(sc), entry[0]); 1178 } 1179 break; 1180 1181 case QLE_IOCB_MARKER: 1182 break; 1183 1184 case QLE_IOCB_CMD_TYPE_6: 1185 case QLE_IOCB_CMD_TYPE_7: 1186 DPRINTF(QLE_D_IO, "%s: request bounced back\n", DEVNAME(sc)); 1187 req = (struct qle_iocb_req6 *)entry; 1188 handle = req->req_handle; 1189 if (handle > sc->sc_maxcmds) { 1190 panic("bad bounced command handle: %d (> %d)", 1191 handle, sc->sc_maxcmds); 1192 } 1193 1194 ccb = &sc->sc_ccbs[handle]; 1195 xs = ccb->ccb_xs; 1196 xs->error = XS_DRIVER_STUFFUP; 1197 break; 1198 default: 1199 DPRINTF(QLE_D_IO, "%s: unexpected response entry type %x\n", 1200 DEVNAME(sc), entry[0]); 1201 break; 1202 } 1203 1204 return (ccb); 1205 } 1206 1207 void 1208 qle_handle_intr(struct qle_softc *sc, u_int16_t isr, u_int16_t info) 1209 { 1210 int i; 1211 u_int32_t rspin; 1212 struct qle_ccb *ccb; 1213 1214 switch (isr) { 1215 case QLE_INT_TYPE_ASYNC: 1216 qle_async(sc, info); 1217 break; 1218 1219 case QLE_INT_TYPE_IO: 1220 rspin = qle_read(sc, QLE_RESP_IN); 1221 if (rspin == sc->sc_last_resp_id) 1222 break; 1223 1224 do { 1225 ccb = qle_handle_resp(sc, sc->sc_last_resp_id); 1226 if (ccb) 1227 scsi_done(ccb->ccb_xs); 1228 1229 sc->sc_last_resp_id++; 1230 sc->sc_last_resp_id %= sc->sc_maxcmds; 1231 } while (sc->sc_last_resp_id != rspin); 1232 1233 qle_write(sc, QLE_RESP_OUT, sc->sc_last_resp_id); 1234 break; 1235 1236 case QLE_INT_TYPE_MBOX: 1237 mtx_enter(&sc->sc_mbox_mtx); 1238 if (sc->sc_mbox_pending) { 1239 for (i = 0; i < nitems(sc->sc_mbox); i++) { 1240 sc->sc_mbox[i] = qle_read_mbox(sc, i); 1241 } 1242 sc->sc_mbox_pending = 2; 1243 wakeup(sc->sc_mbox); 1244 mtx_leave(&sc->sc_mbox_mtx); 1245 } else { 1246 mtx_leave(&sc->sc_mbox_mtx); 1247 DPRINTF(QLE_D_INTR, "%s: unexpected mbox interrupt: " 1248 "%x\n", DEVNAME(sc), info); 1249 } 1250 break; 1251 1252 default: 1253 break; 1254 } 1255 1256 qle_clear_isr(sc, isr); 1257 } 1258 1259 int 1260 qle_intr(void *xsc) 1261 { 1262 struct qle_softc *sc = xsc; 1263 u_int16_t isr; 1264 u_int16_t info; 1265 1266 if (qle_read_isr(sc, &isr, &info) == 0) 1267 return (0); 1268 1269 qle_handle_intr(sc, isr, info); 1270 return (1); 1271 } 1272 1273 int 1274 qle_scsi_probe(struct scsi_link *link) 1275 { 1276 struct qle_softc *sc = link->bus->sb_adapter_softc; 1277 int rv = 0; 1278 1279 mtx_enter(&sc->sc_port_mtx); 1280 if (sc->sc_targets[link->target] == NULL) 1281 rv = ENXIO; 1282 else if (!ISSET(sc->sc_targets[link->target]->flags, 1283 QLE_PORT_FLAG_IS_TARGET)) 1284 rv = ENXIO; 1285 mtx_leave(&sc->sc_port_mtx); 1286 1287 return (rv); 1288 } 1289 1290 void 1291 qle_scsi_cmd(struct scsi_xfer *xs) 1292 { 1293 struct scsi_link *link = xs->sc_link; 1294 struct qle_softc *sc = link->bus->sb_adapter_softc; 1295 struct qle_ccb *ccb; 1296 void *iocb; 1297 struct qle_ccb_list list; 1298 u_int16_t req; 1299 u_int32_t portid; 1300 int offset, error, done; 
1301 bus_dmamap_t dmap; 1302 1303 if (xs->cmdlen > 16) { 1304 DPRINTF(QLE_D_IO, "%s: cmd too big (%d)\n", DEVNAME(sc), 1305 xs->cmdlen); 1306 memset(&xs->sense, 0, sizeof(xs->sense)); 1307 xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT; 1308 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 1309 xs->sense.add_sense_code = 0x20; 1310 xs->error = XS_SENSE; 1311 scsi_done(xs); 1312 return; 1313 } 1314 1315 portid = 0xffffffff; 1316 mtx_enter(&sc->sc_port_mtx); 1317 if (sc->sc_targets[xs->sc_link->target] != NULL) { 1318 portid = sc->sc_targets[xs->sc_link->target]->portid; 1319 } 1320 mtx_leave(&sc->sc_port_mtx); 1321 if (portid == 0xffffffff) { 1322 xs->error = XS_DRIVER_STUFFUP; 1323 scsi_done(xs); 1324 return; 1325 } 1326 1327 ccb = xs->io; 1328 dmap = ccb->ccb_dmamap; 1329 if (xs->datalen > 0) { 1330 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, 1331 xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ? 1332 BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 1333 if (error) { 1334 xs->error = XS_DRIVER_STUFFUP; 1335 scsi_done(xs); 1336 return; 1337 } 1338 1339 bus_dmamap_sync(sc->sc_dmat, dmap, 0, 1340 dmap->dm_mapsize, 1341 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD : 1342 BUS_DMASYNC_PREWRITE); 1343 } 1344 1345 mtx_enter(&sc->sc_queue_mtx); 1346 1347 /* put in a sync marker if required */ 1348 if (sc->sc_marker_required) { 1349 req = sc->sc_next_req_id++; 1350 if (sc->sc_next_req_id == sc->sc_maxcmds) 1351 sc->sc_next_req_id = 0; 1352 1353 DPRINTF(QLE_D_IO, "%s: writing marker at request %d\n", 1354 DEVNAME(sc), req); 1355 offset = (req * QLE_QUEUE_ENTRY_SIZE); 1356 iocb = QLE_DMA_KVA(sc->sc_requests) + offset; 1357 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), 1358 offset, QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 1359 qle_put_marker(sc, iocb); 1360 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id); 1361 sc->sc_marker_required = 0; 1362 } 1363 1364 req = sc->sc_next_req_id++; 1365 if (sc->sc_next_req_id == sc->sc_maxcmds) 1366 sc->sc_next_req_id = 0; 1367 1368 offset = (req * QLE_QUEUE_ENTRY_SIZE); 1369 iocb = QLE_DMA_KVA(sc->sc_requests) + offset; 1370 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset, 1371 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 1372 1373 ccb->ccb_xs = xs; 1374 1375 qle_put_cmd(sc, iocb, xs, ccb, portid); 1376 1377 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset, 1378 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREREAD); 1379 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id); 1380 1381 if (!ISSET(xs->flags, SCSI_POLL)) { 1382 mtx_leave(&sc->sc_queue_mtx); 1383 return; 1384 } 1385 1386 done = 0; 1387 SIMPLEQ_INIT(&list); 1388 do { 1389 u_int16_t isr, info; 1390 u_int32_t rspin; 1391 delay(100); 1392 1393 if (qle_read_isr(sc, &isr, &info) == 0) { 1394 continue; 1395 } 1396 1397 if (isr != QLE_INT_TYPE_IO) { 1398 qle_handle_intr(sc, isr, info); 1399 continue; 1400 } 1401 1402 rspin = qle_read(sc, QLE_RESP_IN); 1403 while (rspin != sc->sc_last_resp_id) { 1404 ccb = qle_handle_resp(sc, sc->sc_last_resp_id); 1405 1406 sc->sc_last_resp_id++; 1407 if (sc->sc_last_resp_id == sc->sc_maxcmds) 1408 sc->sc_last_resp_id = 0; 1409 1410 if (ccb != NULL) 1411 SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link); 1412 if (ccb == xs->io) 1413 done = 1; 1414 } 1415 qle_write(sc, QLE_RESP_OUT, sc->sc_last_resp_id); 1416 qle_clear_isr(sc, isr); 1417 } while (done == 0); 1418 1419 mtx_leave(&sc->sc_queue_mtx); 1420 1421 while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) { 1422 SIMPLEQ_REMOVE_HEAD(&list, ccb_link); 1423 scsi_done(ccb->ccb_xs); 1424 } 1425 } 1426 1427 
u_int32_t 1428 qle_read(struct qle_softc *sc, int offset) 1429 { 1430 u_int32_t v; 1431 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset); 1432 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 4, 1433 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1434 return (v); 1435 } 1436 1437 void 1438 qle_write(struct qle_softc *sc, int offset, u_int32_t value) 1439 { 1440 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value); 1441 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 4, 1442 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1443 } 1444 1445 u_int16_t 1446 qle_read_mbox(struct qle_softc *sc, int mbox) 1447 { 1448 u_int16_t v; 1449 bus_size_t offset = mbox * 2; 1450 v = bus_space_read_2(sc->sc_iot, sc->sc_mbox_ioh, offset); 1451 bus_space_barrier(sc->sc_iot, sc->sc_mbox_ioh, offset, 2, 1452 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1453 return (v); 1454 } 1455 1456 void 1457 qle_write_mbox(struct qle_softc *sc, int mbox, u_int16_t value) 1458 { 1459 bus_size_t offset = (mbox * 2); 1460 bus_space_write_2(sc->sc_iot, sc->sc_mbox_ioh, offset, value); 1461 bus_space_barrier(sc->sc_iot, sc->sc_mbox_ioh, offset, 2, 1462 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1463 } 1464 1465 void 1466 qle_host_cmd(struct qle_softc *sc, u_int32_t cmd) 1467 { 1468 qle_write(sc, QLE_HOST_CMD_CTRL, cmd << QLE_HOST_CMD_SHIFT); 1469 } 1470 1471 #define MBOX_COMMAND_TIMEOUT 400000 1472 1473 int 1474 qle_mbox(struct qle_softc *sc, int maskin) 1475 { 1476 int i; 1477 int result = 0; 1478 int rv; 1479 1480 for (i = 0; i < nitems(sc->sc_mbox); i++) { 1481 if (maskin & (1 << i)) { 1482 qle_write_mbox(sc, i, sc->sc_mbox[i]); 1483 } 1484 } 1485 qle_host_cmd(sc, QLE_HOST_CMD_SET_HOST_INT); 1486 1487 if (sc->sc_scsibus != NULL) { 1488 mtx_enter(&sc->sc_mbox_mtx); 1489 sc->sc_mbox_pending = 1; 1490 while (sc->sc_mbox_pending == 1) { 1491 msleep_nsec(sc->sc_mbox, &sc->sc_mbox_mtx, PRIBIO, 1492 "qlembox", INFSLP); 1493 } 1494 result = sc->sc_mbox[0]; 1495 sc->sc_mbox_pending = 0; 1496 mtx_leave(&sc->sc_mbox_mtx); 1497 return (result == QLE_MBOX_COMPLETE ? 0 : result); 1498 } 1499 1500 for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) { 1501 u_int16_t isr, info; 1502 1503 delay(100); 1504 1505 if (qle_read_isr(sc, &isr, &info) == 0) 1506 continue; 1507 1508 switch (isr) { 1509 case QLE_INT_TYPE_MBOX: 1510 result = info; 1511 break; 1512 1513 default: 1514 qle_handle_intr(sc, isr, info); 1515 break; 1516 } 1517 } 1518 1519 if (result == 0) { 1520 /* timed out; do something? */ 1521 DPRINTF(QLE_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc)); 1522 rv = 1; 1523 } else { 1524 for (i = 0; i < nitems(sc->sc_mbox); i++) { 1525 sc->sc_mbox[i] = qle_read_mbox(sc, i); 1526 } 1527 rv = (result == QLE_MBOX_COMPLETE ? 0 : result); 1528 } 1529 1530 qle_clear_isr(sc, QLE_INT_TYPE_MBOX); 1531 return (rv); 1532 } 1533 1534 void 1535 qle_mbox_putaddr(u_int16_t *mbox, struct qle_dmamem *mem) 1536 { 1537 mbox[2] = (QLE_DMA_DVA(mem) >> 16) & 0xffff; 1538 mbox[3] = (QLE_DMA_DVA(mem) >> 0) & 0xffff; 1539 mbox[6] = (QLE_DMA_DVA(mem) >> 48) & 0xffff; 1540 mbox[7] = (QLE_DMA_DVA(mem) >> 32) & 0xffff; 1541 } 1542 1543 void 1544 qle_set_ints(struct qle_softc *sc, int enabled) 1545 { 1546 u_int32_t v = enabled ? 
QLE_INT_CTRL_ENABLE : 0; 1547 qle_write(sc, QLE_INT_CTRL, v); 1548 } 1549 1550 int 1551 qle_read_isr(struct qle_softc *sc, u_int16_t *isr, u_int16_t *info) 1552 { 1553 u_int32_t v; 1554 1555 switch (sc->sc_isp_gen) { 1556 case QLE_GEN_ISP24XX: 1557 case QLE_GEN_ISP25XX: 1558 if ((qle_read(sc, QLE_INT_STATUS) & QLE_RISC_INT_REQ) == 0) 1559 return (0); 1560 1561 v = qle_read(sc, QLE_RISC_STATUS); 1562 1563 switch (v & QLE_INT_STATUS_MASK) { 1564 case QLE_24XX_INT_ROM_MBOX: 1565 case QLE_24XX_INT_ROM_MBOX_FAIL: 1566 case QLE_24XX_INT_MBOX: 1567 case QLE_24XX_INT_MBOX_FAIL: 1568 *isr = QLE_INT_TYPE_MBOX; 1569 break; 1570 1571 case QLE_24XX_INT_ASYNC: 1572 *isr = QLE_INT_TYPE_ASYNC; 1573 break; 1574 1575 case QLE_24XX_INT_RSPQ: 1576 *isr = QLE_INT_TYPE_IO; 1577 break; 1578 1579 default: 1580 *isr = QLE_INT_TYPE_OTHER; 1581 break; 1582 } 1583 1584 *info = (v >> QLE_INT_INFO_SHIFT); 1585 return (1); 1586 1587 default: 1588 return (0); 1589 } 1590 } 1591 1592 void 1593 qle_clear_isr(struct qle_softc *sc, u_int16_t isr) 1594 { 1595 qle_host_cmd(sc, QLE_HOST_CMD_CLR_RISC_INT); 1596 } 1597 1598 void 1599 qle_update_done(struct qle_softc *sc, int task) 1600 { 1601 atomic_clearbits_int(&sc->sc_update_tasks, task); 1602 } 1603 1604 void 1605 qle_update_cancel(struct qle_softc *sc) 1606 { 1607 atomic_swap_uint(&sc->sc_update_tasks, 0); 1608 timeout_del(&sc->sc_update_timeout); 1609 task_del(sc->sc_update_taskq, &sc->sc_update_task); 1610 } 1611 1612 void 1613 qle_update_start(struct qle_softc *sc, int task) 1614 { 1615 atomic_setbits_int(&sc->sc_update_tasks, task); 1616 if (!timeout_pending(&sc->sc_update_timeout)) 1617 task_add(sc->sc_update_taskq, &sc->sc_update_task); 1618 } 1619 1620 void 1621 qle_update_defer(struct qle_softc *sc, int task) 1622 { 1623 atomic_setbits_int(&sc->sc_update_tasks, task); 1624 timeout_del(&sc->sc_update_timeout); 1625 task_del(sc->sc_update_taskq, &sc->sc_update_task); 1626 timeout_add_msec(&sc->sc_update_timeout, QLE_LOOP_SETTLE); 1627 } 1628 1629 void 1630 qle_clear_port_lists(struct qle_softc *sc) 1631 { 1632 struct qle_fc_port *port; 1633 while (!TAILQ_EMPTY(&sc->sc_ports_found)) { 1634 port = TAILQ_FIRST(&sc->sc_ports_found); 1635 TAILQ_REMOVE(&sc->sc_ports_found, port, update); 1636 free(port, M_DEVBUF, sizeof *port); 1637 } 1638 1639 while (!TAILQ_EMPTY(&sc->sc_ports_new)) { 1640 port = TAILQ_FIRST(&sc->sc_ports_new); 1641 TAILQ_REMOVE(&sc->sc_ports_new, port, update); 1642 free(port, M_DEVBUF, sizeof *port); 1643 } 1644 1645 while (!TAILQ_EMPTY(&sc->sc_ports_gone)) { 1646 port = TAILQ_FIRST(&sc->sc_ports_gone); 1647 TAILQ_REMOVE(&sc->sc_ports_gone, port, update); 1648 } 1649 } 1650 1651 int 1652 qle_softreset(struct qle_softc *sc) 1653 { 1654 int i; 1655 qle_set_ints(sc, 0); 1656 1657 /* set led control bits, stop dma */ 1658 qle_write(sc, QLE_GPIO_DATA, 0); 1659 qle_write(sc, QLE_CTRL_STATUS, QLE_CTRL_DMA_SHUTDOWN); 1660 while (qle_read(sc, QLE_CTRL_STATUS) & QLE_CTRL_DMA_ACTIVE) { 1661 DPRINTF(QLE_D_IO, "%s: dma still active\n", DEVNAME(sc)); 1662 delay(100); 1663 } 1664 1665 /* reset */ 1666 qle_write(sc, QLE_CTRL_STATUS, QLE_CTRL_RESET | QLE_CTRL_DMA_SHUTDOWN); 1667 delay(100); 1668 /* clear data and control dma engines? 
*/ 1669 1670 /* wait for soft reset to clear */ 1671 for (i = 0; i < 1000; i++) { 1672 if (qle_read_mbox(sc, 0) == 0x0000) 1673 break; 1674 1675 delay(100); 1676 } 1677 1678 if (i == 1000) { 1679 printf("%s: reset mbox didn't clear\n", DEVNAME(sc)); 1680 qle_set_ints(sc, 0); 1681 return (ENXIO); 1682 } 1683 1684 for (i = 0; i < 500000; i++) { 1685 if ((qle_read(sc, QLE_CTRL_STATUS) & QLE_CTRL_RESET) == 0) 1686 break; 1687 delay(5); 1688 } 1689 if (i == 500000) { 1690 printf("%s: reset status didn't clear\n", DEVNAME(sc)); 1691 return (ENXIO); 1692 } 1693 1694 /* reset risc processor */ 1695 qle_host_cmd(sc, QLE_HOST_CMD_RESET); 1696 qle_host_cmd(sc, QLE_HOST_CMD_RELEASE); 1697 qle_host_cmd(sc, QLE_HOST_CMD_CLEAR_RESET); 1698 1699 /* wait for reset to clear */ 1700 for (i = 0; i < 1000; i++) { 1701 if (qle_read_mbox(sc, 0) == 0x0000) 1702 break; 1703 delay(100); 1704 } 1705 if (i == 1000) { 1706 printf("%s: risc not ready after reset\n", DEVNAME(sc)); 1707 return (ENXIO); 1708 } 1709 1710 /* reset queue pointers */ 1711 qle_write(sc, QLE_REQ_IN, 0); 1712 qle_write(sc, QLE_REQ_OUT, 0); 1713 qle_write(sc, QLE_RESP_IN, 0); 1714 qle_write(sc, QLE_RESP_OUT, 0); 1715 1716 qle_set_ints(sc, 1); 1717 1718 /* do a basic mailbox operation to check we're alive */ 1719 sc->sc_mbox[0] = QLE_MBOX_NOP; 1720 if (qle_mbox(sc, 0x0001)) { 1721 printf("ISP not responding after reset\n"); 1722 return (ENXIO); 1723 } 1724 1725 return (0); 1726 } 1727 1728 void 1729 qle_update_topology(struct qle_softc *sc) 1730 { 1731 sc->sc_mbox[0] = QLE_MBOX_GET_ID; 1732 if (qle_mbox(sc, 0x0001)) { 1733 DPRINTF(QLE_D_PORT, "%s: unable to get loop id\n", DEVNAME(sc)); 1734 sc->sc_topology = QLE_TOPO_N_PORT_NO_TARGET; 1735 } else { 1736 sc->sc_topology = sc->sc_mbox[6]; 1737 sc->sc_loop_id = sc->sc_mbox[1]; 1738 1739 switch (sc->sc_topology) { 1740 case QLE_TOPO_NL_PORT: 1741 case QLE_TOPO_N_PORT: 1742 DPRINTF(QLE_D_PORT, "%s: loop id %d\n", DEVNAME(sc), 1743 sc->sc_loop_id); 1744 break; 1745 1746 case QLE_TOPO_FL_PORT: 1747 case QLE_TOPO_F_PORT: 1748 sc->sc_port_id = sc->sc_mbox[2] | 1749 (sc->sc_mbox[3] << 16); 1750 DPRINTF(QLE_D_PORT, "%s: fabric port id %06x\n", 1751 DEVNAME(sc), sc->sc_port_id); 1752 break; 1753 1754 case QLE_TOPO_N_PORT_NO_TARGET: 1755 default: 1756 DPRINTF(QLE_D_PORT, "%s: not useful\n", DEVNAME(sc)); 1757 break; 1758 } 1759 1760 switch (sc->sc_topology) { 1761 case QLE_TOPO_NL_PORT: 1762 case QLE_TOPO_FL_PORT: 1763 sc->sc_loop_max_id = 126; 1764 break; 1765 1766 case QLE_TOPO_N_PORT: 1767 sc->sc_loop_max_id = 2; 1768 break; 1769 1770 default: 1771 sc->sc_loop_max_id = 0; 1772 break; 1773 } 1774 } 1775 } 1776 1777 int 1778 qle_update_fabric(struct qle_softc *sc) 1779 { 1780 /*struct qle_sns_rft_id *rft;*/ 1781 1782 switch (sc->sc_topology) { 1783 case QLE_TOPO_F_PORT: 1784 case QLE_TOPO_FL_PORT: 1785 break; 1786 1787 default: 1788 return (0); 1789 } 1790 1791 /* get the name server's port db entry */ 1792 sc->sc_mbox[0] = QLE_MBOX_GET_PORT_DB; 1793 sc->sc_mbox[1] = QLE_F_PORT_HANDLE; 1794 qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch); 1795 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0, 1796 sizeof(struct qle_get_port_db), BUS_DMASYNC_PREREAD); 1797 if (qle_mbox(sc, 0x00cf)) { 1798 DPRINTF(QLE_D_PORT, "%s: get port db for SNS failed: %x\n", 1799 DEVNAME(sc), sc->sc_mbox[0]); 1800 sc->sc_sns_port_name = 0; 1801 } else { 1802 struct qle_get_port_db *pdb; 1803 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0, 1804 sizeof(struct qle_get_port_db), BUS_DMASYNC_POSTREAD); 1805 pdb = 
QLE_DMA_KVA(sc->sc_scratch); 1806 DPRINTF(QLE_D_PORT, "%s: SNS port name %llx\n", DEVNAME(sc), 1807 betoh64(pdb->port_name)); 1808 sc->sc_sns_port_name = betoh64(pdb->port_name); 1809 } 1810 1811 /* 1812 * register fc4 types with the fabric 1813 * some switches do this automatically, but apparently 1814 * some don't. 1815 */ 1816 /* 1817 rft = QLE_DMA_KVA(sc->sc_scratch); 1818 memset(rft, 0, sizeof(*rft) + sizeof(struct qle_sns_req_hdr)); 1819 htolem16(&rft->subcmd, QLE_SNS_RFT_ID); 1820 htolem16(&rft->max_word, sizeof(struct qle_sns_req_hdr) / 4); 1821 htolem32(&rft->port_id, sc->sc_port_id); 1822 rft->fc4_types[0] = (1 << QLE_FC4_SCSI); 1823 if (qle_sns_req(sc, sc->sc_scratch, sizeof(*rft))) { 1824 printf("%s: RFT_ID failed\n", DEVNAME(sc)); 1825 / * we might be able to continue after this fails * / 1826 } 1827 */ 1828 1829 return (1); 1830 } 1831 1832 int 1833 qle_ct_pass_through(struct qle_softc *sc, u_int32_t port_handle, 1834 struct qle_dmamem *mem, size_t req_size, size_t resp_size) 1835 { 1836 struct qle_iocb_ct_passthrough *iocb; 1837 u_int16_t req; 1838 u_int64_t offset; 1839 int rv; 1840 1841 mtx_enter(&sc->sc_queue_mtx); 1842 1843 req = sc->sc_next_req_id++; 1844 if (sc->sc_next_req_id == sc->sc_maxcmds) 1845 sc->sc_next_req_id = 0; 1846 1847 offset = (req * QLE_QUEUE_ENTRY_SIZE); 1848 iocb = QLE_DMA_KVA(sc->sc_requests) + offset; 1849 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset, 1850 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 1851 1852 memset(iocb, 0, QLE_QUEUE_ENTRY_SIZE); 1853 iocb->entry_type = QLE_IOCB_CT_PASSTHROUGH; 1854 iocb->entry_count = 1; 1855 1856 iocb->req_handle = 9; 1857 htolem16(&iocb->req_nport_handle, port_handle); 1858 htolem16(&iocb->req_dsd_count, 1); 1859 htolem16(&iocb->req_resp_dsd_count, 1); 1860 htolem32(&iocb->req_cmd_byte_count, req_size); 1861 htolem32(&iocb->req_resp_byte_count, resp_size); 1862 qle_sge(&iocb->req_cmd_seg, QLE_DMA_DVA(mem), req_size); 1863 qle_sge(&iocb->req_resp_seg, QLE_DMA_DVA(mem) + req_size, resp_size); 1864 1865 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, QLE_DMA_LEN(mem), 1866 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1867 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id); 1868 sc->sc_fabric_pending = 1; 1869 mtx_leave(&sc->sc_queue_mtx); 1870 1871 /* maybe put a proper timeout on this */ 1872 rv = 0; 1873 while (sc->sc_fabric_pending == 1) { 1874 if (sc->sc_scsibus == NULL) { 1875 u_int16_t isr, info; 1876 1877 delay(100); 1878 if (qle_read_isr(sc, &isr, &info) != 0) 1879 qle_handle_intr(sc, isr, info); 1880 } else { 1881 tsleep_nsec(sc->sc_scratch, PRIBIO, "qle_fabric", 1882 SEC_TO_NSEC(1)); 1883 } 1884 } 1885 if (rv == 0) 1886 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, 1887 QLE_DMA_LEN(mem), BUS_DMASYNC_POSTREAD | 1888 BUS_DMASYNC_POSTWRITE); 1889 1890 sc->sc_fabric_pending = 0; 1891 1892 return (rv); 1893 } 1894 1895 struct qle_fc_port * 1896 qle_next_fabric_port(struct qle_softc *sc, u_int32_t *firstport, 1897 u_int32_t *lastport) 1898 { 1899 struct qle_ct_ga_nxt_req *ga; 1900 struct qle_ct_ga_nxt_resp *gar; 1901 struct qle_fc_port *fport; 1902 int result; 1903 1904 /* get the next port from the fabric nameserver */ 1905 ga = QLE_DMA_KVA(sc->sc_scratch); 1906 memset(ga, 0, sizeof(*ga) + sizeof(*gar)); 1907 ga->header.ct_revision = 0x01; 1908 ga->header.ct_gs_type = 0xfc; 1909 ga->header.ct_gs_subtype = 0x02; 1910 ga->subcmd = htobe16(QLE_SNS_GA_NXT); 1911 ga->max_word = htobe16((sizeof(*gar) - 16) / 4); 1912 ga->port_id = htobe32(*lastport); 1913 result = qle_ct_pass_through(sc, 
QLE_SNS_HANDLE, sc->sc_scratch, 1914 sizeof(*ga), sizeof(*gar)); 1915 if (result) { 1916 DPRINTF(QLE_D_PORT, "%s: GA_NXT %06x failed: %x\n", DEVNAME(sc), 1917 *lastport, result); 1918 *lastport = 0xffffffff; 1919 return (NULL); 1920 } 1921 1922 gar = (struct qle_ct_ga_nxt_resp *)(ga + 1); 1923 /* if the response is all zeroes, try again */ 1924 if (gar->port_type_id == 0 && gar->port_name == 0 && 1925 gar->node_name == 0) { 1926 DPRINTF(QLE_D_PORT, "%s: GA_NXT returned junk\n", DEVNAME(sc)); 1927 return (NULL); 1928 } 1929 1930 /* are we back at the start? */ 1931 *lastport = betoh32(gar->port_type_id) & 0xffffff; 1932 if (*lastport == *firstport) { 1933 *lastport = 0xffffffff; 1934 return (NULL); 1935 } 1936 if (*firstport == 0xffffffff) 1937 *firstport = *lastport; 1938 1939 DPRINTF(QLE_D_PORT, "%s: GA_NXT: port id: %06x, wwpn %llx, wwnn %llx\n", 1940 DEVNAME(sc), *lastport, betoh64(gar->port_name), 1941 betoh64(gar->node_name)); 1942 1943 /* don't try to log in to ourselves */ 1944 if (*lastport == sc->sc_port_id) { 1945 return (NULL); 1946 } 1947 1948 fport = malloc(sizeof(*fport), M_DEVBUF, M_ZERO | M_NOWAIT); 1949 if (fport == NULL) { 1950 printf("%s: failed to allocate a port struct\n", 1951 DEVNAME(sc)); 1952 *lastport = 0xffffffff; 1953 return (NULL); 1954 } 1955 fport->port_name = betoh64(gar->port_name); 1956 fport->node_name = betoh64(gar->node_name); 1957 fport->location = QLE_LOCATION_PORT_ID(*lastport); 1958 fport->portid = *lastport; 1959 return (fport); 1960 } 1961 1962 int 1963 qle_fabric_plogx(struct qle_softc *sc, struct qle_fc_port *port, int flags, 1964 u_int32_t *info) 1965 { 1966 struct qle_iocb_plogx *iocb; 1967 u_int16_t req; 1968 u_int64_t offset; 1969 int rv; 1970 1971 mtx_enter(&sc->sc_queue_mtx); 1972 1973 req = sc->sc_next_req_id++; 1974 if (sc->sc_next_req_id == sc->sc_maxcmds) 1975 sc->sc_next_req_id = 0; 1976 1977 offset = (req * QLE_QUEUE_ENTRY_SIZE); 1978 iocb = QLE_DMA_KVA(sc->sc_requests) + offset; 1979 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset, 1980 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 1981 1982 memset(iocb, 0, QLE_QUEUE_ENTRY_SIZE); 1983 iocb->entry_type = QLE_IOCB_PLOGX; 1984 iocb->entry_count = 1; 1985 1986 iocb->req_handle = 7; 1987 htolem16(&iocb->req_nport_handle, port->loopid); 1988 htolem16(&iocb->req_port_id_lo, port->portid); 1989 iocb->req_port_id_hi = port->portid >> 16; 1990 htolem16(&iocb->req_flags, flags); 1991 1992 DPRINTF(QLE_D_PORT, "%s: plogx loop id %d port %06x, flags %x\n", 1993 DEVNAME(sc), port->loopid, port->portid, flags); 1994 qle_dump_iocb(sc, iocb); 1995 1996 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id); 1997 sc->sc_fabric_pending = 1; 1998 mtx_leave(&sc->sc_queue_mtx); 1999 2000 /* maybe put a proper timeout on this */ 2001 rv = 0; 2002 while (sc->sc_fabric_pending == 1) { 2003 if (sc->sc_scsibus == NULL) { 2004 u_int16_t isr, info; 2005 2006 delay(100); 2007 if (qle_read_isr(sc, &isr, &info) != 0) 2008 qle_handle_intr(sc, isr, info); 2009 } else { 2010 tsleep_nsec(sc->sc_scratch, PRIBIO, "qle_fabric", 2011 SEC_TO_NSEC(1)); 2012 } 2013 } 2014 sc->sc_fabric_pending = 0; 2015 2016 iocb = (struct qle_iocb_plogx *)&sc->sc_fabric_response; 2017 rv = lemtoh16(&iocb->req_status); 2018 if (rv == QLE_PLOGX_ERROR) { 2019 rv = lemtoh32(&iocb->req_ioparms[0]); 2020 *info = lemtoh32(&iocb->req_ioparms[1]); 2021 } 2022 2023 return (rv); 2024 } 2025 2026 int 2027 qle_fabric_plogi(struct qle_softc *sc, struct qle_fc_port *port) 2028 { 2029 u_int32_t info; 2030 int err, loopid; 2031 2032 loopid = 0; 2033 
retry: 2034 if (port->loopid == 0) { 2035 2036 mtx_enter(&sc->sc_port_mtx); 2037 loopid = qle_get_loop_id(sc, loopid); 2038 mtx_leave(&sc->sc_port_mtx); 2039 if (loopid == -1) { 2040 printf("%s: ran out of loop ids\n", DEVNAME(sc)); 2041 return (1); 2042 } 2043 2044 port->loopid = loopid; 2045 } 2046 2047 err = qle_fabric_plogx(sc, port, QLE_PLOGX_LOGIN, &info); 2048 switch (err) { 2049 case 0: 2050 DPRINTF(QLE_D_PORT, "%s: logged in to %06x as %d\n", 2051 DEVNAME(sc), port->portid, port->loopid); 2052 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN; 2053 return (0); 2054 2055 case QLE_PLOGX_ERROR_PORT_ID_USED: 2056 DPRINTF(QLE_D_PORT, "%s: already logged in to %06x as %d\n", 2057 DEVNAME(sc), port->portid, info); 2058 port->loopid = info; 2059 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN; 2060 return (0); 2061 2062 case QLE_PLOGX_ERROR_HANDLE_USED: 2063 if (qle_add_logged_in_port(sc, loopid, info)) { 2064 return (1); 2065 } 2066 port->loopid = 0; 2067 loopid++; 2068 goto retry; 2069 2070 default: 2071 DPRINTF(QLE_D_PORT, "%s: error %x logging in to port %06x\n", 2072 DEVNAME(sc), err, port->portid); 2073 port->loopid = 0; 2074 return (1); 2075 } 2076 } 2077 2078 void 2079 qle_fabric_plogo(struct qle_softc *sc, struct qle_fc_port *port) 2080 { 2081 int err; 2082 u_int32_t info; 2083 2084 /* 2085 * we only log out if we can't see the port any more, so we always 2086 * want to do an explicit logout and free the n-port handle. 2087 */ 2088 err = qle_fabric_plogx(sc, port, QLE_PLOGX_LOGOUT | 2089 QLE_PLOGX_LOGOUT_EXPLICIT | QLE_PLOGX_LOGOUT_FREE_HANDLE, &info); 2090 if (err == 0) { 2091 DPRINTF(QLE_D_PORT, "%s: logged out of port %06x\n", 2092 DEVNAME(sc), port->portid); 2093 } else { 2094 DPRINTF(QLE_D_PORT, "%s: failed to log out of port %06x: " 2095 "%x %x\n", DEVNAME(sc), port->portid, err, info); 2096 } 2097 } 2098 2099 void 2100 qle_deferred_update(void *xsc) 2101 { 2102 struct qle_softc *sc = xsc; 2103 task_add(sc->sc_update_taskq, &sc->sc_update_task); 2104 } 2105 2106 void 2107 qle_do_update(void *xsc) 2108 { 2109 struct qle_softc *sc = xsc; 2110 int firstport, lastport; 2111 struct qle_fc_port *port, *fport; 2112 2113 DPRINTF(QLE_D_PORT, "%s: updating\n", DEVNAME(sc)); 2114 while (sc->sc_update_tasks != 0) { 2115 if (sc->sc_update_tasks & QLE_UPDATE_TASK_CLEAR_ALL) { 2116 TAILQ_HEAD(, qle_fc_port) detach; 2117 DPRINTF(QLE_D_PORT, "%s: detaching everything\n", 2118 DEVNAME(sc)); 2119 2120 mtx_enter(&sc->sc_port_mtx); 2121 qle_clear_port_lists(sc); 2122 TAILQ_INIT(&detach); 2123 TAILQ_CONCAT(&detach, &sc->sc_ports, ports); 2124 mtx_leave(&sc->sc_port_mtx); 2125 2126 while (!TAILQ_EMPTY(&detach)) { 2127 port = TAILQ_FIRST(&detach); 2128 TAILQ_REMOVE(&detach, port, ports); 2129 if (port->flags & QLE_PORT_FLAG_IS_TARGET) { 2130 scsi_detach_target(sc->sc_scsibus, 2131 port->loopid, DETACH_FORCE | 2132 DETACH_QUIET); 2133 sc->sc_targets[port->loopid] = NULL; 2134 } 2135 if (port->location & QLE_LOCATION_FABRIC) 2136 qle_fabric_plogo(sc, port); 2137 2138 free(port, M_DEVBUF, sizeof *port); 2139 } 2140 2141 qle_update_done(sc, QLE_UPDATE_TASK_CLEAR_ALL); 2142 continue; 2143 } 2144 2145 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SOFTRESET) { 2146 DPRINTF(QLE_D_IO, "%s: attempting softreset\n", 2147 DEVNAME(sc)); 2148 if (qle_softreset(sc) != 0) { 2149 DPRINTF(QLE_D_IO, "%s: couldn't softreset\n", 2150 DEVNAME(sc)); 2151 } 2152 qle_update_done(sc, QLE_UPDATE_TASK_SOFTRESET); 2153 continue; 2154 } 2155 2156 if (sc->sc_update_tasks & QLE_UPDATE_TASK_UPDATE_TOPO) { 2157 DPRINTF(QLE_D_PORT, "%s: updating 
topology\n", 2158 DEVNAME(sc)); 2159 qle_update_topology(sc); 2160 qle_update_done(sc, QLE_UPDATE_TASK_UPDATE_TOPO); 2161 continue; 2162 } 2163 2164 if (sc->sc_update_tasks & QLE_UPDATE_TASK_GET_PORT_LIST) { 2165 DPRINTF(QLE_D_PORT, "%s: getting port name list\n", 2166 DEVNAME(sc)); 2167 mtx_enter(&sc->sc_port_mtx); 2168 qle_clear_port_lists(sc); 2169 mtx_leave(&sc->sc_port_mtx); 2170 2171 qle_get_port_name_list(sc, QLE_LOCATION_LOOP | 2172 QLE_LOCATION_FABRIC); 2173 mtx_enter(&sc->sc_port_mtx); 2174 TAILQ_FOREACH(port, &sc->sc_ports, ports) { 2175 TAILQ_INSERT_TAIL(&sc->sc_ports_gone, port, 2176 update); 2177 if (port->location & QLE_LOCATION_FABRIC) { 2178 port->flags |= 2179 QLE_PORT_FLAG_NEEDS_LOGIN; 2180 } 2181 } 2182 2183 /* take care of ports that haven't changed first */ 2184 TAILQ_FOREACH(fport, &sc->sc_ports_found, update) { 2185 port = sc->sc_targets[fport->loopid]; 2186 if (port == NULL || fport->port_name != 2187 port->port_name) { 2188 /* new or changed port, handled later */ 2189 continue; 2190 } 2191 2192 /* 2193 * the port hasn't been logged out, which 2194 * means we don't need to log in again, and, 2195 * for loop ports, that the port still exists 2196 */ 2197 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN; 2198 if (port->location & QLE_LOCATION_LOOP) 2199 TAILQ_REMOVE(&sc->sc_ports_gone, 2200 port, update); 2201 2202 fport->location = 0; 2203 } 2204 mtx_leave(&sc->sc_port_mtx); 2205 qle_update_start(sc, QLE_UPDATE_TASK_PORT_LIST); 2206 qle_update_done(sc, QLE_UPDATE_TASK_GET_PORT_LIST); 2207 continue; 2208 } 2209 2210 if (sc->sc_update_tasks & QLE_UPDATE_TASK_PORT_LIST) { 2211 mtx_enter(&sc->sc_port_mtx); 2212 fport = TAILQ_FIRST(&sc->sc_ports_found); 2213 if (fport != NULL) { 2214 TAILQ_REMOVE(&sc->sc_ports_found, fport, 2215 update); 2216 } 2217 mtx_leave(&sc->sc_port_mtx); 2218 2219 if (fport == NULL) { 2220 DPRINTF(QLE_D_PORT, "%s: done with ports\n", 2221 DEVNAME(sc)); 2222 qle_update_done(sc, 2223 QLE_UPDATE_TASK_PORT_LIST); 2224 qle_update_start(sc, 2225 QLE_UPDATE_TASK_SCAN_FABRIC); 2226 } else if (fport->location & QLE_LOCATION_LOOP) { 2227 DPRINTF(QLE_D_PORT, "%s: loop port %04x\n", 2228 DEVNAME(sc), fport->loopid); 2229 if (qle_add_loop_port(sc, fport) != 0) 2230 free(fport, M_DEVBUF, sizeof *port); 2231 } else if (fport->location & QLE_LOCATION_FABRIC) { 2232 qle_add_fabric_port(sc, fport); 2233 } else { 2234 /* already processed */ 2235 free(fport, M_DEVBUF, sizeof *port); 2236 } 2237 continue; 2238 } 2239 2240 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SCAN_FABRIC) { 2241 DPRINTF(QLE_D_PORT, "%s: starting fabric scan\n", 2242 DEVNAME(sc)); 2243 lastport = sc->sc_port_id; 2244 firstport = 0xffffffff; 2245 if (qle_update_fabric(sc)) 2246 qle_update_start(sc, 2247 QLE_UPDATE_TASK_SCANNING_FABRIC); 2248 else 2249 qle_update_start(sc, 2250 QLE_UPDATE_TASK_ATTACH_TARGET | 2251 QLE_UPDATE_TASK_DETACH_TARGET); 2252 2253 qle_update_done(sc, QLE_UPDATE_TASK_SCAN_FABRIC); 2254 continue; 2255 } 2256 2257 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SCANNING_FABRIC) { 2258 fport = qle_next_fabric_port(sc, &firstport, &lastport); 2259 if (fport != NULL) { 2260 int disp; 2261 2262 mtx_enter(&sc->sc_port_mtx); 2263 disp = qle_classify_port(sc, fport->location, 2264 fport->port_name, fport->node_name, &port); 2265 switch (disp) { 2266 case QLE_PORT_DISP_CHANGED: 2267 case QLE_PORT_DISP_MOVED: 2268 /* we'll log out the old port later */ 2269 case QLE_PORT_DISP_NEW: 2270 DPRINTF(QLE_D_PORT, "%s: new port " 2271 "%06x\n", DEVNAME(sc), 2272 fport->portid); 2273 
TAILQ_INSERT_TAIL(&sc->sc_ports_found, 2274 fport, update); 2275 break; 2276 case QLE_PORT_DISP_DUP: 2277 free(fport, M_DEVBUF, sizeof *port); 2278 break; 2279 case QLE_PORT_DISP_SAME: 2280 DPRINTF(QLE_D_PORT, "%s: existing port " 2281 " %06x\n", DEVNAME(sc), 2282 fport->portid); 2283 TAILQ_REMOVE(&sc->sc_ports_gone, port, 2284 update); 2285 free(fport, M_DEVBUF, sizeof *port); 2286 break; 2287 } 2288 mtx_leave(&sc->sc_port_mtx); 2289 } 2290 if (lastport == 0xffffffff) { 2291 DPRINTF(QLE_D_PORT, "%s: finished\n", 2292 DEVNAME(sc)); 2293 qle_update_done(sc, 2294 QLE_UPDATE_TASK_SCANNING_FABRIC); 2295 qle_update_start(sc, 2296 QLE_UPDATE_TASK_FABRIC_LOGIN); 2297 } 2298 continue; 2299 } 2300 2301 if (sc->sc_update_tasks & QLE_UPDATE_TASK_FABRIC_LOGIN) { 2302 mtx_enter(&sc->sc_port_mtx); 2303 port = TAILQ_FIRST(&sc->sc_ports_found); 2304 if (port != NULL) { 2305 TAILQ_REMOVE(&sc->sc_ports_found, port, update); 2306 } 2307 mtx_leave(&sc->sc_port_mtx); 2308 2309 if (port != NULL) { 2310 DPRINTF(QLE_D_PORT, "%s: found port %06x\n", 2311 DEVNAME(sc), port->portid); 2312 if (qle_fabric_plogi(sc, port) == 0) { 2313 qle_add_fabric_port(sc, port); 2314 } else { 2315 DPRINTF(QLE_D_PORT, "%s: plogi %06x " 2316 "failed\n", DEVNAME(sc), 2317 port->portid); 2318 free(port, M_DEVBUF, sizeof *port); 2319 } 2320 } else { 2321 DPRINTF(QLE_D_PORT, "%s: done with logins\n", 2322 DEVNAME(sc)); 2323 qle_update_done(sc, 2324 QLE_UPDATE_TASK_FABRIC_LOGIN); 2325 qle_update_start(sc, 2326 QLE_UPDATE_TASK_ATTACH_TARGET | 2327 QLE_UPDATE_TASK_DETACH_TARGET); 2328 } 2329 continue; 2330 } 2331 2332 if (sc->sc_update_tasks & QLE_UPDATE_TASK_FABRIC_RELOGIN) { 2333 TAILQ_FOREACH(port, &sc->sc_ports, ports) { 2334 if (port->flags & QLE_PORT_FLAG_NEEDS_LOGIN) { 2335 qle_fabric_plogi(sc, port); 2336 break; 2337 } 2338 } 2339 2340 if (port == NULL) 2341 qle_update_done(sc, 2342 QLE_UPDATE_TASK_FABRIC_RELOGIN); 2343 continue; 2344 } 2345 2346 if (sc->sc_update_tasks & QLE_UPDATE_TASK_DETACH_TARGET) { 2347 mtx_enter(&sc->sc_port_mtx); 2348 port = TAILQ_FIRST(&sc->sc_ports_gone); 2349 if (port != NULL) { 2350 sc->sc_targets[port->loopid] = NULL; 2351 TAILQ_REMOVE(&sc->sc_ports_gone, port, update); 2352 TAILQ_REMOVE(&sc->sc_ports, port, ports); 2353 } 2354 mtx_leave(&sc->sc_port_mtx); 2355 2356 if (port != NULL) { 2357 DPRINTF(QLE_D_PORT, "%s: detaching port %06x\n", 2358 DEVNAME(sc), port->portid); 2359 if (sc->sc_scsibus != NULL) 2360 scsi_detach_target(sc->sc_scsibus, 2361 port->loopid, DETACH_FORCE | 2362 DETACH_QUIET); 2363 2364 if (port->location & QLE_LOCATION_FABRIC) 2365 qle_fabric_plogo(sc, port); 2366 2367 free(port, M_DEVBUF, sizeof *port); 2368 } else { 2369 DPRINTF(QLE_D_PORT, "%s: nothing to detach\n", 2370 DEVNAME(sc)); 2371 qle_update_done(sc, 2372 QLE_UPDATE_TASK_DETACH_TARGET); 2373 } 2374 continue; 2375 } 2376 2377 if (sc->sc_update_tasks & QLE_UPDATE_TASK_ATTACH_TARGET) { 2378 mtx_enter(&sc->sc_port_mtx); 2379 port = TAILQ_FIRST(&sc->sc_ports_new); 2380 if (port != NULL) { 2381 TAILQ_REMOVE(&sc->sc_ports_new, port, update); 2382 TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports); 2383 } 2384 mtx_leave(&sc->sc_port_mtx); 2385 2386 if (port != NULL) { 2387 if (sc->sc_scsibus != NULL) 2388 scsi_probe_target(sc->sc_scsibus, 2389 port->loopid); 2390 } else { 2391 qle_update_done(sc, 2392 QLE_UPDATE_TASK_ATTACH_TARGET); 2393 } 2394 continue; 2395 } 2396 2397 } 2398 2399 DPRINTF(QLE_D_PORT, "%s: done updating\n", DEVNAME(sc)); 2400 } 2401 2402 int 2403 qle_async(struct qle_softc *sc, u_int16_t info) 2404 { 2405 switch 
(info) { 2406 case QLE_ASYNC_SYSTEM_ERROR: 2407 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET); 2408 break; 2409 2410 case QLE_ASYNC_REQ_XFER_ERROR: 2411 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET); 2412 break; 2413 2414 case QLE_ASYNC_RSP_XFER_ERROR: 2415 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET); 2416 break; 2417 2418 case QLE_ASYNC_LIP_OCCURRED: 2419 DPRINTF(QLE_D_INTR, "%s: lip occurred\n", DEVNAME(sc)); 2420 break; 2421 2422 case QLE_ASYNC_LOOP_UP: 2423 DPRINTF(QLE_D_PORT, "%s: loop up\n", DEVNAME(sc)); 2424 sc->sc_loop_up = 1; 2425 sc->sc_marker_required = 1; 2426 qle_update_defer(sc, QLE_UPDATE_TASK_UPDATE_TOPO | 2427 QLE_UPDATE_TASK_GET_PORT_LIST); 2428 break; 2429 2430 case QLE_ASYNC_LOOP_DOWN: 2431 DPRINTF(QLE_D_PORT, "%s: loop down\n", DEVNAME(sc)); 2432 sc->sc_loop_up = 0; 2433 qle_update_cancel(sc); 2434 qle_update_start(sc, QLE_UPDATE_TASK_CLEAR_ALL); 2435 break; 2436 2437 case QLE_ASYNC_LIP_RESET: 2438 DPRINTF(QLE_D_PORT, "%s: lip reset\n", DEVNAME(sc)); 2439 sc->sc_marker_required = 1; 2440 qle_update_defer(sc, QLE_UPDATE_TASK_FABRIC_RELOGIN); 2441 break; 2442 2443 case QLE_ASYNC_PORT_DB_CHANGE: 2444 DPRINTF(QLE_D_PORT, "%s: port db changed %x\n", DEVNAME(sc), 2445 qle_read_mbox(sc, 1)); 2446 qle_update_start(sc, QLE_UPDATE_TASK_GET_PORT_LIST); 2447 break; 2448 2449 case QLE_ASYNC_CHANGE_NOTIFY: 2450 DPRINTF(QLE_D_PORT, "%s: name server change (%02x:%02x)\n", 2451 DEVNAME(sc), qle_read_mbox(sc, 1), qle_read_mbox(sc, 2)); 2452 qle_update_start(sc, QLE_UPDATE_TASK_GET_PORT_LIST); 2453 break; 2454 2455 case QLE_ASYNC_LIP_F8: 2456 DPRINTF(QLE_D_INTR, "%s: lip f8\n", DEVNAME(sc)); 2457 break; 2458 2459 case QLE_ASYNC_LOOP_INIT_ERROR: 2460 DPRINTF(QLE_D_PORT, "%s: loop initialization error: %x\n", 2461 DEVNAME(sc), qle_read_mbox(sc, 1)); 2462 break; 2463 2464 case QLE_ASYNC_POINT_TO_POINT: 2465 DPRINTF(QLE_D_PORT, "%s: connected in point-to-point mode\n", 2466 DEVNAME(sc)); 2467 break; 2468 2469 case QLE_ASYNC_ZIO_RESP_UPDATE: 2470 /* shouldn't happen, we don't do zio */ 2471 break; 2472 2473 default: 2474 DPRINTF(QLE_D_INTR, "%s: unknown async %x\n", DEVNAME(sc), info); 2475 break; 2476 } 2477 return (1); 2478 } 2479 2480 void 2481 qle_dump_stuff(struct qle_softc *sc, void *buf, int n) 2482 { 2483 #ifdef QLE_DEBUG 2484 u_int8_t *d = buf; 2485 int l; 2486 2487 if ((qledebug & QLE_D_IOCB) == 0) 2488 return; 2489 2490 printf("%s: stuff\n", DEVNAME(sc)); 2491 for (l = 0; l < n; l++) { 2492 printf(" %2.2x", d[l]); 2493 if (l % 16 == 15) 2494 printf("\n"); 2495 } 2496 if (n % 16 != 0) 2497 printf("\n"); 2498 #endif 2499 } 2500 2501 void 2502 qle_dump_iocb(struct qle_softc *sc, void *buf) 2503 { 2504 #ifdef QLE_DEBUG 2505 u_int8_t *iocb = buf; 2506 int l; 2507 int b; 2508 2509 if ((qledebug & QLE_D_IOCB) == 0) 2510 return; 2511 2512 printf("%s: iocb:\n", DEVNAME(sc)); 2513 for (l = 0; l < 4; l++) { 2514 for (b = 0; b < 16; b++) { 2515 printf(" %2.2x", iocb[(l*16)+b]); 2516 } 2517 printf("\n"); 2518 } 2519 #endif 2520 } 2521 2522 void 2523 qle_dump_iocb_segs(struct qle_softc *sc, void *segs, int n) 2524 { 2525 #ifdef QLE_DEBUG 2526 u_int8_t *buf = segs; 2527 int s, b; 2528 2529 if ((qledebug & QLE_D_IOCB) == 0) 2530 return; 2531 2532 printf("%s: iocb segs:\n", DEVNAME(sc)); 2533 for (s = 0; s < n; s++) { 2534 for (b = 0; b < sizeof(struct qle_iocb_seg); b++) { 2535 printf(" %2.2x", buf[(s*(sizeof(struct qle_iocb_seg))) 2536 + b]); 2537 } 2538 printf("\n"); 2539 } 2540 #endif 2541 } 2542 2543 void 2544 qle_put_marker(struct qle_softc *sc, void *buf) 2545 { 2546 struct 
qle_iocb_marker *marker = buf; 2547 2548 marker->entry_type = QLE_IOCB_MARKER; 2549 marker->entry_count = 1; 2550 marker->seqno = 0; 2551 marker->flags = 0; 2552 2553 /* could be more specific here; isp(4) isn't */ 2554 marker->target = 0; 2555 marker->modifier = QLE_IOCB_MARKER_SYNC_ALL; 2556 } 2557 2558 void 2559 qle_sge(struct qle_iocb_seg *seg, u_int64_t addr, u_int32_t len) 2560 { 2561 htolem32(&seg->seg_addr_lo, addr); 2562 htolem32(&seg->seg_addr_hi, addr >> 32); 2563 htolem32(&seg->seg_len, len); 2564 } 2565 2566 void 2567 qle_put_cmd(struct qle_softc *sc, void *buf, struct scsi_xfer *xs, 2568 struct qle_ccb *ccb, u_int32_t target_port) 2569 { 2570 bus_dmamap_t dmap = ccb->ccb_dmamap; 2571 struct qle_iocb_req6 *req = buf; 2572 struct qle_fcp_cmnd *cmnd; 2573 u_int64_t fcp_cmnd_offset; 2574 u_int32_t fcp_dl; 2575 int seg; 2576 int target = xs->sc_link->target; 2577 int lun = xs->sc_link->lun; 2578 u_int16_t flags; 2579 2580 memset(req, 0, sizeof(*req)); 2581 req->entry_type = QLE_IOCB_CMD_TYPE_6; 2582 req->entry_count = 1; 2583 2584 req->req_handle = ccb->ccb_id; 2585 htolem16(&req->req_nport_handle, target); 2586 2587 /* 2588 * timeout is in seconds. make sure it's at least 1 if a timeout 2589 * was specified in xs 2590 */ 2591 if (xs->timeout != 0) 2592 htolem16(&req->req_timeout, MAX(1, xs->timeout/1000)); 2593 2594 if (xs->datalen > 0) { 2595 flags = (xs->flags & SCSI_DATA_IN) ? 2596 QLE_IOCB_CTRL_FLAG_READ : QLE_IOCB_CTRL_FLAG_WRITE; 2597 if (dmap->dm_nsegs == 1) { 2598 qle_sge(&req->req_data_seg, dmap->dm_segs[0].ds_addr, 2599 dmap->dm_segs[0].ds_len); 2600 } else { 2601 flags |= QLE_IOCB_CTRL_FLAG_EXT_SEG; 2602 for (seg = 0; seg < dmap->dm_nsegs; seg++) { 2603 qle_sge(&ccb->ccb_segs[seg], 2604 dmap->dm_segs[seg].ds_addr, 2605 dmap->dm_segs[seg].ds_len); 2606 } 2607 qle_sge(&ccb->ccb_segs[seg++], 0, 0); 2608 2609 bus_dmamap_sync(sc->sc_dmat, 2610 QLE_DMA_MAP(sc->sc_segments), ccb->ccb_seg_offset, 2611 seg * sizeof(*ccb->ccb_segs), 2612 BUS_DMASYNC_PREWRITE); 2613 2614 qle_sge(&req->req_data_seg, 2615 QLE_DMA_DVA(sc->sc_segments) + ccb->ccb_seg_offset, 2616 seg * sizeof(struct qle_iocb_seg)); 2617 } 2618 2619 htolem16(&req->req_data_seg_count, dmap->dm_nsegs); 2620 htolem32(&req->req_data_len, xs->datalen); 2621 htolem16(&req->req_ctrl_flags, flags); 2622 } 2623 2624 htobem16(&req->req_fcp_lun[0], lun); 2625 htobem16(&req->req_fcp_lun[1], lun >> 16); 2626 htolem32(&req->req_target_id, target_port & 0xffffff); 2627 2628 fcp_cmnd_offset = ccb->ccb_id * sizeof(*cmnd); 2629 /* set up FCP_CMND */ 2630 cmnd = (struct qle_fcp_cmnd *)QLE_DMA_KVA(sc->sc_fcp_cmnds) + 2631 ccb->ccb_id; 2632 2633 memset(cmnd, 0, sizeof(*cmnd)); 2634 htobem16(&cmnd->fcp_lun[0], lun); 2635 htobem16(&cmnd->fcp_lun[1], lun >> 16); 2636 /* cmnd->fcp_task_attr = TSK_SIMPLE; */ 2637 /* cmnd->fcp_task_mgmt = 0; */ 2638 memcpy(cmnd->fcp_cdb, &xs->cmd, xs->cmdlen); 2639 2640 /* FCP_DL goes after the cdb */ 2641 fcp_dl = htobe32(xs->datalen); 2642 if (xs->cmdlen > 16) { 2643 htolem16(&req->req_fcp_cmnd_len, 12 + xs->cmdlen + 4); 2644 cmnd->fcp_add_cdb_len = xs->cmdlen - 16; 2645 memcpy(cmnd->fcp_cdb + xs->cmdlen, &fcp_dl, sizeof(fcp_dl)); 2646 } else { 2647 htolem16(&req->req_fcp_cmnd_len, 12 + 16 + 4); 2648 cmnd->fcp_add_cdb_len = 0; 2649 memcpy(cmnd->fcp_cdb + 16, &fcp_dl, sizeof(fcp_dl)); 2650 } 2651 if (xs->datalen > 0) 2652 cmnd->fcp_add_cdb_len |= (xs->flags & SCSI_DATA_IN) ? 
2 : 1; 2653 2654 bus_dmamap_sync(sc->sc_dmat, 2655 QLE_DMA_MAP(sc->sc_fcp_cmnds), fcp_cmnd_offset, 2656 sizeof(*cmnd), BUS_DMASYNC_PREWRITE); 2657 2658 /* link req to cmnd */ 2659 fcp_cmnd_offset += QLE_DMA_DVA(sc->sc_fcp_cmnds); 2660 htolem32(&req->req_fcp_cmnd_addr_lo, fcp_cmnd_offset); 2661 htolem32(&req->req_fcp_cmnd_addr_hi, fcp_cmnd_offset >> 32); 2662 } 2663 2664 int 2665 qle_load_fwchunk(struct qle_softc *sc, struct qle_dmamem *mem, 2666 const u_int32_t *src) 2667 { 2668 u_int32_t dest, done, total; 2669 int i; 2670 2671 dest = src[2]; 2672 done = 0; 2673 total = src[3]; 2674 2675 while (done < total) { 2676 u_int32_t *copy; 2677 u_int32_t words; 2678 2679 /* limit transfer size otherwise it just doesn't work */ 2680 words = MIN(total - done, 1 << 10); 2681 copy = QLE_DMA_KVA(mem); 2682 for (i = 0; i < words; i++) { 2683 htolem32(©[i], src[done++]); 2684 } 2685 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, words * 4, 2686 BUS_DMASYNC_PREWRITE); 2687 2688 sc->sc_mbox[0] = QLE_MBOX_LOAD_RISC_RAM; 2689 sc->sc_mbox[1] = dest; 2690 sc->sc_mbox[4] = words >> 16; 2691 sc->sc_mbox[5] = words & 0xffff; 2692 sc->sc_mbox[8] = dest >> 16; 2693 qle_mbox_putaddr(sc->sc_mbox, mem); 2694 if (qle_mbox(sc, 0x01ff)) { 2695 printf("firmware load failed\n"); 2696 return (1); 2697 } 2698 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, words * 4, 2699 BUS_DMASYNC_POSTWRITE); 2700 2701 dest += words; 2702 } 2703 2704 return (qle_verify_firmware(sc, src[2])); 2705 } 2706 2707 int 2708 qle_load_firmware_chunks(struct qle_softc *sc, const u_int32_t *fw) 2709 { 2710 struct qle_dmamem *mem; 2711 int res = 0; 2712 2713 mem = qle_dmamem_alloc(sc, 65536); 2714 for (;;) { 2715 if (qle_load_fwchunk(sc, mem, fw)) { 2716 res = 1; 2717 break; 2718 } 2719 if (fw[1] == 0) 2720 break; 2721 fw += fw[3]; 2722 } 2723 2724 qle_dmamem_free(sc, mem); 2725 return (res); 2726 } 2727 2728 u_int32_t 2729 qle_read_ram_word(struct qle_softc *sc, u_int32_t addr) 2730 { 2731 sc->sc_mbox[0] = QLE_MBOX_READ_RISC_RAM; 2732 sc->sc_mbox[1] = addr & 0xffff; 2733 sc->sc_mbox[8] = addr >> 16; 2734 if (qle_mbox(sc, 0x0103)) { 2735 return (0); 2736 } 2737 return ((sc->sc_mbox[3] << 16) | sc->sc_mbox[2]); 2738 } 2739 2740 int 2741 qle_verify_firmware(struct qle_softc *sc, u_int32_t addr) 2742 { 2743 /* 2744 * QLE_MBOX_VERIFY_CSUM requires at least the firmware header 2745 * to be correct, otherwise it wanders all over ISP memory and 2746 * gets lost. Check that chunk address (addr+2) is right and 2747 * size (addr+3) is plausible first. 
2748 */ 2749 if ((qle_read_ram_word(sc, addr+2) != addr) || 2750 (qle_read_ram_word(sc, addr+3) > 0xffff)) { 2751 return (1); 2752 } 2753 2754 sc->sc_mbox[0] = QLE_MBOX_VERIFY_CSUM; 2755 sc->sc_mbox[1] = addr >> 16; 2756 sc->sc_mbox[2] = addr; 2757 if (qle_mbox(sc, 0x0007)) { 2758 return (1); 2759 } 2760 return (0); 2761 } 2762 2763 int 2764 qle_read_nvram(struct qle_softc *sc) 2765 { 2766 u_int32_t data[sizeof(sc->sc_nvram) / 4]; 2767 u_int32_t csum, tmp, v; 2768 int i, base, l; 2769 2770 switch (sc->sc_isp_gen) { 2771 case QLE_GEN_ISP24XX: 2772 base = 0x7ffe0080; 2773 break; 2774 case QLE_GEN_ISP25XX: 2775 base = 0x7ff48080; 2776 break; 2777 } 2778 base += sc->sc_port * 0x100; 2779 2780 csum = 0; 2781 for (i = 0; i < nitems(data); i++) { 2782 data[i] = 0xffffffff; 2783 qle_write(sc, QLE_FLASH_NVRAM_ADDR, base + i); 2784 for (l = 0; l < 5000; l++) { 2785 delay(10); 2786 tmp = qle_read(sc, QLE_FLASH_NVRAM_ADDR); 2787 if (tmp & (1U << 31)) { 2788 v = qle_read(sc, QLE_FLASH_NVRAM_DATA); 2789 csum += v; 2790 data[i] = letoh32(v); 2791 break; 2792 } 2793 } 2794 } 2795 2796 bcopy(data, &sc->sc_nvram, sizeof(sc->sc_nvram)); 2797 /* id field should be 'ISP' */ 2798 if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' || 2799 sc->sc_nvram.id[2] != 'P' || csum != 0) { 2800 printf("%s: nvram corrupt\n", DEVNAME(sc)); 2801 return (1); 2802 } 2803 return (0); 2804 } 2805 2806 struct qle_dmamem * 2807 qle_dmamem_alloc(struct qle_softc *sc, size_t size) 2808 { 2809 struct qle_dmamem *m; 2810 int nsegs; 2811 2812 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO); 2813 if (m == NULL) 2814 return (NULL); 2815 2816 m->qdm_size = size; 2817 2818 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 2819 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0) 2820 goto qdmfree; 2821 2822 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1, 2823 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) 2824 goto destroy; 2825 2826 if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva, 2827 BUS_DMA_NOWAIT) != 0) 2828 goto free; 2829 2830 if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL, 2831 BUS_DMA_NOWAIT) != 0) 2832 goto unmap; 2833 2834 return (m); 2835 2836 unmap: 2837 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size); 2838 free: 2839 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1); 2840 destroy: 2841 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map); 2842 qdmfree: 2843 free(m, M_DEVBUF, sizeof *m); 2844 2845 return (NULL); 2846 } 2847 2848 void 2849 qle_dmamem_free(struct qle_softc *sc, struct qle_dmamem *m) 2850 { 2851 bus_dmamap_unload(sc->sc_dmat, m->qdm_map); 2852 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size); 2853 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1); 2854 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map); 2855 free(m, M_DEVBUF, sizeof *m); 2856 } 2857 2858 int 2859 qle_alloc_ccbs(struct qle_softc *sc) 2860 { 2861 struct qle_ccb *ccb; 2862 u_int8_t *cmd; 2863 int i; 2864 2865 SIMPLEQ_INIT(&sc->sc_ccb_free); 2866 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 2867 mtx_init(&sc->sc_queue_mtx, IPL_BIO); 2868 mtx_init(&sc->sc_port_mtx, IPL_BIO); 2869 mtx_init(&sc->sc_mbox_mtx, IPL_BIO); 2870 2871 sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct qle_ccb), 2872 M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO); 2873 if (sc->sc_ccbs == NULL) { 2874 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)); 2875 return (1); 2876 } 2877 2878 sc->sc_requests = qle_dmamem_alloc(sc, sc->sc_maxcmds * 2879 QLE_QUEUE_ENTRY_SIZE); 2880 if (sc->sc_requests == NULL) { 2881 printf("%s: 
unable to allocate ccb dmamem\n", DEVNAME(sc)); 2882 goto free_ccbs; 2883 } 2884 sc->sc_responses = qle_dmamem_alloc(sc, sc->sc_maxcmds * 2885 QLE_QUEUE_ENTRY_SIZE); 2886 if (sc->sc_responses == NULL) { 2887 printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc)); 2888 goto free_req; 2889 } 2890 sc->sc_pri_requests = qle_dmamem_alloc(sc, 8 * QLE_QUEUE_ENTRY_SIZE); 2891 if (sc->sc_pri_requests == NULL) { 2892 printf("%s: unable to allocate pri ccb dmamem\n", DEVNAME(sc)); 2893 goto free_res; 2894 } 2895 sc->sc_segments = qle_dmamem_alloc(sc, sc->sc_maxcmds * QLE_MAX_SEGS * 2896 sizeof(struct qle_iocb_seg)); 2897 if (sc->sc_segments == NULL) { 2898 printf("%s: unable to allocate iocb segments\n", DEVNAME(sc)); 2899 goto free_pri; 2900 } 2901 2902 sc->sc_fcp_cmnds = qle_dmamem_alloc(sc, sc->sc_maxcmds * 2903 sizeof(struct qle_fcp_cmnd)); 2904 if (sc->sc_fcp_cmnds == NULL) { 2905 printf("%s: unable to allocate FCP_CMNDs\n", DEVNAME(sc)); 2906 goto free_seg; 2907 } 2908 2909 cmd = QLE_DMA_KVA(sc->sc_requests); 2910 memset(cmd, 0, QLE_QUEUE_ENTRY_SIZE * sc->sc_maxcmds); 2911 for (i = 0; i < sc->sc_maxcmds; i++) { 2912 ccb = &sc->sc_ccbs[i]; 2913 2914 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2915 QLE_MAX_SEGS-1, MAXPHYS, 0, 2916 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2917 &ccb->ccb_dmamap) != 0) { 2918 printf("%s: unable to create dma map\n", DEVNAME(sc)); 2919 goto free_maps; 2920 } 2921 2922 ccb->ccb_sc = sc; 2923 ccb->ccb_id = i; 2924 2925 ccb->ccb_seg_offset = i * QLE_MAX_SEGS * 2926 sizeof(struct qle_iocb_seg); 2927 ccb->ccb_segs = QLE_DMA_KVA(sc->sc_segments) + 2928 ccb->ccb_seg_offset; 2929 2930 qle_put_ccb(sc, ccb); 2931 } 2932 2933 scsi_iopool_init(&sc->sc_iopool, sc, qle_get_ccb, qle_put_ccb); 2934 return (0); 2935 2936 free_maps: 2937 while ((ccb = qle_get_ccb(sc)) != NULL) 2938 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 2939 2940 qle_dmamem_free(sc, sc->sc_fcp_cmnds); 2941 free_seg: 2942 qle_dmamem_free(sc, sc->sc_segments); 2943 free_pri: 2944 qle_dmamem_free(sc, sc->sc_pri_requests); 2945 free_res: 2946 qle_dmamem_free(sc, sc->sc_responses); 2947 free_req: 2948 qle_dmamem_free(sc, sc->sc_requests); 2949 free_ccbs: 2950 free(sc->sc_ccbs, M_DEVBUF, 0); 2951 2952 return (1); 2953 } 2954 2955 void 2956 qle_free_ccbs(struct qle_softc *sc) 2957 { 2958 struct qle_ccb *ccb; 2959 2960 scsi_iopool_destroy(&sc->sc_iopool); 2961 while ((ccb = qle_get_ccb(sc)) != NULL) 2962 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 2963 qle_dmamem_free(sc, sc->sc_segments); 2964 qle_dmamem_free(sc, sc->sc_responses); 2965 qle_dmamem_free(sc, sc->sc_requests); 2966 free(sc->sc_ccbs, M_DEVBUF, 0); 2967 } 2968 2969 void * 2970 qle_get_ccb(void *xsc) 2971 { 2972 struct qle_softc *sc = xsc; 2973 struct qle_ccb *ccb; 2974 2975 mtx_enter(&sc->sc_ccb_mtx); 2976 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free); 2977 if (ccb != NULL) { 2978 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link); 2979 } 2980 mtx_leave(&sc->sc_ccb_mtx); 2981 return (ccb); 2982 } 2983 2984 void 2985 qle_put_ccb(void *xsc, void *io) 2986 { 2987 struct qle_softc *sc = xsc; 2988 struct qle_ccb *ccb = io; 2989 2990 ccb->ccb_xs = NULL; 2991 mtx_enter(&sc->sc_ccb_mtx); 2992 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link); 2993 mtx_leave(&sc->sc_ccb_mtx); 2994 } 2995
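
/*
 * a rough usage sketch, not taken verbatim from this driver: qle_alloc_ccbs()
 * registers qle_get_ccb/qle_put_ccb with scsi_iopool_init(), so the scsi
 * midlayer reserves a ccb from sc_iopool before the command entry point runs
 * and (presumably, as in other HBA drivers) hands it over in the transfer:
 *
 *	struct qle_ccb *ccb = xs->io;	(ccb reserved earlier via sc_iopool)
 *	ccb->ccb_xs = xs;		(claim the ccb for this transfer)
 *	...build the type 6 IOCB, load ccb->ccb_dmamap, ring QLE_REQ_IN...
 *	scsi_done(xs);			(the ccb returns to the free list
 *					 through qle_put_ccb when the io is
 *					 released)
 */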