1 /* $OpenBSD: mfi.c,v 1.119 2011/04/09 20:23:31 marco Exp $ */ 2 /* 3 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us> 4 * 5 * Permission to use, copy, modify, and distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include "bio.h" 19 20 #include <sys/types.h> 21 #include <sys/param.h> 22 #include <sys/systm.h> 23 #include <sys/buf.h> 24 #include <sys/ioctl.h> 25 #include <sys/device.h> 26 #include <sys/kernel.h> 27 #include <sys/malloc.h> 28 #include <sys/proc.h> 29 #include <sys/rwlock.h> 30 #include <sys/sensors.h> 31 #include <sys/pool.h> 32 33 #include <machine/bus.h> 34 35 #include <scsi/scsi_all.h> 36 #include <scsi/scsi_disk.h> 37 #include <scsi/scsiconf.h> 38 39 #include <dev/biovar.h> 40 #include <dev/ic/mfireg.h> 41 #include <dev/ic/mfivar.h> 42 43 #ifdef MFI_DEBUG 44 uint32_t mfi_debug = 0 45 /* | MFI_D_CMD */ 46 /* | MFI_D_INTR */ 47 /* | MFI_D_MISC */ 48 /* | MFI_D_DMA */ 49 /* | MFI_D_IOCTL */ 50 /* | MFI_D_RW */ 51 /* | MFI_D_MEM */ 52 /* | MFI_D_CCB */ 53 ; 54 #endif 55 56 struct cfdriver mfi_cd = { 57 NULL, "mfi", DV_DULL 58 }; 59 60 void mfi_scsi_cmd(struct scsi_xfer *); 61 int mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int); 62 void mfiminphys(struct buf *bp, struct scsi_link *sl); 63 64 struct scsi_adapter mfi_switch = { 65 mfi_scsi_cmd, mfiminphys, 0, 0, mfi_scsi_ioctl 66 }; 67 68 void * mfi_get_ccb(void *); 69 void mfi_put_ccb(void *, void *); 70 int mfi_init_ccb(struct mfi_softc *); 71 72 struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t); 73 void mfi_freemem(struct mfi_softc *, struct mfi_mem *); 74 75 int mfi_transition_firmware(struct mfi_softc *); 76 int mfi_initialize_firmware(struct mfi_softc *); 77 int mfi_get_info(struct mfi_softc *); 78 uint32_t mfi_read(struct mfi_softc *, bus_size_t); 79 void mfi_write(struct mfi_softc *, bus_size_t, uint32_t); 80 int mfi_poll(struct mfi_ccb *); 81 int mfi_create_sgl(struct mfi_ccb *, int); 82 83 /* commands */ 84 int mfi_scsi_ld(struct mfi_ccb *, struct scsi_xfer *); 85 int mfi_scsi_io(struct mfi_ccb *, struct scsi_xfer *, uint64_t, 86 uint32_t); 87 void mfi_scsi_xs_done(struct mfi_ccb *); 88 int mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t, 89 void *, uint8_t *); 90 int mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t, 91 uint32_t, uint32_t, void *, uint8_t *); 92 void mfi_mgmt_done(struct mfi_ccb *); 93 94 #if NBIO > 0 95 int mfi_ioctl(struct device *, u_long, caddr_t); 96 int mfi_bio_getitall(struct mfi_softc *); 97 int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *); 98 int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *); 99 int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *); 100 int mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *); 101 int mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *); 102 int mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *); 103 int 
mfi_bio_hs(struct mfi_softc *, int, int, void *); 104 #ifndef SMALL_KERNEL 105 int mfi_create_sensors(struct mfi_softc *); 106 void mfi_refresh_sensors(void *); 107 #endif /* SMALL_KERNEL */ 108 #endif /* NBIO > 0 */ 109 110 void mfi_start(struct mfi_softc *, struct mfi_ccb *); 111 void mfi_done(struct mfi_ccb *); 112 u_int32_t mfi_xscale_fw_state(struct mfi_softc *); 113 void mfi_xscale_intr_ena(struct mfi_softc *); 114 int mfi_xscale_intr(struct mfi_softc *); 115 void mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *); 116 117 static const struct mfi_iop_ops mfi_iop_xscale = { 118 mfi_xscale_fw_state, 119 mfi_xscale_intr_ena, 120 mfi_xscale_intr, 121 mfi_xscale_post 122 }; 123 124 u_int32_t mfi_ppc_fw_state(struct mfi_softc *); 125 void mfi_ppc_intr_ena(struct mfi_softc *); 126 int mfi_ppc_intr(struct mfi_softc *); 127 void mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *); 128 129 static const struct mfi_iop_ops mfi_iop_ppc = { 130 mfi_ppc_fw_state, 131 mfi_ppc_intr_ena, 132 mfi_ppc_intr, 133 mfi_ppc_post 134 }; 135 136 u_int32_t mfi_gen2_fw_state(struct mfi_softc *); 137 void mfi_gen2_intr_ena(struct mfi_softc *); 138 int mfi_gen2_intr(struct mfi_softc *); 139 void mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *); 140 141 static const struct mfi_iop_ops mfi_iop_gen2 = { 142 mfi_gen2_fw_state, 143 mfi_gen2_intr_ena, 144 mfi_gen2_intr, 145 mfi_gen2_post 146 }; 147 148 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s)) 149 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s)) 150 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s)) 151 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c))) 152 153 void * 154 mfi_get_ccb(void *cookie) 155 { 156 struct mfi_softc *sc = cookie; 157 struct mfi_ccb *ccb; 158 159 mtx_enter(&sc->sc_ccb_mtx); 160 ccb = SLIST_FIRST(&sc->sc_ccb_freeq); 161 if (ccb != NULL) { 162 SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link); 163 ccb->ccb_state = MFI_CCB_READY; 164 } 165 mtx_leave(&sc->sc_ccb_mtx); 166 167 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb); 168 169 return (ccb); 170 } 171 172 void 173 mfi_put_ccb(void *cookie, void *io) 174 { 175 struct mfi_softc *sc = cookie; 176 struct mfi_ccb *ccb = io; 177 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; 178 179 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb); 180 181 hdr->mfh_cmd_status = 0x0; 182 hdr->mfh_flags = 0x0; 183 ccb->ccb_state = MFI_CCB_FREE; 184 ccb->ccb_cookie = NULL; 185 ccb->ccb_flags = 0; 186 ccb->ccb_done = NULL; 187 ccb->ccb_direction = 0; 188 ccb->ccb_frame_size = 0; 189 ccb->ccb_extra_frames = 0; 190 ccb->ccb_sgl = NULL; 191 ccb->ccb_data = NULL; 192 ccb->ccb_len = 0; 193 194 mtx_enter(&sc->sc_ccb_mtx); 195 SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link); 196 mtx_leave(&sc->sc_ccb_mtx); 197 } 198 199 int 200 mfi_init_ccb(struct mfi_softc *sc) 201 { 202 struct mfi_ccb *ccb; 203 uint32_t i; 204 int error; 205 206 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc)); 207 208 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds, 209 M_DEVBUF, M_WAITOK|M_ZERO); 210 211 for (i = 0; i < sc->sc_max_cmds; i++) { 212 ccb = &sc->sc_ccb[i]; 213 214 ccb->ccb_sc = sc; 215 216 /* select i'th frame */ 217 ccb->ccb_frame = (union mfi_frame *) 218 (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i); 219 ccb->ccb_pframe = 220 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i; 221 ccb->ccb_pframe_offset = sc->sc_frames_size * i; 222 ccb->ccb_frame->mfr_header.mfh_context = i; 223 224 /* select i'th sense */ 225 ccb->ccb_sense = 
(struct mfi_sense *)
		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    ccb->ccb_frame, ccb->ccb_pframe,
		    ccb->ccb_sense, ccb->ccb_psense,
		    ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(sc, ccb);
	}

	return (0);
destroy:
	/* free only the dma maps that were created, then the ccb memory */
	while (i > 0) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return (1);
}

uint32_t
mfi_read(struct mfi_softc *sc, bus_size_t r)
{
	uint32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x%08x ", DEVNAME(sc), r, rv);
	return (rv);
}

void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem		*mm;
	int			nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
	    size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return (NULL);

	mm->am_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	return (mm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return (NULL);
}

void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}

int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != 
MFI_STATE_READY) { 360 DNPRINTF(MFI_D_MISC, 361 "%s: waiting for firmware to become ready\n", 362 DEVNAME(sc)); 363 cur_state = fw_state; 364 switch (fw_state) { 365 case MFI_STATE_FAULT: 366 printf("%s: firmware fault\n", DEVNAME(sc)); 367 return (1); 368 case MFI_STATE_WAIT_HANDSHAKE: 369 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE); 370 max_wait = 2; 371 break; 372 case MFI_STATE_OPERATIONAL: 373 mfi_write(sc, MFI_IDB, MFI_INIT_READY); 374 max_wait = 10; 375 break; 376 case MFI_STATE_UNDEFINED: 377 case MFI_STATE_BB_INIT: 378 max_wait = 2; 379 break; 380 case MFI_STATE_FW_INIT: 381 case MFI_STATE_DEVICE_SCAN: 382 case MFI_STATE_FLUSH_CACHE: 383 max_wait = 20; 384 break; 385 default: 386 printf("%s: unknown firmware state %d\n", 387 DEVNAME(sc), fw_state); 388 return (1); 389 } 390 for (i = 0; i < (max_wait * 10); i++) { 391 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK; 392 if (fw_state == cur_state) 393 DELAY(100000); 394 else 395 break; 396 } 397 if (fw_state == cur_state) { 398 printf("%s: firmware stuck in state %#x\n", 399 DEVNAME(sc), fw_state); 400 return (1); 401 } 402 } 403 404 return (0); 405 } 406 407 int 408 mfi_initialize_firmware(struct mfi_softc *sc) 409 { 410 struct mfi_ccb *ccb; 411 struct mfi_init_frame *init; 412 struct mfi_init_qinfo *qinfo; 413 uint64_t handy; 414 415 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc)); 416 417 if ((ccb = mfi_get_ccb(sc)) == NULL) 418 return (1); 419 420 init = &ccb->ccb_frame->mfr_init; 421 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE); 422 423 memset(qinfo, 0, sizeof *qinfo); 424 qinfo->miq_rq_entries = sc->sc_max_cmds + 1; 425 426 handy = MFIMEM_DVA(sc->sc_pcq) + 427 offsetof(struct mfi_prod_cons, mpc_reply_q); 428 qinfo->miq_rq_addr_hi = htole32(handy >> 32); 429 qinfo->miq_rq_addr_lo = htole32(handy); 430 431 handy = MFIMEM_DVA(sc->sc_pcq) + 432 offsetof(struct mfi_prod_cons, mpc_producer); 433 qinfo->miq_pi_addr_hi = htole32(handy >> 32); 434 qinfo->miq_pi_addr_lo = htole32(handy); 435 436 handy = MFIMEM_DVA(sc->sc_pcq) + 437 offsetof(struct mfi_prod_cons, mpc_consumer); 438 qinfo->miq_ci_addr_hi = htole32(handy >> 32); 439 qinfo->miq_ci_addr_lo = htole32(handy); 440 441 init->mif_header.mfh_cmd = MFI_CMD_INIT; 442 init->mif_header.mfh_data_len = sizeof *qinfo; 443 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE); 444 445 DNPRINTF(MFI_D_MISC, "%s: entries: %08x%08x rq: %08x%08x pi: %#x " 446 "ci: %08x%08x\n", 447 DEVNAME(sc), 448 qinfo->miq_rq_entries, 449 qinfo->miq_rq_addr_hi, qinfo->miq_rq_addr_lo, 450 qinfo->miq_pi_addr_hi, qinfo->miq_pi_addr_lo, 451 qinfo->miq_ci_addr_hi, qinfo->miq_ci_addr_lo); 452 453 if (mfi_poll(ccb)) { 454 printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc)); 455 return (1); 456 } 457 458 mfi_put_ccb(sc, ccb); 459 460 return (0); 461 } 462 463 int 464 mfi_get_info(struct mfi_softc *sc) 465 { 466 #ifdef MFI_DEBUG 467 int i; 468 #endif 469 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc)); 470 471 if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN, 472 sizeof(sc->sc_info), &sc->sc_info, NULL)) 473 return (1); 474 475 #ifdef MFI_DEBUG 476 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) { 477 printf("%s: active FW %s Version %s date %s time %s\n", 478 DEVNAME(sc), 479 sc->sc_info.mci_image_component[i].mic_name, 480 sc->sc_info.mci_image_component[i].mic_version, 481 sc->sc_info.mci_image_component[i].mic_build_date, 482 sc->sc_info.mci_image_component[i].mic_build_time); 483 } 484 485 for (i = 0; i < 
sc->sc_info.mci_pending_image_component_count; i++) { 486 printf("%s: pending FW %s Version %s date %s time %s\n", 487 DEVNAME(sc), 488 sc->sc_info.mci_pending_image_component[i].mic_name, 489 sc->sc_info.mci_pending_image_component[i].mic_version, 490 sc->sc_info.mci_pending_image_component[i].mic_build_date, 491 sc->sc_info.mci_pending_image_component[i].mic_build_time); 492 } 493 494 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n", 495 DEVNAME(sc), 496 sc->sc_info.mci_max_arms, 497 sc->sc_info.mci_max_spans, 498 sc->sc_info.mci_max_arrays, 499 sc->sc_info.mci_max_lds, 500 sc->sc_info.mci_product_name); 501 502 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n", 503 DEVNAME(sc), 504 sc->sc_info.mci_serial_number, 505 sc->sc_info.mci_hw_present, 506 sc->sc_info.mci_current_fw_time, 507 sc->sc_info.mci_max_cmds, 508 sc->sc_info.mci_max_sg_elements); 509 510 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n", 511 DEVNAME(sc), 512 sc->sc_info.mci_max_request_size, 513 sc->sc_info.mci_lds_present, 514 sc->sc_info.mci_lds_degraded, 515 sc->sc_info.mci_lds_offline, 516 sc->sc_info.mci_pd_present); 517 518 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n", 519 DEVNAME(sc), 520 sc->sc_info.mci_pd_disks_present, 521 sc->sc_info.mci_pd_disks_pred_failure, 522 sc->sc_info.mci_pd_disks_failed); 523 524 printf("%s: nvram %d mem %d flash %d\n", 525 DEVNAME(sc), 526 sc->sc_info.mci_nvram_size, 527 sc->sc_info.mci_memory_size, 528 sc->sc_info.mci_flash_size); 529 530 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n", 531 DEVNAME(sc), 532 sc->sc_info.mci_ram_correctable_errors, 533 sc->sc_info.mci_ram_uncorrectable_errors, 534 sc->sc_info.mci_cluster_allowed, 535 sc->sc_info.mci_cluster_active); 536 537 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n", 538 DEVNAME(sc), 539 sc->sc_info.mci_max_strips_per_io, 540 sc->sc_info.mci_raid_levels, 541 sc->sc_info.mci_adapter_ops, 542 sc->sc_info.mci_ld_ops); 543 544 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n", 545 DEVNAME(sc), 546 sc->sc_info.mci_stripe_sz_ops.min, 547 sc->sc_info.mci_stripe_sz_ops.max, 548 sc->sc_info.mci_pd_ops, 549 sc->sc_info.mci_pd_mix_support); 550 551 printf("%s: ecc_bucket %d pckg_prop %s\n", 552 DEVNAME(sc), 553 sc->sc_info.mci_ecc_bucket_count, 554 sc->sc_info.mci_package_version); 555 556 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n", 557 DEVNAME(sc), 558 sc->sc_info.mci_properties.mcp_seq_num, 559 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval, 560 sc->sc_info.mci_properties.mcp_intr_throttle_cnt, 561 sc->sc_info.mci_properties.mcp_intr_throttle_timeout); 562 563 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n", 564 DEVNAME(sc), 565 sc->sc_info.mci_properties.mcp_rebuild_rate, 566 sc->sc_info.mci_properties.mcp_patrol_read_rate, 567 sc->sc_info.mci_properties.mcp_bgi_rate, 568 sc->sc_info.mci_properties.mcp_cc_rate); 569 570 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n", 571 DEVNAME(sc), 572 sc->sc_info.mci_properties.mcp_recon_rate, 573 sc->sc_info.mci_properties.mcp_cache_flush_interval, 574 sc->sc_info.mci_properties.mcp_spinup_drv_cnt, 575 sc->sc_info.mci_properties.mcp_spinup_delay, 576 sc->sc_info.mci_properties.mcp_cluster_enable); 577 578 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n", 579 DEVNAME(sc), 580 sc->sc_info.mci_properties.mcp_coercion_mode, 581 
sc->sc_info.mci_properties.mcp_alarm_enable, 582 sc->sc_info.mci_properties.mcp_disable_auto_rebuild, 583 sc->sc_info.mci_properties.mcp_disable_battery_warn, 584 sc->sc_info.mci_properties.mcp_ecc_bucket_size); 585 586 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n", 587 DEVNAME(sc), 588 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate, 589 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion, 590 sc->sc_info.mci_properties.mcp_expose_encl_devices); 591 592 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n", 593 DEVNAME(sc), 594 sc->sc_info.mci_pci.mip_vendor, 595 sc->sc_info.mci_pci.mip_device, 596 sc->sc_info.mci_pci.mip_subvendor, 597 sc->sc_info.mci_pci.mip_subdevice); 598 599 printf("%s: type %#x port_count %d port_addr ", 600 DEVNAME(sc), 601 sc->sc_info.mci_host.mih_type, 602 sc->sc_info.mci_host.mih_port_count); 603 604 for (i = 0; i < 8; i++) 605 printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]); 606 printf("\n"); 607 608 printf("%s: type %.x port_count %d port_addr ", 609 DEVNAME(sc), 610 sc->sc_info.mci_device.mid_type, 611 sc->sc_info.mci_device.mid_port_count); 612 613 for (i = 0; i < 8; i++) 614 printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]); 615 printf("\n"); 616 #endif /* MFI_DEBUG */ 617 618 return (0); 619 } 620 621 void 622 mfiminphys(struct buf *bp, struct scsi_link *sl) 623 { 624 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount); 625 626 /* XXX currently using MFI_MAXFER = MAXPHYS */ 627 if (bp->b_bcount > MFI_MAXFER) 628 bp->b_bcount = MFI_MAXFER; 629 minphys(bp); 630 } 631 632 int 633 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop) 634 { 635 struct scsibus_attach_args saa; 636 uint32_t status, frames, max_sgl; 637 int i; 638 639 switch (iop) { 640 case MFI_IOP_XSCALE: 641 sc->sc_iop = &mfi_iop_xscale; 642 break; 643 case MFI_IOP_PPC: 644 sc->sc_iop = &mfi_iop_ppc; 645 break; 646 case MFI_IOP_GEN2: 647 sc->sc_iop = &mfi_iop_gen2; 648 break; 649 default: 650 panic("%s: unknown iop %d", DEVNAME(sc), iop); 651 } 652 653 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc)); 654 655 if (mfi_transition_firmware(sc)) 656 return (1); 657 658 SLIST_INIT(&sc->sc_ccb_freeq); 659 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 660 scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb); 661 662 rw_init(&sc->sc_lock, "mfi_lock"); 663 664 status = mfi_fw_state(sc); 665 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK; 666 max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16; 667 if (sc->sc_64bit_dma) { 668 sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1); 669 sc->sc_sgl_size = sizeof(struct mfi_sg64); 670 sc->sc_sgl_flags = MFI_FRAME_SGL64; 671 } else { 672 sc->sc_max_sgl = max_sgl; 673 sc->sc_sgl_size = sizeof(struct mfi_sg32); 674 sc->sc_sgl_flags = MFI_FRAME_SGL32; 675 } 676 DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n", 677 DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl); 678 679 /* consumer/producer and reply queue memory */ 680 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) + 681 sizeof(struct mfi_prod_cons)); 682 if (sc->sc_pcq == NULL) { 683 printf("%s: unable to allocate reply queue memory\n", 684 DEVNAME(sc)); 685 goto nopcq; 686 } 687 688 /* frame memory */ 689 /* we are not doing 64 bit IO so only calculate # of 32 bit frames */ 690 frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) / 691 MFI_FRAME_SIZE + 1; 692 sc->sc_frames_size = frames * MFI_FRAME_SIZE; 693 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds); 694 
if (sc->sc_frames == NULL) { 695 printf("%s: unable to allocate frame memory\n", DEVNAME(sc)); 696 goto noframe; 697 } 698 /* XXX hack, fix this */ 699 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) { 700 printf("%s: improper frame alignment (%#x) FIXME\n", 701 DEVNAME(sc), MFIMEM_DVA(sc->sc_frames)); 702 goto noframe; 703 } 704 705 /* sense memory */ 706 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE); 707 if (sc->sc_sense == NULL) { 708 printf("%s: unable to allocate sense memory\n", DEVNAME(sc)); 709 goto nosense; 710 } 711 712 /* now that we have all memory bits go initialize ccbs */ 713 if (mfi_init_ccb(sc)) { 714 printf("%s: could not init ccb list\n", DEVNAME(sc)); 715 goto noinit; 716 } 717 718 /* kickstart firmware with all addresses and pointers */ 719 if (mfi_initialize_firmware(sc)) { 720 printf("%s: could not initialize firmware\n", DEVNAME(sc)); 721 goto noinit; 722 } 723 724 if (mfi_get_info(sc)) { 725 printf("%s: could not retrieve controller information\n", 726 DEVNAME(sc)); 727 goto noinit; 728 } 729 730 printf("%s: logical drives %d, version %s, %dMB RAM\n", 731 DEVNAME(sc), 732 sc->sc_info.mci_lds_present, 733 sc->sc_info.mci_package_version, 734 sc->sc_info.mci_memory_size); 735 736 sc->sc_ld_cnt = sc->sc_info.mci_lds_present; 737 sc->sc_max_ld = sc->sc_ld_cnt; 738 for (i = 0; i < sc->sc_ld_cnt; i++) 739 sc->sc_ld[i].ld_present = 1; 740 741 if (sc->sc_ld_cnt) 742 sc->sc_link.openings = sc->sc_max_cmds / sc->sc_ld_cnt; 743 else 744 sc->sc_link.openings = sc->sc_max_cmds; 745 746 sc->sc_link.adapter_softc = sc; 747 sc->sc_link.adapter = &mfi_switch; 748 sc->sc_link.adapter_target = MFI_MAX_LD; 749 sc->sc_link.adapter_buswidth = sc->sc_max_ld; 750 sc->sc_link.pool = &sc->sc_iopool; 751 752 bzero(&saa, sizeof(saa)); 753 saa.saa_sc_link = &sc->sc_link; 754 755 config_found(&sc->sc_dev, &saa, scsiprint); 756 757 /* enable interrupts */ 758 mfi_intr_enable(sc); 759 760 #if NBIO > 0 761 if (bio_register(&sc->sc_dev, mfi_ioctl) != 0) 762 panic("%s: controller registration failed", DEVNAME(sc)); 763 else 764 sc->sc_ioctl = mfi_ioctl; 765 766 #ifndef SMALL_KERNEL 767 if (mfi_create_sensors(sc) != 0) 768 printf("%s: unable to create sensors\n", DEVNAME(sc)); 769 #endif 770 #endif /* NBIO > 0 */ 771 772 return (0); 773 noinit: 774 mfi_freemem(sc, sc->sc_sense); 775 nosense: 776 mfi_freemem(sc, sc->sc_frames); 777 noframe: 778 mfi_freemem(sc, sc->sc_pcq); 779 nopcq: 780 return (1); 781 } 782 783 int 784 mfi_poll(struct mfi_ccb *ccb) 785 { 786 struct mfi_softc *sc = ccb->ccb_sc; 787 struct mfi_frame_header *hdr; 788 int to = 0, rv = 0; 789 790 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc)); 791 792 hdr = &ccb->ccb_frame->mfr_header; 793 hdr->mfh_cmd_status = 0xff; 794 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 795 796 mfi_start(sc, ccb); 797 798 while (hdr->mfh_cmd_status == 0xff) { 799 delay(1000); 800 if (to++ > 5000) /* XXX 5 seconds busywait sucks */ 801 break; 802 } 803 if (hdr->mfh_cmd_status == 0xff) { 804 printf("%s: timeout on ccb %d\n", DEVNAME(sc), 805 hdr->mfh_context); 806 ccb->ccb_flags |= MFI_CCB_F_ERR; 807 rv = 1; 808 } 809 810 if (ccb->ccb_direction != MFI_DATA_NONE) { 811 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 812 ccb->ccb_dmamap->dm_mapsize, 813 (ccb->ccb_direction & MFI_DATA_IN) ? 
814 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 815 816 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); 817 } 818 819 return (rv); 820 } 821 822 int 823 mfi_intr(void *arg) 824 { 825 struct mfi_softc *sc = arg; 826 struct mfi_prod_cons *pcq; 827 struct mfi_ccb *ccb; 828 uint32_t producer, consumer, ctx; 829 int claimed = 0; 830 831 if (!mfi_my_intr(sc)) 832 return (0); 833 834 pcq = MFIMEM_KVA(sc->sc_pcq); 835 producer = pcq->mpc_producer; 836 consumer = pcq->mpc_consumer; 837 838 DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#x %#x\n", DEVNAME(sc), sc, pcq); 839 840 while (consumer != producer) { 841 DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n", 842 DEVNAME(sc), producer, consumer); 843 844 ctx = pcq->mpc_reply_q[consumer]; 845 pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX; 846 if (ctx == MFI_INVALID_CTX) 847 printf("%s: invalid context, p: %d c: %d\n", 848 DEVNAME(sc), producer, consumer); 849 else { 850 /* XXX remove from queue and call scsi_done */ 851 ccb = &sc->sc_ccb[ctx]; 852 DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n", 853 DEVNAME(sc), ctx); 854 mfi_done(ccb); 855 856 claimed = 1; 857 } 858 consumer++; 859 if (consumer == (sc->sc_max_cmds + 1)) 860 consumer = 0; 861 } 862 863 pcq->mpc_consumer = consumer; 864 865 return (claimed); 866 } 867 868 int 869 mfi_scsi_io(struct mfi_ccb *ccb, struct scsi_xfer *xs, uint64_t blockno, 870 uint32_t blockcnt) 871 { 872 struct scsi_link *link = xs->sc_link; 873 struct mfi_io_frame *io; 874 uint64_t handy; 875 876 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n", 877 DEVNAME((struct mfi_softc *)link->adapter_softc), link->target); 878 879 if (!xs->data) 880 return (1); 881 882 io = &ccb->ccb_frame->mfr_io; 883 if (xs->flags & SCSI_DATA_IN) { 884 io->mif_header.mfh_cmd = MFI_CMD_LD_READ; 885 ccb->ccb_direction = MFI_DATA_IN; 886 } else { 887 io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE; 888 ccb->ccb_direction = MFI_DATA_OUT; 889 } 890 io->mif_header.mfh_target_id = link->target; 891 io->mif_header.mfh_timeout = 0; 892 io->mif_header.mfh_flags = 0; 893 io->mif_header.mfh_sense_len = MFI_SENSE_SIZE; 894 io->mif_header.mfh_data_len= blockcnt; 895 io->mif_lba_hi = (uint32_t)(blockno >> 32); 896 io->mif_lba_lo = (uint32_t)(blockno & 0xffffffffull); 897 898 handy = ccb->ccb_psense; 899 io->mif_sense_addr_hi = htole32((u_int32_t)(handy >> 32)); 900 io->mif_sense_addr_lo = htole32(handy); 901 902 ccb->ccb_done = mfi_scsi_xs_done; 903 ccb->ccb_cookie = xs; 904 ccb->ccb_frame_size = MFI_IO_FRAME_SIZE; 905 ccb->ccb_sgl = &io->mif_sgl; 906 ccb->ccb_data = xs->data; 907 ccb->ccb_len = xs->datalen; 908 909 if (mfi_create_sgl(ccb, (xs->flags & SCSI_NOSLEEP) ? 910 BUS_DMA_NOWAIT : BUS_DMA_WAITOK)) 911 return (1); 912 913 return (0); 914 } 915 916 void 917 mfi_scsi_xs_done(struct mfi_ccb *ccb) 918 { 919 struct scsi_xfer *xs = ccb->ccb_cookie; 920 struct mfi_softc *sc = ccb->ccb_sc; 921 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; 922 923 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n", 924 DEVNAME(sc), ccb, ccb->ccb_frame); 925 926 if (xs->data != NULL) { 927 DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n", 928 DEVNAME(sc)); 929 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 930 ccb->ccb_dmamap->dm_mapsize, 931 (xs->flags & SCSI_DATA_IN) ? 
932 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 933 934 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); 935 } 936 937 switch (hdr->mfh_cmd_status) { 938 case MFI_STAT_OK: 939 xs->resid = 0; 940 break; 941 942 case MFI_STAT_SCSI_DONE_WITH_ERROR: 943 xs->error = XS_SENSE; 944 xs->resid = 0; 945 memset(&xs->sense, 0, sizeof(xs->sense)); 946 memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense)); 947 break; 948 949 default: 950 xs->error = XS_DRIVER_STUFFUP; 951 printf("%s: mfi_scsi_xs_done stuffup %#x\n", 952 DEVNAME(sc), hdr->mfh_cmd_status); 953 954 if (hdr->mfh_scsi_status != 0) { 955 DNPRINTF(MFI_D_INTR, 956 "%s: mfi_scsi_xs_done sense %#x %x %x\n", 957 DEVNAME(sc), hdr->mfh_scsi_status, 958 &xs->sense, ccb->ccb_sense); 959 memset(&xs->sense, 0, sizeof(xs->sense)); 960 memcpy(&xs->sense, ccb->ccb_sense, 961 sizeof(struct scsi_sense_data)); 962 xs->error = XS_SENSE; 963 } 964 break; 965 } 966 967 scsi_done(xs); 968 } 969 970 int 971 mfi_scsi_ld(struct mfi_ccb *ccb, struct scsi_xfer *xs) 972 { 973 struct scsi_link *link = xs->sc_link; 974 struct mfi_pass_frame *pf; 975 uint64_t handy; 976 977 DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n", 978 DEVNAME((struct mfi_softc *)link->adapter_softc), link->target); 979 980 pf = &ccb->ccb_frame->mfr_pass; 981 pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO; 982 pf->mpf_header.mfh_target_id = link->target; 983 pf->mpf_header.mfh_lun_id = 0; 984 pf->mpf_header.mfh_cdb_len = xs->cmdlen; 985 pf->mpf_header.mfh_timeout = 0; 986 pf->mpf_header.mfh_data_len= xs->datalen; /* XXX */ 987 pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE; 988 989 handy = ccb->ccb_psense; 990 pf->mpf_sense_addr_hi = htole32((u_int32_t)(handy >> 32)); 991 pf->mpf_sense_addr_lo = htole32(handy); 992 993 memset(pf->mpf_cdb, 0, 16); 994 memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen); 995 996 ccb->ccb_done = mfi_scsi_xs_done; 997 ccb->ccb_cookie = xs; 998 ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE; 999 ccb->ccb_sgl = &pf->mpf_sgl; 1000 1001 if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) 1002 ccb->ccb_direction = xs->flags & SCSI_DATA_IN ? 1003 MFI_DATA_IN : MFI_DATA_OUT; 1004 else 1005 ccb->ccb_direction = MFI_DATA_NONE; 1006 1007 if (xs->data) { 1008 ccb->ccb_data = xs->data; 1009 ccb->ccb_len = xs->datalen; 1010 1011 if (mfi_create_sgl(ccb, (xs->flags & SCSI_NOSLEEP) ? 
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return (1);
	}

	return (0);
}

void
mfi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mfi_softc	*sc = link->adapter_softc;
	struct device		*dev = link->device_softc;
	struct mfi_ccb		*ccb = xs->io;
	struct scsi_rw		*rw;
	struct scsi_rw_big	*rwb;
	struct scsi_rw_16	*rw16;
	uint64_t		blockno;
	uint32_t		blockcnt;
	uint8_t			target = link->target;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
	    DEVNAME(sc), xs->cmd->opcode);

	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    link->lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		goto stuffup;
	}

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_BIG:
	case WRITE_BIG:
		rwb = (struct scsi_rw_big *)xs->cmd;
		blockno = (uint64_t)_4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt))
			goto stuffup;
		break;

	case READ_COMMAND:
	case WRITE_COMMAND:
		rw = (struct scsi_rw *)xs->cmd;
		blockno =
		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
		blockcnt = rw->length ? rw->length : 0x100;
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt))
			goto stuffup;
		break;

	case READ_16:
	case WRITE_16:
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt))
			goto stuffup;
		break;

	case SYNCHRONIZE_CACHE:
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
		    MFI_DATA_NONE, 0, NULL, mbox))
			goto stuffup;

		goto complete;
		/* NOTREACHED */

	/* hand it off to the firmware and let it deal with it */
	case TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev, dev->dv_xname,
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs))
			goto stuffup;
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->flags & SCSI_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense?
*/ 1104 printf("%s: mfi_scsi_cmd poll failed\n", 1105 DEVNAME(sc)); 1106 bzero(&xs->sense, sizeof(xs->sense)); 1107 xs->sense.error_code = SSD_ERRCODE_VALID | 1108 SSD_ERRCODE_CURRENT; 1109 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 1110 xs->sense.add_sense_code = 0x20; /* invalid opcode */ 1111 xs->error = XS_SENSE; 1112 } 1113 1114 scsi_done(xs); 1115 return; 1116 } 1117 1118 mfi_start(sc, ccb); 1119 1120 DNPRINTF(MFI_D_DMA, "%s: mfi_scsi_cmd queued %d\n", DEVNAME(sc), 1121 ccb->ccb_dmamap->dm_nsegs); 1122 1123 return; 1124 1125 stuffup: 1126 xs->error = XS_DRIVER_STUFFUP; 1127 complete: 1128 scsi_done(xs); 1129 } 1130 1131 int 1132 mfi_create_sgl(struct mfi_ccb *ccb, int flags) 1133 { 1134 struct mfi_softc *sc = ccb->ccb_sc; 1135 struct mfi_frame_header *hdr; 1136 bus_dma_segment_t *sgd; 1137 union mfi_sgl *sgl; 1138 int error, i; 1139 1140 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc), 1141 ccb->ccb_data); 1142 1143 if (!ccb->ccb_data) 1144 return (1); 1145 1146 error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap, 1147 ccb->ccb_data, ccb->ccb_len, NULL, flags); 1148 if (error) { 1149 if (error == EFBIG) 1150 printf("more than %d dma segs\n", 1151 sc->sc_max_sgl); 1152 else 1153 printf("error %d loading dma map\n", error); 1154 return (1); 1155 } 1156 1157 hdr = &ccb->ccb_frame->mfr_header; 1158 sgl = ccb->ccb_sgl; 1159 sgd = ccb->ccb_dmamap->dm_segs; 1160 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) { 1161 if (sc->sc_64bit_dma) { 1162 sgl->sg64[i].addr = htole64(sgd[i].ds_addr); 1163 sgl->sg64[i].len = htole32(sgd[i].ds_len); 1164 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n", 1165 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len); 1166 } else { 1167 sgl->sg32[i].addr = htole32(sgd[i].ds_addr); 1168 sgl->sg32[i].len = htole32(sgd[i].ds_len); 1169 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n", 1170 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len); 1171 } 1172 } 1173 1174 if (ccb->ccb_direction == MFI_DATA_IN) { 1175 hdr->mfh_flags |= MFI_FRAME_DIR_READ; 1176 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 1177 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1178 } else { 1179 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE; 1180 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 1181 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1182 } 1183 1184 hdr->mfh_flags |= sc->sc_sgl_flags; 1185 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs; 1186 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs; 1187 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE; 1188 1189 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d" 1190 " dm_nsegs: %d extra_frames: %d\n", 1191 DEVNAME(sc), 1192 hdr->mfh_sg_count, 1193 ccb->ccb_frame_size, 1194 sc->sc_frames_size, 1195 ccb->ccb_dmamap->dm_nsegs, 1196 ccb->ccb_extra_frames); 1197 1198 return (0); 1199 } 1200 1201 int 1202 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len, 1203 void *buf, uint8_t *mbox) 1204 { 1205 struct mfi_ccb *ccb; 1206 int rv; 1207 1208 ccb = scsi_io_get(&sc->sc_iopool, 0); 1209 rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox); 1210 scsi_io_put(&sc->sc_iopool, ccb); 1211 1212 return (rv); 1213 } 1214 1215 int 1216 mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc, 1217 uint32_t dir, uint32_t len, void *buf, uint8_t *mbox) 1218 { 1219 struct mfi_dcmd_frame *dcmd; 1220 int s, rv = EINVAL; 1221 uint8_t *dma_buf = NULL; 1222 1223 DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc); 1224 1225 dma_buf = dma_alloc(len, PR_WAITOK); 
1226 if (dma_buf == NULL) 1227 goto done; 1228 1229 dcmd = &ccb->ccb_frame->mfr_dcmd; 1230 memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE); 1231 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD; 1232 dcmd->mdf_header.mfh_timeout = 0; 1233 1234 dcmd->mdf_opcode = opc; 1235 dcmd->mdf_header.mfh_data_len = 0; 1236 ccb->ccb_direction = dir; 1237 ccb->ccb_done = mfi_mgmt_done; 1238 1239 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE; 1240 1241 /* handle special opcodes */ 1242 if (mbox) 1243 memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE); 1244 1245 if (dir != MFI_DATA_NONE) { 1246 if (dir == MFI_DATA_OUT) 1247 bcopy(buf, dma_buf, len); 1248 dcmd->mdf_header.mfh_data_len = len; 1249 ccb->ccb_data = dma_buf; 1250 ccb->ccb_len = len; 1251 ccb->ccb_sgl = &dcmd->mdf_sgl; 1252 1253 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) { 1254 rv = EINVAL; 1255 goto done; 1256 } 1257 } 1258 1259 if (cold) { 1260 if (mfi_poll(ccb)) { 1261 rv = EIO; 1262 goto done; 1263 } 1264 } else { 1265 s = splbio(); 1266 mfi_start(sc, ccb); 1267 1268 DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt sleeping\n", DEVNAME(sc)); 1269 while (ccb->ccb_state != MFI_CCB_DONE) 1270 tsleep(ccb, PRIBIO, "mfimgmt", 0); 1271 splx(s); 1272 1273 if (ccb->ccb_flags & MFI_CCB_F_ERR) { 1274 rv = EIO; 1275 goto done; 1276 } 1277 } 1278 1279 if (dir == MFI_DATA_IN) 1280 bcopy(dma_buf, buf, len); 1281 1282 rv = 0; 1283 done: 1284 if (dma_buf) 1285 dma_free(dma_buf, len); 1286 1287 return (rv); 1288 } 1289 1290 void 1291 mfi_mgmt_done(struct mfi_ccb *ccb) 1292 { 1293 struct mfi_softc *sc = ccb->ccb_sc; 1294 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; 1295 1296 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#x %#x\n", 1297 DEVNAME(sc), ccb, ccb->ccb_frame); 1298 1299 if (ccb->ccb_data != NULL) { 1300 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n", 1301 DEVNAME(sc)); 1302 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 1303 ccb->ccb_dmamap->dm_mapsize, 1304 (ccb->ccb_direction & MFI_DATA_IN) ? 
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK)
		ccb->ccb_flags |= MFI_CCB_F_ERR;

	ccb->ccb_state = MFI_CCB_DONE;

	wakeup(ccb);
}

int
mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
{
	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));

	if (sc->sc_ioctl)
		return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
	else
		return (ENOTTY);
}

#if NBIO > 0
int
mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct mfi_softc	*sc = (struct mfi_softc *)dev;
	int			error = 0;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));

	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFI_D_IOCTL, "inq\n");
		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFI_D_IOCTL, "vol\n");
		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFI_D_IOCTL, "disk\n");
		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFI_D_IOCTL, "alarm\n");
		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFI_D_IOCTL, "blink\n");
		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFI_D_IOCTL, "setstate\n");
		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
		error = EINVAL;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}

int
mfi_bio_getitall(struct mfi_softc *sc)
{
	int			i, d, size, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfi_get_info(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
	    NULL))
		goto done;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
		goto done;

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
1440 size = sizeof(struct mfi_ld_details); 1441 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) { 1442 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 1443 if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size, 1444 &sc->sc_ld_details[i], mbox)) 1445 goto done; 1446 1447 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span * 1448 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth; 1449 } 1450 sc->sc_no_pd = d; 1451 1452 rv = 0; 1453 done: 1454 return (rv); 1455 } 1456 1457 int 1458 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi) 1459 { 1460 int rv = EINVAL; 1461 struct mfi_conf *cfg = NULL; 1462 1463 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc)); 1464 1465 if (mfi_bio_getitall(sc)) { 1466 DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n", 1467 DEVNAME(sc)); 1468 goto done; 1469 } 1470 1471 /* count unused disks as volumes */ 1472 if (sc->sc_cfg == NULL) 1473 goto done; 1474 cfg = sc->sc_cfg; 1475 1476 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present; 1477 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs; 1478 #if notyet 1479 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs + 1480 (bi->bi_nodisk - sc->sc_no_pd); 1481 #endif 1482 /* tell bio who we are */ 1483 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 1484 1485 rv = 0; 1486 done: 1487 return (rv); 1488 } 1489 1490 int 1491 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv) 1492 { 1493 int i, per, rv = EINVAL; 1494 1495 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n", 1496 DEVNAME(sc), bv->bv_volid); 1497 1498 /* we really could skip and expect that inq took care of it */ 1499 if (mfi_bio_getitall(sc)) { 1500 DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n", 1501 DEVNAME(sc)); 1502 goto done; 1503 } 1504 1505 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) { 1506 /* go do hotspares & unused disks */ 1507 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv); 1508 goto done; 1509 } 1510 1511 i = bv->bv_volid; 1512 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev)); 1513 1514 switch(sc->sc_ld_list.mll_list[i].mll_state) { 1515 case MFI_LD_OFFLINE: 1516 bv->bv_status = BIOC_SVOFFLINE; 1517 break; 1518 1519 case MFI_LD_PART_DEGRADED: 1520 case MFI_LD_DEGRADED: 1521 bv->bv_status = BIOC_SVDEGRADED; 1522 break; 1523 1524 case MFI_LD_ONLINE: 1525 bv->bv_status = BIOC_SVONLINE; 1526 break; 1527 1528 default: 1529 bv->bv_status = BIOC_SVINVALID; 1530 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n", 1531 DEVNAME(sc), 1532 sc->sc_ld_list.mll_list[i].mll_state); 1533 } 1534 1535 /* additional status can modify MFI status */ 1536 switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) { 1537 case MFI_LD_PROG_CC: 1538 case MFI_LD_PROG_BGI: 1539 bv->bv_status = BIOC_SVSCRUB; 1540 per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress; 1541 bv->bv_percent = (per * 100) / 0xffff; 1542 bv->bv_seconds = 1543 sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds; 1544 break; 1545 1546 case MFI_LD_PROG_FGI: 1547 case MFI_LD_PROG_RECONSTRUCT: 1548 /* nothing yet */ 1549 break; 1550 } 1551 1552 /* 1553 * The RAID levels are determined per the SNIA DDF spec, this is only 1554 * a subset that is valid for the MFI controller. 
1555 */ 1556 bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid; 1557 if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid == 1558 MFI_DDF_SRL_SPANNED) 1559 bv->bv_level *= 10; 1560 1561 bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span * 1562 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth; 1563 1564 bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */ 1565 1566 rv = 0; 1567 done: 1568 return (rv); 1569 } 1570 1571 int 1572 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd) 1573 { 1574 struct mfi_conf *cfg; 1575 struct mfi_array *ar; 1576 struct mfi_ld_cfg *ld; 1577 struct mfi_pd_details *pd; 1578 struct scsi_inquiry_data *inqbuf; 1579 char vend[8+16+4+1], *vendp; 1580 int rv = EINVAL; 1581 int arr, vol, disk, span; 1582 uint8_t mbox[MFI_MBOX_SIZE]; 1583 1584 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n", 1585 DEVNAME(sc), bd->bd_diskid); 1586 1587 /* we really could skip and expect that inq took care of it */ 1588 if (mfi_bio_getitall(sc)) { 1589 DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n", 1590 DEVNAME(sc)); 1591 return (rv); 1592 } 1593 cfg = sc->sc_cfg; 1594 1595 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 1596 1597 ar = cfg->mfc_array; 1598 vol = bd->bd_volid; 1599 if (vol >= cfg->mfc_no_ld) { 1600 /* do hotspares */ 1601 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd); 1602 goto freeme; 1603 } 1604 1605 /* calculate offset to ld structure */ 1606 ld = (struct mfi_ld_cfg *)( 1607 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) + 1608 cfg->mfc_array_size * cfg->mfc_no_array); 1609 1610 /* use span 0 only when raid group is not spanned */ 1611 if (ld[vol].mlc_parm.mpa_span_depth > 1) 1612 span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span; 1613 else 1614 span = 0; 1615 arr = ld[vol].mlc_span[span].mls_index; 1616 1617 /* offset disk into pd list */ 1618 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span; 1619 bd->bd_target = ar[arr].pd[disk].mar_enc_slot; 1620 1621 /* get status */ 1622 switch (ar[arr].pd[disk].mar_pd_state){ 1623 case MFI_PD_UNCONFIG_GOOD: 1624 case MFI_PD_FAILED: 1625 bd->bd_status = BIOC_SDFAILED; 1626 break; 1627 1628 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? 
*/ 1629 bd->bd_status = BIOC_SDHOTSPARE; 1630 break; 1631 1632 case MFI_PD_OFFLINE: 1633 bd->bd_status = BIOC_SDOFFLINE; 1634 break; 1635 1636 case MFI_PD_REBUILD: 1637 bd->bd_status = BIOC_SDREBUILD; 1638 break; 1639 1640 case MFI_PD_ONLINE: 1641 bd->bd_status = BIOC_SDONLINE; 1642 break; 1643 1644 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */ 1645 default: 1646 bd->bd_status = BIOC_SDINVALID; 1647 break; 1648 } 1649 1650 /* get the remaining fields */ 1651 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id; 1652 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN, 1653 sizeof *pd, pd, mbox)) { 1654 /* disk is missing but succeed command */ 1655 rv = 0; 1656 goto freeme; 1657 } 1658 1659 bd->bd_size = pd->mpd_size * 512; /* bytes per block */ 1660 1661 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */ 1662 bd->bd_channel = pd->mpd_enc_idx; 1663 1664 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data; 1665 vendp = inqbuf->vendor; 1666 memcpy(vend, vendp, sizeof vend - 1); 1667 vend[sizeof vend - 1] = '\0'; 1668 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor)); 1669 1670 /* XXX find a way to retrieve serial nr from drive */ 1671 /* XXX find a way to get bd_procdev */ 1672 1673 rv = 0; 1674 freeme: 1675 free(pd, M_DEVBUF); 1676 1677 return (rv); 1678 } 1679 1680 int 1681 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba) 1682 { 1683 uint32_t opc, dir = MFI_DATA_NONE; 1684 int rv = 0; 1685 int8_t ret; 1686 1687 switch(ba->ba_opcode) { 1688 case BIOC_SADISABLE: 1689 opc = MR_DCMD_SPEAKER_DISABLE; 1690 break; 1691 1692 case BIOC_SAENABLE: 1693 opc = MR_DCMD_SPEAKER_ENABLE; 1694 break; 1695 1696 case BIOC_SASILENCE: 1697 opc = MR_DCMD_SPEAKER_SILENCE; 1698 break; 1699 1700 case BIOC_GASTATUS: 1701 opc = MR_DCMD_SPEAKER_GET; 1702 dir = MFI_DATA_IN; 1703 break; 1704 1705 case BIOC_SATEST: 1706 opc = MR_DCMD_SPEAKER_TEST; 1707 break; 1708 1709 default: 1710 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid " 1711 "opcode %x\n", DEVNAME(sc), ba->ba_opcode); 1712 return (EINVAL); 1713 } 1714 1715 if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL)) 1716 rv = EINVAL; 1717 else 1718 if (ba->ba_opcode == BIOC_GASTATUS) 1719 ba->ba_status = ret; 1720 else 1721 ba->ba_status = 0; 1722 1723 return (rv); 1724 } 1725 1726 int 1727 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb) 1728 { 1729 int i, found, rv = EINVAL; 1730 uint8_t mbox[MFI_MBOX_SIZE]; 1731 uint32_t cmd; 1732 struct mfi_pd_list *pd; 1733 1734 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc), 1735 bb->bb_status); 1736 1737 /* channel 0 means not in an enclosure so can't be blinked */ 1738 if (bb->bb_channel == 0) 1739 return (EINVAL); 1740 1741 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK); 1742 1743 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN, 1744 MFI_PD_LIST_SIZE, pd, NULL)) 1745 goto done; 1746 1747 for (i = 0, found = 0; i < pd->mpl_no_pd; i++) 1748 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index && 1749 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) { 1750 found = 1; 1751 break; 1752 } 1753 1754 if (!found) 1755 goto done; 1756 1757 memset(mbox, 0, sizeof mbox); 1758 1759 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id; 1760 1761 switch (bb->bb_status) { 1762 case BIOC_SBUNBLINK: 1763 cmd = MR_DCMD_PD_UNBLINK; 1764 break; 1765 1766 case BIOC_SBBLINK: 1767 cmd = MR_DCMD_PD_BLINK; 1768 break; 1769 1770 case BIOC_SBALARM: 1771 default: 1772 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid " 1773 "opcode %x\n", DEVNAME(sc), 
bb->bb_status); 1774 goto done; 1775 } 1776 1777 1778 if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox)) 1779 goto done; 1780 1781 rv = 0; 1782 done: 1783 free(pd, M_DEVBUF); 1784 return (rv); 1785 } 1786 1787 int 1788 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs) 1789 { 1790 struct mfi_pd_list *pd; 1791 int i, found, rv = EINVAL; 1792 uint8_t mbox[MFI_MBOX_SIZE]; 1793 1794 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc), 1795 bs->bs_status); 1796 1797 pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK); 1798 1799 if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN, 1800 MFI_PD_LIST_SIZE, pd, NULL)) 1801 goto done; 1802 1803 for (i = 0, found = 0; i < pd->mpl_no_pd; i++) 1804 if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index && 1805 bs->bs_target == pd->mpl_address[i].mpa_enc_slot) { 1806 found = 1; 1807 break; 1808 } 1809 1810 if (!found) 1811 goto done; 1812 1813 memset(mbox, 0, sizeof mbox); 1814 1815 *((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id; 1816 1817 switch (bs->bs_status) { 1818 case BIOC_SSONLINE: 1819 mbox[2] = MFI_PD_ONLINE; 1820 break; 1821 1822 case BIOC_SSOFFLINE: 1823 mbox[2] = MFI_PD_OFFLINE; 1824 break; 1825 1826 case BIOC_SSHOTSPARE: 1827 mbox[2] = MFI_PD_HOTSPARE; 1828 break; 1829 /* 1830 case BIOC_SSREBUILD: 1831 break; 1832 */ 1833 default: 1834 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid " 1835 "opcode %x\n", DEVNAME(sc), bs->bs_status); 1836 goto done; 1837 } 1838 1839 1840 if (mfi_mgmt(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL, mbox)) 1841 goto done; 1842 1843 rv = 0; 1844 done: 1845 free(pd, M_DEVBUF); 1846 return (rv); 1847 } 1848 1849 int 1850 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs) 1851 { 1852 struct mfi_conf *cfg; 1853 struct mfi_hotspare *hs; 1854 struct mfi_pd_details *pd; 1855 struct bioc_disk *sdhs; 1856 struct bioc_vol *vdhs; 1857 struct scsi_inquiry_data *inqbuf; 1858 char vend[8+16+4+1], *vendp; 1859 int i, rv = EINVAL; 1860 uint32_t size; 1861 uint8_t mbox[MFI_MBOX_SIZE]; 1862 1863 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid); 1864 1865 if (!bio_hs) 1866 return (EINVAL); 1867 1868 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK); 1869 1870 /* send single element command to retrieve size for full structure */ 1871 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); 1872 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL)) 1873 goto freeme; 1874 1875 size = cfg->mfc_size; 1876 free(cfg, M_DEVBUF); 1877 1878 /* memory for read config */ 1879 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO); 1880 if (mfi_mgmt(sc, MD_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) 1881 goto freeme; 1882 1883 /* calculate offset to hs structure */ 1884 hs = (struct mfi_hotspare *)( 1885 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) + 1886 cfg->mfc_array_size * cfg->mfc_no_array + 1887 cfg->mfc_ld_size * cfg->mfc_no_ld); 1888 1889 if (volid < cfg->mfc_no_ld) 1890 goto freeme; /* not a hotspare */ 1891 1892 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs)) 1893 goto freeme; /* not a hotspare */ 1894 1895 /* offset into hotspare structure */ 1896 i = volid - cfg->mfc_no_ld; 1897 1898 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d " 1899 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld, 1900 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id); 1901 1902 /* get pd fields */ 1903 memset(mbox, 0, sizeof mbox); 1904 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id; 1905 if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN, 1906 sizeof 
*pd, pd, mbox)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
		    DEVNAME(sc));
		goto freeme;
	}

	switch (type) {
	case MFI_MGMT_VD:
		vdhs = bio_hs;
		vdhs->bv_status = BIOC_SVONLINE;
		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
		vdhs->bv_level = -1; /* hotspare */
		vdhs->bv_nodisk = 1;
		break;

	case MFI_MGMT_SD:
		sdhs = bio_hs;
		sdhs->bd_status = BIOC_SDHOTSPARE;
		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
		sdhs->bd_channel = pd->mpd_enc_idx;
		sdhs->bd_target = pd->mpd_enc_slot;
		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
		vendp = inqbuf->vendor;
		memcpy(vend, vendp, sizeof vend - 1);
		vend[sizeof vend - 1] = '\0';
		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
		break;

	default:
		goto freeme;
	}

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(cfg, M_DEVBUF);

	return (rv);
}

#ifndef SMALL_KERNEL
int
mfi_create_sensors(struct mfi_softc *sc)
{
	struct device		*dev;
	struct scsibus_softc	*ssc = NULL;
	struct scsi_link	*link;
	int			i;

	TAILQ_FOREACH(dev, &alldevs, dv_list) {
		if (dev->dv_parent != &sc->sc_dev)
			continue;

		/* check if this is the scsibus for the logical disks */
		ssc = (struct scsibus_softc *)dev;
		if (ssc->adapter_link == &sc->sc_link)
			break;
	}

	if (ssc == NULL)
		return (1);

	sc->sc_sensors = malloc(sizeof(struct ksensor) * sc->sc_ld_cnt,
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_sensors == NULL)
		return (1);

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	for (i = 0; i < sc->sc_ld_cnt; i++) {
		link = scsi_get_link(ssc, i, 0);
		if (link == NULL)
			goto bad;

		dev = link->device_softc;

		sc->sc_sensors[i].type = SENSOR_DRIVE;
		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;

		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
		    sizeof(sc->sc_sensors[i].desc));

		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
	}

	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
		goto bad;

	sensordev_install(&sc->sc_sensordev);

	return (0);

bad:
	free(sc->sc_sensors, M_DEVBUF);

	return (1);
}

void
mfi_refresh_sensors(void *arg)
{
	struct mfi_softc	*sc = arg;
	int			i;
	struct bioc_vol		bv;

	for (i = 0; i < sc->sc_ld_cnt; i++) {
		bzero(&bv, sizeof(bv));
		bv.bv_volid = i;
		if (mfi_ioctl_vol(sc, &bv))
			return;

		switch (bv.bv_status) {
		case BIOC_SVOFFLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
			sc->sc_sensors[i].status = SENSOR_S_CRIT;
			break;

		case BIOC_SVDEGRADED:
			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
			sc->sc_sensors[i].status = SENSOR_S_WARN;
			break;

		case BIOC_SVSCRUB:
		case BIOC_SVONLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
			sc->sc_sensors[i].status = SENSOR_S_OK;
			break;

		case BIOC_SVINVALID:
			/* FALLTHROUGH */
		default:
			sc->sc_sensors[i].value = 0; /* unknown */
			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
		}
	}
}
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

void
mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe_offset, sc->sc_frames_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mfi_post(sc, ccb);
}

void
mfi_done(struct mfi_ccb *ccb)
{
	struct mfi_softc	*sc = ccb->ccb_sc;

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe_offset, sc->sc_frames_size, BUS_DMASYNC_PREREAD);

	ccb->ccb_done(ccb);
}

u_int32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OMSG0));
}

void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}

int
mfi_xscale_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);

	return (1);
}

void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
}

u_int32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}

void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}

int
mfi_ppc_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}

void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}

u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}

void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}

int
mfi_gen2_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}

void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}