1 /* $OpenBSD: mpi.c,v 1.226 2023/07/06 10:17:43 visa Exp $ */ 2 3 /* 4 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org> 5 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "bio.h" 21 22 #include <sys/param.h> 23 #include <sys/systm.h> 24 #include <sys/buf.h> 25 #include <sys/device.h> 26 #include <sys/malloc.h> 27 #include <sys/kernel.h> 28 #include <sys/mutex.h> 29 #include <sys/rwlock.h> 30 #include <sys/sensors.h> 31 #include <sys/dkio.h> 32 #include <sys/task.h> 33 34 #include <machine/bus.h> 35 36 #include <scsi/scsi_all.h> 37 #include <scsi/scsiconf.h> 38 39 #include <dev/biovar.h> 40 #include <dev/ic/mpireg.h> 41 #include <dev/ic/mpivar.h> 42 43 #ifdef MPI_DEBUG 44 uint32_t mpi_debug = 0 45 /* | MPI_D_CMD */ 46 /* | MPI_D_INTR */ 47 /* | MPI_D_MISC */ 48 /* | MPI_D_DMA */ 49 /* | MPI_D_IOCTL */ 50 /* | MPI_D_RW */ 51 /* | MPI_D_MEM */ 52 /* | MPI_D_CCB */ 53 /* | MPI_D_PPR */ 54 /* | MPI_D_RAID */ 55 /* | MPI_D_EVT */ 56 ; 57 #endif 58 59 struct cfdriver mpi_cd = { 60 NULL, 61 "mpi", 62 DV_DULL 63 }; 64 65 void mpi_scsi_cmd(struct scsi_xfer *); 66 void mpi_scsi_cmd_done(struct mpi_ccb *); 67 int mpi_scsi_probe(struct scsi_link *); 68 int mpi_scsi_ioctl(struct scsi_link 
*, u_long, caddr_t, 69 int); 70 71 const struct scsi_adapter mpi_switch = { 72 mpi_scsi_cmd, NULL, mpi_scsi_probe, NULL, mpi_scsi_ioctl 73 }; 74 75 struct mpi_dmamem *mpi_dmamem_alloc(struct mpi_softc *, size_t); 76 void mpi_dmamem_free(struct mpi_softc *, 77 struct mpi_dmamem *); 78 int mpi_alloc_ccbs(struct mpi_softc *); 79 void *mpi_get_ccb(void *); 80 void mpi_put_ccb(void *, void *); 81 int mpi_alloc_replies(struct mpi_softc *); 82 void mpi_push_replies(struct mpi_softc *); 83 void mpi_push_reply(struct mpi_softc *, struct mpi_rcb *); 84 85 void mpi_start(struct mpi_softc *, struct mpi_ccb *); 86 int mpi_poll(struct mpi_softc *, struct mpi_ccb *, int); 87 void mpi_poll_done(struct mpi_ccb *); 88 void mpi_reply(struct mpi_softc *, u_int32_t); 89 90 void mpi_wait(struct mpi_softc *sc, struct mpi_ccb *); 91 void mpi_wait_done(struct mpi_ccb *); 92 93 int mpi_cfg_spi_port(struct mpi_softc *); 94 void mpi_squash_ppr(struct mpi_softc *); 95 void mpi_run_ppr(struct mpi_softc *); 96 int mpi_ppr(struct mpi_softc *, struct scsi_link *, 97 struct mpi_cfg_raid_physdisk *, int, int, int); 98 int mpi_inq(struct mpi_softc *, u_int16_t, int); 99 100 int mpi_cfg_sas(struct mpi_softc *); 101 int mpi_cfg_fc(struct mpi_softc *); 102 103 void mpi_timeout_xs(void *); 104 int mpi_load_xs(struct mpi_ccb *); 105 106 u_int32_t mpi_read(struct mpi_softc *, bus_size_t); 107 void mpi_write(struct mpi_softc *, bus_size_t, u_int32_t); 108 int mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t, 109 u_int32_t); 110 int mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t, 111 u_int32_t); 112 113 int mpi_init(struct mpi_softc *); 114 int mpi_reset_soft(struct mpi_softc *); 115 int mpi_reset_hard(struct mpi_softc *); 116 117 int mpi_handshake_send(struct mpi_softc *, void *, size_t); 118 int mpi_handshake_recv_dword(struct mpi_softc *, 119 u_int32_t *); 120 int mpi_handshake_recv(struct mpi_softc *, void *, size_t); 121 122 void mpi_empty_done(struct mpi_ccb *); 123 124 int 
mpi_iocinit(struct mpi_softc *); 125 int mpi_iocfacts(struct mpi_softc *); 126 int mpi_portfacts(struct mpi_softc *); 127 int mpi_portenable(struct mpi_softc *); 128 int mpi_cfg_coalescing(struct mpi_softc *); 129 void mpi_get_raid(struct mpi_softc *); 130 int mpi_fwupload(struct mpi_softc *); 131 int mpi_manufacturing(struct mpi_softc *); 132 int mpi_scsi_probe_virtual(struct scsi_link *); 133 134 int mpi_eventnotify(struct mpi_softc *); 135 void mpi_eventnotify_done(struct mpi_ccb *); 136 void mpi_eventnotify_free(struct mpi_softc *, 137 struct mpi_rcb *); 138 void mpi_eventack(void *, void *); 139 void mpi_eventack_done(struct mpi_ccb *); 140 int mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *); 141 void mpi_evt_sas_detach(void *, void *); 142 void mpi_evt_sas_detach_done(struct mpi_ccb *); 143 void mpi_fc_rescan(void *); 144 145 int mpi_req_cfg_header(struct mpi_softc *, u_int8_t, 146 u_int8_t, u_int32_t, int, void *); 147 int mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int, 148 void *, int, void *, size_t); 149 150 int mpi_ioctl_cache(struct scsi_link *, u_long, 151 struct dk_cache *); 152 153 #if NBIO > 0 154 int mpi_bio_get_pg0_raid(struct mpi_softc *, int); 155 int mpi_ioctl(struct device *, u_long, caddr_t); 156 int mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *); 157 int mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *); 158 int mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *); 159 int mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *); 160 #ifndef SMALL_KERNEL 161 int mpi_create_sensors(struct mpi_softc *); 162 void mpi_refresh_sensors(void *); 163 #endif /* SMALL_KERNEL */ 164 #endif /* NBIO > 0 */ 165 166 #define DEVNAME(s) ((s)->sc_dev.dv_xname) 167 168 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t)) 169 170 #define mpi_read_db(s) mpi_read((s), MPI_DOORBELL) 171 #define mpi_write_db(s, v) mpi_write((s), MPI_DOORBELL, (v)) 172 #define mpi_read_intr(s) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \ 173 MPI_INTR_STATUS) 
174 #define mpi_write_intr(s, v) mpi_write((s), MPI_INTR_STATUS, (v)) 175 #define mpi_pop_reply(s) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \ 176 MPI_REPLY_QUEUE) 177 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \ 178 MPI_REPLY_QUEUE, (v)) 179 180 #define mpi_wait_db_int(s) mpi_wait_ne((s), MPI_INTR_STATUS, \ 181 MPI_INTR_STATUS_DOORBELL, 0) 182 #define mpi_wait_db_ack(s) mpi_wait_eq((s), MPI_INTR_STATUS, \ 183 MPI_INTR_STATUS_IOCDOORBELL, 0) 184 185 #define MPI_PG_EXTENDED (1<<0) 186 #define MPI_PG_POLL (1<<1) 187 #define MPI_PG_FMT "\020" "\002POLL" "\001EXTENDED" 188 189 #define mpi_cfg_header(_s, _t, _n, _a, _h) \ 190 mpi_req_cfg_header((_s), (_t), (_n), (_a), \ 191 MPI_PG_POLL, (_h)) 192 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \ 193 mpi_req_cfg_header((_s), (_t), (_n), (_a), \ 194 MPI_PG_POLL|MPI_PG_EXTENDED, (_h)) 195 196 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \ 197 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \ 198 (_h), (_r), (_p), (_l)) 199 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \ 200 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \ 201 (_h), (_r), (_p), (_l)) 202 203 static inline void 204 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva) 205 { 206 htolem32(&sge->sg_addr_lo, dva); 207 htolem32(&sge->sg_addr_hi, dva >> 32); 208 } 209 210 int 211 mpi_attach(struct mpi_softc *sc) 212 { 213 struct scsibus_attach_args saa; 214 struct mpi_ccb *ccb; 215 216 printf("\n"); 217 218 rw_init(&sc->sc_lock, "mpi_lock"); 219 task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc); 220 221 /* disable interrupts */ 222 mpi_write(sc, MPI_INTR_MASK, 223 MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL); 224 225 if (mpi_init(sc) != 0) { 226 printf("%s: unable to initialise\n", DEVNAME(sc)); 227 return (1); 228 } 229 230 if (mpi_iocfacts(sc) != 0) { 231 printf("%s: unable to get iocfacts\n", DEVNAME(sc)); 232 return (1); 233 } 234 235 if (mpi_alloc_ccbs(sc) != 0) { 236 /* error already printed */ 237 return (1); 238 } 239 240 if 
(mpi_alloc_replies(sc) != 0) { 241 printf("%s: unable to allocate reply space\n", DEVNAME(sc)); 242 goto free_ccbs; 243 } 244 245 if (mpi_iocinit(sc) != 0) { 246 printf("%s: unable to send iocinit\n", DEVNAME(sc)); 247 goto free_ccbs; 248 } 249 250 /* spin until we're operational */ 251 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 252 MPI_DOORBELL_STATE_OPER) != 0) { 253 printf("%s: state: 0x%08x\n", DEVNAME(sc), 254 mpi_read_db(sc) & MPI_DOORBELL_STATE); 255 printf("%s: operational state timeout\n", DEVNAME(sc)); 256 goto free_ccbs; 257 } 258 259 mpi_push_replies(sc); 260 261 if (mpi_portfacts(sc) != 0) { 262 printf("%s: unable to get portfacts\n", DEVNAME(sc)); 263 goto free_replies; 264 } 265 266 if (mpi_cfg_coalescing(sc) != 0) { 267 printf("%s: unable to configure coalescing\n", DEVNAME(sc)); 268 goto free_replies; 269 } 270 271 switch (sc->sc_porttype) { 272 case MPI_PORTFACTS_PORTTYPE_SAS: 273 SIMPLEQ_INIT(&sc->sc_evt_scan_queue); 274 mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO); 275 scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool, 276 mpi_evt_sas_detach, sc); 277 /* FALLTHROUGH */ 278 case MPI_PORTFACTS_PORTTYPE_FC: 279 if (mpi_eventnotify(sc) != 0) { 280 printf("%s: unable to enable events\n", DEVNAME(sc)); 281 goto free_replies; 282 } 283 break; 284 } 285 286 if (mpi_portenable(sc) != 0) { 287 printf("%s: unable to enable port\n", DEVNAME(sc)); 288 goto free_replies; 289 } 290 291 if (mpi_fwupload(sc) != 0) { 292 printf("%s: unable to upload firmware\n", DEVNAME(sc)); 293 goto free_replies; 294 } 295 296 if (mpi_manufacturing(sc) != 0) { 297 printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc)); 298 goto free_replies; 299 } 300 301 switch (sc->sc_porttype) { 302 case MPI_PORTFACTS_PORTTYPE_SCSI: 303 if (mpi_cfg_spi_port(sc) != 0) { 304 printf("%s: unable to configure spi\n", DEVNAME(sc)); 305 goto free_replies; 306 } 307 mpi_squash_ppr(sc); 308 break; 309 case MPI_PORTFACTS_PORTTYPE_SAS: 310 if (mpi_cfg_sas(sc) != 0) { 311 printf("%s: 
unable to configure sas\n", DEVNAME(sc)); 312 goto free_replies; 313 } 314 break; 315 case MPI_PORTFACTS_PORTTYPE_FC: 316 if (mpi_cfg_fc(sc) != 0) { 317 printf("%s: unable to configure fc\n", DEVNAME(sc)); 318 goto free_replies; 319 } 320 break; 321 } 322 323 /* get raid pages */ 324 mpi_get_raid(sc); 325 #if NBIO > 0 326 if (sc->sc_flags & MPI_F_RAID) { 327 if (bio_register(&sc->sc_dev, mpi_ioctl) != 0) 328 panic("%s: controller registration failed", 329 DEVNAME(sc)); 330 else { 331 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 332 2, 0, &sc->sc_cfg_hdr) != 0) { 333 panic("%s: can't get IOC page 2 hdr", 334 DEVNAME(sc)); 335 } 336 337 sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length, 338 4, M_TEMP, M_WAITOK | M_CANFAIL); 339 if (sc->sc_vol_page == NULL) { 340 panic("%s: can't get memory for IOC page 2, " 341 "bio disabled", DEVNAME(sc)); 342 } 343 344 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, 345 sc->sc_vol_page, 346 sc->sc_cfg_hdr.page_length * 4) != 0) { 347 panic("%s: can't get IOC page 2", DEVNAME(sc)); 348 } 349 350 sc->sc_vol_list = (struct mpi_cfg_raid_vol *) 351 (sc->sc_vol_page + 1); 352 353 sc->sc_ioctl = mpi_ioctl; 354 } 355 } 356 #endif /* NBIO > 0 */ 357 358 saa.saa_adapter = &mpi_switch; 359 saa.saa_adapter_softc = sc; 360 saa.saa_adapter_target = sc->sc_target; 361 saa.saa_adapter_buswidth = sc->sc_buswidth; 362 saa.saa_luns = 8; 363 saa.saa_openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16); 364 saa.saa_pool = &sc->sc_iopool; 365 saa.saa_wwpn = sc->sc_port_wwn; 366 saa.saa_wwnn = sc->sc_node_wwn; 367 saa.saa_quirks = saa.saa_flags = 0; 368 369 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, 370 &saa, scsiprint); 371 372 /* do domain validation */ 373 if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI) 374 mpi_run_ppr(sc); 375 376 /* enable interrupts */ 377 mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL); 378 379 #if NBIO > 0 380 #ifndef SMALL_KERNEL 381 mpi_create_sensors(sc); 382 #endif /* SMALL_KERNEL 
*/ 383 #endif /* NBIO > 0 */ 384 385 return (0); 386 387 free_replies: 388 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0, 389 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD); 390 mpi_dmamem_free(sc, sc->sc_replies); 391 free_ccbs: 392 while ((ccb = mpi_get_ccb(sc)) != NULL) 393 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 394 mpi_dmamem_free(sc, sc->sc_requests); 395 free(sc->sc_ccbs, M_DEVBUF, 0); 396 397 return(1); 398 } 399 400 int 401 mpi_cfg_spi_port(struct mpi_softc *sc) 402 { 403 struct mpi_cfg_hdr hdr; 404 struct mpi_cfg_spi_port_pg1 port; 405 406 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0, 407 &hdr) != 0) 408 return (1); 409 410 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0) 411 return (1); 412 413 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc)); 414 DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n", 415 DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids)); 416 DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc), 417 letoh32(port.port_scsi_id)); 418 DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n", 419 DEVNAME(sc), port.target_config, letoh16(port.id_config)); 420 421 if (port.port_scsi_id == sc->sc_target && 422 port.port_resp_ids == htole16(1 << sc->sc_target) && 423 port.on_bus_timer_value != htole32(0x0)) 424 return (0); 425 426 DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc), 427 sc->sc_target); 428 port.port_scsi_id = sc->sc_target; 429 port.port_resp_ids = htole16(1 << sc->sc_target); 430 port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */ 431 432 if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) { 433 printf("%s: unable to configure port scsi id\n", DEVNAME(sc)); 434 return (1); 435 } 436 437 return (0); 438 } 439 440 void 441 mpi_squash_ppr(struct mpi_softc *sc) 442 { 443 struct mpi_cfg_hdr hdr; 444 struct mpi_cfg_spi_dev_pg1 page; 445 int i; 446 447 
DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc)); 448 449 for (i = 0; i < sc->sc_buswidth; i++) { 450 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 451 1, i, &hdr) != 0) 452 return; 453 454 if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0) 455 return; 456 457 DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x " 458 "req_offset: 0x%02x req_period: 0x%02x " 459 "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i, 460 page.req_params1, page.req_offset, page.req_period, 461 page.req_params2, letoh32(page.configuration)); 462 463 page.req_params1 = 0x0; 464 page.req_offset = 0x0; 465 page.req_period = 0x0; 466 page.req_params2 = 0x0; 467 page.configuration = htole32(0x0); 468 469 if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0) 470 return; 471 } 472 } 473 474 void 475 mpi_run_ppr(struct mpi_softc *sc) 476 { 477 struct mpi_cfg_hdr hdr; 478 struct mpi_cfg_spi_port_pg0 port_pg; 479 struct mpi_cfg_ioc_pg3 *physdisk_pg; 480 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk; 481 size_t pagelen; 482 struct scsi_link *link; 483 int i, tries; 484 485 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0, 486 &hdr) != 0) { 487 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n", 488 DEVNAME(sc)); 489 return; 490 } 491 492 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) { 493 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n", 494 DEVNAME(sc)); 495 return; 496 } 497 498 for (i = 0; i < sc->sc_buswidth; i++) { 499 link = scsi_get_link(sc->sc_scsibus, i, 0); 500 if (link == NULL) 501 continue; 502 503 /* do not ppr volumes */ 504 if (link->flags & SDEV_VIRTUAL) 505 continue; 506 507 tries = 0; 508 while (mpi_ppr(sc, link, NULL, port_pg.min_period, 509 port_pg.max_offset, tries) == EAGAIN) 510 tries++; 511 } 512 513 if ((sc->sc_flags & MPI_F_RAID) == 0) 514 return; 515 516 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0, 517 &hdr) != 0) { 518 
DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 519 "fetch ioc pg 3 header\n", DEVNAME(sc)); 520 return; 521 } 522 523 pagelen = hdr.page_length * 4; /* dwords to bytes */ 524 physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 525 if (physdisk_pg == NULL) { 526 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 527 "allocate ioc pg 3\n", DEVNAME(sc)); 528 return; 529 } 530 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1); 531 532 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) { 533 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to " 534 "fetch ioc page 3\n", DEVNAME(sc)); 535 goto out; 536 } 537 538 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc), 539 physdisk_pg->no_phys_disks); 540 541 for (i = 0; i < physdisk_pg->no_phys_disks; i++) { 542 physdisk = &physdisk_list[i]; 543 544 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d " 545 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id, 546 physdisk->phys_disk_bus, physdisk->phys_disk_ioc, 547 physdisk->phys_disk_num); 548 549 if (physdisk->phys_disk_ioc != sc->sc_ioc_number) 550 continue; 551 552 tries = 0; 553 while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period, 554 port_pg.max_offset, tries) == EAGAIN) 555 tries++; 556 } 557 558 out: 559 free(physdisk_pg, M_TEMP, pagelen); 560 } 561 562 int 563 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link, 564 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try) 565 { 566 struct mpi_cfg_hdr hdr0, hdr1; 567 struct mpi_cfg_spi_dev_pg0 pg0; 568 struct mpi_cfg_spi_dev_pg1 pg1; 569 u_int32_t address; 570 int id; 571 int raid = 0; 572 573 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d " 574 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try, 575 link->quirks); 576 577 if (try >= 3) 578 return (EIO); 579 580 if (physdisk == NULL) { 581 if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR) 582 return (EIO); 583 584 address = link->target; 585 id 
/*
 * Issue a bare INQUIRY to a target (or, when physdisk is set, to a RAID
 * physical disk via the passthrough function) using the polled path.
 * Used by mpi_ppr() to force the IOC to renegotiate transfer parameters.
 * The request, SGE, inquiry buffer and sense buffer all live in the
 * ccb's own request slot (struct inq_bundle) so no extra DMA map is
 * needed.  Returns 0 on success, 1 on failure.
 */
int
mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
{
	struct mpi_ccb *ccb;
	struct scsi_inquiry inq;
	struct inq_bundle {
		struct mpi_msg_scsi_io io;
		struct mpi_sge sge;
		struct scsi_inquiry_data inqbuf;
		struct scsi_sense_data sense;
	} __packed *bundle;
	struct mpi_msg_scsi_io *io;
	struct mpi_sge *sge;

	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));

	/* build the CDB */
	memset(&inq, 0, sizeof(inq));
	inq.opcode = INQUIRY;
	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL)
		return (1);

	ccb->ccb_done = mpi_empty_done;

	bundle = ccb->ccb_cmd;
	io = &bundle->io;
	sge = &bundle->sge;

	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
	    MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = target;

	io->cdb_length = sizeof(inq);
	io->sense_buf_len = sizeof(struct scsi_sense_data);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	/*
	 * always lun 0
	 * io->lun[0] = htobe16(link->lun);
	 */

	io->direction = MPI_SCSIIO_DIR_READ;
	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;

	memcpy(io->cdb, &inq, sizeof(inq));

	htolem32(&io->data_length, sizeof(struct scsi_inquiry_data));

	/* sense data lands in the bundle, right after the inquiry buffer */
	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
	    offsetof(struct inq_bundle, sense));

	/* single simple 64-bit SGE covering the inquiry buffer */
	htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
	    (u_int32_t)sizeof(inq));

	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
	    offsetof(struct inq_bundle, inqbuf));

	/*
	 * NOTE(review): on timeout the ccb is deliberately not returned to
	 * the pool -- the IOC may still own it, so freeing would be worse.
	 */
	if (mpi_poll(sc, ccb, 5000) != 0)
		return (1);

	if (ccb->ccb_rcb != NULL)
		mpi_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);

	return (0);
}
/*
 * Tune the SAS IO unit: cap the SATA queue depth at 32.
 *
 * NOTE(review): a missing SAS IO unit page 1 returns 0 (success) -- the
 * page is presumably optional -- and read/write failures fall through to
 * "out" with rv still 0, so they are not reported either; only the
 * malloc failure surfaces (as ENOMEM).  Looks intentional/best-effort;
 * confirm before changing.
 */
int
mpi_cfg_sas(struct mpi_softc *sc)
{
	struct mpi_ecfg_hdr ehdr;
	struct mpi_cfg_sas_iou_pg1 *pg;
	size_t pagelen;
	int rv = 0;

	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
	    &ehdr) != 0)
		return (0);

	pagelen = lemtoh16(&ehdr.ext_page_length) * 4; /* dwords to bytes */
	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
		goto out;

	/* only write the page back if it needs changing */
	if (pg->max_sata_q_depth != 32) {
		pg->max_sata_q_depth = 32;

		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
			goto out;
	}

out:
	free(pg, M_TEMP, pagelen);
	return (rv);
}

/*
 * Fibre channel setup: read the port WWNN/WWPN for the scsibus attach
 * args and enable immediate-error and verbose-rescan behaviour on port
 * page 1.  Returns 0 on success, 1 on any config page failure.
 */
int
mpi_cfg_fc(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_fc_port_pg0 pg0;
	struct mpi_cfg_fc_port_pg1 pg1;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
	    &hdr) != 0) {
		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_port_wwn = letoh64(pg0.wwpn);
	sc->sc_node_wwn = letoh64(pg0.wwnn);

	/* configure port config more to our liking */
	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
	    &hdr) != 0) {
		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
		return (1);
	}

	/*
	 * NOTE(review): these are the PORT_0 flag constants applied to page
	 * 1 -- presumably the bit positions match; verify against mpireg.h.
	 */
	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));

	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}
/* Detach stub: mpi(4) never detaches, so there is nothing to undo. */
void
mpi_detach(struct mpi_softc *sc)
{

}

/*
 * Interrupt handler: drain the reply post FIFO, dispatching each posted
 * reply to mpi_reply().  Returns 1 if any reply was handled, 0 if the
 * interrupt was not ours (no reply bit in the status register).
 */
int
mpi_intr(void *arg)
{
	struct mpi_softc *sc = arg;
	u_int32_t reg;
	int rv = 0;

	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
		return (rv);

	/* 0xffffffff means the FIFO is empty */
	while ((reg = mpi_pop_reply(sc)) != 0xffffffff)
		rv = 1;
		mpi_reply(sc, reg);
		rv = 1;
	}

	return (rv);
}

/*
 * Decode one value popped from the reply FIFO and complete the ccb it
 * belongs to.  The value is either the DMA address of a full reply frame
 * (address reply, MPI_REPLY_QUEUE_ADDRESS set) or a bare message context
 * for successfully completed requests.  Called from interrupt or poll
 * context.
 */
void
mpi_reply(struct mpi_softc *sc, u_int32_t reg)
{
	struct mpi_ccb *ccb;
	struct mpi_rcb *rcb = NULL;
	struct mpi_msg_reply *reply = NULL;
	u_int32_t reply_dva;
	int id;
	int i;

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);

	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
		/* address reply: FIFO value is the frame's dva >> 1 */
		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
		    MPI_REPLY_SIZE;
		rcb = &sc->sc_rcbs[i];

		bus_dmamap_sync(sc->sc_dmat,
		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);

		reply = rcb->rcb_reply;

		id = lemtoh32(&reply->msg_context);
	} else {
		/* context reply: the value carries the ccb id directly */
		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
		case MPI_REPLY_QUEUE_TYPE_INIT:
			id = reg & MPI_REPLY_QUEUE_CONTEXT;
			break;

		default:
			panic("%s: unsupported context reply",
			    DEVNAME(sc));
		}
	}

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
	    DEVNAME(sc), id, reply);

	ccb = &sc->sc_ccbs[id];

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ccb->ccb_state = MPI_CCB_READY;
	/* rcb stays NULL for context replies; ccb_done must cope */
	ccb->ccb_rcb = rcb;

	ccb->ccb_done(ccb);
}
rcb = &sc->sc_rcbs[i]; 944 945 bus_dmamap_sync(sc->sc_dmat, 946 MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset, 947 MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD); 948 949 reply = rcb->rcb_reply; 950 951 id = lemtoh32(&reply->msg_context); 952 } else { 953 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) { 954 case MPI_REPLY_QUEUE_TYPE_INIT: 955 id = reg & MPI_REPLY_QUEUE_CONTEXT; 956 break; 957 958 default: 959 panic("%s: unsupported context reply", 960 DEVNAME(sc)); 961 } 962 } 963 964 DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n", 965 DEVNAME(sc), id, reply); 966 967 ccb = &sc->sc_ccbs[id]; 968 969 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 970 ccb->ccb_offset, MPI_REQUEST_SIZE, 971 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 972 ccb->ccb_state = MPI_CCB_READY; 973 ccb->ccb_rcb = rcb; 974 975 ccb->ccb_done(ccb); 976 } 977 978 struct mpi_dmamem * 979 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size) 980 { 981 struct mpi_dmamem *mdm; 982 int nsegs; 983 984 mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO); 985 if (mdm == NULL) 986 return (NULL); 987 988 mdm->mdm_size = size; 989 990 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 991 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0) 992 goto mdmfree; 993 994 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg, 995 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) 996 goto destroy; 997 998 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size, 999 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0) 1000 goto free; 1001 1002 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size, 1003 NULL, BUS_DMA_NOWAIT) != 0) 1004 goto unmap; 1005 1006 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x " 1007 "map: %#x nsegs: %d segs: %#x kva: %x\n", 1008 DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva); 1009 1010 return (mdm); 1011 1012 unmap: 1013 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size); 1014 free: 1015 bus_dmamem_free(sc->sc_dmat, 
/*
 * Release a DMA buffer allocated with mpi_dmamem_alloc(): unload, unmap,
 * free the segment, destroy the map, then free the wrapper (exact
 * reverse of the allocation order).
 */
void
mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
{
	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}

/*
 * Allocate the ccb array, the shared request-frame DMA buffer and one
 * data dmamap per ccb, then seed the iopool.  Each ccb owns a
 * MPI_REQUEST_SIZE slot of the request buffer (ccb_cmd/ccb_cmd_dva).
 * Returns 0 on success, 1 on failure with everything unwound.
 */
int
mpi_alloc_ccbs(struct mpi_softc *sc)
{
	struct mpi_ccb *ccb;
	u_int8_t *cmd;
	int i;

	SLIST_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpi_dmamem_alloc(sc,
	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPI_DMA_KVA(sc->sc_requests);
	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);

	for (i = 0; i < sc->sc_maxcmds; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    sc->sc_max_sgl_len, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		/* ccb_id doubles as the message context sent to the IOC */
		ccb->ccb_id = i;
		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
		ccb->ccb_state = MPI_CCB_READY;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpi_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);

	return (0);

free_maps:
	/* destroy the maps of every ccb that made it onto the free list */
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpi_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return (1);
}
/*
 * iopool get hook: pop a ccb off the free list, or NULL if none.
 * Safe from interrupt context (sc_ccb_mtx is IPL_BIO).
 */
void *
mpi_get_ccb(void *xsc)
{
	struct mpi_softc *sc = xsc;
	struct mpi_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPI_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_mtx);

	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}

/*
 * iopool put hook: scrub the ccb (clear callback/cookie and zero its
 * request frame so stale messages can never be resubmitted) and return
 * it to the free list.
 */
void
mpi_put_ccb(void *xsc, void *io)
{
	struct mpi_softc *sc = xsc;
	struct mpi_ccb *ccb = io;

	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);

#ifdef DIAGNOSTIC
	if (ccb->ccb_state == MPI_CCB_FREE)
		panic("mpi_put_ccb: double free");
#endif

	ccb->ccb_state = MPI_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
	mtx_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}

/*
 * Allocate the reply frame bookkeeping (sc_rcbs) and the reply frame
 * DMA buffer (sc_repq frames of MPI_REPLY_SIZE each).  The frames are
 * handed to the IOC later by mpi_push_replies().  Returns 0 on success,
 * 1 on failure.
 */
int
mpi_alloc_replies(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));

	sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF,
	    M_WAITOK|M_CANFAIL);
	if (sc->sc_rcbs == NULL)
		return (1);

	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
	if (sc->sc_replies == NULL) {
		free(sc->sc_rcbs, M_DEVBUF, 0);
		return (1);
	}

	return (0);
}
MPI_REPLY_SIZE); 1158 if (sc->sc_replies == NULL) { 1159 free(sc->sc_rcbs, M_DEVBUF, 0); 1160 return (1); 1161 } 1162 1163 return (0); 1164 } 1165 1166 void 1167 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb) 1168 { 1169 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 1170 rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1171 mpi_push_reply_db(sc, rcb->rcb_reply_dva); 1172 } 1173 1174 void 1175 mpi_push_replies(struct mpi_softc *sc) 1176 { 1177 struct mpi_rcb *rcb; 1178 char *kva = MPI_DMA_KVA(sc->sc_replies); 1179 int i; 1180 1181 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0, 1182 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1183 1184 for (i = 0; i < sc->sc_repq; i++) { 1185 rcb = &sc->sc_rcbs[i]; 1186 1187 rcb->rcb_reply = kva + MPI_REPLY_SIZE * i; 1188 rcb->rcb_offset = MPI_REPLY_SIZE * i; 1189 rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) + 1190 MPI_REPLY_SIZE * i; 1191 mpi_push_reply_db(sc, rcb->rcb_reply_dva); 1192 } 1193 } 1194 1195 void 1196 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb) 1197 { 1198 struct mpi_msg_request *msg; 1199 1200 DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc), 1201 ccb->ccb_cmd_dva); 1202 1203 msg = ccb->ccb_cmd; 1204 htolem32(&msg->msg_context, ccb->ccb_id); 1205 1206 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 1207 ccb->ccb_offset, MPI_REQUEST_SIZE, 1208 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1209 1210 ccb->ccb_state = MPI_CCB_QUEUED; 1211 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 1212 MPI_REQ_QUEUE, ccb->ccb_cmd_dva); 1213 } 1214 1215 int 1216 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout) 1217 { 1218 void (*done)(struct mpi_ccb *); 1219 void *cookie; 1220 int rv = 1; 1221 u_int32_t reg; 1222 1223 DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc), 1224 timeout); 1225 1226 done = ccb->ccb_done; 1227 cookie = ccb->ccb_cookie; 1228 1229 ccb->ccb_done = mpi_poll_done; 1230 ccb->ccb_cookie = &rv; 1231 1232 
	mpi_start(sc, ccb);
	while (rv == 1) {
		reg = mpi_pop_reply(sc);
		if (reg == 0xffffffff) {
			/* reply queue empty: burn 1ms and retry */
			if (timeout-- == 0) {
				printf("%s: timeout\n", DEVNAME(sc));
				goto timeout;
			}

			delay(1000);
			continue;
		}

		mpi_reply(sc, reg);
	}

	/* restore the caller's completion state and run its done handler */
	ccb->ccb_cookie = cookie;
	done(ccb);

	/*
	 * NOTE(review): on the timeout path the ccb still has mpi_poll_done
	 * and &rv (a stack address) installed, and the command may still be
	 * outstanding on the IOC — callers treat rv != 0 as fatal for the xs.
	 */
timeout:
	return (rv);
}

/*
 * mpi_poll_done: completion shim for mpi_poll.  Clears the rv flag that
 * mpi_poll spins on.
 */
void
mpi_poll_done(struct mpi_ccb *ccb)
{
	int			*rv = ccb->ccb_cookie;

	*rv = 0;
}

/*
 * mpi_wait: run a ccb to completion by sleeping.  A stack-local mutex is
 * used as the wait channel cookie; mpi_wait_done NULLs ccb_cookie under
 * that mutex and wakes us.  The caller's done handler is invoked after
 * the wakeup.
 */
void
mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	struct mutex		cookie;
	void			(*done)(struct mpi_ccb *);

	mtx_init(&cookie, IPL_BIO);

	done = ccb->ccb_done;
	ccb->ccb_done = mpi_wait_done;
	ccb->ccb_cookie = &cookie;

	/* XXX this will wait forever for the ccb to complete */

	mpi_start(sc, ccb);

	mtx_enter(&cookie);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &cookie, PRIBIO, "mpiwait", INFSLP);
	mtx_leave(&cookie);

	done(ccb);
}

/*
 * mpi_wait_done: completion shim for mpi_wait; signals completion by
 * clearing ccb_cookie under the sleeper's mutex.
 */
void
mpi_wait_done(struct mpi_ccb *ccb)
{
	struct mutex		*cookie = ccb->ccb_cookie;

	mtx_enter(cookie);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(cookie);
}

/*
 * mpi_scsi_cmd: scsi_adapter entry point.  Translate a scsi_xfer into an
 * MPI SCSI IO request frame and submit it, polled or interrupt-driven
 * depending on SCSI_POLL.  Runs without the kernel lock except around
 * scsi_done() and the return to the midlayer.
 */
void
mpi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct mpi_softc		*sc = link->bus->sb_adapter_softc;
	struct mpi_ccb			*ccb;
	struct mpi_ccb_bundle		*mcb;
	struct mpi_msg_scsi_io		*io;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));

	KERNEL_UNLOCK();

	/* CDB won't fit in the request frame: fake ILLEGAL REQUEST sense */
	if (xs->cmdlen > MPI_CDB_LEN) {
		DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		goto done;
	}

	/* ccb was reserved for us by the iopool */
	ccb = xs->io;

	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_id, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpi_scsi_cmd_done;

	mcb = ccb->ccb_cmd;
	io = &mcb->mcb_io;

	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = link->target;

	io->cdb_length = xs->cmdlen;
	io->sense_buf_len = sizeof(xs->sense);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	/* LUN field is big-endian on the wire */
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPI_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPI_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPI_SCSIIO_DIR_NONE;
		break;
	}

	/* parallel SCSI always tags; other transports honour SDEV_NOTAGS */
	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
	    (link->quirks & SDEV_NOTAGS))
		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
	else
		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data lands in the ccb bundle right next to the request */
	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
	    offsetof(struct mpi_ccb_bundle, mcb_sense));

	if (mpi_load_xs(ccb) != 0)
		goto stuffup;

	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);

	if (xs->flags & SCSI_POLL) {
		if (mpi_poll(sc, ccb, xs->timeout) != 0)
			goto stuffup;
	} else
		mpi_start(sc, ccb);

	KERNEL_LOCK();
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	KERNEL_LOCK();
	scsi_done(xs);
}

/*
 * mpi_scsi_cmd_done: completion handler for mpi_scsi_cmd.  Tears down the
 * data DMA map, translates the IOC's SCSI IO error reply (if any) into
 * scsi_xfer status/error/sense, and hands the xs back to the midlayer.
 */
void
mpi_scsi_cmd_done(struct mpi_ccb *ccb)
{
	struct mpi_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpi_ccb_bundle	*mcb = ccb->ccb_cmd;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	struct mpi_msg_scsi_io_error	*sie;
	if (xs->datalen != 0) {
		/* finish the DMA transfer before looking at the data */
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	/* timeout_del */
	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		KERNEL_LOCK();
		scsi_done(xs);
		KERNEL_UNLOCK();
		return;
	}

	/* the IOC sent a full SCSI IO error reply frame */
	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
	    sie->msg_length, sie->function);
	DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d "
	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
	    sie->sense_buf_len, sie->msg_flags);
	DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->msg_context));
	DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->tag));

	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map ioc_status (+ scsi status) onto the midlayer's xs->error */
	switch (lemtoh16(&sie->ioc_status)) {
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		xs->error = XS_RESET;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* autosense data was DMAed into the ccb bundle by the IOC */
	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));

	DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc),
	    xs->error, xs->status);

	/* give the reply frame back to the IOC */
	mpi_push_reply(sc, ccb->ccb_rcb);
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}

/* command timeout handler: deliberately not implemented */
void
mpi_timeout_xs(void *arg)
{
	/* XXX */
}

/*
 * mpi_load_xs: build the scatter/gather list for a SCSI IO request.  The
 * first sc_first_sgl_len entries live in the request frame itself; longer
 * transfers are continued through chain elements.  Returns 0 on success,
 * 1 if the DMA map could not be loaded.
 */
int
mpi_load_xs(struct mpi_ccb *ccb)
{
	struct mpi_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpi_ccb_bundle	*mcb = ccb->ccb_cmd;
	struct mpi_msg_scsi_io	*io = &mcb->mcb_io;
	struct mpi_sge		*sge = NULL;
	struct mpi_sge		*nsge = &mcb->mcb_sgl[0];
	struct mpi_sge		*ce = NULL, *nce;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		addr, flags;
	int			i, error;

	if (xs->datalen == 0) {
		/* no data: a single empty SGE terminating the list */
		htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPI_SGE_FL_DIR_OUT;

	/* sgl won't fit in the request frame: reserve the last slot for a
	 * chain element and tell the IOC where it sits */
	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {

		if (nsge == ce) {
			/* current bucket is full: hop over the chain slot */
			nsge++;
			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);

			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
				/* another chain will be needed after this one;
				 * encode next-chain offset in the high word */
				nce = &nsge[sc->sc_chain_len - 1];
				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
				addr = addr << 16 |
				    sizeof(struct mpi_sge) * sc->sc_chain_len;
			} else {
				nce = NULL;
				addr = sizeof(struct mpi_sge) *
				    (dmap->dm_nsegs - i);
			}

			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
			    MPI_SGE_FL_SIZE_64 | addr);

			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
			    ((u_int8_t *)nsge - (u_int8_t *)mcb));

			ce = nce;
		}

		DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc),
		    i, dmap->dm_segs[i].ds_len,
		    (u_int64_t)dmap->dm_segs[i].ds_addr);

		sge = nsge++;

		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
	    MPI_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * mpi_scsi_probe_virtual: mark RAID volume targets as SDEV_VIRTUAL by
 * looking for a RAID volume page 0 at this target.  Returns 0 whether or
 * not the target is a volume; ENOMEM only on allocation failure.
 */
int
mpi_scsi_probe_virtual(struct scsi_link *link)
{
	struct mpi_softc	*sc = link->bus->sb_adapter_softc;
	struct mpi_cfg_hdr	hdr;
	struct mpi_cfg_raid_vol_pg0 *rp0;
	int			len;
	int			rv;

	if (!ISSET(sc->sc_flags, MPI_F_RAID))
		return (0);

	if (link->lun > 0)
		return (0);

	/* no header means no volume at this target */
	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
	    0, link->target, MPI_PG_POLL, &hdr);
	if (rv != 0)
		return (0);

	len = hdr.page_length * 4;
	rp0 = malloc(len, M_TEMP, M_NOWAIT);
	if (rp0 == NULL)
		return (ENOMEM);

	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
	if (rv == 0)
		SET(link->flags, SDEV_VIRTUAL);

	free(rp0, M_TEMP, len);
	return (0);
}

/*
 * mpi_scsi_probe: per-target probe hook.  Handles RAID volumes first; on
 * SAS ports it additionally fetches SAS device page 0 to detect ATAPI
 * devices and flag them for the midlayer.
 */
int
mpi_scsi_probe(struct scsi_link *link)
{
	struct mpi_softc	*sc = link->bus->sb_adapter_softc;
	struct mpi_ecfg_hdr	ehdr;
	struct mpi_cfg_sas_dev_pg0 pg0;
	u_int32_t		address;
	int			rv;

	rv = mpi_scsi_probe_virtual(link);
	if (rv != 0)
		return (rv);

	if (ISSET(link->flags, SDEV_VIRTUAL))
		return (0);

	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
		return (0);

	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;

	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
	    address, &ehdr) != 0)
		return (EIO);

	/* page fetch failure is not fatal to the probe */
	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
	    DEVNAME(sc), link->target);
	DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n",
	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
	DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc),
	    letoh64(pg0.sas_addr));
	DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x "
	    "access_status: 0x%02x\n", DEVNAME(sc),
	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
	DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x "
	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
	DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(pg0.device_info));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n",
	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);

	if (ISSET(lemtoh32(&pg0.device_info),
	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
		    DEVNAME(sc), link->target);
		link->flags |= SDEV_ATAPI;
	}

	return (0);
}

/*
 * mpi_read: read a 32bit device register, with a read barrier so the
 * access is not reordered against earlier register accesses.
 */
u_int32_t
mpi_read(struct mpi_softc *sc, bus_size_t r)
{
	u_int32_t		rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);

	return (rv);
}

/*
 * mpi_write: write a 32bit device register, followed by a write barrier.
 */
void
mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * mpi_wait_eq: poll register r (up to ~10s in 1ms steps) until the masked
 * value equals target.  Returns 0 on match, 1 on timeout.
 */
int
mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int			i;

	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 10000; i++) {
		if ((mpi_read(sc, r) & mask) == target)
			return (0);
		delay(1000);
	}

	return (1);
}

/*
 * mpi_wait_ne: poll register r (up to ~10s in 1ms steps) until the masked
 * value differs from target.  Returns 0 on change, 1 on timeout.
 */
int
mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int			i;

	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 10000; i++) {
		if ((mpi_read(sc, r) & mask) != target)
			return (0);
		delay(1000);
	}

	return (1);
}

/*
 * mpi_init: drive the IOC into the READY state via the doorbell state
 * machine, soft/hard resetting it if it is already operational or
 * faulted.  Returns 0 when ready (or owned by a PCI peer), 1 on failure.
 */
int
mpi_init(struct mpi_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the IOC leaves the RESET state */
	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpi_read_db(sc);
	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	/* up to five attempts to get to READY */
	for (i = 0; i < 5; i++) {
		switch (db & MPI_DOORBELL_STATE) {
		case MPI_DOORBELL_STATE_READY:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPI_DOORBELL_STATE_OPER:
		case MPI_DOORBELL_STATE_FAULT:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
			    "reset\n" , DEVNAME(sc));
			if (mpi_reset_soft(sc) != 0)
				mpi_reset_hard(sc);
			break;

		case MPI_DOORBELL_STATE_RESET:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
			    MPI_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpi_read_db(sc);
	}

	return (1);
}

/*
 * mpi_reset_soft: ask the IOC to reset itself via the message unit reset
 * doorbell function.  Returns 0 once the IOC reports READY, 1 on any
 * failure (doorbell busy or timeouts).
 */
int
mpi_reset_soft(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));

	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	mpi_write_db(sc,
	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
		return
		    (1);

	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_READY) != 0)
		return (1);

	return (0);
}

/*
 * mpi_reset_hard: reset the IOC through the diagnostic register.  The
 * magic write sequence unlocks the register first; always "succeeds".
 */
int
mpi_reset_hard(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));

	/* enable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);

	/* reset ioc */
	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);

	delay(10000);

	/* disable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);

	/* restore pci bits? */

	/* firmware bits? */
	return (0);
}

/*
 * mpi_handshake_send: push a request to the IOC one dword at a time
 * through the doorbell handshake protocol.  Returns 0 on success, 1 if
 * the doorbell is busy or the IOC stops acknowledging.
 */
int
mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
		mpi_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
	    MPI_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. Wait for the interrupt and then ack it.
	 */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpi_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpi_write_db(sc, htole32(query[i]));
		if (mpi_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}

/*
 * mpi_handshake_recv_dword: read one dword of a handshake reply.  The IOC
 * delivers it as two 16bit doorbell reads, each signalled by a doorbell
 * interrupt that must be acked.
 */
int
mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpi_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
		mpi_write_intr(sc, 0);
	}

	return (0);
}

/*
 * mpi_handshake_recv: read a handshake reply of up to dwords dwords into
 * buf, discarding any excess the IOC wants to send.  Returns 0 on
 * success, 1 on a doorbell timeout.
 */
int
mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
{
	struct mpi_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	return (0);
}

/* completion handler for commands that need no post-processing */
void
mpi_empty_done(struct mpi_ccb *ccb)
{
	/* nothing to do */
}

/*
 * mpi_iocfacts: fetch the IOC FACTS message over the doorbell handshake
 * and derive the driver's sizing parameters from it: command/reply queue
 * depths, bus width, firmware version/size, and the scatter/gather list
 * geometry (first sgl length, chain length, max sgl length).
 */
int
mpi_iocfacts(struct mpi_softc *sc)
{
	struct mpi_msg_iocfacts_request	ifq;
	struct mpi_msg_iocfacts_reply	ifp;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPI_FUNCTION_IOC_FACTS;
	ifq.chain_offset = 0;
	ifq.msg_flags = 0;
	ifq.msg_context = htole32(0xdeadbeef);

	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n",
	    DEVNAME(sc), ifp.function, ifp.msg_length,
	    ifp.msg_version_maj, ifp.msg_version_min);
	DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
	    ifp.ioc_number, ifp.header_version_maj,
	    ifp.header_version_min);
	DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.ioc_status),
	    letoh16(ifp.ioc_exceptions));
	DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x "
	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
	DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n",
	    DEVNAME(sc), letoh16(ifp.request_frame_size),
	    letoh16(ifp.reply_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc),
	    letoh16(ifp.product_id));
	DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_host_mfa_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d "
	    "global_credits: %d\n",
	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
	    letoh16(ifp.global_credits));
	DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_sense_buffer_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n",
	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
	    letoh16(ifp.current_reply_frame_size));
	DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc),
	    letoh32(ifp.fw_image_size));
	DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_capabilities));
	DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
	    ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev);
	DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x "
	    "addr 0x%08lx%08lx\n", DEVNAME(sc),
	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));

	sc->sc_fw_maj = ifp.fw_version_maj;
	sc->sc_fw_min = ifp.fw_version_min;
	sc->sc_fw_unit = ifp.fw_version_unit;
	sc->sc_fw_dev = ifp.fw_version_dev;

	sc->sc_maxcmds = lemtoh16(&ifp.global_credits);
	sc->sc_maxchdepth = ifp.max_chain_depth;
	sc->sc_ioc_number = ifp.ioc_number;
	/* parallel SCSI is fixed at 16 targets; max_devices == 0 means 256 */
	if (sc->sc_flags & MPI_F_SPI)
		sc->sc_buswidth = 16;
	else
		sc->sc_buswidth =
		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
		sc->sc_fw_len = lemtoh32(&ifp.fw_image_size);

	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth));

	/*
	 * you can fit sg elements on the end of the io cmd if they fit in the
	 * request frame size.
	 */
	sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) -
	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
	DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc),
	    sc->sc_first_sgl_len);

	sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) /
	    sizeof(struct mpi_sge);
	DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc),
	    sc->sc_chain_len);

	/* the sgl tailing the io cmd loses an entry to the chain element.
	 */
	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
	/* the sgl chains lose an entry for each chain element */
	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
	    sc->sc_chain_len;
	DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc),
	    sc->sc_max_sgl_len);

	/* XXX we're ignoring the max chain depth */

	return (0);
}

/*
 * mpi_iocinit: send the IOC INIT message over the doorbell handshake to
 * move the IOC into the operational state.  Tells the firmware our bus
 * geometry, reply frame size, and the high 32 bits of the request/sense
 * DMA addresses.
 */
int
mpi_iocinit(struct mpi_softc *sc)
{
	struct mpi_msg_iocinit_request	iiq;
	struct mpi_msg_iocinit_reply	iip;
	u_int32_t			hi_addr;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPI_FUNCTION_IOC_INIT;
	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;

	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
	iiq.max_buses = 1;

	iiq.msg_context = htole32(0xd00fd00f);

	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);

	/* request and sense buffers share the same 4GB segment */
	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
	htolem32(&iiq.host_mfa_hi_addr, hi_addr);
	htolem32(&iiq.sense_buffer_hi_addr, hi_addr);

	/* we speak MPI spec version 1.2 */
	iiq.msg_version_maj = 0x01;
	iiq.msg_version_min = 0x02;

	iiq.hdr_version_unit = 0x0d;
	iiq.hdr_version_dev = 0x00;

	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d "
	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
	    iip.max_buses, iip.max_devices, iip.flags);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(iip.ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.ioc_loginfo));

	return (0);
}

/*
 * mpi_portfacts: query PORT FACTS for port 0 using a normal (queued,
 * polled) command.  Records the port type and, for parallel SCSI, the
 * adapter's own target id.  Returns 0 on success, 1 on failure.
 */
int
mpi_portfacts(struct mpi_softc *sc)
{
	struct mpi_ccb			*ccb;
	struct mpi_msg_portfacts_request *pfq;
	volatile struct mpi_msg_portfacts_reply *pfp;
	int				rv = 1;

	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpi_empty_done;
	pfq = ccb->ccb_cmd;

	pfq->function = MPI_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;

	if (mpi_poll(sc, ccb, 50000) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}
	pfp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n",
	    DEVNAME(sc), pfp->function, pfp->msg_length);
	DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n",
	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
	DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(pfp->msg_context));
	DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(pfp->ioc_status));
	DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(pfp->ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n",
	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
	DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n",
	    DEVNAME(sc), letoh16(pfp->protocol_flags),
	    letoh16(pfp->port_scsi_id));
	DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d "
	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
	    letoh16(pfp->max_persistent_ids),
	    letoh16(pfp->max_posted_cmd_buffers));
	DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc),
	    letoh16(pfp->max_lan_buckets));

	sc->sc_porttype = pfp->port_type;
	if (sc->sc_target == -1)
		sc->sc_target = lemtoh16(&pfp->port_scsi_id);

	mpi_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}

/*
 * mpi_cfg_coalescing: read IOC page 1 and, if reply coalescing is turned
 * on, write the page back with coalescing disabled so replies are posted
 * immediately.  Returns 0 on success, 1 on a config page failure.
 */
int
mpi_cfg_coalescing(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_ioc_pg1		pg;
	u_int32_t			flags;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc),
	    letoh32(pg.flags));
	DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc),
	    letoh32(pg.coalescing_timeout));
	DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n",
	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);

	flags = lemtoh32(&pg.flags);
	/* coalescing already off: nothing to do */
	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
		return (0);

	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
		    DEVNAME(sc));
		return (1);
	}

	return (0);
}

/*
 * mpi_eventnotify: permanently dedicate a ccb to asynchronous event
 * notification and switch event reporting on.  Also sets up the event
 * ack queue and its iopool handler.  Returns 0 on success, 1 if no ccb
 * is available.
 */
int
mpi_eventnotify(struct mpi_softc *sc)
{
	struct mpi_ccb				*ccb;
	struct mpi_msg_event_request		*enq;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* set up the queue and handler used to ack events later on */
	sc->sc_evt_ccb = ccb;
	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpi_eventack, sc);

	ccb->ccb_done = mpi_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enq->chain_offset = 0;
	enq->event_switch = MPI_EVENT_SWITCH_ON;

	mpi_start(sc, ccb);
	return (0);
}

/*
 * Completion handler for the outstanding event notification command.
 * Dispatches on the event code and then acks/frees the reply unless a
 * specific handler has taken ownership of it.
 */
void
mpi_eventnotify_done(struct mpi_ccb *ccb)
{
	struct mpi_softc			*sc = ccb->ccb_sc;
	struct mpi_rcb				*rcb = ccb->ccb_rcb;
	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;

	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));

	DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d "
	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
	    letoh16(enp->data_length));
	DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n",
	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
	DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->msg_context));
	DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(enp->ioc_status));
	DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->ioc_loginfo));
	DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->event));
	DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(enp->event_context));

	switch (lemtoh32(&enp->event)) {
	/* ignore these */
	case MPI_EVENT_EVENT_CHANGE:
	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (sc->sc_scsibus == NULL)
			break;

		if (mpi_evt_sas(sc, rcb) != 0) {
			/* reply is freed later on */
			return;
		}
		break;

	case MPI_EVENT_RESCAN:
		if (sc->sc_scsibus != NULL &&
		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
			task_add(systq, &sc->sc_evt_rescan);
		break;

	default:
		DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh32(&enp->event));
		break;
	}

	mpi_eventnotify_free(sc, rcb);
}

/*
 * Release an event reply: if the IOC flagged it as requiring an ack,
 * queue it for mpi_eventack(); otherwise return it to the reply
 * freelist straight away.
 */
void
mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
{
	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;

	if (enp->ack_required) {
		mtx_enter(&sc->sc_evt_ack_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_ack_mtx);
		scsi_ioh_add(&sc->sc_evt_ack_handler);
	} else
		mpi_push_reply(sc, rcb);
}

/*
 * Handle a SAS device status change event.  Returns 1 if it has taken
 * ownership of the reply (device removal is deferred to
 * mpi_evt_sas_detach(), which also acks/frees it), 0 if the caller
 * should free/ack the reply itself.
 */
int
mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
{
	struct mpi_evt_sas_change		*ch;
	u_int8_t				*data;

	/* the change record sits directly behind the event reply header */
	data = rcb->rcb_reply;
	data += sizeof(struct mpi_msg_event_reply);
	ch = (struct mpi_evt_sas_change *)data;

	if (ch->bus != 0)
		return (0);

	switch (ch->reason) {
	case MPI_EVT_SASCH_REASON_ADDED:
	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
		KERNEL_LOCK();
		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
			printf("%s: unable to request attach of %d\n",
			    DEVNAME(sc), ch->target);
		}
		KERNEL_UNLOCK();
		break;

	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
		KERNEL_LOCK();
		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
		KERNEL_UNLOCK();

		mtx_enter(&sc->sc_evt_scan_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_scan_mtx);
		scsi_ioh_add(&sc->sc_evt_scan_handler);

		/* we'll handle event ack later on */
		return (1);

	case MPI_EVT_SASCH_REASON_SMART_DATA:
	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
		break;
	default:
		printf("%s: unknown reason for SAS device status change: "
		    "0x%02x\n", DEVNAME(sc), ch->reason);
		break;
	}

	return (0);
}

/*
 * Deferred removal of a SAS device that stopped responding.  Runs via
 * the iopool handler with a ccb: pops one queued event reply off
 * sc_evt_scan_queue, issues a target reset task management request for
 * the device and releases the event reply.
 */
void
mpi_evt_sas_detach(void *cookie, void *io)
{
	struct mpi_softc			*sc = cookie;
	struct mpi_ccb				*ccb = io;
	struct mpi_rcb				*rcb, *next;
	struct mpi_msg_event_reply		*enp;
	struct mpi_evt_sas_change		*ch;
	struct mpi_msg_scsi_task_request	*str;

	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));

	mtx_enter(&sc->sc_evt_scan_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_scan_mtx);

	if (rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = rcb->rcb_reply;
	ch = (struct mpi_evt_sas_change *)(enp + 1);

	ccb->ccb_done = mpi_evt_sas_detach_done;
	str = ccb->ccb_cmd;

	str->target_id = ch->target;
	str->bus = 0;
	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;

	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;

	/* done with the event reply; ack it if required */
	mpi_eventnotify_free(sc, rcb);

	mpi_start(sc, ccb);

	/* reschedule ourselves if more detaches are pending */
	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_scan_handler);
}

/*
 * Target reset completed: ask the midlayer to detach the device.
 */
void
mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
{
	struct mpi_softc			*sc = ccb->ccb_sc;
	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;

	KERNEL_LOCK();
	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
	    DETACH_FORCE) != 0) {
		printf("%s: unable to request detach of %d\n",
		    DEVNAME(sc), r->target_id);
	}
	KERNEL_UNLOCK();

	mpi_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
2461 2462 void 2463 mpi_fc_rescan(void *xsc) 2464 { 2465 struct mpi_softc *sc = xsc; 2466 struct mpi_cfg_hdr hdr; 2467 struct mpi_cfg_fc_device_pg0 pg; 2468 struct scsi_link *link; 2469 u_int8_t devmap[256 / NBBY]; 2470 u_int32_t id = 0xffffff; 2471 int i; 2472 2473 memset(devmap, 0, sizeof(devmap)); 2474 2475 do { 2476 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0, 2477 id, 0, &hdr) != 0) { 2478 printf("%s: header get for rescan of 0x%08x failed\n", 2479 DEVNAME(sc), id); 2480 return; 2481 } 2482 2483 memset(&pg, 0, sizeof(pg)); 2484 if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0) 2485 break; 2486 2487 if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) && 2488 pg.current_bus == 0) 2489 setbit(devmap, pg.current_target_id); 2490 2491 id = lemtoh32(&pg.port_id); 2492 } while (id <= 0xff0000); 2493 2494 for (i = 0; i < sc->sc_buswidth; i++) { 2495 link = scsi_get_link(sc->sc_scsibus, i, 0); 2496 2497 if (isset(devmap, i)) { 2498 if (link == NULL) 2499 scsi_probe_target(sc->sc_scsibus, i); 2500 } else { 2501 if (link != NULL) { 2502 scsi_activate(sc->sc_scsibus, i, -1, 2503 DVACT_DEACTIVATE); 2504 scsi_detach_target(sc->sc_scsibus, i, 2505 DETACH_FORCE); 2506 } 2507 } 2508 } 2509 } 2510 2511 void 2512 mpi_eventack(void *cookie, void *io) 2513 { 2514 struct mpi_softc *sc = cookie; 2515 struct mpi_ccb *ccb = io; 2516 struct mpi_rcb *rcb, *next; 2517 struct mpi_msg_event_reply *enp; 2518 struct mpi_msg_eventack_request *eaq; 2519 2520 DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc)); 2521 2522 mtx_enter(&sc->sc_evt_ack_mtx); 2523 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue); 2524 if (rcb != NULL) { 2525 next = SIMPLEQ_NEXT(rcb, rcb_link); 2526 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link); 2527 } 2528 mtx_leave(&sc->sc_evt_ack_mtx); 2529 2530 if (rcb == NULL) { 2531 scsi_io_put(&sc->sc_iopool, ccb); 2532 return; 2533 } 2534 2535 enp = rcb->rcb_reply; 2536 2537 ccb->ccb_done = mpi_eventack_done; 2538 eaq = ccb->ccb_cmd; 2539 
2540 eaq->function = MPI_FUNCTION_EVENT_ACK; 2541 2542 eaq->event = enp->event; 2543 eaq->event_context = enp->event_context; 2544 2545 mpi_push_reply(sc, rcb); 2546 mpi_start(sc, ccb); 2547 2548 if (next != NULL) 2549 scsi_ioh_add(&sc->sc_evt_ack_handler); 2550 } 2551 2552 void 2553 mpi_eventack_done(struct mpi_ccb *ccb) 2554 { 2555 struct mpi_softc *sc = ccb->ccb_sc; 2556 2557 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 2558 2559 mpi_push_reply(sc, ccb->ccb_rcb); 2560 scsi_io_put(&sc->sc_iopool, ccb); 2561 } 2562 2563 int 2564 mpi_portenable(struct mpi_softc *sc) 2565 { 2566 struct mpi_ccb *ccb; 2567 struct mpi_msg_portenable_request *peq; 2568 int rv = 0; 2569 2570 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc)); 2571 2572 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2573 if (ccb == NULL) { 2574 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n", 2575 DEVNAME(sc)); 2576 return (1); 2577 } 2578 2579 ccb->ccb_done = mpi_empty_done; 2580 peq = ccb->ccb_cmd; 2581 2582 peq->function = MPI_FUNCTION_PORT_ENABLE; 2583 peq->port_number = 0; 2584 2585 if (mpi_poll(sc, ccb, 50000) != 0) { 2586 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc)); 2587 return (1); 2588 } 2589 2590 if (ccb->ccb_rcb == NULL) { 2591 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n", 2592 DEVNAME(sc)); 2593 rv = 1; 2594 } else 2595 mpi_push_reply(sc, ccb->ccb_rcb); 2596 2597 scsi_io_put(&sc->sc_iopool, ccb); 2598 2599 return (rv); 2600 } 2601 2602 int 2603 mpi_fwupload(struct mpi_softc *sc) 2604 { 2605 struct mpi_ccb *ccb; 2606 struct { 2607 struct mpi_msg_fwupload_request req; 2608 struct mpi_sge sge; 2609 } __packed *bundle; 2610 struct mpi_msg_fwupload_reply *upp; 2611 int rv = 0; 2612 2613 if (sc->sc_fw_len == 0) 2614 return (0); 2615 2616 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc)); 2617 2618 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len); 2619 if (sc->sc_fw == NULL) { 2620 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate 
%d\n", 2621 DEVNAME(sc), sc->sc_fw_len); 2622 return (1); 2623 } 2624 2625 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2626 if (ccb == NULL) { 2627 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n", 2628 DEVNAME(sc)); 2629 goto err; 2630 } 2631 2632 ccb->ccb_done = mpi_empty_done; 2633 bundle = ccb->ccb_cmd; 2634 2635 bundle->req.function = MPI_FUNCTION_FW_UPLOAD; 2636 2637 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW; 2638 2639 bundle->req.tce.details_length = 12; 2640 htolem32(&bundle->req.tce.image_size, sc->sc_fw_len); 2641 2642 htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 2643 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 2644 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len); 2645 mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw)); 2646 2647 if (mpi_poll(sc, ccb, 50000) != 0) { 2648 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc)); 2649 goto err; 2650 } 2651 2652 if (ccb->ccb_rcb == NULL) 2653 panic("%s: unable to do fw upload", DEVNAME(sc)); 2654 upp = ccb->ccb_rcb->rcb_reply; 2655 2656 if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2657 rv = 1; 2658 2659 mpi_push_reply(sc, ccb->ccb_rcb); 2660 scsi_io_put(&sc->sc_iopool, ccb); 2661 2662 return (rv); 2663 2664 err: 2665 mpi_dmamem_free(sc, sc->sc_fw); 2666 return (1); 2667 } 2668 2669 int 2670 mpi_manufacturing(struct mpi_softc *sc) 2671 { 2672 char board_name[33]; 2673 struct mpi_cfg_hdr hdr; 2674 struct mpi_cfg_manufacturing_pg0 *pg; 2675 size_t pagelen; 2676 int rv = 1; 2677 2678 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING, 2679 0, 0, &hdr) != 0) 2680 return (1); 2681 2682 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2683 if (pagelen < sizeof(*pg)) 2684 return (1); 2685 2686 pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2687 if (pg == NULL) 2688 return (1); 2689 2690 if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0) 2691 goto out; 2692 2693 scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name)); 2694 2695 
printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name, 2696 sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev); 2697 2698 rv = 0; 2699 2700 out: 2701 free(pg, M_TEMP, pagelen); 2702 return (rv); 2703 } 2704 2705 void 2706 mpi_get_raid(struct mpi_softc *sc) 2707 { 2708 struct mpi_cfg_hdr hdr; 2709 struct mpi_cfg_ioc_pg2 *vol_page; 2710 size_t pagelen; 2711 u_int32_t capabilities; 2712 2713 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc)); 2714 2715 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) { 2716 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header" 2717 "for IOC page 2\n", DEVNAME(sc)); 2718 return; 2719 } 2720 2721 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2722 vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2723 if (vol_page == NULL) { 2724 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate " 2725 "space for ioc config page 2\n", DEVNAME(sc)); 2726 return; 2727 } 2728 2729 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) { 2730 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC " 2731 "page 2\n", DEVNAME(sc)); 2732 goto out; 2733 } 2734 2735 capabilities = lemtoh32(&vol_page->capabilities); 2736 2737 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x08%x\n", DEVNAME(sc), 2738 letoh32(vol_page->capabilities)); 2739 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d " 2740 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 2741 vol_page->active_vols, vol_page->max_vols, 2742 vol_page->active_physdisks, vol_page->max_physdisks); 2743 2744 /* don't walk list if there are no RAID capability */ 2745 if (capabilities == 0xdeadbeef) { 2746 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)); 2747 goto out; 2748 } 2749 2750 if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID)) 2751 sc->sc_flags |= MPI_F_RAID; 2752 2753 out: 2754 free(vol_page, M_TEMP, pagelen); 2755 } 2756 2757 int 2758 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t 
number, 2759 u_int32_t address, int flags, void *p) 2760 { 2761 struct mpi_ccb *ccb; 2762 struct mpi_msg_config_request *cq; 2763 struct mpi_msg_config_reply *cp; 2764 struct mpi_cfg_hdr *hdr = p; 2765 struct mpi_ecfg_hdr *ehdr = p; 2766 int etype = 0; 2767 int rv = 0; 2768 2769 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x " 2770 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number, 2771 address, flags, MPI_PG_FMT); 2772 2773 ccb = scsi_io_get(&sc->sc_iopool, 2774 ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0); 2775 if (ccb == NULL) { 2776 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n", 2777 DEVNAME(sc)); 2778 return (1); 2779 } 2780 2781 if (ISSET(flags, MPI_PG_EXTENDED)) { 2782 etype = type; 2783 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED; 2784 } 2785 2786 cq = ccb->ccb_cmd; 2787 2788 cq->function = MPI_FUNCTION_CONFIG; 2789 2790 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER; 2791 2792 cq->config_header.page_number = number; 2793 cq->config_header.page_type = type; 2794 cq->ext_page_type = etype; 2795 htolem32(&cq->page_address, address); 2796 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 2797 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 2798 2799 ccb->ccb_done = mpi_empty_done; 2800 if (ISSET(flags, MPI_PG_POLL)) { 2801 if (mpi_poll(sc, ccb, 50000) != 0) { 2802 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2803 DEVNAME(sc)); 2804 return (1); 2805 } 2806 } else 2807 mpi_wait(sc, ccb); 2808 2809 if (ccb->ccb_rcb == NULL) 2810 panic("%s: unable to fetch config header", DEVNAME(sc)); 2811 cp = ccb->ccb_rcb->rcb_reply; 2812 2813 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2814 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2815 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2816 "msg_flags: 0x%02x\n", DEVNAME(sc), 2817 letoh16(cp->ext_page_length), cp->ext_page_type, 2818 cp->msg_flags); 2819 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", 
DEVNAME(sc), 2820 letoh32(cp->msg_context)); 2821 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2822 letoh16(cp->ioc_status)); 2823 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2824 letoh32(cp->ioc_loginfo)); 2825 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2826 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2827 cp->config_header.page_version, 2828 cp->config_header.page_length, 2829 cp->config_header.page_number, 2830 cp->config_header.page_type); 2831 2832 if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2833 rv = 1; 2834 else if (ISSET(flags, MPI_PG_EXTENDED)) { 2835 memset(ehdr, 0, sizeof(*ehdr)); 2836 ehdr->page_version = cp->config_header.page_version; 2837 ehdr->page_number = cp->config_header.page_number; 2838 ehdr->page_type = cp->config_header.page_type; 2839 ehdr->ext_page_length = cp->ext_page_length; 2840 ehdr->ext_page_type = cp->ext_page_type; 2841 } else 2842 *hdr = cp->config_header; 2843 2844 mpi_push_reply(sc, ccb->ccb_rcb); 2845 scsi_io_put(&sc->sc_iopool, ccb); 2846 2847 return (rv); 2848 } 2849 2850 int 2851 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags, 2852 void *p, int read, void *page, size_t len) 2853 { 2854 struct mpi_ccb *ccb; 2855 struct mpi_msg_config_request *cq; 2856 struct mpi_msg_config_reply *cp; 2857 struct mpi_cfg_hdr *hdr = p; 2858 struct mpi_ecfg_hdr *ehdr = p; 2859 char *kva; 2860 int page_length; 2861 int rv = 0; 2862 2863 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n", 2864 DEVNAME(sc), address, read, hdr->page_type); 2865 2866 page_length = ISSET(flags, MPI_PG_EXTENDED) ? 2867 lemtoh16(&ehdr->ext_page_length) : hdr->page_length; 2868 2869 if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) || 2870 len < page_length * 4) 2871 return (1); 2872 2873 ccb = scsi_io_get(&sc->sc_iopool, 2874 ISSET(flags, MPI_PG_POLL) ? 
SCSI_NOSLEEP : 0); 2875 if (ccb == NULL) { 2876 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc)); 2877 return (1); 2878 } 2879 2880 cq = ccb->ccb_cmd; 2881 2882 cq->function = MPI_FUNCTION_CONFIG; 2883 2884 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2885 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2886 2887 if (ISSET(flags, MPI_PG_EXTENDED)) { 2888 cq->config_header.page_version = ehdr->page_version; 2889 cq->config_header.page_number = ehdr->page_number; 2890 cq->config_header.page_type = ehdr->page_type; 2891 cq->ext_page_len = ehdr->ext_page_length; 2892 cq->ext_page_type = ehdr->ext_page_type; 2893 } else 2894 cq->config_header = *hdr; 2895 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK; 2896 htolem32(&cq->page_address, address); 2897 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 2898 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 2899 (page_length * 4) | 2900 (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT)); 2901 2902 /* bounce the page via the request space to avoid more bus_dma games */ 2903 mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva + 2904 sizeof(struct mpi_msg_config_request)); 2905 2906 kva = ccb->ccb_cmd; 2907 kva += sizeof(struct mpi_msg_config_request); 2908 if (!read) 2909 memcpy(kva, page, len); 2910 2911 ccb->ccb_done = mpi_empty_done; 2912 if (ISSET(flags, MPI_PG_POLL)) { 2913 if (mpi_poll(sc, ccb, 50000) != 0) { 2914 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2915 DEVNAME(sc)); 2916 return (1); 2917 } 2918 } else 2919 mpi_wait(sc, ccb); 2920 2921 if (ccb->ccb_rcb == NULL) { 2922 scsi_io_put(&sc->sc_iopool, ccb); 2923 return (1); 2924 } 2925 cp = ccb->ccb_rcb->rcb_reply; 2926 2927 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2928 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2929 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2930 "msg_flags: 0x%02x\n", DEVNAME(sc), 2931 letoh16(cp->ext_page_length), 
cp->ext_page_type, 2932 cp->msg_flags); 2933 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2934 letoh32(cp->msg_context)); 2935 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2936 letoh16(cp->ioc_status)); 2937 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2938 letoh32(cp->ioc_loginfo)); 2939 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2940 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2941 cp->config_header.page_version, 2942 cp->config_header.page_length, 2943 cp->config_header.page_number, 2944 cp->config_header.page_type); 2945 2946 if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2947 rv = 1; 2948 else if (read) 2949 memcpy(page, kva, len); 2950 2951 mpi_push_reply(sc, ccb->ccb_rcb); 2952 scsi_io_put(&sc->sc_iopool, ccb); 2953 2954 return (rv); 2955 } 2956 2957 int 2958 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) 2959 { 2960 struct mpi_softc *sc = link->bus->sb_adapter_softc; 2961 2962 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc)); 2963 2964 switch (cmd) { 2965 case DIOCGCACHE: 2966 case DIOCSCACHE: 2967 if (ISSET(link->flags, SDEV_VIRTUAL)) { 2968 return (mpi_ioctl_cache(link, cmd, 2969 (struct dk_cache *)addr)); 2970 } 2971 break; 2972 2973 default: 2974 if (sc->sc_ioctl) 2975 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr)); 2976 2977 break; 2978 } 2979 2980 return (ENOTTY); 2981 } 2982 2983 int 2984 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc) 2985 { 2986 struct mpi_softc *sc = link->bus->sb_adapter_softc; 2987 struct mpi_ccb *ccb; 2988 int len, rv; 2989 struct mpi_cfg_hdr hdr; 2990 struct mpi_cfg_raid_vol_pg0 *rpg0; 2991 int enabled; 2992 struct mpi_msg_raid_action_request *req; 2993 struct mpi_msg_raid_action_reply *rep; 2994 struct mpi_raid_settings settings; 2995 2996 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 2997 link->target, MPI_PG_POLL, &hdr); 2998 if (rv != 0) 2999 
return (EIO); 3000 3001 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks * 3002 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3003 rpg0 = malloc(len, M_TEMP, M_NOWAIT); 3004 if (rpg0 == NULL) 3005 return (ENOMEM); 3006 3007 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, 3008 rpg0, len) != 0) { 3009 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3010 DEVNAME(sc)); 3011 rv = EIO; 3012 goto done; 3013 } 3014 3015 enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings), 3016 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0; 3017 3018 if (cmd == DIOCGCACHE) { 3019 dc->wrcache = enabled; 3020 dc->rdcache = 0; 3021 goto done; 3022 } /* else DIOCSCACHE */ 3023 3024 if (dc->rdcache) { 3025 rv = EOPNOTSUPP; 3026 goto done; 3027 } 3028 3029 if (((dc->wrcache) ? 1 : 0) == enabled) 3030 goto done; 3031 3032 settings = rpg0->settings; 3033 if (dc->wrcache) { 3034 SET(settings.volume_settings, 3035 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3036 } else { 3037 CLR(settings.volume_settings, 3038 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3039 } 3040 3041 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 3042 if (ccb == NULL) { 3043 rv = ENOMEM; 3044 goto done; 3045 } 3046 3047 req = ccb->ccb_cmd; 3048 req->function = MPI_FUNCTION_RAID_ACTION; 3049 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS; 3050 req->vol_id = rpg0->volume_id; 3051 req->vol_bus = rpg0->volume_bus; 3052 3053 memcpy(&req->data_word, &settings, sizeof(req->data_word)); 3054 ccb->ccb_done = mpi_empty_done; 3055 if (mpi_poll(sc, ccb, 50000) != 0) { 3056 rv = EIO; 3057 goto done; 3058 } 3059 3060 rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb; 3061 if (rep == NULL) 3062 panic("%s: raid volume settings change failed", DEVNAME(sc)); 3063 3064 switch (lemtoh16(&rep->action_status)) { 3065 case MPI_RAID_ACTION_STATUS_OK: 3066 rv = 0; 3067 break; 3068 default: 3069 rv = EIO; 3070 break; 3071 } 3072 3073 mpi_push_reply(sc, ccb->ccb_rcb); 3074 
scsi_io_put(&sc->sc_iopool, ccb); 3075 3076 done: 3077 free(rpg0, M_TEMP, len); 3078 return (rv); 3079 } 3080 3081 #if NBIO > 0 3082 int 3083 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id) 3084 { 3085 int len, rv = EINVAL; 3086 u_int32_t address; 3087 struct mpi_cfg_hdr hdr; 3088 struct mpi_cfg_raid_vol_pg0 *rpg0; 3089 3090 /* get IOC page 2 */ 3091 if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3092 sc->sc_cfg_hdr.page_length * 4) != 0) { 3093 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to " 3094 "fetch IOC page 2\n", DEVNAME(sc)); 3095 goto done; 3096 } 3097 3098 /* XXX return something else than EINVAL to indicate within hs range */ 3099 if (id > sc->sc_vol_page->active_vols) { 3100 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol " 3101 "id: %d\n", DEVNAME(sc), id); 3102 goto done; 3103 } 3104 3105 /* replace current buffer with new one */ 3106 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks * 3107 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3108 rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL); 3109 if (rpg0 == NULL) { 3110 printf("%s: can't get memory for RAID page 0, " 3111 "bio disabled\n", DEVNAME(sc)); 3112 goto done; 3113 } 3114 if (sc->sc_rpg0) 3115 free(sc->sc_rpg0, M_DEVBUF, 0); 3116 sc->sc_rpg0 = rpg0; 3117 3118 /* get raid vol page 0 */ 3119 address = sc->sc_vol_list[id].vol_id | 3120 (sc->sc_vol_list[id].vol_bus << 8); 3121 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3122 address, 0, &hdr) != 0) 3123 goto done; 3124 if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) { 3125 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3126 DEVNAME(sc)); 3127 goto done; 3128 } 3129 3130 rv = 0; 3131 done: 3132 return (rv); 3133 } 3134 3135 int 3136 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr) 3137 { 3138 struct mpi_softc *sc = (struct mpi_softc *)dev; 3139 int error = 0; 3140 3141 DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc)); 3142 3143 /* make sure we 
have bio enabled */
	if (sc->sc_ioctl != mpi_ioctl)
		return (EINVAL);

	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MPI_D_IOCTL, "inq\n");
		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MPI_D_IOCTL, "vol\n");
		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MPI_D_IOCTL, "disk\n");
		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MPI_D_IOCTL, "alarm\n");
		break;

	case BIOCBLINK:
		DNPRINTF(MPI_D_IOCTL, "blink\n");
		break;

	case BIOCSETSTATE:
		DNPRINTF(MPI_D_IOCTL, "setstate\n");
		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}

/*
 * BIOCINQ: report the number of RAID volumes and physical disks.
 */
int
mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
{
	if (!(sc->sc_flags & MPI_F_RAID)) {
		bi->bi_novol = 0;
		bi->bi_nodisk = 0;
		/*
		 * NOTE(review): falls through and refetches IOC page 2
		 * below even without RAID capability -- confirm intended.
		 */
	}

	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
	    sc->sc_cfg_hdr.page_length * 4) != 0) {
		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
		    "page 2\n", DEVNAME(sc));
		return (EINVAL);
	}

	DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d "
	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);

	bi->bi_novol = sc->sc_vol_page->active_vols;
	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	return (0);
}

/*
 * BIOCVOL: fill in status, size, RAID level and device name for the
 * volume bv->bv_volid.
 */
int
mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
{
	int			i, vol, id, rv = EINVAL;
	struct device		*dev;
	struct scsi_link	*link;
	struct mpi_cfg_raid_vol_pg0 *rpg0;
	char			*vendp;

	id = bv->bv_volid;
	if (mpi_bio_get_pg0_raid(sc, id))
		goto done;

	/* NOTE(review): bound looks off by one for 0-based ids -- confirm */
	if (id > sc->sc_vol_page->active_vols)
		return (EINVAL); /* XXX deal with hot spares */

	rpg0 = sc->sc_rpg0;
	if (rpg0 == NULL)
		goto done;

	/* determine status */
	switch (rpg0->volume_state) {
	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/* override status if scrubbing or something */
	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
		bv->bv_status = BIOC_SVREBUILD;

	bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba) * 512;

	switch (sc->sc_vol_list[id].vol_type) {
	case MPI_CFG_RAID_TYPE_RAID_IS:
		bv->bv_level = 0;
		break;
	case MPI_CFG_RAID_TYPE_RAID_IME:
	case MPI_CFG_RAID_TYPE_RAID_IM:
		bv->bv_level = 1;
		break;
	case MPI_CFG_RAID_TYPE_RAID_5:
		bv->bv_level = 5;
		break;
	case MPI_CFG_RAID_TYPE_RAID_6:
		bv->bv_level = 6;
		break;
	case MPI_CFG_RAID_TYPE_RAID_10:
		bv->bv_level = 10;
		break;
	case MPI_CFG_RAID_TYPE_RAID_50:
		bv->bv_level = 50;
		break;
	default:
		bv->bv_level = -1;
	}

	bv->bv_nodisk = rpg0->num_phys_disks;

	/* find the scsibus device that corresponds to this volume */
	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);
		if (link == NULL)
			continue;

		/* skip if not a virtual disk */
		if (!(link->flags & SDEV_VIRTUAL))
			continue;

		vol++;
		/* are we it?
		 */
		if (vol == bv->bv_volid) {
			dev = link->device_softc;
			vendp = link->inqdata.vendor;
			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
			break;
		}
	}
	rv = 0;
done:
	return (rv);
}

/*
 * BIOCDISK: fill in status and identity for physical disk
 * bd->bd_diskid of volume bd->bd_volid.
 */
int
mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
{
	int			pdid, id, rv = EINVAL;
	u_int32_t		address;
	struct mpi_cfg_hdr	hdr;
	struct mpi_cfg_raid_vol_pg0 *rpg0;
	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
	struct mpi_cfg_raid_physdisk_pg0 pdpg0;

	id = bd->bd_volid;
	if (mpi_bio_get_pg0_raid(sc, id))
		goto done;

	if (id > sc->sc_vol_page->active_vols)
		return (EINVAL); /* XXX deal with hot spares */

	rpg0 = sc->sc_rpg0;
	if (rpg0 == NULL)
		goto done;

	pdid = bd->bd_diskid;
	if (pdid > rpg0->num_phys_disks)
		goto done;
	/* the per-disk records follow the volume page 0 header */
	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
	physdisk += pdid;

	/* get raid phys disk page 0 */
	address = physdisk->phys_disk_num;
	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
	    &hdr) != 0)
		goto done;
	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
		bd->bd_status = BIOC_SDFAILED;
		return (0);
	}
	bd->bd_channel = pdpg0.phys_disk_bus;
	bd->bd_target = pdpg0.phys_disk_id;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba) * 512;
	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));

	switch (pdpg0.phys_disk_state) {
	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
		bd->bd_status = BIOC_SDSCRUB;
		break;
	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	/* XXX figure this out */
	/* bd_serial[32]; */
	/* bd_procdev[16]; */

	rv = 0;
done:
	return (rv);
}

/*
 * BIOCSETSTATE is not supported.
 */
int
mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
{
	return (ENOTTY);
}

#ifndef SMALL_KERNEL
/*
 * Attach one drive sensor per RAID volume.  Returns 0 on success (or
 * if there are no volumes), 1 on failure.
 */
int
mpi_create_sensors(struct mpi_softc *sc)
{
	struct device		*dev;
	struct scsi_link	*link;
	int			i, vol, nsensors;

	/* count volumes */
	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);
		if (link == NULL)
			continue;
		/* skip if not a virtual disk */
		if (!(link->flags & SDEV_VIRTUAL))
			continue;

		vol++;
	}
	if (vol == 0)
		return (0);

	sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensors == NULL)
		return (1);
	nsensors = vol;

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);
		if (link == NULL)
			continue;
		/* skip if not a virtual disk */
		if (!(link->flags & SDEV_VIRTUAL))
			continue;

		dev = link->device_softc;
		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
		    sizeof(sc->sc_sensors[vol].desc));
		sc->sc_sensors[vol].type = SENSOR_DRIVE;
		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);

		vol++;
	}

	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
		goto bad;

	sensordev_install(&sc->sc_sensordev);

	return (0);

bad:
	free(sc->sc_sensors, M_DEVBUF, nsensors * sizeof(struct ksensor));
	return (1);
}

/*
 * Periodic sensor refresh: map each volume's RAID state onto its
 * drive sensor.
 */
void
mpi_refresh_sensors(void *arg)
{
	int			i, vol;
	struct scsi_link	*link;
	struct mpi_softc	*sc = arg;
	struct mpi_cfg_raid_vol_pg0 *rpg0;

	rw_enter_write(&sc->sc_lock);

	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);
		if (link == NULL)
			continue;
		/* skip if not a virtual disk */
		if (!(link->flags & SDEV_VIRTUAL))
			continue;

		if (mpi_bio_get_pg0_raid(sc, vol))
			continue;

		rpg0 = sc->sc_rpg0;
		if (rpg0 == NULL)
			goto done;

		/* determine status */
		switch (rpg0->volume_state) {
		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
			sc->sc_sensors[vol].status = SENSOR_S_OK;
			break;
		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
			sc->sc_sensors[vol].status = SENSOR_S_WARN;
			break;
		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
			break;
		default:
			sc->sc_sensors[vol].value = 0; /* unknown */
			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
		}

		/* override status if scrubbing or something */
		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
			sc->sc_sensors[vol].status = SENSOR_S_WARN;
		}

		vol++;
	}
done:
	rw_exit_write(&sc->sc_lock);
}
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */