1 /* $OpenBSD: mpii.c,v 1.138 2020/07/22 13:16:04 krw Exp $ */ 2 /* 3 * Copyright (c) 2010, 2012 Mike Belopuhov 4 * Copyright (c) 2009 James Giannoules 5 * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org> 6 * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 #include "bio.h" 22 23 #include <sys/param.h> 24 #include <sys/systm.h> 25 #include <sys/device.h> 26 #include <sys/ioctl.h> 27 #include <sys/malloc.h> 28 #include <sys/kernel.h> 29 #include <sys/rwlock.h> 30 #include <sys/sensors.h> 31 #include <sys/dkio.h> 32 #include <sys/tree.h> 33 #include <sys/task.h> 34 35 #include <machine/bus.h> 36 37 #include <dev/pci/pcireg.h> 38 #include <dev/pci/pcivar.h> 39 #include <dev/pci/pcidevs.h> 40 41 #include <scsi/scsi_all.h> 42 #include <scsi/scsiconf.h> 43 44 #include <dev/biovar.h> 45 46 #include <dev/pci/mpiireg.h> 47 48 /* #define MPII_DEBUG */ 49 #ifdef MPII_DEBUG 50 #define DPRINTF(x...) do { if (mpii_debug) printf(x); } while(0) 51 #define DNPRINTF(n,x...) do { if (mpii_debug & (n)) printf(x); } while(0) 52 #define MPII_D_CMD (0x0001) 53 #define MPII_D_INTR (0x0002) 54 #define MPII_D_MISC (0x0004) 55 #define MPII_D_DMA (0x0008) 56 #define MPII_D_IOCTL (0x0010) 57 #define MPII_D_RW (0x0020) 58 #define MPII_D_MEM (0x0040) 59 #define MPII_D_CCB (0x0080) 60 #define MPII_D_PPR (0x0100) 61 #define MPII_D_RAID (0x0200) 62 #define MPII_D_EVT (0x0400) 63 #define MPII_D_CFG (0x0800) 64 #define MPII_D_MAP (0x1000) 65 66 u_int32_t mpii_debug = 0 67 | MPII_D_CMD 68 | MPII_D_INTR 69 | MPII_D_MISC 70 | MPII_D_DMA 71 | MPII_D_IOCTL 72 | MPII_D_RW 73 | MPII_D_MEM 74 | MPII_D_CCB 75 | MPII_D_PPR 76 | MPII_D_RAID 77 | MPII_D_EVT 78 | MPII_D_CFG 79 | MPII_D_MAP 80 ; 81 #else 82 #define DPRINTF(x...) 83 #define DNPRINTF(n,x...) 84 #endif 85 86 #define MPII_REQUEST_SIZE (512) 87 #define MPII_REQUEST_CREDIT (128) 88 89 struct mpii_dmamem { 90 bus_dmamap_t mdm_map; 91 bus_dma_segment_t mdm_seg; 92 size_t mdm_size; 93 caddr_t mdm_kva; 94 }; 95 #define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map) 96 #define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr) 97 #define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva) 98 99 struct mpii_softc; 100 101 struct mpii_rcb { 102 SIMPLEQ_ENTRY(mpii_rcb) rcb_link; 103 void *rcb_reply; 104 u_int32_t rcb_reply_dva; 105 }; 106 107 SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb); 108 109 struct mpii_device { 110 int flags; 111 #define MPII_DF_ATTACH (0x0001) 112 #define MPII_DF_DETACH (0x0002) 113 #define MPII_DF_HIDDEN (0x0004) 114 #define MPII_DF_UNUSED (0x0008) 115 #define MPII_DF_VOLUME (0x0010) 116 #define MPII_DF_VOLUME_DISK (0x0020) 117 #define MPII_DF_HOT_SPARE (0x0040) 118 short slot; 119 short percent; 120 u_int16_t dev_handle; 121 u_int16_t enclosure; 122 u_int16_t expander; 123 u_int8_t phy_num; 124 u_int8_t physical_port; 125 }; 126 127 struct mpii_ccb { 128 struct mpii_softc *ccb_sc; 129 130 void * ccb_cookie; 131 bus_dmamap_t ccb_dmamap; 132 133 bus_addr_t ccb_offset; 134 void *ccb_cmd; 135 bus_addr_t ccb_cmd_dva; 136 u_int16_t ccb_dev_handle; 137 u_int16_t ccb_smid; 138 139 volatile enum { 140 MPII_CCB_FREE, 141 MPII_CCB_READY, 142 MPII_CCB_QUEUED, 143 MPII_CCB_TIMEOUT 144 } ccb_state; 145 146 void (*ccb_done)(struct mpii_ccb *); 147 struct mpii_rcb *ccb_rcb; 148 149 SIMPLEQ_ENTRY(mpii_ccb) ccb_link; 150 }; 151 152 SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb); 153 154 struct mpii_softc { 155 struct device sc_dev; 156 157 pci_chipset_tag_t sc_pc; 158 pcitag_t sc_tag; 159 160 void *sc_ih; 161 162 int sc_flags; 163 #define MPII_F_RAID (1<<1) 164 #define MPII_F_SAS3 (1<<2) 165 166 struct scsibus_softc *sc_scsibus; 167 unsigned int sc_pending; 168 169 struct mpii_device **sc_devs; 170 171 bus_space_tag_t sc_iot; 172 bus_space_handle_t sc_ioh; 173 bus_size_t sc_ios; 174 bus_dma_tag_t sc_dmat; 175 176 struct mutex sc_req_mtx; 177 struct mutex sc_rep_mtx; 178 179 ushort sc_reply_size; 180 ushort sc_request_size; 181 182 ushort sc_max_cmds; 183 ushort sc_num_reply_frames; 184 u_int sc_reply_free_qdepth; 185 u_int sc_reply_post_qdepth; 186 187 ushort sc_chain_sge; 188 ushort sc_max_sgl; 189 int sc_max_chain; 190 191 u_int8_t sc_ioc_event_replay; 192 193 u_int8_t sc_porttype; 194 u_int8_t sc_max_volumes; 195 u_int16_t sc_max_devices; 196 u_int16_t sc_vd_count; 197 u_int16_t sc_vd_id_low; 198 u_int16_t sc_pd_id_start; 199 int sc_ioc_number; 200 u_int8_t sc_vf_id; 201 202 struct mpii_ccb *sc_ccbs; 203 struct mpii_ccb_list sc_ccb_free; 204 struct mutex sc_ccb_free_mtx; 205 206 struct mutex sc_ccb_mtx; 207 /* 208 * this protects the ccb state and list entry 209 * between mpii_scsi_cmd and scsidone. 210 */ 211 212 struct mpii_ccb_list sc_ccb_tmos; 213 struct scsi_iohandler sc_ccb_tmo_handler; 214 215 struct scsi_iopool sc_iopool; 216 217 struct mpii_dmamem *sc_requests; 218 219 struct mpii_dmamem *sc_replies; 220 struct mpii_rcb *sc_rcbs; 221 222 struct mpii_dmamem *sc_reply_postq; 223 struct mpii_reply_descr *sc_reply_postq_kva; 224 u_int sc_reply_post_host_index; 225 226 struct mpii_dmamem *sc_reply_freeq; 227 u_int sc_reply_free_host_index; 228 229 struct mpii_rcb_list sc_evt_sas_queue; 230 struct mutex sc_evt_sas_mtx; 231 struct task sc_evt_sas_task; 232 233 struct mpii_rcb_list sc_evt_ack_queue; 234 struct mutex sc_evt_ack_mtx; 235 struct scsi_iohandler sc_evt_ack_handler; 236 237 /* scsi ioctl from sd device */ 238 int (*sc_ioctl)(struct device *, u_long, caddr_t); 239 240 int sc_nsensors; 241 struct ksensor *sc_sensors; 242 struct ksensordev sc_sensordev; 243 }; 244 245 int mpii_match(struct device *, void *, void *); 246 void mpii_attach(struct device *, struct device *, void *); 247 int mpii_detach(struct device *, int); 248 249 int mpii_intr(void *); 250 251 struct cfattach mpii_ca = { 252 sizeof(struct mpii_softc), 253 mpii_match, 254 mpii_attach, 255 mpii_detach 256 }; 257 258 struct cfdriver mpii_cd = { 259 NULL, 260 "mpii", 261 DV_DULL 262 }; 263 264 void mpii_scsi_cmd(struct scsi_xfer *); 265 void mpii_scsi_cmd_done(struct mpii_ccb *); 266 int mpii_scsi_probe(struct scsi_link *); 267 int mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int); 268 269 struct scsi_adapter mpii_switch = { 270 mpii_scsi_cmd, NULL, mpii_scsi_probe, NULL, mpii_scsi_ioctl 271 }; 272 273 struct mpii_dmamem * 274 mpii_dmamem_alloc(struct mpii_softc *, size_t); 275 void mpii_dmamem_free(struct mpii_softc *, 276 struct mpii_dmamem *); 277 int mpii_alloc_ccbs(struct mpii_softc *); 278 void * mpii_get_ccb(void *); 279 void mpii_put_ccb(void *, void *); 280 int mpii_alloc_replies(struct mpii_softc *); 281 int mpii_alloc_queues(struct mpii_softc *); 282 void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *); 283 void mpii_push_replies(struct mpii_softc *); 284 285 void mpii_scsi_cmd_tmo(void *); 286 void mpii_scsi_cmd_tmo_handler(void *, void *); 287 void mpii_scsi_cmd_tmo_done(struct mpii_ccb *); 288 289 int mpii_insert_dev(struct mpii_softc *, struct mpii_device *); 290 int mpii_remove_dev(struct mpii_softc *, struct mpii_device *); 291 struct mpii_device * 292 mpii_find_dev(struct mpii_softc *, u_int16_t); 293 294 void mpii_start(struct mpii_softc *, struct mpii_ccb *); 295 int mpii_poll(struct mpii_softc *, struct mpii_ccb *); 296 void mpii_poll_done(struct mpii_ccb *); 297 struct mpii_rcb * 298 mpii_reply(struct mpii_softc *, struct mpii_reply_descr *); 299 300 void mpii_wait(struct mpii_softc *, struct mpii_ccb *); 301 void mpii_wait_done(struct mpii_ccb *); 302 303 void mpii_init_queues(struct mpii_softc *); 304 305 int mpii_load_xs(struct mpii_ccb *); 306 int mpii_load_xs_sas3(struct mpii_ccb *); 307 308 u_int32_t mpii_read(struct mpii_softc *, bus_size_t); 309 void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t); 310 int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t, 311 u_int32_t); 312 int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t, 313 u_int32_t); 314 315 int mpii_init(struct mpii_softc *); 316 int mpii_reset_soft(struct mpii_softc *); 317 int mpii_reset_hard(struct mpii_softc *); 318 319 int mpii_handshake_send(struct mpii_softc *, void *, size_t); 320 int mpii_handshake_recv_dword(struct mpii_softc *, 321 u_int32_t *); 322 int mpii_handshake_recv(struct mpii_softc *, void *, size_t); 323 324 void mpii_empty_done(struct mpii_ccb *); 325 326 int mpii_iocinit(struct mpii_softc *); 327 int mpii_iocfacts(struct mpii_softc *); 328 int mpii_portfacts(struct mpii_softc *); 329 int mpii_portenable(struct mpii_softc *); 330 int mpii_cfg_coalescing(struct mpii_softc *); 331 int mpii_board_info(struct mpii_softc *); 332 int mpii_target_map(struct mpii_softc *); 333 334 int mpii_eventnotify(struct mpii_softc *); 335 void mpii_eventnotify_done(struct mpii_ccb *); 336 void mpii_eventack(void *, void *); 337 void mpii_eventack_done(struct mpii_ccb *); 338 void mpii_event_process(struct mpii_softc *, struct mpii_rcb *); 339 void mpii_event_done(struct mpii_softc *, struct mpii_rcb *); 340 void mpii_event_sas(void *); 341 void mpii_event_raid(struct mpii_softc *, 342 struct mpii_msg_event_reply *); 343 void mpii_event_discovery(struct mpii_softc *, 344 struct mpii_msg_event_reply *); 345 346 void mpii_sas_remove_device(struct mpii_softc *, u_int16_t); 347 348 int mpii_req_cfg_header(struct mpii_softc *, u_int8_t, 349 u_int8_t, u_int32_t, int, void *); 350 int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int, 351 void *, int, void *, size_t); 352 353 int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *); 354 355 #if NBIO > 0 356 int mpii_ioctl(struct device *, u_long, caddr_t); 357 int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *); 358 int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *); 359 int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *); 360 int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int, 361 int, int *); 362 int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *, 363 u_int8_t); 364 struct mpii_device * 365 mpii_find_vol(struct mpii_softc *, int); 366 #ifndef SMALL_KERNEL 367 int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *); 368 int mpii_create_sensors(struct mpii_softc *); 369 void mpii_refresh_sensors(void *); 370 #endif /* SMALL_KERNEL */ 371 #endif /* NBIO > 0 */ 372 373 #define DEVNAME(s) ((s)->sc_dev.dv_xname) 374 375 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t)) 376 377 #define mpii_read_db(s) mpii_read((s), MPII_DOORBELL) 378 #define mpii_write_db(s, v) mpii_write((s), MPII_DOORBELL, (v)) 379 #define mpii_read_intr(s) mpii_read((s), MPII_INTR_STATUS) 380 #define mpii_write_intr(s, v) mpii_write((s), MPII_INTR_STATUS, (v)) 381 #define mpii_reply_waiting(s) ((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\ 382 == MPII_INTR_STATUS_REPLY) 383 384 #define mpii_write_reply_free(s, v) \ 385 bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \ 386 MPII_REPLY_FREE_HOST_INDEX, (v)) 387 #define mpii_write_reply_post(s, v) \ 388 bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \ 389 MPII_REPLY_POST_HOST_INDEX, (v)) 390 391 #define mpii_wait_db_int(s) mpii_wait_ne((s), MPII_INTR_STATUS, \ 392 MPII_INTR_STATUS_IOC2SYSDB, 0) 393 #define mpii_wait_db_ack(s) mpii_wait_eq((s), MPII_INTR_STATUS, \ 394 MPII_INTR_STATUS_SYS2IOCDB, 0) 395 396 static inline void 397 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva) 398 { 399 htolem32(&sge->sg_addr_lo, dva); 400 htolem32(&sge->sg_addr_hi, dva >> 32); 401 } 402 403 #define MPII_PG_EXTENDED (1<<0) 404 #define MPII_PG_POLL (1<<1) 405 #define MPII_PG_FMT "\020" "\002POLL" "\001EXTENDED" 406 407 static const struct pci_matchid mpii_devices[] = { 408 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2004 }, 409 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2008 }, 410 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SSS6200 }, 411 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2108_3 }, 412 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2108_4 }, 413 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2108_5 }, 414 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2116_1 }, 415 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2116_2 }, 416 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2208_1 }, 417 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2208_2 }, 418 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2208_3 }, 419 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2208_4 }, 420 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2208_5 }, 421 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2208_6 }, 422 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2308_1 }, 423 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2308_2 }, 424 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS2308_3 }, 425 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3004 }, 426 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3008 }, 427 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_1 }, 428 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_2 }, 429 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_3 }, 430 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_4 }, 431 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3408 }, 432 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3416 }, 433 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3508 }, 434 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3508_1 }, 435 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3516 }, 436 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3516_1 } 437 }; 438 439 int 440 mpii_match(struct device *parent, void *match, void *aux) 441 { 442 return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices))); 443 } 444 445 void 446 mpii_attach(struct device *parent, struct device *self, void *aux) 447 { 448 struct mpii_softc *sc = (struct mpii_softc *)self; 449 struct pci_attach_args *pa = aux; 450 pcireg_t memtype; 451 int r; 452 pci_intr_handle_t ih; 453 struct scsibus_attach_args saa; 454 struct mpii_ccb *ccb; 455 456 sc->sc_pc = pa->pa_pc; 457 sc->sc_tag = pa->pa_tag; 458 sc->sc_dmat = pa->pa_dmat; 459 460 mtx_init(&sc->sc_req_mtx, IPL_BIO); 461 mtx_init(&sc->sc_rep_mtx, IPL_BIO); 462 463 /* find the appropriate memory base */ 464 for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) { 465 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r); 466 if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM) 467 break; 468 } 469 if (r >= PCI_MAPREG_END) { 470 printf(": unable to locate system interface registers\n"); 471 return; 472 } 473 474 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh, 475 NULL, &sc->sc_ios, 0xFF) != 0) { 476 printf(": unable to map system interface registers\n"); 477 return; 478 } 479 480 /* disable the expansion rom */ 481 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG, 482 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) & 483 ~PCI_ROM_ENABLE); 484 485 /* disable interrupts */ 486 mpii_write(sc, MPII_INTR_MASK, 487 MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY | 488 MPII_INTR_MASK_DOORBELL); 489 490 /* hook up the interrupt */ 491 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 492 printf(": unable to map interrupt\n"); 493 goto unmap; 494 } 495 printf(": %s\n", pci_intr_string(sc->sc_pc, ih)); 496 497 if (mpii_iocfacts(sc) != 0) { 498 printf("%s: unable to get iocfacts\n", DEVNAME(sc)); 499 goto unmap; 500 } 501 502 if (mpii_init(sc) != 0) { 503 printf("%s: unable to initialize ioc\n", DEVNAME(sc)); 504 goto unmap; 505 } 506 507 if (mpii_alloc_ccbs(sc) != 0) { 508 /* error already printed */ 509 goto unmap; 510 } 511 512 if (mpii_alloc_replies(sc) != 0) { 513 printf("%s: unable to allocated reply space\n", DEVNAME(sc)); 514 goto free_ccbs; 515 } 516 517 if (mpii_alloc_queues(sc) != 0) { 518 printf("%s: unable to allocate reply queues\n", DEVNAME(sc)); 519 goto free_replies; 520 } 521 522 if (mpii_iocinit(sc) != 0) { 523 printf("%s: unable to send iocinit\n", DEVNAME(sc)); 524 goto free_queues; 525 } 526 527 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE, 528 MPII_DOORBELL_STATE_OPER) != 0) { 529 printf("%s: state: 0x%08x\n", DEVNAME(sc), 530 mpii_read_db(sc) & MPII_DOORBELL_STATE); 531 printf("%s: operational state timeout\n", DEVNAME(sc)); 532 goto free_queues; 533 } 534 535 mpii_push_replies(sc); 536 mpii_init_queues(sc); 537 538 if (mpii_board_info(sc) != 0) { 539 printf("%s: unable to get manufacturing page 0\n", 540 DEVNAME(sc)); 541 goto free_queues; 542 } 543 544 if (mpii_portfacts(sc) != 0) { 545 printf("%s: unable to get portfacts\n", DEVNAME(sc)); 546 goto free_queues; 547 } 548 549 if (mpii_target_map(sc) != 0) { 550 printf("%s: unable to setup target mappings\n", DEVNAME(sc)); 551 goto free_queues; 552 } 553 554 if (mpii_cfg_coalescing(sc) != 0) { 555 printf("%s: unable to configure coalescing\n", DEVNAME(sc)); 556 goto free_queues; 557 } 558 559 /* XXX bail on unsupported porttype? */ 560 if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) || 561 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) || 562 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) { 563 if (mpii_eventnotify(sc) != 0) { 564 printf("%s: unable to enable events\n", DEVNAME(sc)); 565 goto free_queues; 566 } 567 } 568 569 sc->sc_devs = mallocarray(sc->sc_max_devices, 570 sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO); 571 if (sc->sc_devs == NULL) { 572 printf("%s: unable to allocate memory for mpii_device\n", 573 DEVNAME(sc)); 574 goto free_queues; 575 } 576 577 if (mpii_portenable(sc) != 0) { 578 printf("%s: unable to enable port\n", DEVNAME(sc)); 579 goto free_devs; 580 } 581 582 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO, 583 mpii_intr, sc, sc->sc_dev.dv_xname); 584 if (sc->sc_ih == NULL) 585 goto free_devs; 586 587 /* force autoconf to wait for the first sas discovery to complete */ 588 sc->sc_pending = 1; 589 config_pending_incr(); 590 591 saa.saa_adapter = &mpii_switch; 592 saa.saa_adapter_softc = sc; 593 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET; 594 saa.saa_adapter_buswidth = sc->sc_max_devices; 595 saa.saa_luns = 1; 596 saa.saa_openings = sc->sc_max_cmds - 1; 597 saa.saa_pool = &sc->sc_iopool; 598 saa.saa_quirks = saa.saa_flags = 0; 599 saa.saa_wwpn = saa.saa_wwnn = 0; 600 601 sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev, 602 &saa, scsiprint); 603 604 /* enable interrupts */ 605 mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL 606 | MPII_INTR_MASK_RESET); 607 608 #if NBIO > 0 609 if (ISSET(sc->sc_flags, MPII_F_RAID)) { 610 if (bio_register(&sc->sc_dev, mpii_ioctl) != 0) 611 panic("%s: controller registration failed", 612 DEVNAME(sc)); 613 else 614 sc->sc_ioctl = mpii_ioctl; 615 616 #ifndef SMALL_KERNEL 617 if (mpii_create_sensors(sc) != 0) 618 printf("%s: unable to create sensors\n", DEVNAME(sc)); 619 #endif 620 } 621 #endif 622 623 return; 624 625 free_devs: 626 free(sc->sc_devs, M_DEVBUF, 0); 627 sc->sc_devs = NULL; 628 629 free_queues: 630 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq), 631 0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD); 632 mpii_dmamem_free(sc, sc->sc_reply_freeq); 633 634 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq), 635 0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD); 636 mpii_dmamem_free(sc, sc->sc_reply_postq); 637 638 free_replies: 639 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies), 640 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD); 641 mpii_dmamem_free(sc, sc->sc_replies); 642 643 free_ccbs: 644 while ((ccb = mpii_get_ccb(sc)) != NULL) 645 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 646 mpii_dmamem_free(sc, sc->sc_requests); 647 free(sc->sc_ccbs, M_DEVBUF, 0); 648 649 unmap: 650 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 651 sc->sc_ios = 0; 652 } 653 654 int 655 mpii_detach(struct device *self, int flags) 656 { 657 struct mpii_softc *sc = (struct mpii_softc *)self; 658 659 if (sc->sc_ih != NULL) { 660 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 661 sc->sc_ih = NULL; 662 } 663 if (sc->sc_ios != 0) { 664 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 665 sc->sc_ios = 0; 666 } 667 668 return (0); 669 } 670 671 int 672 mpii_intr(void *arg) 673 { 674 struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts); 675 struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs); 676 struct mpii_softc *sc = arg; 677 struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp; 678 struct mpii_ccb *ccb; 679 struct mpii_rcb *rcb; 680 int smid; 681 u_int idx; 682 int rv = 0; 683 684 mtx_enter(&sc->sc_rep_mtx); 685 bus_dmamap_sync(sc->sc_dmat, 686 MPII_DMA_MAP(sc->sc_reply_postq), 687 0, sc->sc_reply_post_qdepth * sizeof(*rdp), 688 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 689 690 idx = sc->sc_reply_post_host_index; 691 for (;;) { 692 rdp = &postq[idx]; 693 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) == 694 MPII_REPLY_DESCR_UNUSED) 695 break; 696 if (rdp->data == 0xffffffff) { 697 /* 698 * ioc is still writing to the reply post queue 699 * race condition - bail! 700 */ 701 break; 702 } 703 704 smid = lemtoh16(&rdp->smid); 705 rcb = mpii_reply(sc, rdp); 706 707 if (smid) { 708 ccb = &sc->sc_ccbs[smid - 1]; 709 ccb->ccb_state = MPII_CCB_READY; 710 ccb->ccb_rcb = rcb; 711 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link); 712 } else 713 SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link); 714 715 if (++idx >= sc->sc_reply_post_qdepth) 716 idx = 0; 717 718 rv = 1; 719 } 720 721 bus_dmamap_sync(sc->sc_dmat, 722 MPII_DMA_MAP(sc->sc_reply_postq), 723 0, sc->sc_reply_post_qdepth * sizeof(*rdp), 724 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 725 726 if (rv) 727 mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx); 728 729 mtx_leave(&sc->sc_rep_mtx); 730 731 if (rv == 0) 732 return (0); 733 734 while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) { 735 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link); 736 ccb->ccb_done(ccb); 737 } 738 while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) { 739 SIMPLEQ_REMOVE_HEAD(&evts, rcb_link); 740 mpii_event_process(sc, rcb); 741 } 742 743 return (1); 744 } 745 746 int 747 mpii_load_xs_sas3(struct mpii_ccb *ccb) 748 { 749 struct mpii_softc *sc = ccb->ccb_sc; 750 struct scsi_xfer *xs = ccb->ccb_cookie; 751 struct mpii_msg_scsi_io *io = ccb->ccb_cmd; 752 struct mpii_ieee_sge *csge, *nsge, *sge; 753 bus_dmamap_t dmap = ccb->ccb_dmamap; 754 int i, error; 755 756 /* Request frame structure is described in the mpii_iocfacts */ 757 nsge = (struct mpii_ieee_sge *)(io + 1); 758 759 /* zero length transfer still requires an SGE */ 760 if (xs->datalen == 0) { 761 nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST; 762 return (0); 763 } 764 765 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL, 766 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 767 if (error) { 768 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 769 return (1); 770 } 771 772 csge = NULL; 773 if (dmap->dm_nsegs > sc->sc_chain_sge) { 774 csge = nsge + sc->sc_chain_sge; 775 776 /* offset to the chain sge from the beginning */ 777 io->chain_offset = ((caddr_t)csge - (caddr_t)io) / sizeof(*sge); 778 } 779 780 for (i = 0; i < dmap->dm_nsegs; i++, nsge++) { 781 if (nsge == csge) { 782 nsge++; 783 784 /* address of the next sge */ 785 htolem64(&csge->sg_addr, ccb->ccb_cmd_dva + 786 ((caddr_t)nsge - (caddr_t)io)); 787 htolem32(&csge->sg_len, (dmap->dm_nsegs - i) * 788 sizeof(*sge)); 789 csge->sg_next_chain_offset = 0; 790 csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT | 791 MPII_IEEE_SGE_ADDR_SYSTEM; 792 793 if ((dmap->dm_nsegs - i) > sc->sc_max_chain) { 794 csge->sg_next_chain_offset = sc->sc_max_chain; 795 csge += sc->sc_max_chain; 796 } 797 } 798 799 sge = nsge; 800 sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM; 801 sge->sg_next_chain_offset = 0; 802 htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len); 803 htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr); 804 } 805 806 /* terminate list */ 807 sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST; 808 809 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 810 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD : 811 BUS_DMASYNC_PREWRITE); 812 813 return (0); 814 } 815 816 int 817 mpii_load_xs(struct mpii_ccb *ccb) 818 { 819 struct mpii_softc *sc = ccb->ccb_sc; 820 struct scsi_xfer *xs = ccb->ccb_cookie; 821 struct mpii_msg_scsi_io *io = ccb->ccb_cmd; 822 struct mpii_sge *csge, *nsge, *sge; 823 bus_dmamap_t dmap = ccb->ccb_dmamap; 824 u_int32_t flags; 825 u_int16_t len; 826 int i, error; 827 828 /* Request frame structure is described in the mpii_iocfacts */ 829 nsge = (struct mpii_sge *)(io + 1); 830 csge = nsge + sc->sc_chain_sge; 831 832 /* zero length transfer still requires an SGE */ 833 if (xs->datalen == 0) { 834 nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE | 835 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL); 836 return (0); 837 } 838 839 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL, 840 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 841 if (error) { 842 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 843 return (1); 844 } 845 846 /* safe default starting flags */ 847 flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64; 848 if (xs->flags & SCSI_DATA_OUT) 849 flags |= MPII_SGE_FL_DIR_OUT; 850 851 for (i = 0; i < dmap->dm_nsegs; i++, nsge++) { 852 if (nsge == csge) { 853 nsge++; 854 /* offset to the chain sge from the beginning */ 855 io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4; 856 /* length of the sgl segment we're pointing to */ 857 len = (dmap->dm_nsegs - i) * sizeof(*sge); 858 htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN | 859 MPII_SGE_FL_SIZE_64 | len); 860 /* address of the next sge */ 861 mpii_dvatosge(csge, ccb->ccb_cmd_dva + 862 ((caddr_t)nsge - (caddr_t)io)); 863 } 864 865 sge = nsge; 866 htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len); 867 mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr); 868 } 869 870 /* terminate list */ 871 sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | 872 MPII_SGE_FL_EOL); 873 874 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 875 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD : 876 BUS_DMASYNC_PREWRITE); 877 878 return (0); 879 } 880 881 int 882 mpii_scsi_probe(struct scsi_link *link) 883 { 884 struct mpii_softc *sc = link->bus->sb_adapter_softc; 885 struct mpii_cfg_sas_dev_pg0 pg0; 886 struct mpii_ecfg_hdr ehdr; 887 struct mpii_device *dev; 888 uint32_t address; 889 int flags; 890 891 if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) && 892 (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) && 893 (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE)) 894 return (ENXIO); 895 896 dev = sc->sc_devs[link->target]; 897 if (dev == NULL) 898 return (1); 899 900 flags = dev->flags; 901 if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED)) 902 return (1); 903 904 if (ISSET(flags, MPII_DF_VOLUME)) { 905 struct mpii_cfg_hdr hdr; 906 struct mpii_cfg_raid_vol_pg1 vpg; 907 size_t pagelen; 908 909 address = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle; 910 911 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 912 1, address, MPII_PG_POLL, &hdr) != 0) 913 return (EINVAL); 914 915 memset(&vpg, 0, sizeof(vpg)); 916 /* avoid stack trash on future page growth */ 917 pagelen = min(sizeof(vpg), hdr.page_length * 4); 918 919 if (mpii_req_cfg_page(sc, address, MPII_PG_POLL, &hdr, 1, 920 &vpg, pagelen) != 0) 921 return (EINVAL); 922 923 link->port_wwn = letoh64(vpg.wwid); 924 /* 925 * WWIDs generated by LSI firmware are not IEEE NAA compliant 926 * and historical practise in OBP on sparc64 is to set the top 927 * nibble to 3 to indicate that this is a RAID volume. 928 */ 929 link->port_wwn &= 0x0fffffffffffffff; 930 link->port_wwn |= 0x3000000000000000; 931 932 return (0); 933 } 934 935 memset(&ehdr, 0, sizeof(ehdr)); 936 ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED; 937 ehdr.page_number = 0; 938 ehdr.page_version = 0; 939 ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE; 940 ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */ 941 942 address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle; 943 if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED, 944 &ehdr, 1, &pg0, sizeof(pg0)) != 0) { 945 printf("%s: unable to fetch SAS device page 0 for target %u\n", 946 DEVNAME(sc), link->target); 947 948 return (0); /* the handle should still work */ 949 } 950 951 link->port_wwn = letoh64(pg0.sas_addr); 952 link->node_wwn = letoh64(pg0.device_name); 953 954 if (ISSET(lemtoh32(&pg0.device_info), 955 MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) { 956 link->flags |= SDEV_ATAPI; 957 link->quirks |= SDEV_ONLYBIG; 958 } 959 960 return (0); 961 } 962 963 u_int32_t 964 mpii_read(struct mpii_softc *sc, bus_size_t r) 965 { 966 u_int32_t rv; 967 968 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 969 BUS_SPACE_BARRIER_READ); 970 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r); 971 972 DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv); 973 974 return (rv); 975 } 976 977 void 978 mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v) 979 { 980 DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v); 981 982 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 983 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 984 BUS_SPACE_BARRIER_WRITE); 985 } 986 987 988 int 989 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask, 990 u_int32_t target) 991 { 992 int i; 993 994 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r, 995 mask, target); 996 997 for (i = 0; i < 15000; i++) { 998 if ((mpii_read(sc, r) & mask) == target) 999 return (0); 1000 delay(1000); 1001 } 1002 1003 return (1); 1004 } 1005 1006 int 1007 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask, 1008 u_int32_t target) 1009 { 1010 int i; 1011 1012 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r, 1013 mask, target); 1014 1015 for (i = 0; i < 15000; i++) { 1016 if ((mpii_read(sc, r) & mask) != target) 1017 return (0); 1018 delay(1000); 1019 } 1020 1021 return (1); 1022 } 1023 1024 int 1025 mpii_init(struct mpii_softc *sc) 1026 { 1027 u_int32_t db; 1028 int i; 1029 1030 /* spin until the ioc leaves the reset state */ 1031 if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE, 1032 MPII_DOORBELL_STATE_RESET) != 0) { 1033 DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave " 1034 "reset state\n", DEVNAME(sc)); 1035 return (1); 1036 } 1037 1038 /* check current ownership */ 1039 db = mpii_read_db(sc); 1040 if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) { 1041 DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n", 1042 DEVNAME(sc)); 1043 return (0); 1044 } 1045 1046 for (i = 0; i < 5; i++) { 1047 switch (db & MPII_DOORBELL_STATE) { 1048 case MPII_DOORBELL_STATE_READY: 1049 DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n", 1050 DEVNAME(sc)); 1051 return (0); 1052 1053 case MPII_DOORBELL_STATE_OPER: 1054 DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n", 1055 DEVNAME(sc)); 1056 if (sc->sc_ioc_event_replay) 1057 mpii_reset_soft(sc); 1058 else 1059 mpii_reset_hard(sc); 1060 break; 1061 1062 case MPII_DOORBELL_STATE_FAULT: 1063 DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being " 1064 "reset hard\n" , DEVNAME(sc)); 1065 mpii_reset_hard(sc); 1066 break; 1067 1068 case MPII_DOORBELL_STATE_RESET: 1069 DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come " 1070 "out of reset\n", DEVNAME(sc)); 1071 if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE, 1072 MPII_DOORBELL_STATE_RESET) != 0) 1073 return (1); 1074 break; 1075 } 1076 db = mpii_read_db(sc); 1077 } 1078 1079 return (1); 1080 } 1081 1082 int 1083 mpii_reset_soft(struct mpii_softc *sc) 1084 { 1085 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc)); 1086 1087 if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) { 1088 return (1); 1089 } 1090 1091 mpii_write_db(sc, 1092 MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET)); 1093 1094 /* XXX LSI waits 15 sec */ 1095 if (mpii_wait_db_ack(sc) != 0) 1096 return (1); 1097 1098 /* XXX LSI waits 15 sec */ 1099 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE, 1100 MPII_DOORBELL_STATE_READY) != 0) 1101 return (1); 1102 1103 /* XXX wait for Sys2IOCDB bit to clear in HIS?? */ 1104 1105 return (0); 1106 } 1107 1108 int 1109 mpii_reset_hard(struct mpii_softc *sc) 1110 { 1111 u_int16_t i; 1112 1113 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc)); 1114 1115 mpii_write_intr(sc, 0); 1116 1117 /* enable diagnostic register */ 1118 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH); 1119 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1); 1120 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2); 1121 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3); 1122 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4); 1123 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5); 1124 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6); 1125 1126 delay(100); 1127 1128 if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) { 1129 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable " 1130 "diagnostic read/write\n", DEVNAME(sc)); 1131 return(1); 1132 } 1133 1134 /* reset ioc */ 1135 mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER); 1136 1137 /* 240 milliseconds */ 1138 delay(240000); 1139 1140 1141 /* XXX this whole function should be more robust */ 1142 1143 /* XXX read the host diagnostic reg until reset adapter bit clears ? */ 1144 for (i = 0; i < 30000; i++) { 1145 if ((mpii_read(sc, MPII_HOSTDIAG) & 1146 MPII_HOSTDIAG_RESET_ADAPTER) == 0) 1147 break; 1148 delay(10000); 1149 } 1150 1151 /* disable diagnostic register */ 1152 mpii_write(sc, MPII_WRITESEQ, 0xff); 1153 1154 /* XXX what else? */ 1155 1156 DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc)); 1157 1158 return(0); 1159 } 1160 1161 int 1162 mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords) 1163 { 1164 u_int32_t *query = buf; 1165 int i; 1166 1167 /* make sure the doorbell is not in use. */ 1168 if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) 1169 return (1); 1170 1171 /* clear pending doorbell interrupts */ 1172 if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB) 1173 mpii_write_intr(sc, 0); 1174 1175 /* 1176 * first write the doorbell with the handshake function and the 1177 * dword count. 1178 */ 1179 mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) | 1180 MPII_DOORBELL_DWORDS(dwords)); 1181 1182 /* 1183 * the doorbell used bit will be set because a doorbell function has 1184 * started. wait for the interrupt and then ack it. 1185 */ 1186 if (mpii_wait_db_int(sc) != 0) 1187 return (1); 1188 mpii_write_intr(sc, 0); 1189 1190 /* poll for the acknowledgement. */ 1191 if (mpii_wait_db_ack(sc) != 0) 1192 return (1); 1193 1194 /* write the query through the doorbell. */ 1195 for (i = 0; i < dwords; i++) { 1196 mpii_write_db(sc, htole32(query[i])); 1197 if (mpii_wait_db_ack(sc) != 0) 1198 return (1); 1199 } 1200 1201 return (0); 1202 } 1203 1204 int 1205 mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword) 1206 { 1207 u_int16_t *words = (u_int16_t *)dword; 1208 int i; 1209 1210 for (i = 0; i < 2; i++) { 1211 if (mpii_wait_db_int(sc) != 0) 1212 return (1); 1213 words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK); 1214 mpii_write_intr(sc, 0); 1215 } 1216 1217 return (0); 1218 } 1219 1220 int 1221 mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords) 1222 { 1223 struct mpii_msg_reply *reply = buf; 1224 u_int32_t *dbuf = buf, dummy; 1225 int i; 1226 1227 /* get the first dword so we can read the length out of the header. */ 1228 if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0) 1229 return (1); 1230 1231 DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n", 1232 DEVNAME(sc), dwords, reply->msg_length); 1233 1234 /* 1235 * the total length, in dwords, is in the message length field of the 1236 * reply header. 1237 */ 1238 for (i = 1; i < MIN(dwords, reply->msg_length); i++) { 1239 if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0) 1240 return (1); 1241 } 1242 1243 /* if there's extra stuff to come off the ioc, discard it */ 1244 while (i++ < reply->msg_length) { 1245 if (mpii_handshake_recv_dword(sc, &dummy) != 0) 1246 return (1); 1247 DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: " 1248 "0x%08x\n", DEVNAME(sc), dummy); 1249 } 1250 1251 /* wait for the doorbell used bit to be reset and clear the intr */ 1252 if (mpii_wait_db_int(sc) != 0) 1253 return (1); 1254 1255 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0) 1256 return (1); 1257 1258 mpii_write_intr(sc, 0); 1259 1260 return (0); 1261 } 1262 1263 void 1264 mpii_empty_done(struct mpii_ccb *ccb) 1265 { 1266 /* nothing to do */ 1267 } 1268 1269 int 1270 mpii_iocfacts(struct mpii_softc *sc) 1271 { 1272 struct mpii_msg_iocfacts_request ifq; 1273 struct mpii_msg_iocfacts_reply ifp; 1274 int irs; 1275 int sge_size; 1276 u_int qdepth; 1277 1278 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc)); 1279 1280 memset(&ifq, 0, sizeof(ifq)); 1281 memset(&ifp, 0, sizeof(ifp)); 1282 1283 ifq.function = MPII_FUNCTION_IOC_FACTS; 1284 1285 if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) { 1286 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n", 1287 DEVNAME(sc)); 1288 return (1); 1289 } 1290 1291 if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) { 1292 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n", 1293 DEVNAME(sc)); 1294 return (1); 1295 } 1296 1297 sc->sc_ioc_number = ifp.ioc_number; 1298 sc->sc_vf_id = ifp.vf_id; 1299 1300 sc->sc_max_volumes = ifp.max_volumes; 1301 sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets); 1302 1303 if (ISSET(lemtoh32(&ifp.ioc_capabilities), 1304 MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 1305 SET(sc->sc_flags, MPII_F_RAID); 1306 if (ISSET(lemtoh32(&ifp.ioc_capabilities), 1307 MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY)) 1308 sc->sc_ioc_event_replay = 1; 1309 1310 sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit), 1311 MPII_REQUEST_CREDIT); 1312 1313 /* SAS3 and 3.5 controllers have different sgl layouts */ 1314 if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5) 1315 || (ifp.msg_version_min == 6))) 1316 SET(sc->sc_flags, MPII_F_SAS3); 1317 1318 /* 1319 * The host driver must ensure that there is at least one 1320 * unused entry in the Reply Free Queue. One way to ensure 1321 * that this requirement is met is to never allocate a number 1322 * of reply frames that is a multiple of 16. 1323 */ 1324 sc->sc_num_reply_frames = sc->sc_max_cmds + 32; 1325 if (!(sc->sc_num_reply_frames % 16)) 1326 sc->sc_num_reply_frames--; 1327 1328 /* must be multiple of 16 */ 1329 sc->sc_reply_post_qdepth = sc->sc_max_cmds + 1330 sc->sc_num_reply_frames; 1331 sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16); 1332 1333 qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth); 1334 if (sc->sc_reply_post_qdepth > qdepth) { 1335 sc->sc_reply_post_qdepth = qdepth; 1336 if (sc->sc_reply_post_qdepth < 16) { 1337 printf("%s: RDPQ is too shallow\n", DEVNAME(sc)); 1338 return (1); 1339 } 1340 sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4; 1341 sc->sc_num_reply_frames = sc->sc_max_cmds + 4; 1342 } 1343 1344 sc->sc_reply_free_qdepth = sc->sc_num_reply_frames + 1345 16 - (sc->sc_num_reply_frames % 16); 1346 1347 /* 1348 * Our request frame for an I/O operation looks like this: 1349 * 1350 * +-------------------+ -. 1351 * | mpii_msg_scsi_io | | 1352 * +-------------------| | 1353 * | mpii_sge | | 1354 * + - - - - - - - - - + | 1355 * | ... | > ioc_request_frame_size 1356 * + - - - - - - - - - + | 1357 * | mpii_sge (tail) | | 1358 * + - - - - - - - - - + | 1359 * | mpii_sge (csge) | | --. 1360 * + - - - - - - - - - + -' | chain sge points to the next sge 1361 * | mpii_sge |<-----' 1362 * + - - - - - - - - - + 1363 * | ... | 1364 * + - - - - - - - - - + 1365 * | mpii_sge (tail) | 1366 * +-------------------+ 1367 * | | 1368 * ~~~~~~~~~~~~~~~~~~~~~ 1369 * | | 1370 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data) 1371 * | scsi_sense_data | 1372 * +-------------------+ 1373 * 1374 * If the controller gives us a maximum chain size, there can be 1375 * multiple chain sges, each of which points to the sge following it. 1376 * Otherwise, there will only be one chain sge. 1377 */ 1378 1379 /* both sizes are in 32-bit words */ 1380 sc->sc_reply_size = ifp.reply_frame_size * 4; 1381 irs = lemtoh16(&ifp.ioc_request_frame_size) * 4; 1382 sc->sc_request_size = MPII_REQUEST_SIZE; 1383 /* make sure we have enough space for scsi sense data */ 1384 if (irs > sc->sc_request_size) { 1385 sc->sc_request_size = irs + sizeof(struct scsi_sense_data); 1386 sc->sc_request_size += 16 - (sc->sc_request_size % 16); 1387 } 1388 1389 if (ISSET(sc->sc_flags, MPII_F_SAS3)) { 1390 sge_size = sizeof(struct mpii_ieee_sge); 1391 } else { 1392 sge_size = sizeof(struct mpii_sge); 1393 } 1394 1395 /* offset to the chain sge */ 1396 sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) / 1397 sge_size - 1; 1398 1399 sc->sc_max_chain = lemtoh16(&ifp.ioc_max_chain_seg_size); 1400 1401 /* 1402 * A number of simple scatter-gather elements we can fit into the 1403 * request buffer after the I/O command minus the chain element(s). 1404 */ 1405 sc->sc_max_sgl = (sc->sc_request_size - 1406 sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) / 1407 sge_size - 1; 1408 if (sc->sc_max_chain > 0) { 1409 sc->sc_max_sgl -= (sc->sc_max_sgl - sc->sc_chain_sge) / 1410 sc->sc_max_chain; 1411 } 1412 1413 return (0); 1414 } 1415 1416 int 1417 mpii_iocinit(struct mpii_softc *sc) 1418 { 1419 struct mpii_msg_iocinit_request iiq; 1420 struct mpii_msg_iocinit_reply iip; 1421 1422 DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc)); 1423 1424 memset(&iiq, 0, sizeof(iiq)); 1425 memset(&iip, 0, sizeof(iip)); 1426 1427 iiq.function = MPII_FUNCTION_IOC_INIT; 1428 iiq.whoinit = MPII_WHOINIT_HOST_DRIVER; 1429 1430 /* XXX JPG do something about vf_id */ 1431 iiq.vf_id = 0; 1432 1433 iiq.msg_version_maj = 0x02; 1434 iiq.msg_version_min = 0x00; 1435 1436 /* XXX JPG ensure compliance with some level and hard-code? */ 1437 iiq.hdr_version_unit = 0x00; 1438 iiq.hdr_version_dev = 0x00; 1439 1440 htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4); 1441 1442 htolem16(&iiq.reply_descriptor_post_queue_depth, 1443 sc->sc_reply_post_qdepth); 1444 1445 htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth); 1446 1447 htolem32(&iiq.sense_buffer_address_high, 1448 MPII_DMA_DVA(sc->sc_requests) >> 32); 1449 1450 htolem32(&iiq.system_reply_address_high, 1451 MPII_DMA_DVA(sc->sc_replies) >> 32); 1452 1453 htolem32(&iiq.system_request_frame_base_address_lo, 1454 MPII_DMA_DVA(sc->sc_requests)); 1455 htolem32(&iiq.system_request_frame_base_address_hi, 1456 MPII_DMA_DVA(sc->sc_requests) >> 32); 1457 1458 htolem32(&iiq.reply_descriptor_post_queue_address_lo, 1459 MPII_DMA_DVA(sc->sc_reply_postq)); 1460 htolem32(&iiq.reply_descriptor_post_queue_address_hi, 1461 MPII_DMA_DVA(sc->sc_reply_postq) >> 32); 1462 1463 htolem32(&iiq.reply_free_queue_address_lo, 1464 MPII_DMA_DVA(sc->sc_reply_freeq)); 1465 htolem32(&iiq.reply_free_queue_address_hi, 1466 MPII_DMA_DVA(sc->sc_reply_freeq) >> 32); 1467 1468 if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) { 1469 DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n", 1470 DEVNAME(sc)); 1471 return (1); 1472 } 1473 1474 if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) { 1475 DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n", 1476 DEVNAME(sc)); 1477 return (1); 1478 } 1479 1480 DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d " 1481 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function, 1482 iip.msg_length, iip.whoinit); 1483 DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc), 1484 iip.msg_flags); 1485 DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc), 1486 iip.vf_id, iip.vp_id); 1487 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 1488 lemtoh16(&iip.ioc_status)); 1489 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1490 lemtoh32(&iip.ioc_loginfo)); 1491 1492 if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS || 1493 lemtoh32(&iip.ioc_loginfo)) 1494 return (1); 1495 1496 return (0); 1497 } 1498 1499 void 1500 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb) 1501 { 1502 u_int32_t *rfp; 1503 u_int idx; 1504 1505 if (rcb == NULL) 1506 return; 1507 1508 idx = sc->sc_reply_free_host_index; 1509 1510 rfp = MPII_DMA_KVA(sc->sc_reply_freeq); 1511 htolem32(&rfp[idx], rcb->rcb_reply_dva); 1512 1513 if (++idx >= sc->sc_reply_free_qdepth) 1514 idx = 0; 1515 1516 mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx); 1517 } 1518 1519 int 1520 mpii_portfacts(struct mpii_softc *sc) 1521 { 1522 struct mpii_msg_portfacts_request *pfq; 1523 struct mpii_msg_portfacts_reply *pfp; 1524 struct mpii_ccb *ccb; 1525 int rv = 1; 1526 1527 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc)); 1528 1529 ccb = scsi_io_get(&sc->sc_iopool, 0); 1530 if (ccb == NULL) { 1531 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n", 1532 DEVNAME(sc)); 1533 return (rv); 1534 } 1535 1536 ccb->ccb_done = mpii_empty_done; 1537 pfq = ccb->ccb_cmd; 1538 1539 memset(pfq, 0, sizeof(*pfq)); 1540 1541 pfq->function = MPII_FUNCTION_PORT_FACTS; 1542 pfq->chain_offset = 0; 1543 pfq->msg_flags = 0; 1544 pfq->port_number = 0; 1545 pfq->vp_id = 0; 1546 pfq->vf_id = 0; 1547 1548 if (mpii_poll(sc, ccb) != 0) { 1549 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n", 1550 DEVNAME(sc)); 1551 goto err; 1552 } 1553 1554 if (ccb->ccb_rcb == NULL) { 1555 DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n", 1556 DEVNAME(sc)); 1557 goto err; 1558 } 1559 1560 pfp = ccb->ccb_rcb->rcb_reply; 1561 sc->sc_porttype = pfp->port_type; 1562 1563 mpii_push_reply(sc, ccb->ccb_rcb); 1564 rv = 0; 1565 err: 1566 scsi_io_put(&sc->sc_iopool, ccb); 1567 1568 return (rv); 1569 } 1570 1571 void 1572 mpii_eventack(void *cookie, void *io) 1573 { 1574 struct mpii_softc *sc = cookie; 1575 struct mpii_ccb *ccb = io; 1576 struct mpii_rcb *rcb, *next; 1577 struct mpii_msg_event_reply *enp; 1578 struct mpii_msg_eventack_request *eaq; 1579 1580 mtx_enter(&sc->sc_evt_ack_mtx); 1581 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue); 1582 if (rcb != NULL) { 1583 next = SIMPLEQ_NEXT(rcb, rcb_link); 1584 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link); 1585 } 1586 mtx_leave(&sc->sc_evt_ack_mtx); 1587 1588 if (rcb == NULL) { 1589 scsi_io_put(&sc->sc_iopool, ccb); 1590 return; 1591 } 1592 1593 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply; 1594 1595 ccb->ccb_done = mpii_eventack_done; 1596 eaq = ccb->ccb_cmd; 1597 1598 eaq->function = MPII_FUNCTION_EVENT_ACK; 1599 1600 eaq->event = enp->event; 1601 eaq->event_context = enp->event_context; 1602 1603 mpii_push_reply(sc, rcb); 1604 1605 mpii_start(sc, ccb); 1606 1607 if (next != NULL) 1608 scsi_ioh_add(&sc->sc_evt_ack_handler); 1609 } 1610 1611 void 1612 mpii_eventack_done(struct mpii_ccb *ccb) 1613 { 1614 struct mpii_softc *sc = ccb->ccb_sc; 1615 1616 DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 1617 1618 mpii_push_reply(sc, ccb->ccb_rcb); 1619 scsi_io_put(&sc->sc_iopool, ccb); 1620 } 1621 1622 int 1623 mpii_portenable(struct mpii_softc *sc) 1624 { 1625 struct mpii_msg_portenable_request *peq; 1626 struct mpii_ccb *ccb; 1627 1628 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc)); 1629 1630 ccb = scsi_io_get(&sc->sc_iopool, 0); 1631 if (ccb == NULL) { 1632 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n", 1633 DEVNAME(sc)); 1634 return (1); 1635 } 1636 1637 ccb->ccb_done = mpii_empty_done; 1638 peq = ccb->ccb_cmd; 1639 1640 peq->function = MPII_FUNCTION_PORT_ENABLE; 1641 peq->vf_id = sc->sc_vf_id; 1642 1643 if (mpii_poll(sc, ccb) != 0) { 1644 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n", 1645 DEVNAME(sc)); 1646 return (1); 1647 } 1648 1649 if (ccb->ccb_rcb == NULL) { 1650 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n", 1651 DEVNAME(sc)); 1652 return (1); 1653 } 1654 1655 mpii_push_reply(sc, ccb->ccb_rcb); 1656 scsi_io_put(&sc->sc_iopool, ccb); 1657 1658 return (0); 1659 } 1660 1661 int 1662 mpii_cfg_coalescing(struct mpii_softc *sc) 1663 { 1664 struct mpii_cfg_hdr hdr; 1665 struct mpii_cfg_ioc_pg1 ipg; 1666 1667 hdr.page_version = 0; 1668 hdr.page_length = sizeof(ipg) / 4; 1669 hdr.page_number = 1; 1670 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC; 1671 memset(&ipg, 0, sizeof(ipg)); 1672 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg, 1673 sizeof(ipg)) != 0) { 1674 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n" 1675 "page 1\n", DEVNAME(sc)); 1676 return (1); 1677 } 1678 1679 if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING)) 1680 return (0); 1681 1682 /* Disable coalescing */ 1683 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING)); 1684 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg, 1685 sizeof(ipg)) != 0) { 1686 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n", 1687 DEVNAME(sc)); 1688 return (1); 1689 } 1690 1691 return (0); 1692 } 1693 1694 #define MPII_EVENT_MASKALL(enq) do { \ 1695 enq->event_masks[0] = 0xffffffff; \ 1696 enq->event_masks[1] = 0xffffffff; \ 1697 enq->event_masks[2] = 0xffffffff; \ 1698 enq->event_masks[3] = 0xffffffff; \ 1699 } while (0) 1700 1701 #define MPII_EVENT_UNMASK(enq, evt) do { \ 1702 enq->event_masks[evt / 32] &= \ 1703 htole32(~(1 << (evt % 32))); \ 1704 } while (0) 1705 1706 int 1707 mpii_eventnotify(struct mpii_softc *sc) 1708 { 1709 struct mpii_msg_event_request *enq; 1710 struct mpii_ccb *ccb; 1711 1712 ccb = scsi_io_get(&sc->sc_iopool, 0); 1713 if (ccb == NULL) { 1714 DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n", 1715 DEVNAME(sc)); 1716 return (1); 1717 } 1718 1719 SIMPLEQ_INIT(&sc->sc_evt_sas_queue); 1720 mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO); 1721 task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc); 1722 1723 SIMPLEQ_INIT(&sc->sc_evt_ack_queue); 1724 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO); 1725 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool, 1726 mpii_eventack, sc); 1727 1728 ccb->ccb_done = mpii_eventnotify_done; 1729 enq = ccb->ccb_cmd; 1730 1731 enq->function = MPII_FUNCTION_EVENT_NOTIFICATION; 1732 1733 /* 1734 * Enable reporting of the following events: 1735 * 1736 * MPII_EVENT_SAS_DISCOVERY 1737 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST 1738 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE 1739 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE 1740 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST 1741 * MPII_EVENT_IR_VOLUME 1742 * MPII_EVENT_IR_PHYSICAL_DISK 1743 * MPII_EVENT_IR_OPERATION_STATUS 1744 */ 1745 1746 MPII_EVENT_MASKALL(enq); 1747 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY); 1748 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST); 1749 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE); 1750 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); 1751 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST); 1752 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME); 1753 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK); 1754 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS); 1755 1756 mpii_start(sc, ccb); 1757 1758 return (0); 1759 } 1760 1761 void 1762 mpii_eventnotify_done(struct mpii_ccb *ccb) 1763 { 1764 struct mpii_softc *sc = ccb->ccb_sc; 1765 struct mpii_rcb *rcb = ccb->ccb_rcb; 1766 1767 DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc)); 1768 1769 scsi_io_put(&sc->sc_iopool, ccb); 1770 mpii_event_process(sc, rcb); 1771 } 1772 1773 void 1774 mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp) 1775 { 1776 struct mpii_evt_ir_cfg_change_list *ccl; 1777 struct mpii_evt_ir_cfg_element *ce; 1778 struct mpii_device *dev; 1779 u_int16_t type; 1780 int i; 1781 1782 ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1); 1783 if (ccl->num_elements == 0) 1784 return; 1785 1786 if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) { 1787 /* bail on foreign configurations */ 1788 return; 1789 } 1790 1791 ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1); 1792 1793 for (i = 0; i < ccl->num_elements; i++, ce++) { 1794 type = (lemtoh16(&ce->element_flags) & 1795 MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK); 1796 1797 switch (type) { 1798 case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME: 1799 switch (ce->reason_code) { 1800 case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED: 1801 case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED: 1802 if (mpii_find_dev(sc, 1803 lemtoh16(&ce->vol_dev_handle))) { 1804 printf("%s: device %#x is already " 1805 "configured\n", DEVNAME(sc), 1806 lemtoh16(&ce->vol_dev_handle)); 1807 break; 1808 } 1809 dev = malloc(sizeof(*dev), M_DEVBUF, 1810 M_NOWAIT | M_ZERO); 1811 if (!dev) { 1812 printf("%s: failed to allocate a " 1813 "device structure\n", DEVNAME(sc)); 1814 break; 1815 } 1816 SET(dev->flags, MPII_DF_VOLUME); 1817 dev->slot = sc->sc_vd_id_low; 1818 dev->dev_handle = lemtoh16(&ce->vol_dev_handle); 1819 if (mpii_insert_dev(sc, dev)) { 1820 free(dev, M_DEVBUF, sizeof *dev); 1821 break; 1822 } 1823 sc->sc_vd_count++; 1824 break; 1825 case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED: 1826 case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED: 1827 if (!(dev = mpii_find_dev(sc, 1828 lemtoh16(&ce->vol_dev_handle)))) 1829 break; 1830 mpii_remove_dev(sc, dev); 1831 sc->sc_vd_count--; 1832 break; 1833 } 1834 break; 1835 case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK: 1836 if (ce->reason_code == 1837 MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED || 1838 ce->reason_code == 1839 MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) { 1840 /* there should be an underlying sas drive */ 1841 if (!(dev = mpii_find_dev(sc, 1842 lemtoh16(&ce->phys_disk_dev_handle)))) 1843 break; 1844 /* promoted from a hot spare? */ 1845 CLR(dev->flags, MPII_DF_HOT_SPARE); 1846 SET(dev->flags, MPII_DF_VOLUME_DISK | 1847 MPII_DF_HIDDEN); 1848 } 1849 break; 1850 case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE: 1851 if (ce->reason_code == 1852 MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) { 1853 /* there should be an underlying sas drive */ 1854 if (!(dev = mpii_find_dev(sc, 1855 lemtoh16(&ce->phys_disk_dev_handle)))) 1856 break; 1857 SET(dev->flags, MPII_DF_HOT_SPARE | 1858 MPII_DF_HIDDEN); 1859 } 1860 break; 1861 } 1862 } 1863 } 1864 1865 void 1866 mpii_event_sas(void *xsc) 1867 { 1868 struct mpii_softc *sc = xsc; 1869 struct mpii_rcb *rcb, *next; 1870 struct mpii_msg_event_reply *enp; 1871 struct mpii_evt_sas_tcl *tcl; 1872 struct mpii_evt_phy_entry *pe; 1873 struct mpii_device *dev; 1874 int i; 1875 u_int16_t handle; 1876 1877 mtx_enter(&sc->sc_evt_sas_mtx); 1878 rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue); 1879 if (rcb != NULL) { 1880 next = SIMPLEQ_NEXT(rcb, rcb_link); 1881 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link); 1882 } 1883 mtx_leave(&sc->sc_evt_sas_mtx); 1884 1885 if (rcb == NULL) 1886 return; 1887 if (next != NULL) 1888 task_add(systq, &sc->sc_evt_sas_task); 1889 1890 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply; 1891 switch (lemtoh16(&enp->event)) { 1892 case MPII_EVENT_SAS_DISCOVERY: 1893 mpii_event_discovery(sc, enp); 1894 goto done; 1895 case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1896 /* handle below */ 1897 break; 1898 default: 1899 panic("%s: unexpected event %#x in sas event queue", 1900 DEVNAME(sc), lemtoh16(&enp->event)); 1901 /* NOTREACHED */ 1902 } 1903 1904 tcl = (struct mpii_evt_sas_tcl *)(enp + 1); 1905 pe = (struct mpii_evt_phy_entry *)(tcl + 1); 1906 1907 for (i = 0; i < tcl->num_entries; i++, pe++) { 1908 switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) { 1909 case MPII_EVENT_SAS_TOPO_PS_RC_ADDED: 1910 handle = lemtoh16(&pe->dev_handle); 1911 if (mpii_find_dev(sc, handle)) { 1912 printf("%s: device %#x is already " 1913 "configured\n", DEVNAME(sc), handle); 1914 break; 1915 } 1916 1917 dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO); 1918 dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i; 1919 dev->dev_handle = handle; 1920 dev->phy_num = tcl->start_phy_num + i; 1921 if (tcl->enclosure_handle) 1922 dev->physical_port = tcl->physical_port; 1923 dev->enclosure = lemtoh16(&tcl->enclosure_handle); 1924 dev->expander = lemtoh16(&tcl->expander_handle); 1925 1926 if (mpii_insert_dev(sc, dev)) { 1927 free(dev, M_DEVBUF, sizeof *dev); 1928 break; 1929 } 1930 1931 if (sc->sc_scsibus != NULL) 1932 scsi_probe_target(sc->sc_scsibus, dev->slot); 1933 break; 1934 1935 case MPII_EVENT_SAS_TOPO_PS_RC_MISSING: 1936 dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle)); 1937 if (dev == NULL) 1938 break; 1939 1940 mpii_remove_dev(sc, dev); 1941 mpii_sas_remove_device(sc, dev->dev_handle); 1942 if (sc->sc_scsibus != NULL && 1943 !ISSET(dev->flags, MPII_DF_HIDDEN)) { 1944 scsi_activate(sc->sc_scsibus, dev->slot, -1, 1945 DVACT_DEACTIVATE); 1946 scsi_detach_target(sc->sc_scsibus, dev->slot, 1947 DETACH_FORCE); 1948 } 1949 1950 free(dev, M_DEVBUF, sizeof *dev); 1951 break; 1952 } 1953 } 1954 1955 done: 1956 mpii_event_done(sc, rcb); 1957 } 1958 1959 void 1960 mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp) 1961 { 1962 struct mpii_evt_sas_discovery *esd = 1963 (struct mpii_evt_sas_discovery *)(enp + 1); 1964 1965 if (sc->sc_pending == 0) 1966 return; 1967 1968 switch (esd->reason_code) { 1969 case MPII_EVENT_SAS_DISC_REASON_CODE_STARTED: 1970 ++sc->sc_pending; 1971 break; 1972 case MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED: 1973 if (--sc->sc_pending == 1) { 1974 sc->sc_pending = 0; 1975 config_pending_decr(); 1976 } 1977 break; 1978 } 1979 } 1980 1981 void 1982 mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb) 1983 { 1984 struct mpii_msg_event_reply *enp; 1985 1986 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply; 1987 1988 DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc), 1989 lemtoh16(&enp->event)); 1990 1991 switch (lemtoh16(&enp->event)) { 1992 case MPII_EVENT_EVENT_CHANGE: 1993 /* should be properly ignored */ 1994 break; 1995 case MPII_EVENT_SAS_DISCOVERY: 1996 case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1997 mtx_enter(&sc->sc_evt_sas_mtx); 1998 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link); 1999 mtx_leave(&sc->sc_evt_sas_mtx); 2000 task_add(systq, &sc->sc_evt_sas_task); 2001 return; 2002 case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE: 2003 break; 2004 case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 2005 break; 2006 case MPII_EVENT_IR_VOLUME: { 2007 struct mpii_evt_ir_volume *evd = 2008 (struct mpii_evt_ir_volume *)(enp + 1); 2009 struct mpii_device *dev; 2010 #if NBIO > 0 2011 const char *vol_states[] = { 2012 BIOC_SVINVALID_S, 2013 BIOC_SVOFFLINE_S, 2014 BIOC_SVBUILDING_S, 2015 BIOC_SVONLINE_S, 2016 BIOC_SVDEGRADED_S, 2017 BIOC_SVONLINE_S, 2018 }; 2019 #endif 2020 2021 if (cold) 2022 break; 2023 KERNEL_LOCK(); 2024 dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle)); 2025 KERNEL_UNLOCK(); 2026 if (dev == NULL) 2027 break; 2028 #if NBIO > 0 2029 if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED) 2030 printf("%s: volume %d state changed from %s to %s\n", 2031 DEVNAME(sc), dev->slot - sc->sc_vd_id_low, 2032 vol_states[evd->prev_value], 2033 vol_states[evd->new_value]); 2034 #endif 2035 if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED && 2036 ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) && 2037 !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) 2038 printf("%s: started resync on a volume %d\n", 2039 DEVNAME(sc), dev->slot - sc->sc_vd_id_low); 2040 } 2041 break; 2042 case MPII_EVENT_IR_PHYSICAL_DISK: 2043 break; 2044 case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST: 2045 mpii_event_raid(sc, enp); 2046 break; 2047 case MPII_EVENT_IR_OPERATION_STATUS: { 2048 struct mpii_evt_ir_status *evs = 2049 (struct mpii_evt_ir_status *)(enp + 1); 2050 struct mpii_device *dev; 2051 2052 KERNEL_LOCK(); 2053 dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle)); 2054 KERNEL_UNLOCK(); 2055 if (dev != NULL && 2056 evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC) 2057 dev->percent = evs->percent; 2058 break; 2059 } 2060 default: 2061 DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n", 2062 DEVNAME(sc), lemtoh16(&enp->event)); 2063 } 2064 2065 mpii_event_done(sc, rcb); 2066 } 2067 2068 void 2069 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb) 2070 { 2071 struct mpii_msg_event_reply *enp = rcb->rcb_reply; 2072 2073 if (enp->ack_required) { 2074 mtx_enter(&sc->sc_evt_ack_mtx); 2075 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link); 2076 mtx_leave(&sc->sc_evt_ack_mtx); 2077 scsi_ioh_add(&sc->sc_evt_ack_handler); 2078 } else 2079 mpii_push_reply(sc, rcb); 2080 } 2081 2082 void 2083 mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle) 2084 { 2085 struct mpii_msg_scsi_task_request *stq; 2086 struct mpii_msg_sas_oper_request *soq; 2087 struct mpii_ccb *ccb; 2088 2089 ccb = scsi_io_get(&sc->sc_iopool, 0); 2090 if (ccb == NULL) 2091 return; 2092 2093 stq = ccb->ccb_cmd; 2094 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT; 2095 stq->task_type = MPII_SCSI_TASK_TARGET_RESET; 2096 htolem16(&stq->dev_handle, handle); 2097 2098 ccb->ccb_done = mpii_empty_done; 2099 mpii_wait(sc, ccb); 2100 2101 if (ccb->ccb_rcb != NULL) 2102 mpii_push_reply(sc, ccb->ccb_rcb); 2103 2104 /* reuse a ccb */ 2105 ccb->ccb_state = MPII_CCB_READY; 2106 ccb->ccb_rcb = NULL; 2107 2108 soq = ccb->ccb_cmd; 2109 memset(soq, 0, sizeof(*soq)); 2110 soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL; 2111 soq->operation = MPII_SAS_OP_REMOVE_DEVICE; 2112 htolem16(&soq->dev_handle, handle); 2113 2114 ccb->ccb_done = mpii_empty_done; 2115 mpii_wait(sc, ccb); 2116 if (ccb->ccb_rcb != NULL) 2117 mpii_push_reply(sc, ccb->ccb_rcb); 2118 2119 scsi_io_put(&sc->sc_iopool, ccb); 2120 } 2121 2122 int 2123 mpii_board_info(struct mpii_softc *sc) 2124 { 2125 struct mpii_msg_iocfacts_request ifq; 2126 struct mpii_msg_iocfacts_reply ifp; 2127 struct mpii_cfg_manufacturing_pg0 mpg; 2128 struct mpii_cfg_hdr hdr; 2129 2130 memset(&ifq, 0, sizeof(ifq)); 2131 memset(&ifp, 0, sizeof(ifp)); 2132 2133 ifq.function = MPII_FUNCTION_IOC_FACTS; 2134 2135 if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) { 2136 DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n", 2137 DEVNAME(sc)); 2138 return (1); 2139 } 2140 2141 if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) { 2142 DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n", 2143 DEVNAME(sc)); 2144 return (1); 2145 } 2146 2147 hdr.page_version = 0; 2148 hdr.page_length = sizeof(mpg) / 4; 2149 hdr.page_number = 0; 2150 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING; 2151 memset(&mpg, 0, sizeof(mpg)); 2152 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg, 2153 sizeof(mpg)) != 0) { 2154 printf("%s: unable to fetch manufacturing page 0\n", 2155 DEVNAME(sc)); 2156 return (EINVAL); 2157 } 2158 2159 printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc), 2160 mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min, 2161 ifp.fw_version_unit, ifp.fw_version_dev, 2162 ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "", 2163 ifp.msg_version_maj, ifp.msg_version_min); 2164 2165 return (0); 2166 } 2167 2168 int 2169 mpii_target_map(struct mpii_softc *sc) 2170 { 2171 struct mpii_cfg_hdr hdr; 2172 struct mpii_cfg_ioc_pg8 ipg; 2173 int flags, pad = 0; 2174 2175 hdr.page_version = 0; 2176 hdr.page_length = sizeof(ipg) / 4; 2177 hdr.page_number = 8; 2178 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC; 2179 memset(&ipg, 0, sizeof(ipg)); 2180 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg, 2181 sizeof(ipg)) != 0) { 2182 printf("%s: unable to fetch ioc page 8\n", 2183 DEVNAME(sc)); 2184 return (EINVAL); 2185 } 2186 2187 if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0) 2188 pad = 1; 2189 2190 flags = lemtoh16(&ipg.ir_volume_mapping_flags) & 2191 MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK; 2192 if (ISSET(sc->sc_flags, MPII_F_RAID)) { 2193 if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) { 2194 sc->sc_vd_id_low += pad; 2195 pad = sc->sc_max_volumes; /* for sc_pd_id_start */ 2196 } else 2197 sc->sc_vd_id_low = sc->sc_max_devices - 2198 sc->sc_max_volumes; 2199 } 2200 2201 sc->sc_pd_id_start += pad; 2202 2203 return (0); 2204 } 2205 2206 int 2207 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number, 2208 u_int32_t address, int flags, void *p) 2209 { 2210 struct mpii_msg_config_request *cq; 2211 struct mpii_msg_config_reply *cp; 2212 struct mpii_ccb *ccb; 2213 struct mpii_cfg_hdr *hdr = p; 2214 struct mpii_ecfg_hdr *ehdr = p; 2215 int etype = 0; 2216 int rv = 0; 2217 2218 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x " 2219 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number, 2220 address, flags, MPII_PG_FMT); 2221 2222 ccb = scsi_io_get(&sc->sc_iopool, 2223 ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0); 2224 if (ccb == NULL) { 2225 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n", 2226 DEVNAME(sc)); 2227 return (1); 2228 } 2229 2230 if (ISSET(flags, MPII_PG_EXTENDED)) { 2231 etype = type; 2232 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED; 2233 } 2234 2235 cq = ccb->ccb_cmd; 2236 2237 cq->function = MPII_FUNCTION_CONFIG; 2238 2239 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER; 2240 2241 cq->config_header.page_number = number; 2242 cq->config_header.page_type = type; 2243 cq->ext_page_type = etype; 2244 htolem32(&cq->page_address, address); 2245 htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE | 2246 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL); 2247 2248 ccb->ccb_done = mpii_empty_done; 2249 if (ISSET(flags, MPII_PG_POLL)) { 2250 if (mpii_poll(sc, ccb) != 0) { 2251 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n", 2252 DEVNAME(sc)); 2253 return (1); 2254 } 2255 } else 2256 mpii_wait(sc, ccb); 2257 2258 if (ccb->ccb_rcb == NULL) { 2259 scsi_io_put(&sc->sc_iopool, ccb); 2260 return (1); 2261 } 2262 cp = ccb->ccb_rcb->rcb_reply; 2263 2264 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x " 2265 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action, 2266 cp->sgl_flags, cp->msg_length, cp->function); 2267 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2268 "msg_flags: 0x%02x\n", DEVNAME(sc), 2269 lemtoh16(&cp->ext_page_length), cp->ext_page_type, 2270 cp->msg_flags); 2271 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc), 2272 cp->vp_id, cp->vf_id); 2273 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2274 lemtoh16(&cp->ioc_status)); 2275 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2276 lemtoh32(&cp->ioc_loginfo)); 2277 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2278 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2279 cp->config_header.page_version, 2280 cp->config_header.page_length, 2281 cp->config_header.page_number, 2282 cp->config_header.page_type); 2283 2284 if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS) 2285 rv = 1; 2286 else if (ISSET(flags, MPII_PG_EXTENDED)) { 2287 memset(ehdr, 0, sizeof(*ehdr)); 2288 ehdr->page_version = cp->config_header.page_version; 2289 ehdr->page_number = cp->config_header.page_number; 2290 ehdr->page_type = cp->config_header.page_type; 2291 ehdr->ext_page_length = cp->ext_page_length; 2292 ehdr->ext_page_type = cp->ext_page_type; 2293 } else 2294 *hdr = cp->config_header; 2295 2296 mpii_push_reply(sc, ccb->ccb_rcb); 2297 scsi_io_put(&sc->sc_iopool, ccb); 2298 2299 return (rv); 2300 } 2301 2302 int 2303 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags, 2304 void *p, int read, void *page, size_t len) 2305 { 2306 struct mpii_msg_config_request *cq; 2307 struct mpii_msg_config_reply *cp; 2308 struct mpii_ccb *ccb; 2309 struct mpii_cfg_hdr *hdr = p; 2310 struct mpii_ecfg_hdr *ehdr = p; 2311 caddr_t kva; 2312 int page_length; 2313 int rv = 0; 2314 2315 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d " 2316 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type); 2317 2318 page_length = ISSET(flags, MPII_PG_EXTENDED) ? 2319 lemtoh16(&ehdr->ext_page_length) : hdr->page_length; 2320 2321 if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4) 2322 return (1); 2323 2324 ccb = scsi_io_get(&sc->sc_iopool, 2325 ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0); 2326 if (ccb == NULL) { 2327 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n", 2328 DEVNAME(sc)); 2329 return (1); 2330 } 2331 2332 cq = ccb->ccb_cmd; 2333 2334 cq->function = MPII_FUNCTION_CONFIG; 2335 2336 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2337 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2338 2339 if (ISSET(flags, MPII_PG_EXTENDED)) { 2340 cq->config_header.page_version = ehdr->page_version; 2341 cq->config_header.page_number = ehdr->page_number; 2342 cq->config_header.page_type = ehdr->page_type; 2343 cq->ext_page_len = ehdr->ext_page_length; 2344 cq->ext_page_type = ehdr->ext_page_type; 2345 } else 2346 cq->config_header = *hdr; 2347 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK; 2348 htolem32(&cq->page_address, address); 2349 htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE | 2350 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL | 2351 MPII_SGE_FL_SIZE_64 | (page_length * 4) | 2352 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT)); 2353 2354 /* bounce the page via the request space to avoid more bus_dma games */ 2355 mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva + 2356 sizeof(struct mpii_msg_config_request)); 2357 2358 kva = ccb->ccb_cmd; 2359 kva += sizeof(struct mpii_msg_config_request); 2360 2361 if (!read) 2362 memcpy(kva, page, len); 2363 2364 ccb->ccb_done = mpii_empty_done; 2365 if (ISSET(flags, MPII_PG_POLL)) { 2366 if (mpii_poll(sc, ccb) != 0) { 2367 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n", 2368 DEVNAME(sc)); 2369 return (1); 2370 } 2371 } else 2372 mpii_wait(sc, ccb); 2373 2374 if (ccb->ccb_rcb == NULL) { 2375 scsi_io_put(&sc->sc_iopool, ccb); 2376 return (1); 2377 } 2378 cp = ccb->ccb_rcb->rcb_reply; 2379 2380 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d " 2381 "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, 2382 cp->function); 2383 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2384 "msg_flags: 0x%02x\n", DEVNAME(sc), 2385 lemtoh16(&cp->ext_page_length), cp->ext_page_type, 2386 cp->msg_flags); 2387 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc), 2388 cp->vp_id, cp->vf_id); 2389 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2390 lemtoh16(&cp->ioc_status)); 2391 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2392 lemtoh32(&cp->ioc_loginfo)); 2393 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2394 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2395 cp->config_header.page_version, 2396 cp->config_header.page_length, 2397 cp->config_header.page_number, 2398 cp->config_header.page_type); 2399 2400 if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS) 2401 rv = 1; 2402 else if (read) 2403 memcpy(page, kva, len); 2404 2405 mpii_push_reply(sc, ccb->ccb_rcb); 2406 scsi_io_put(&sc->sc_iopool, ccb); 2407 2408 return (rv); 2409 } 2410 2411 struct mpii_rcb * 2412 mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp) 2413 { 2414 struct mpii_rcb *rcb = NULL; 2415 u_int32_t rfid; 2416 2417 DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc)); 2418 2419 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) == 2420 MPII_REPLY_DESCR_ADDRESS_REPLY) { 2421 rfid = (lemtoh32(&rdp->frame_addr) - 2422 (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) / 2423 sc->sc_reply_size; 2424 2425 bus_dmamap_sync(sc->sc_dmat, 2426 MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid, 2427 sc->sc_reply_size, BUS_DMASYNC_POSTREAD); 2428 2429 rcb = &sc->sc_rcbs[rfid]; 2430 } 2431 2432 memset(rdp, 0xff, sizeof(*rdp)); 2433 2434 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq), 2435 8 * sc->sc_reply_post_host_index, 8, 2436 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2437 2438 return (rcb); 2439 } 2440 2441 struct mpii_dmamem * 2442 mpii_dmamem_alloc(struct mpii_softc *sc, size_t size) 2443 { 2444 struct mpii_dmamem *mdm; 2445 int nsegs; 2446 2447 mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO); 2448 if (mdm == NULL) 2449 return (NULL); 2450 2451 mdm->mdm_size = size; 2452 2453 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 2454 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0) 2455 goto mdmfree; 2456 2457 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg, 2458 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) 2459 goto destroy; 2460 2461 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size, 2462 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0) 2463 goto free; 2464 2465 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size, 2466 NULL, BUS_DMA_NOWAIT) != 0) 2467 goto unmap; 2468 2469 return (mdm); 2470 2471 unmap: 2472 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size); 2473 free: 2474 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 2475 destroy: 2476 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 2477 mdmfree: 2478 free(mdm, M_DEVBUF, sizeof *mdm); 2479 2480 return (NULL); 2481 } 2482 2483 void 2484 mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm) 2485 { 2486 DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm); 2487 2488 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map); 2489 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size); 2490 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 2491 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 2492 free(mdm, M_DEVBUF, sizeof *mdm); 2493 } 2494 2495 int 2496 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev) 2497 { 2498 int slot; /* initial hint */ 2499 2500 if (dev == NULL || dev->slot < 0) 2501 return (1); 2502 slot = dev->slot; 2503 2504 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL) 2505 slot++; 2506 2507 if (slot >= sc->sc_max_devices) 2508 return (1); 2509 2510 dev->slot = slot; 2511 sc->sc_devs[slot] = dev; 2512 2513 return (0); 2514 } 2515 2516 int 2517 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev) 2518 { 2519 int i; 2520 2521 if (dev == NULL) 2522 return (1); 2523 2524 for (i = 0; i < sc->sc_max_devices; i++) { 2525 if (sc->sc_devs[i] == NULL) 2526 continue; 2527 2528 if (sc->sc_devs[i]->dev_handle == dev->dev_handle) { 2529 sc->sc_devs[i] = NULL; 2530 return (0); 2531 } 2532 } 2533 2534 return (1); 2535 } 2536 2537 struct mpii_device * 2538 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle) 2539 { 2540 int i; 2541 2542 for (i = 0; i < sc->sc_max_devices; i++) { 2543 if (sc->sc_devs[i] == NULL) 2544 continue; 2545 2546 if (sc->sc_devs[i]->dev_handle == handle) 2547 return (sc->sc_devs[i]); 2548 } 2549 2550 return (NULL); 2551 } 2552 2553 int 2554 mpii_alloc_ccbs(struct mpii_softc *sc) 2555 { 2556 struct mpii_ccb *ccb; 2557 u_int8_t *cmd; 2558 int i; 2559 2560 SIMPLEQ_INIT(&sc->sc_ccb_free); 2561 SIMPLEQ_INIT(&sc->sc_ccb_tmos); 2562 mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO); 2563 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 2564 scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool, 2565 mpii_scsi_cmd_tmo_handler, sc); 2566 2567 sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb), 2568 M_DEVBUF, M_NOWAIT | M_ZERO); 2569 if (sc->sc_ccbs == NULL) { 2570 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)); 2571 return (1); 2572 } 2573 2574 sc->sc_requests = mpii_dmamem_alloc(sc, 2575 sc->sc_request_size * sc->sc_max_cmds); 2576 if (sc->sc_requests == NULL) { 2577 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)); 2578 goto free_ccbs; 2579 } 2580 cmd = MPII_DMA_KVA(sc->sc_requests); 2581 2582 /* 2583 * we have sc->sc_max_cmds system request message 2584 * frames, but smid zero cannot be used. so we then 2585 * have (sc->sc_max_cmds - 1) number of ccbs 2586 */ 2587 for (i = 1; i < sc->sc_max_cmds; i++) { 2588 ccb = &sc->sc_ccbs[i - 1]; 2589 2590 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl, 2591 MAXPHYS, 0, 2592 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 2593 &ccb->ccb_dmamap) != 0) { 2594 printf("%s: unable to create dma map\n", DEVNAME(sc)); 2595 goto free_maps; 2596 } 2597 2598 ccb->ccb_sc = sc; 2599 htolem16(&ccb->ccb_smid, i); 2600 ccb->ccb_offset = sc->sc_request_size * i; 2601 2602 ccb->ccb_cmd = &cmd[ccb->ccb_offset]; 2603 ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) + 2604 ccb->ccb_offset; 2605 2606 DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p " 2607 "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n", 2608 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc, 2609 ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd, 2610 ccb->ccb_cmd_dva); 2611 2612 mpii_put_ccb(sc, ccb); 2613 } 2614 2615 scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb); 2616 2617 return (0); 2618 2619 free_maps: 2620 while ((ccb = mpii_get_ccb(sc)) != NULL) 2621 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 2622 2623 mpii_dmamem_free(sc, sc->sc_requests); 2624 free_ccbs: 2625 free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb)); 2626 2627 return (1); 2628 } 2629 2630 void 2631 mpii_put_ccb(void *cookie, void *io) 2632 { 2633 struct mpii_softc *sc = cookie; 2634 struct mpii_ccb *ccb = io; 2635 2636 DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb); 2637 2638 ccb->ccb_state = MPII_CCB_FREE; 2639 ccb->ccb_cookie = NULL; 2640 ccb->ccb_done = NULL; 2641 ccb->ccb_rcb = NULL; 2642 memset(ccb->ccb_cmd, 0, sc->sc_request_size); 2643 2644 KERNEL_UNLOCK(); 2645 mtx_enter(&sc->sc_ccb_free_mtx); 2646 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link); 2647 mtx_leave(&sc->sc_ccb_free_mtx); 2648 KERNEL_LOCK(); 2649 } 2650 2651 void * 2652 mpii_get_ccb(void *cookie) 2653 { 2654 struct mpii_softc *sc = cookie; 2655 struct mpii_ccb *ccb; 2656 2657 KERNEL_UNLOCK(); 2658 2659 mtx_enter(&sc->sc_ccb_free_mtx); 2660 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free); 2661 if (ccb != NULL) { 2662 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link); 2663 ccb->ccb_state = MPII_CCB_READY; 2664 } 2665 mtx_leave(&sc->sc_ccb_free_mtx); 2666 2667 KERNEL_LOCK(); 2668 2669 DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb); 2670 2671 return (ccb); 2672 } 2673 2674 int 2675 mpii_alloc_replies(struct mpii_softc *sc) 2676 { 2677 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc)); 2678 2679 sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames, 2680 sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT); 2681 if (sc->sc_rcbs == NULL) 2682 return (1); 2683 2684 sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size * 2685 sc->sc_num_reply_frames); 2686 if (sc->sc_replies == NULL) { 2687 free(sc->sc_rcbs, M_DEVBUF, 2688 sc->sc_num_reply_frames * sizeof(struct mpii_rcb)); 2689 return (1); 2690 } 2691 2692 return (0); 2693 } 2694 2695 void 2696 mpii_push_replies(struct mpii_softc *sc) 2697 { 2698 struct mpii_rcb *rcb; 2699 caddr_t kva = MPII_DMA_KVA(sc->sc_replies); 2700 int i; 2701 2702 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies), 2703 0, sc->sc_reply_size * sc->sc_num_reply_frames, 2704 BUS_DMASYNC_PREREAD); 2705 2706 for (i = 0; i < sc->sc_num_reply_frames; i++) { 2707 rcb = &sc->sc_rcbs[i]; 2708 2709 rcb->rcb_reply = kva + sc->sc_reply_size * i; 2710 rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) + 2711 sc->sc_reply_size * i; 2712 mpii_push_reply(sc, rcb); 2713 } 2714 } 2715 2716 void 2717 mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb) 2718 { 2719 struct mpii_request_header *rhp; 2720 struct mpii_request_descr descr; 2721 u_long *rdp = (u_long *)&descr; 2722 2723 DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc), 2724 ccb->ccb_cmd_dva); 2725 2726 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests), 2727 ccb->ccb_offset, sc->sc_request_size, 2728 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2729 2730 ccb->ccb_state = MPII_CCB_QUEUED; 2731 2732 rhp = ccb->ccb_cmd; 2733 2734 memset(&descr, 0, sizeof(descr)); 2735 2736 switch (rhp->function) { 2737 case MPII_FUNCTION_SCSI_IO_REQUEST: 2738 descr.request_flags = MPII_REQ_DESCR_SCSI_IO; 2739 descr.dev_handle = htole16(ccb->ccb_dev_handle); 2740 break; 2741 case MPII_FUNCTION_SCSI_TASK_MGMT: 2742 descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY; 2743 break; 2744 default: 2745 descr.request_flags = MPII_REQ_DESCR_DEFAULT; 2746 } 2747 2748 descr.vf_id = sc->sc_vf_id; 2749 descr.smid = ccb->ccb_smid; 2750 2751 DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write " 2752 "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp); 2753 2754 DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write " 2755 "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1)); 2756 2757 #if defined(__LP64__) 2758 bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, 2759 MPII_REQ_DESCR_POST_LOW, *rdp); 2760 #else 2761 mtx_enter(&sc->sc_req_mtx); 2762 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, 2763 MPII_REQ_DESCR_POST_LOW, rdp[0]); 2764 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 2765 MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE); 2766 2767 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, 2768 MPII_REQ_DESCR_POST_HIGH, rdp[1]); 2769 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 2770 MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE); 2771 mtx_leave(&sc->sc_req_mtx); 2772 #endif 2773 } 2774 2775 int 2776 mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb) 2777 { 2778 void (*done)(struct mpii_ccb *); 2779 void *cookie; 2780 int rv = 1; 2781 2782 DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc)); 2783 2784 done = ccb->ccb_done; 2785 cookie = ccb->ccb_cookie; 2786 2787 ccb->ccb_done = mpii_poll_done; 2788 ccb->ccb_cookie = &rv; 2789 2790 mpii_start(sc, ccb); 2791 2792 while (rv == 1) { 2793 /* avoid excessive polling */ 2794 if (mpii_reply_waiting(sc)) 2795 mpii_intr(sc); 2796 else 2797 delay(10); 2798 } 2799 2800 ccb->ccb_cookie = cookie; 2801 done(ccb); 2802 2803 return (0); 2804 } 2805 2806 void 2807 mpii_poll_done(struct mpii_ccb *ccb) 2808 { 2809 int *rv = ccb->ccb_cookie; 2810 2811 *rv = 0; 2812 } 2813 2814 int 2815 mpii_alloc_queues(struct mpii_softc *sc) 2816 { 2817 u_int32_t *rfp; 2818 int i; 2819 2820 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc)); 2821 2822 sc->sc_reply_freeq = mpii_dmamem_alloc(sc, 2823 sc->sc_reply_free_qdepth * sizeof(*rfp)); 2824 if (sc->sc_reply_freeq == NULL) 2825 return (1); 2826 rfp = MPII_DMA_KVA(sc->sc_reply_freeq); 2827 for (i = 0; i < sc->sc_num_reply_frames; i++) { 2828 rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) + 2829 sc->sc_reply_size * i; 2830 } 2831 2832 sc->sc_reply_postq = mpii_dmamem_alloc(sc, 2833 sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr)); 2834 if (sc->sc_reply_postq == NULL) 2835 goto free_reply_freeq; 2836 sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq); 2837 memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth * 2838 sizeof(struct mpii_reply_descr)); 2839 2840 return (0); 2841 2842 free_reply_freeq: 2843 mpii_dmamem_free(sc, sc->sc_reply_freeq); 2844 return (1); 2845 } 2846 2847 void 2848 mpii_init_queues(struct mpii_softc *sc) 2849 { 2850 DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc)); 2851 2852 sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1; 2853 sc->sc_reply_post_host_index = 0; 2854 mpii_write_reply_free(sc, sc->sc_reply_free_host_index); 2855 mpii_write_reply_post(sc, sc->sc_reply_post_host_index); 2856 } 2857 2858 void 2859 mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb) 2860 { 2861 struct mutex mtx = MUTEX_INITIALIZER(IPL_BIO); 2862 void (*done)(struct mpii_ccb *); 2863 void *cookie; 2864 2865 done = ccb->ccb_done; 2866 cookie = ccb->ccb_cookie; 2867 2868 ccb->ccb_done = mpii_wait_done; 2869 ccb->ccb_cookie = &mtx; 2870 2871 /* XXX this will wait forever for the ccb to complete */ 2872 2873 mpii_start(sc, ccb); 2874 2875 mtx_enter(&mtx); 2876 while (ccb->ccb_cookie != NULL) 2877 msleep_nsec(ccb, &mtx, PRIBIO, "mpiiwait", INFSLP); 2878 mtx_leave(&mtx); 2879 2880 ccb->ccb_cookie = cookie; 2881 done(ccb); 2882 } 2883 2884 void 2885 mpii_wait_done(struct mpii_ccb *ccb) 2886 { 2887 struct mutex *mtx = ccb->ccb_cookie; 2888 2889 mtx_enter(mtx); 2890 ccb->ccb_cookie = NULL; 2891 mtx_leave(mtx); 2892 2893 wakeup_one(ccb); 2894 } 2895 2896 void 2897 mpii_scsi_cmd(struct scsi_xfer *xs) 2898 { 2899 struct scsi_link *link = xs->sc_link; 2900 struct mpii_softc *sc = link->bus->sb_adapter_softc; 2901 struct mpii_ccb *ccb = xs->io; 2902 struct mpii_msg_scsi_io *io; 2903 struct mpii_device *dev; 2904 int ret; 2905 2906 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc)); 2907 2908 if (xs->cmdlen > MPII_CDB_LEN) { 2909 DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n", 2910 DEVNAME(sc), xs->cmdlen); 2911 memset(&xs->sense, 0, sizeof(xs->sense)); 2912 xs->sense.error_code = SSD_ERRCODE_VALID | 0x70; 2913 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 2914 xs->sense.add_sense_code = 0x20; 2915 xs->error = XS_SENSE; 2916 scsi_done(xs); 2917 return; 2918 } 2919 2920 if ((dev = sc->sc_devs[link->target]) == NULL) { 2921 /* device no longer exists */ 2922 xs->error = XS_SELTIMEOUT; 2923 scsi_done(xs); 2924 return; 2925 } 2926 2927 KERNEL_UNLOCK(); 2928 2929 DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n", 2930 DEVNAME(sc), ccb->ccb_smid, xs->flags); 2931 2932 ccb->ccb_cookie = xs; 2933 ccb->ccb_done = mpii_scsi_cmd_done; 2934 ccb->ccb_dev_handle = dev->dev_handle; 2935 2936 io = ccb->ccb_cmd; 2937 memset(io, 0, sizeof(*io)); 2938 io->function = MPII_FUNCTION_SCSI_IO_REQUEST; 2939 io->sense_buffer_length = sizeof(xs->sense); 2940 io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4; 2941 htolem16(&io->io_flags, xs->cmdlen); 2942 htolem16(&io->dev_handle, ccb->ccb_dev_handle); 2943 htobem16(&io->lun[0], link->lun); 2944 2945 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 2946 case SCSI_DATA_IN: 2947 io->direction = MPII_SCSIIO_DIR_READ; 2948 break; 2949 case SCSI_DATA_OUT: 2950 io->direction = MPII_SCSIIO_DIR_WRITE; 2951 break; 2952 default: 2953 io->direction = MPII_SCSIIO_DIR_NONE; 2954 break; 2955 } 2956 2957 io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q; 2958 2959 memcpy(io->cdb, xs->cmd, xs->cmdlen); 2960 2961 htolem32(&io->data_length, xs->datalen); 2962 2963 /* sense data is at the end of a request */ 2964 htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva + 2965 sc->sc_request_size - sizeof(struct scsi_sense_data)); 2966 2967 if (ISSET(sc->sc_flags, MPII_F_SAS3)) 2968 ret = mpii_load_xs_sas3(ccb); 2969 else 2970 ret = mpii_load_xs(ccb); 2971 2972 if (ret != 0) { 2973 xs->error = XS_DRIVER_STUFFUP; 2974 goto done; 2975 } 2976 2977 timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb); 2978 if (xs->flags & SCSI_POLL) { 2979 if (mpii_poll(sc, ccb) != 0) { 2980 xs->error = XS_DRIVER_STUFFUP; 2981 goto done; 2982 } 2983 } else { 2984 timeout_add_msec(&xs->stimeout, xs->timeout); 2985 mpii_start(sc, ccb); 2986 } 2987 2988 KERNEL_LOCK(); 2989 return; 2990 2991 done: 2992 KERNEL_LOCK(); 2993 scsi_done(xs); 2994 } 2995 2996 void 2997 mpii_scsi_cmd_tmo(void *xccb) 2998 { 2999 struct mpii_ccb *ccb = xccb; 3000 struct mpii_softc *sc = ccb->ccb_sc; 3001 3002 printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc), 3003 mpii_read_db(sc)); 3004 3005 mtx_enter(&sc->sc_ccb_mtx); 3006 if (ccb->ccb_state == MPII_CCB_QUEUED) { 3007 ccb->ccb_state = MPII_CCB_TIMEOUT; 3008 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link); 3009 } 3010 mtx_leave(&sc->sc_ccb_mtx); 3011 3012 scsi_ioh_add(&sc->sc_ccb_tmo_handler); 3013 } 3014 3015 void 3016 mpii_scsi_cmd_tmo_handler(void *cookie, void *io) 3017 { 3018 struct mpii_softc *sc = cookie; 3019 struct mpii_ccb *tccb = io; 3020 struct mpii_ccb *ccb; 3021 struct mpii_msg_scsi_task_request *stq; 3022 3023 mtx_enter(&sc->sc_ccb_mtx); 3024 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos); 3025 if (ccb != NULL) { 3026 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link); 3027 ccb->ccb_state = MPII_CCB_QUEUED; 3028 } 3029 /* should remove any other ccbs for the same dev handle */ 3030 mtx_leave(&sc->sc_ccb_mtx); 3031 3032 if (ccb == NULL) { 3033 scsi_io_put(&sc->sc_iopool, tccb); 3034 return; 3035 } 3036 3037 stq = tccb->ccb_cmd; 3038 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT; 3039 stq->task_type = MPII_SCSI_TASK_TARGET_RESET; 3040 htolem16(&stq->dev_handle, ccb->ccb_dev_handle); 3041 3042 tccb->ccb_done = mpii_scsi_cmd_tmo_done; 3043 mpii_start(sc, tccb); 3044 } 3045 3046 void 3047 mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb) 3048 { 3049 mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb); 3050 } 3051 3052 void 3053 mpii_scsi_cmd_done(struct mpii_ccb *ccb) 3054 { 3055 struct mpii_ccb *tccb; 3056 struct mpii_msg_scsi_io_error *sie; 3057 struct mpii_softc *sc = ccb->ccb_sc; 3058 struct scsi_xfer *xs = ccb->ccb_cookie; 3059 struct scsi_sense_data *sense; 3060 bus_dmamap_t dmap = ccb->ccb_dmamap; 3061 3062 timeout_del(&xs->stimeout); 3063 mtx_enter(&sc->sc_ccb_mtx); 3064 if (ccb->ccb_state == MPII_CCB_TIMEOUT) { 3065 /* ENOSIMPLEQ_REMOVE :( */ 3066 if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos)) 3067 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link); 3068 else { 3069 SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) { 3070 if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) { 3071 SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos, 3072 tccb, ccb_link); 3073 break; 3074 } 3075 } 3076 } 3077 } 3078 3079 ccb->ccb_state = MPII_CCB_READY; 3080 mtx_leave(&sc->sc_ccb_mtx); 3081 3082 if (xs->datalen != 0) { 3083 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 3084 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD : 3085 BUS_DMASYNC_POSTWRITE); 3086 3087 bus_dmamap_unload(sc->sc_dmat, dmap); 3088 } 3089 3090 xs->error = XS_NOERROR; 3091 xs->resid = 0; 3092 3093 if (ccb->ccb_rcb == NULL) { 3094 /* no scsi error, we're ok so drop out early */ 3095 xs->status = SCSI_OK; 3096 goto done; 3097 } 3098 3099 sie = ccb->ccb_rcb->rcb_reply; 3100 3101 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d " 3102 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen, 3103 xs->flags); 3104 DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d " 3105 "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle), 3106 sie->msg_length, sie->function); 3107 DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc), 3108 sie->vp_id, sie->vf_id); 3109 DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x " 3110 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status, 3111 sie->scsi_state, lemtoh16(&sie->ioc_status)); 3112 DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 3113 lemtoh32(&sie->ioc_loginfo)); 3114 DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc), 3115 lemtoh32(&sie->transfer_count)); 3116 DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc), 3117 lemtoh32(&sie->sense_count)); 3118 DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc), 3119 lemtoh32(&sie->response_info)); 3120 DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc), 3121 lemtoh16(&sie->task_tag)); 3122 DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n", 3123 DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count)); 3124 3125 if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS) 3126 xs->status = SCSI_TERMINATED; 3127 else 3128 xs->status = sie->scsi_status; 3129 xs->resid = 0; 3130 3131 switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) { 3132 case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN: 3133 xs->resid = xs->datalen - lemtoh32(&sie->transfer_count); 3134 /* FALLTHROUGH */ 3135 3136 case MPII_IOCSTATUS_SUCCESS: 3137 case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR: 3138 switch (xs->status) { 3139 case SCSI_OK: 3140 xs->error = XS_NOERROR; 3141 break; 3142 3143 case SCSI_CHECK: 3144 xs->error = XS_SENSE; 3145 break; 3146 3147 case SCSI_BUSY: 3148 case SCSI_QUEUE_FULL: 3149 xs->error = XS_BUSY; 3150 break; 3151 3152 default: 3153 xs->error = XS_DRIVER_STUFFUP; 3154 } 3155 break; 3156 3157 case MPII_IOCSTATUS_BUSY: 3158 case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES: 3159 xs->error = XS_BUSY; 3160 break; 3161 3162 case MPII_IOCSTATUS_SCSI_IOC_TERMINATED: 3163 case MPII_IOCSTATUS_SCSI_TASK_TERMINATED: 3164 xs->error = XS_RESET; 3165 break; 3166 3167 case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE: 3168 case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3169 xs->error = XS_SELTIMEOUT; 3170 break; 3171 3172 default: 3173 xs->error = XS_DRIVER_STUFFUP; 3174 break; 3175 } 3176 3177 sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd + 3178 sc->sc_request_size - sizeof(*sense)); 3179 if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID) 3180 memcpy(&xs->sense, sense, sizeof(xs->sense)); 3181 3182 DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x\n", DEVNAME(sc), 3183 xs->error, xs->status); 3184 3185 mpii_push_reply(sc, ccb->ccb_rcb); 3186 done: 3187 KERNEL_LOCK(); 3188 scsi_done(xs); 3189 KERNEL_UNLOCK(); 3190 } 3191 3192 int 3193 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) 3194 { 3195 struct mpii_softc *sc = link->bus->sb_adapter_softc; 3196 struct mpii_device *dev = sc->sc_devs[link->target]; 3197 3198 DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc)); 3199 3200 switch (cmd) { 3201 case DIOCGCACHE: 3202 case DIOCSCACHE: 3203 if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) { 3204 return (mpii_ioctl_cache(link, cmd, 3205 (struct dk_cache *)addr)); 3206 } 3207 break; 3208 3209 default: 3210 if (sc->sc_ioctl) 3211 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr)); 3212 3213 break; 3214 } 3215 3216 return (ENOTTY); 3217 } 3218 3219 int 3220 mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc) 3221 { 3222 struct mpii_softc *sc = link->bus->sb_adapter_softc; 3223 struct mpii_device *dev = sc->sc_devs[link->target]; 3224 struct mpii_cfg_raid_vol_pg0 *vpg; 3225 struct mpii_msg_raid_action_request *req; 3226 struct mpii_msg_raid_action_reply *rep; 3227 struct mpii_cfg_hdr hdr; 3228 struct mpii_ccb *ccb; 3229 u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle; 3230 size_t pagelen; 3231 int rv = 0; 3232 int enabled; 3233 3234 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3235 addr, MPII_PG_POLL, &hdr) != 0) 3236 return (EINVAL); 3237 3238 pagelen = hdr.page_length * 4; 3239 vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO); 3240 if (vpg == NULL) 3241 return (ENOMEM); 3242 3243 if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1, 3244 vpg, pagelen) != 0) { 3245 rv = EINVAL; 3246 goto done; 3247 } 3248 3249 enabled = ((lemtoh16(&vpg->volume_settings) & 3250 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) == 3251 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0; 3252 3253 if (cmd == DIOCGCACHE) { 3254 dc->wrcache = enabled; 3255 dc->rdcache = 0; 3256 goto done; 3257 } /* else DIOCSCACHE */ 3258 3259 if (dc->rdcache) { 3260 rv = EOPNOTSUPP; 3261 goto done; 3262 } 3263 3264 if (((dc->wrcache) ? 1 : 0) == enabled) 3265 goto done; 3266 3267 ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL); 3268 if (ccb == NULL) { 3269 rv = ENOMEM; 3270 goto done; 3271 } 3272 3273 ccb->ccb_done = mpii_empty_done; 3274 3275 req = ccb->ccb_cmd; 3276 memset(req, 0, sizeof(*req)); 3277 req->function = MPII_FUNCTION_RAID_ACTION; 3278 req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE; 3279 htolem16(&req->vol_dev_handle, dev->dev_handle); 3280 htolem32(&req->action_data, dc->wrcache ? 3281 MPII_RAID_VOL_WRITE_CACHE_ENABLE : 3282 MPII_RAID_VOL_WRITE_CACHE_DISABLE); 3283 3284 if (mpii_poll(sc, ccb) != 0) { 3285 rv = EIO; 3286 goto done; 3287 } 3288 3289 if (ccb->ccb_rcb != NULL) { 3290 rep = ccb->ccb_rcb->rcb_reply; 3291 if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) || 3292 ((rep->action_data[0] & 3293 MPII_RAID_VOL_WRITE_CACHE_MASK) != 3294 (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE : 3295 MPII_RAID_VOL_WRITE_CACHE_DISABLE))) 3296 rv = EINVAL; 3297 mpii_push_reply(sc, ccb->ccb_rcb); 3298 } 3299 3300 scsi_io_put(&sc->sc_iopool, ccb); 3301 3302 done: 3303 free(vpg, M_TEMP, pagelen); 3304 return (rv); 3305 } 3306 3307 #if NBIO > 0 3308 int 3309 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr) 3310 { 3311 struct mpii_softc *sc = (struct mpii_softc *)dev; 3312 int error = 0; 3313 3314 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc)); 3315 3316 switch (cmd) { 3317 case BIOCINQ: 3318 DNPRINTF(MPII_D_IOCTL, "inq\n"); 3319 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr); 3320 break; 3321 case BIOCVOL: 3322 DNPRINTF(MPII_D_IOCTL, "vol\n"); 3323 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr); 3324 break; 3325 case BIOCDISK: 3326 DNPRINTF(MPII_D_IOCTL, "disk\n"); 3327 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr); 3328 break; 3329 default: 3330 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n"); 3331 error = ENOTTY; 3332 } 3333 3334 return (error); 3335 } 3336 3337 int 3338 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi) 3339 { 3340 int i; 3341 3342 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc)); 3343 3344 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 3345 for (i = 0; i < sc->sc_max_devices; i++) 3346 if (sc->sc_devs[i] && 3347 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME)) 3348 bi->bi_novol++; 3349 return (0); 3350 } 3351 3352 int 3353 mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv) 3354 { 3355 struct mpii_cfg_raid_vol_pg0 *vpg; 3356 struct mpii_cfg_hdr hdr; 3357 struct mpii_device *dev; 3358 struct scsi_link *lnk; 3359 struct device *scdev; 3360 size_t pagelen; 3361 u_int16_t volh; 3362 int rv, hcnt = 0; 3363 3364 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n", 3365 DEVNAME(sc), bv->bv_volid); 3366 3367 if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) 3368 return (ENODEV); 3369 volh = dev->dev_handle; 3370 3371 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3372 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) { 3373 printf("%s: unable to fetch header for raid volume page 0\n", 3374 DEVNAME(sc)); 3375 return (EINVAL); 3376 } 3377 3378 pagelen = hdr.page_length * 4; 3379 vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO); 3380 if (vpg == NULL) { 3381 printf("%s: unable to allocate space for raid " 3382 "volume page 0\n", DEVNAME(sc)); 3383 return (ENOMEM); 3384 } 3385 3386 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, 3387 &hdr, 1, vpg, pagelen) != 0) { 3388 printf("%s: unable to fetch raid volume page 0\n", 3389 DEVNAME(sc)); 3390 free(vpg, M_TEMP, pagelen); 3391 return (EINVAL); 3392 } 3393 3394 switch (vpg->volume_state) { 3395 case MPII_CFG_RAID_VOL_0_STATE_ONLINE: 3396 case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL: 3397 bv->bv_status = BIOC_SVONLINE; 3398 break; 3399 case MPII_CFG_RAID_VOL_0_STATE_DEGRADED: 3400 if (ISSET(lemtoh32(&vpg->volume_status), 3401 MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) { 3402 bv->bv_status = BIOC_SVREBUILD; 3403 bv->bv_percent = dev->percent; 3404 } else 3405 bv->bv_status = BIOC_SVDEGRADED; 3406 break; 3407 case MPII_CFG_RAID_VOL_0_STATE_FAILED: 3408 bv->bv_status = BIOC_SVOFFLINE; 3409 break; 3410 case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING: 3411 bv->bv_status = BIOC_SVBUILDING; 3412 break; 3413 case MPII_CFG_RAID_VOL_0_STATE_MISSING: 3414 default: 3415 bv->bv_status = BIOC_SVINVALID; 3416 break; 3417 } 3418 3419 switch (vpg->volume_type) { 3420 case MPII_CFG_RAID_VOL_0_TYPE_RAID0: 3421 bv->bv_level = 0; 3422 break; 3423 case MPII_CFG_RAID_VOL_0_TYPE_RAID1: 3424 bv->bv_level = 1; 3425 break; 3426 case MPII_CFG_RAID_VOL_0_TYPE_RAID1E: 3427 case MPII_CFG_RAID_VOL_0_TYPE_RAID10: 3428 bv->bv_level = 10; 3429 break; 3430 default: 3431 bv->bv_level = -1; 3432 } 3433 3434 if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) { 3435 free(vpg, M_TEMP, pagelen); 3436 return (rv); 3437 } 3438 3439 bv->bv_nodisk = vpg->num_phys_disks + hcnt; 3440 3441 bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size); 3442 3443 lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0); 3444 if (lnk != NULL) { 3445 scdev = lnk->device_softc; 3446 strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev)); 3447 } 3448 3449 free(vpg, M_TEMP, pagelen); 3450 return (0); 3451 } 3452 3453 int 3454 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd) 3455 { 3456 struct mpii_cfg_raid_vol_pg0 *vpg; 3457 struct mpii_cfg_raid_vol_pg0_physdisk *pd; 3458 struct mpii_cfg_hdr hdr; 3459 struct mpii_device *dev; 3460 size_t pagelen; 3461 u_int16_t volh; 3462 u_int8_t dn; 3463 3464 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n", 3465 DEVNAME(sc), bd->bd_volid, bd->bd_diskid); 3466 3467 if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL) 3468 return (ENODEV); 3469 volh = dev->dev_handle; 3470 3471 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3472 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) { 3473 printf("%s: unable to fetch header for raid volume page 0\n", 3474 DEVNAME(sc)); 3475 return (EINVAL); 3476 } 3477 3478 pagelen = hdr.page_length * 4; 3479 vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO); 3480 if (vpg == NULL) { 3481 printf("%s: unable to allocate space for raid " 3482 "volume page 0\n", DEVNAME(sc)); 3483 return (ENOMEM); 3484 } 3485 3486 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, 3487 &hdr, 1, vpg, pagelen) != 0) { 3488 printf("%s: unable to fetch raid volume page 0\n", 3489 DEVNAME(sc)); 3490 free(vpg, M_TEMP, pagelen); 3491 return (EINVAL); 3492 } 3493 3494 if (bd->bd_diskid >= vpg->num_phys_disks) { 3495 int nvdsk = vpg->num_phys_disks; 3496 int hsmap = vpg->hot_spare_pool; 3497 3498 free(vpg, M_TEMP, pagelen); 3499 return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL)); 3500 } 3501 3502 pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) + 3503 bd->bd_diskid; 3504 dn = pd->phys_disk_num; 3505 3506 free(vpg, M_TEMP, pagelen); 3507 return (mpii_bio_disk(sc, bd, dn)); 3508 } 3509 3510 int 3511 mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk, 3512 int hsmap, int *hscnt) 3513 { 3514 struct mpii_cfg_raid_config_pg0 *cpg; 3515 struct mpii_raid_config_element *el; 3516 struct mpii_ecfg_hdr ehdr; 3517 size_t pagelen; 3518 int i, nhs = 0; 3519 3520 if (bd) 3521 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc), 3522 bd->bd_diskid - nvdsk); 3523 else 3524 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc)); 3525 3526 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG, 3527 0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED, 3528 &ehdr) != 0) { 3529 printf("%s: unable to fetch header for raid config page 0\n", 3530 DEVNAME(sc)); 3531 return (EINVAL); 3532 } 3533 3534 pagelen = lemtoh16(&ehdr.ext_page_length) * 4; 3535 cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO); 3536 if (cpg == NULL) { 3537 printf("%s: unable to allocate space for raid config page 0\n", 3538 DEVNAME(sc)); 3539 return (ENOMEM); 3540 } 3541 3542 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, 3543 MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) { 3544 printf("%s: unable to fetch raid config page 0\n", 3545 DEVNAME(sc)); 3546 free(cpg, M_TEMP, pagelen); 3547 return (EINVAL); 3548 } 3549 3550 el = (struct mpii_raid_config_element *)(cpg + 1); 3551 for (i = 0; i < cpg->num_elements; i++, el++) { 3552 if (ISSET(lemtoh16(&el->element_flags), 3553 MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) && 3554 el->hot_spare_pool == hsmap) { 3555 /* 3556 * diskid comparison is based on the idea that all 3557 * disks are counted by the bio(4) in sequence, thus 3558 * substracting the number of disks in the volume 3559 * from the diskid yields us a "relative" hotspare 3560 * number, which is good enough for us. 3561 */ 3562 if (bd != NULL && bd->bd_diskid == nhs + nvdsk) { 3563 u_int8_t dn = el->phys_disk_num; 3564 3565 free(cpg, M_TEMP, pagelen); 3566 return (mpii_bio_disk(sc, bd, dn)); 3567 } 3568 nhs++; 3569 } 3570 } 3571 3572 if (hscnt) 3573 *hscnt = nhs; 3574 3575 free(cpg, M_TEMP, pagelen); 3576 return (0); 3577 } 3578 3579 int 3580 mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn) 3581 { 3582 struct mpii_cfg_raid_physdisk_pg0 *ppg; 3583 struct mpii_cfg_hdr hdr; 3584 struct mpii_device *dev; 3585 int len; 3586 3587 DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc), 3588 bd->bd_diskid); 3589 3590 ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO); 3591 if (ppg == NULL) { 3592 printf("%s: unable to allocate space for raid physical disk " 3593 "page 0\n", DEVNAME(sc)); 3594 return (ENOMEM); 3595 } 3596 3597 hdr.page_version = 0; 3598 hdr.page_length = sizeof(*ppg) / 4; 3599 hdr.page_number = 0; 3600 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD; 3601 3602 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0, 3603 &hdr, 1, ppg, sizeof(*ppg)) != 0) { 3604 printf("%s: unable to fetch raid drive page 0\n", 3605 DEVNAME(sc)); 3606 free(ppg, M_TEMP, sizeof(*ppg)); 3607 return (EINVAL); 3608 } 3609 3610 bd->bd_target = ppg->phys_disk_num; 3611 3612 if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) { 3613 bd->bd_status = BIOC_SDINVALID; 3614 free(ppg, M_TEMP, sizeof(*ppg)); 3615 return (0); 3616 } 3617 3618 switch (ppg->phys_disk_state) { 3619 case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE: 3620 case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL: 3621 bd->bd_status = BIOC_SDONLINE; 3622 break; 3623 case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE: 3624 if (ppg->offline_reason == 3625 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED || 3626 ppg->offline_reason == 3627 MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ) 3628 bd->bd_status = BIOC_SDFAILED; 3629 else 3630 bd->bd_status = BIOC_SDOFFLINE; 3631 break; 3632 case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED: 3633 bd->bd_status = BIOC_SDFAILED; 3634 break; 3635 case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING: 3636 bd->bd_status = BIOC_SDREBUILD; 3637 break; 3638 case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE: 3639 bd->bd_status = BIOC_SDHOTSPARE; 3640 break; 3641 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED: 3642 bd->bd_status = BIOC_SDUNUSED; 3643 break; 3644 case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE: 3645 default: 3646 bd->bd_status = BIOC_SDINVALID; 3647 break; 3648 } 3649 3650 bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size); 3651 3652 scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id)); 3653 len = strlen(bd->bd_vendor); 3654 bd->bd_vendor[len] = ' '; 3655 scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id, 3656 sizeof(ppg->product_id)); 3657 scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial)); 3658 3659 free(ppg, M_TEMP, sizeof(*ppg)); 3660 return (0); 3661 } 3662 3663 struct mpii_device * 3664 mpii_find_vol(struct mpii_softc *sc, int volid) 3665 { 3666 struct mpii_device *dev = NULL; 3667 3668 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices) 3669 return (NULL); 3670 dev = sc->sc_devs[sc->sc_vd_id_low + volid]; 3671 if (dev && ISSET(dev->flags, MPII_DF_VOLUME)) 3672 return (dev); 3673 return (NULL); 3674 } 3675 3676 #ifndef SMALL_KERNEL 3677 /* 3678 * Non-sleeping lightweight version of the mpii_ioctl_vol 3679 */ 3680 int 3681 mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv) 3682 { 3683 struct mpii_cfg_raid_vol_pg0 *vpg; 3684 struct mpii_cfg_hdr hdr; 3685 struct mpii_device *dev = NULL; 3686 size_t pagelen; 3687 u_int16_t volh; 3688 3689 if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL) 3690 return (ENODEV); 3691 volh = dev->dev_handle; 3692 3693 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3694 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) { 3695 DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid " 3696 "volume page 0\n", DEVNAME(sc)); 3697 return (EINVAL); 3698 } 3699 3700 pagelen = hdr.page_length * 4; 3701 vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO); 3702 if (vpg == NULL) { 3703 DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid " 3704 "volume page 0\n", DEVNAME(sc)); 3705 return (ENOMEM); 3706 } 3707 3708 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 3709 MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) { 3710 DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume " 3711 "page 0\n", DEVNAME(sc)); 3712 free(vpg, M_TEMP, pagelen); 3713 return (EINVAL); 3714 } 3715 3716 switch (vpg->volume_state) { 3717 case MPII_CFG_RAID_VOL_0_STATE_ONLINE: 3718 case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL: 3719 bv->bv_status = BIOC_SVONLINE; 3720 break; 3721 case MPII_CFG_RAID_VOL_0_STATE_DEGRADED: 3722 if (ISSET(lemtoh32(&vpg->volume_status), 3723 MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) 3724 bv->bv_status = BIOC_SVREBUILD; 3725 else 3726 bv->bv_status = BIOC_SVDEGRADED; 3727 break; 3728 case MPII_CFG_RAID_VOL_0_STATE_FAILED: 3729 bv->bv_status = BIOC_SVOFFLINE; 3730 break; 3731 case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING: 3732 bv->bv_status = BIOC_SVBUILDING; 3733 break; 3734 case MPII_CFG_RAID_VOL_0_STATE_MISSING: 3735 default: 3736 bv->bv_status = BIOC_SVINVALID; 3737 break; 3738 } 3739 3740 free(vpg, M_TEMP, pagelen); 3741 return (0); 3742 } 3743 3744 int 3745 mpii_create_sensors(struct mpii_softc *sc) 3746 { 3747 struct scsibus_softc *ssc = sc->sc_scsibus; 3748 struct device *dev; 3749 struct scsi_link *link; 3750 int i; 3751 3752 sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor), 3753 M_DEVBUF, M_NOWAIT | M_ZERO); 3754 if (sc->sc_sensors == NULL) 3755 return (1); 3756 sc->sc_nsensors = sc->sc_vd_count; 3757 3758 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc), 3759 sizeof(sc->sc_sensordev.xname)); 3760 3761 for (i = 0; i < sc->sc_vd_count; i++) { 3762 link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0); 3763 if (link == NULL) 3764 goto bad; 3765 3766 dev = link->device_softc; 3767 3768 sc->sc_sensors[i].type = SENSOR_DRIVE; 3769 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN; 3770 3771 strlcpy(sc->sc_sensors[i].desc, dev->dv_xname, 3772 sizeof(sc->sc_sensors[i].desc)); 3773 3774 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]); 3775 } 3776 3777 if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL) 3778 goto bad; 3779 3780 sensordev_install(&sc->sc_sensordev); 3781 3782 return (0); 3783 3784 bad: 3785 free(sc->sc_sensors, M_DEVBUF, 0); 3786 3787 return (1); 3788 } 3789 3790 void 3791 mpii_refresh_sensors(void *arg) 3792 { 3793 struct mpii_softc *sc = arg; 3794 struct bioc_vol bv; 3795 int i; 3796 3797 for (i = 0; i < sc->sc_nsensors; i++) { 3798 memset(&bv, 0, sizeof(bv)); 3799 bv.bv_volid = i; 3800 if (mpii_bio_volstate(sc, &bv)) 3801 return; 3802 switch(bv.bv_status) { 3803 case BIOC_SVOFFLINE: 3804 sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL; 3805 sc->sc_sensors[i].status = SENSOR_S_CRIT; 3806 break; 3807 case BIOC_SVDEGRADED: 3808 sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL; 3809 sc->sc_sensors[i].status = SENSOR_S_WARN; 3810 break; 3811 case BIOC_SVREBUILD: 3812 sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD; 3813 sc->sc_sensors[i].status = SENSOR_S_WARN; 3814 break; 3815 case BIOC_SVONLINE: 3816 sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE; 3817 sc->sc_sensors[i].status = SENSOR_S_OK; 3818 break; 3819 case BIOC_SVINVALID: 3820 /* FALLTHROUGH */ 3821 default: 3822 sc->sc_sensors[i].value = 0; /* unknown */ 3823 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN; 3824 } 3825 } 3826 } 3827 #endif /* SMALL_KERNEL */ 3828 #endif /* NBIO > 0 */ 3829