/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright 2000-2020 Broadcom Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <sys/priv.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>

static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static void mpr_resize_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS);
static int mpr_dump_reqs(SYSCTL_HANDLER_ARGS);
static void mpr_parse_debug(struct mpr_softc *sc, char *list);

SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
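/*
 * Note: per the MPI specification, this byte sequence (a flush byte followed
 * by six unlock keys) is written one byte at a time to the WriteSequence
 * register to enable writes to the host diagnostic register, which is what
 * lets MPI2_DIAG_RESET_ADAPTER be set in mpr_diag_reset() below.
 */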
/*
 * Added this union to smoothly convert le64toh cm->cm_desc.Words.
 * Compiler only supports uint64_t to be passed as an argument.
 * Otherwise it will throw this error:
 * "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
} reply_descriptor, request_descriptor;

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep without
 * harm.  If it is called from an interrupt handler, it cannot sleep and
 * NO_SLEEP must be set.  Based on the sleep flag, the driver will call
 * msleep, pause, or DELAY.  msleep and pause are similar, but pause is
 * used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
	uint32_t reg;
	int i, error, tries = 0;
	uint8_t first_wait_done = FALSE;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Clear any pending interrupts */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/*
	 * Force NO_SLEEP for threads that are prohibited from sleeping,
	 * e.g. threads running in an interrupt handler.
	 */
#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flag = NO_SLEEP;

	mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
	/* Push the magic sequence */
	error = ETIMEDOUT;
	while (tries++ < 20) {
		for (i = 0; i < sizeof(mpt2_reset_magic); i++)
			mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
			    mpt2_reset_magic[i]);

		/* wait 100 msec */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprdiag", hz/10);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdiag", hz/10);
		else
			DELAY(100 * 1000);

		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
		    error);
		return (error);
	}

	/* Send the actual reset.  XXX need to refresh the reg? */
	reg |= MPI2_DIAG_RESET_ADAPTER;
	mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
	    reg);
	mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);

	/* Wait up to 300 seconds in 50ms intervals */
	error = ETIMEDOUT;
	for (i = 0; i < 6000; i++) {
		/*
		 * Wait 50 msec.  If this is the first time through, wait 256
		 * msec to satisfy Diag Reset timing requirements.
		 */
		if (first_wait_done) {
			if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
				msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
				    "mprdiag", hz/20);
			else if (sleep_flag == CAN_SLEEP)
				pause("mprdiag", hz/20);
			else
				DELAY(50 * 1000);
		} else {
			DELAY(256 * 1000);
			first_wait_done = TRUE;
		}
		/*
		 * Check for the RESET_ADAPTER bit to be cleared first, then
		 * wait for the RESET state to be cleared, which takes a little
		 * longer.
		 */
		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_RESET_ADAPTER) {
			continue;
		}
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
		    error);
		return (error);
	}

	mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
	mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");

	return (0);
}

static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{
	int error;

	MPR_FUNCTRACE(sc);

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	error = 0;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
	    MPI2_DOORBELL_FUNCTION_SHIFT);

	if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_FAULT,
		    "Doorbell handshake failed\n");
		error = ETIMEDOUT;
	}

	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
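/*
 * Drive the IOC to the READY state.  Each pass of the loop below reads the
 * doorbell and reacts to what it finds: a doorbell still in use or a FAULT
 * state gets a diag reset, OPERATIONAL gets a message unit reset so the host
 * takes ownership, and RESET just waits for the transition to finish.  With
 * up to 1200 tries at 50ms apiece, the loop allows the IOC roughly a minute
 * to become ready, not counting time spent inside the resets themselves.
 */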
static int
mpr_transition_ready(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error, tries = 0;
	int sleep_flags;

	MPR_FUNCTRACE(sc);
	/* If we are in attach call, do not sleep */
	sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
	    ? CAN_SLEEP : NO_SLEEP;

	error = 0;

	mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
	    __func__, sleep_flags);

	while (tries++ < 1200) {
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg);

		/*
		 * Ensure the IOC is ready to talk.  If it's not, try
		 * resetting it.
		 */
		if (reg & MPI2_DOORBELL_USED) {
			mpr_dprint(sc, MPR_INIT, " Not ready, sending diag "
			    "reset\n");
			mpr_diag_reset(sc, sleep_flags);
			DELAY(50000);
			continue;
		}

		/* Is the adapter owned by another peer? */
		if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
		    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
			    "control of another peer host, aborting "
			    "initialization.\n");
			error = ENXIO;
			break;
		}

		state = reg & MPI2_IOC_STATE_MASK;
		if (state == MPI2_IOC_STATE_READY) {
			/* Ready to go! */
			error = 0;
			break;
		} else if (state == MPI2_IOC_STATE_FAULT) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
			    "state 0x%x, resetting\n",
			    state & MPI2_DOORBELL_FAULT_CODE_MASK);
			mpr_diag_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
			/* Need to take ownership */
			mpr_message_unit_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_RESET) {
			/* Wait a bit, IOC might be in transition */
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "IOC in unexpected reset state\n");
		} else {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "IOC in unknown state 0x%x\n", state);
			error = EINVAL;
			break;
		}

		/* Wait 50ms for things to settle down. */
		DELAY(50000);
	}

	if (error)
		mpr_dprint(sc, MPR_INIT|MPR_FAULT,
		    "Cannot transition IOC to ready\n");
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}

static int
mpr_transition_operational(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error;

	MPR_FUNCTRACE(sc);

	error = 0;
	reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
	mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);

	state = reg & MPI2_IOC_STATE_MASK;
	if (state != MPI2_IOC_STATE_READY) {
		mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "failed to transition ready, exit\n");
			return (error);
		}
	}

	error = mpr_send_iocinit(sc);
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);

	return (error);
}
static void
mpr_resize_queues(struct mpr_softc *sc)
{
	u_int reqcr, prireqcr, maxio, sges_per_frame, chain_seg_size;

	/*
	 * Size the queues.  Since the reply queues always need one free
	 * entry, we'll deduct one reply message here.  The LSI documents
	 * suggest instead to add a count to the request queue, but I think
	 * that it's better to deduct from reply queue.
	 */
	prireqcr = MAX(1, sc->max_prireqframes);
	prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);

	reqcr = MAX(2, sc->max_reqframes);
	reqcr = MIN(reqcr, sc->facts->RequestCredit);

	sc->num_reqs = prireqcr + reqcr;
	sc->num_prireqs = prireqcr;
	sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
	    sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;

	/* Store the request frame size in bytes rather than as 32bit words */
	sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;

	/*
	 * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
	 * get the size of a Chain Frame.  Previous versions use the size as a
	 * Request Frame for the Chain Frame size.  If IOCMaxChainSegmentSize
	 * is 0, use the default value.  The IOCMaxChainSegmentSize is the
	 * number of 16-byte elements that can fit in a Chain Frame, which is
	 * the size of an IEEE Simple SGE.
	 */
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
		chain_seg_size = htole16(sc->facts->IOCMaxChainSegmentSize);
		if (chain_seg_size == 0)
			chain_seg_size = MPR_DEFAULT_CHAIN_SEG_SIZE;
		sc->chain_frame_size = chain_seg_size *
		    MPR_MAX_CHAIN_ELEMENT_SIZE;
	} else {
		sc->chain_frame_size = sc->reqframesz;
	}

	/*
	 * Max IO Size is Page Size * the following:
	 * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
	 * + 1 for no chain needed in last frame
	 *
	 * If user suggests a Max IO size to use, use the smaller of the
	 * user's value and the calculated value as long as the user's
	 * value is larger than 0.  The user's value is in pages.
	 */
	sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1;
	maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;

	/*
	 * If I/O size limitation requested then use it and pass up to CAM.
	 * If not, use MAXPHYS as an optimization hint, but report HW limit.
	 */
	if (sc->max_io_pages > 0) {
		maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
		sc->maxio = maxio;
	} else {
		sc->maxio = maxio;
		maxio = min(maxio, MAXPHYS);
	}

	sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
	    sges_per_frame * reqcr;
	if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
		sc->num_chains = sc->max_chains;

	/*
	 * Figure out the number of MSIx-based queues.  If the firmware or
	 * user has done something crazy and not allowed enough credit for
	 * the queues to be useful then don't enable multi-queue.
	 */
	if (sc->facts->MaxMSIxVectors < 2)
		sc->msi_msgs = 1;

	if (sc->msi_msgs > 1) {
		sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
		sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
		if (sc->num_reqs / sc->msi_msgs < 2)
			sc->msi_msgs = 1;
	}

	mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
	    sc->msi_msgs, sc->num_reqs, sc->num_replies);
}
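/*
 * Worked example of the sizing above (illustrative numbers, not taken from
 * any particular adapter): with a 128-byte chain frame and 16-byte IEEE
 * simple SGEs, sges_per_frame = 128/16 - 1 = 7.  If IOC Facts reports
 * MaxChainDepth = 128 and PAGE_SIZE is 4k, then
 * maxio = (7 * 128 + 1) * 4096 = 3,674,112 bytes, roughly 3.5MB per I/O
 * before any user-requested max_io_pages cap is applied.
 */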
/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and
 * reallocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
	int error;
	Mpi2IOCFactsReply_t saved_facts;
	uint8_t saved_mode, reallocating;

	mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);

	/* Save old IOC Facts and then only reallocate if Facts have changed */
	if (!attaching) {
		bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
	}

	/*
	 * Get IOC Facts.  In all cases throughout this function, panic if doing
	 * a re-initialization and only return the error if attaching so the OS
	 * can handle it.
	 */
	if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
		if (attaching) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
			    "IOC Facts with error %d, exit\n", error);
			return (error);
		} else {
			panic("%s failed to get IOC Facts with error %d\n",
			    __func__, error);
		}
	}

	MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);

	snprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%02d.%02d.%02d.%02d",
	    sc->facts->FWVersion.Struct.Major,
	    sc->facts->FWVersion.Struct.Minor,
	    sc->facts->FWVersion.Struct.Unit,
	    sc->facts->FWVersion.Struct.Dev);

	snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d",
	    (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >>
	    MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT,
	    (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >>
	    MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT);

	mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
	    MPR_DRIVER_VERSION);
	mpr_dprint(sc, MPR_INFO,
	    "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
	    "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
	    "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
	    "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
	    "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");

	/*
	 * If the chip doesn't support event replay then a hard reset will be
	 * required to trigger a full discovery.  Do the reset here then
	 * retransition to Ready.  A hard reset might have already been done,
	 * but it doesn't hurt to do it again.  Only do this if attaching, not
	 * for a Diag Reset.
	 */
	if (attaching && ((sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
		mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
		mpr_diag_reset(sc, NO_SLEEP);
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
			    "transition to ready with error %d, exit\n",
			    error);
			return (error);
		}
	}

	/*
	 * Set flag if IR Firmware is loaded.  If the RAID Capability has
	 * changed from the previous IOC Facts, log a warning, but only if
	 * checking this after a Diag Reset and not during attach.
	 */
	saved_mode = sc->ir_firmware;
	if (sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
		sc->ir_firmware = 1;
	if (!attaching) {
		if (sc->ir_firmware != saved_mode) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
			    "in IOC Facts does not match previous mode\n");
		}
	}

	/* Only deallocate and reallocate if relevant IOC Facts have changed */
	reallocating = FALSE;
	sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;

	if ((!attaching) &&
	    ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
	    (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
	    (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
	    (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
	    (saved_facts.ProductID != sc->facts->ProductID) ||
	    (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
	    (saved_facts.IOCRequestFrameSize !=
	    sc->facts->IOCRequestFrameSize) ||
	    (saved_facts.IOCMaxChainSegmentSize !=
	    sc->facts->IOCMaxChainSegmentSize) ||
	    (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
	    (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
	    (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
	    (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
	    (saved_facts.MaxReplyDescriptorPostQueueDepth !=
	    sc->facts->MaxReplyDescriptorPostQueueDepth) ||
	    (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
	    (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
	    (saved_facts.MaxPersistentEntries !=
	    sc->facts->MaxPersistentEntries))) {
		reallocating = TRUE;

		/* Record that we reallocated everything */
		sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
	}

	/*
	 * Some things should be done if attaching or re-allocating after a Diag
	 * Reset, but are not needed after a Diag Reset if the FW has not
	 * changed.
	 */
	if (attaching || reallocating) {
		/*
		 * Check if controller supports FW diag buffers and set flag to
		 * enable each type.
		 */
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
			    enabled = TRUE;

		/*
		 * Set flags for some supported items.
		 */
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
			sc->eedp_enabled = TRUE;
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
			sc->control_TLR = TRUE;
		if ((sc->facts->IOCCapabilities &
		    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) &&
		    (sc->mpr_flags & MPR_FLAGS_SEA_IOC))
			sc->atomic_desc_capable = TRUE;

		mpr_resize_queues(sc);

		/*
		 * Initialize all Tail Queues
		 */
		TAILQ_INIT(&sc->req_list);
		TAILQ_INIT(&sc->high_priority_req_list);
		TAILQ_INIT(&sc->chain_list);
		TAILQ_INIT(&sc->prp_page_list);
		TAILQ_INIT(&sc->tm_list);
	}

	/*
	 * If doing a Diag Reset and the FW is significantly different
	 * (reallocating will be set above in IOC Facts comparison), then all
	 * buffers based on the IOC Facts will need to be freed before they are
	 * reallocated.
	 */
	if (reallocating) {
		mpr_iocfacts_free(sc);
		mprsas_realloc_targets(sc, saved_facts.MaxTargets +
		    saved_facts.MaxVolumes);
	}

	/*
	 * Any deallocation has been completed.  Now start reallocating
	 * if needed.  Will only need to reallocate if attaching or if the new
	 * IOC Facts are different from the previous IOC Facts after a Diag
	 * Reset.  Targets have already been allocated above if needed.
	 */
	error = 0;
	while (attaching || reallocating) {
		if ((error = mpr_alloc_hw_queues(sc)) != 0)
			break;
		if ((error = mpr_alloc_replies(sc)) != 0)
			break;
		if ((error = mpr_alloc_requests(sc)) != 0)
			break;
		if ((error = mpr_alloc_queues(sc)) != 0)
			break;
		break;
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to alloc queues with error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/* Always initialize the queues */
	bzero(sc->free_queue, sc->fqdepth * 4);
	mpr_init_queues(sc);

	/*
	 * Always get the chip out of the reset state, but only panic if not
	 * attaching.  If attaching and there is an error, that is handled by
	 * the OS.
	 */
	error = mpr_transition_operational(sc);
	if (error != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
		    "transition to operational with error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/*
	 * Finish the queue initialization.
	 * These are set here instead of in mpr_init_queues() because the
	 * IOC resets these values during the state transition in
	 * mpr_transition_operational().  The free index is set to 1
	 * because the corresponding index in the IOC is set to 0, and the
	 * IOC treats the queues as full if both are set to the same value.
	 * Hence the reason that the queue can't hold all of the possible
	 * replies.
	 */
	sc->replypostindex = 0;
	mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
	mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);

	/*
	 * Attach the subsystems so they can prepare their event masks.
	 * XXX Should be dynamic so that IM/IR and user modules can attach
	 */
	error = 0;
	while (attaching) {
		mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
		if ((error = mpr_attach_log(sc)) != 0)
			break;
		if ((error = mpr_attach_sas(sc)) != 0)
			break;
		if ((error = mpr_attach_user(sc)) != 0)
			break;
		break;
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to attach all subsystems: error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/*
	 * XXX If the number of MSI-X vectors changes during re-init, this
	 * won't see it and adjust.
	 */
	if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to setup interrupts\n");
		mpr_free(sc);
		return (error);
	}

	return (error);
}
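/*
 * A note on the two "while (...) { ... break; }" blocks above: each has an
 * unconditional break at the end, making it a one-shot loop.  It is used as
 * an error ladder so every allocation or attach step can bail out with a
 * single break instead of a chain of gotos; the loop body never runs more
 * than once.
 */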
/*
 * This is called when memory is being freed (during detach, for example) and
 * when buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	if (sc->free_busaddr != 0)
		bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
	if (sc->free_queue != NULL)
		bus_dmamem_free(sc->queues_dmat, sc->free_queue,
		    sc->queues_map);
	if (sc->queues_dmat != NULL)
		bus_dma_tag_destroy(sc->queues_dmat);

	if (sc->chain_frames != NULL) {
		bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
	}
	if (sc->chain_dmat != NULL)
		bus_dma_tag_destroy(sc->chain_dmat);

	if (sc->sense_busaddr != 0)
		bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
	if (sc->sense_frames != NULL)
		bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
		    sc->sense_map);
	if (sc->sense_dmat != NULL)
		bus_dma_tag_destroy(sc->sense_dmat);

	if (sc->prp_page_busaddr != 0)
		bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
	if (sc->prp_pages != NULL)
		bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
		    sc->prp_page_map);
	if (sc->prp_page_dmat != NULL)
		bus_dma_tag_destroy(sc->prp_page_dmat);

	if (sc->reply_busaddr != 0)
		bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
	if (sc->reply_frames != NULL)
		bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
		    sc->reply_map);
	if (sc->reply_dmat != NULL)
		bus_dma_tag_destroy(sc->reply_dmat);

	if (sc->req_busaddr != 0)
		bus_dmamap_unload(sc->req_dmat, sc->req_map);
	if (sc->req_frames != NULL)
		bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
	if (sc->req_dmat != NULL)
		bus_dma_tag_destroy(sc->req_dmat);

	if (sc->chains != NULL)
		free(sc->chains, M_MPR);
	if (sc->prps != NULL)
		free(sc->prps, M_MPR);
	if (sc->commands != NULL) {
		for (i = 1; i < sc->num_reqs; i++) {
			cm = &sc->commands[i];
			bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
		}
		free(sc->commands, M_MPR);
	}
	if (sc->buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->buffer_dmat);

	mpr_pci_free_interrupts(sc);
	free(sc->queues, M_MPR);
	sc->queues = NULL;
}
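/*
 * Note: the command loop above starts at index 1 because SMID 0 is reserved
 * by the firmware spec and commands[0] is never given a DMA map in
 * mpr_alloc_requests(), so there is nothing to destroy for it.
 */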
/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip.  In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip.  This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
	int error;
	struct mprsas_softc *sassc;

	sassc = sc->sassc;

	MPR_FUNCTRACE(sc);

	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
		mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
		return 0;
	}

	/*
	 * Make sure the completion callbacks can recognize they're getting
	 * a NULL cm_reply due to a reset.
	 */
	sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

	/*
	 * Mask interrupts here.
	 */
	mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
	mpr_mask_intr(sc);

	error = mpr_diag_reset(sc, CAN_SLEEP);
	if (error != 0) {
		panic("%s hard reset failed with error %d\n", __func__, error);
	}

	/* Restore the PCI state, including the MSI-X registers */
	mpr_pci_restore(sc);

	/* Give the I/O subsystem special priority to get itself prepared */
	mprsas_handle_reinit(sc);

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * The attach function will also call mpr_iocfacts_allocate at startup.
	 * If relevant values have changed in IOC Facts, this function will free
	 * all of the memory based on IOC Facts and reallocate that memory.
	 */
	if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
		panic("%s IOC Facts based allocation failed with error %d\n",
		    __func__, error);
	}

	/*
	 * Mapping structures will be re-allocated after getting IOC Page8, so
	 * free these structures here.
	 */
	mpr_mapping_exit(sc);

	/*
	 * The static page function currently read is IOC Page8.  Others can be
	 * added in future.  It's possible that the values in IOC Page8 have
	 * changed after a Diag Reset due to user modification, so always read
	 * these.  Interrupts are masked, so unmask them before getting config
	 * pages.
	 */
	mpr_unmask_intr(sc);
	sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
	mpr_base_static_config_pages(sc);

	/*
	 * Some mapping info is based in IOC Page8 data, so re-initialize the
	 * mapping tables.
	 */
	mpr_mapping_initialize(sc);

	/*
	 * Restart will reload the event masks clobbered by the reset, and
	 * then enable the port.
	 */
	mpr_reregister_events(sc);

	/* the end of discovery will release the simq, so we're done. */
	mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
	    sc, sc->replypostindex, sc->replyfreeindex);
	mprsas_release_simq_reinit(sassc);
	mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);

	return 0;
}
/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait for <timeout> seconds.  Each iteration of the loop either sleeps
 * for 1 millisecond (CAN_SLEEP) or busy-waits for 500 microseconds
 * (NO_SLEEP), so the total wait is <timeout> seconds either way.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
			    "timeout(%d)\n", __func__, count, timeout);
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpr_dprint(sc, MPR_FAULT,
				    "fault_state(0x%04x)!\n", doorbell);
				return (EFAULT);
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		/*
		 * If it can sleep, sleep for 1 millisecond, else busy loop for
		 * 0.5 millisecond
		 */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
			    hz/1000);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdba", hz/1000);
		else
			DELAY(500);
		count++;
	} while (--cntdn);

out:
	mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
	    "int_status(%x)!\n", __func__, count, int_status);
	return (ETIMEDOUT);
}

/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
	int retry;

	for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
		if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
		    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
			return (0);
		DELAY(2000);
	}
	return (ETIMEDOUT);
}
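/*
 * A quick sketch of the handshake implemented below, as described by the
 * step comments: the host announces the request length through the
 * doorbell, clocks the request out 32 bits at a time (each write ACKed via
 * mpr_wait_db_ack), then clocks the reply back in 16 bits at a time (each
 * read signaled via mpr_wait_db_int).  The reply's total length in 32-bit
 * words arrives in the fourth byte of the reply header (MsgLength), which
 * is why the first two 16-bit words are read manually before looping over
 * the rest.
 */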
/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
	uint32_t *data32;
	uint16_t *data16;
	int i, count, ioc_sz, residual;
	int sleep_flags = CAN_SLEEP;

#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flags = NO_SLEEP;

	/* Step 1 */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Step 2 */
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		return (EBUSY);

	/* Step 3
	 * Announce that a message is coming through the doorbell.  Messages
	 * are pushed at 32bit words, so round up if needed.
	 */
	count = (req_sz + 3) / 4;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
	    (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));

	/* Step 4 */
	if (mpr_wait_db_int(sc) ||
	    (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
		return (ENXIO);
	}
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
		return (ENXIO);
	}

	/* Step 5 */
	/* Clock out the message data synchronously in 32-bit dwords */
	data32 = (uint32_t *)req;
	for (i = 0; i < count; i++) {
		mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
		if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout while writing doorbell\n");
			return (ENXIO);
		}
	}

	/* Step 6 */
	/* Clock in the reply in 16-bit words.  The total length of the
	 * message is always in the 4th byte, so clock in the first 2 words
	 * manually, then loop the rest.
	 */
	data16 = (uint16_t *)reply;
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
		return (ENXIO);
	}
	data16[0] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
		return (ENXIO);
	}
	data16[1] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Number of 32bit words in the message */
	ioc_sz = reply->MsgLength;

	/*
	 * Figure out how many 16bit words to clock in without overrunning.
	 * The precision loss with dividing reply_sz can safely be
	 * ignored because the messages can only be multiples of 32bits.
	 */
	residual = 0;
	count = MIN((reply_sz / 4), ioc_sz) * 2;
	if (count < ioc_sz * 2) {
		residual = ioc_sz * 2 - count;
		mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
		    "residual message words\n", residual);
	}

	for (i = 2; i < count; i++) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout reading doorbell %d\n", i);
			return (ENXIO);
		}
		data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
		    MPI2_DOORBELL_DATA_MASK;
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/*
	 * Pull out residual words that won't fit into the provided buffer.
	 * This keeps the chip from hanging due to a driver programming
	 * error.
	 */
	while (residual--) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
			return (ENXIO);
		}
		(void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/* Step 7 */
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
		return (ENXIO);
	}
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	return (0);
}

static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
	request_descriptor rd;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
	    cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

	if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
	    MPR_FLAGS_SHUTDOWN))
		mtx_assert(&sc->mpr_mtx, MA_OWNED);

	if (++sc->io_cmds_active > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, ("command not busy\n"));
	cm->cm_state = MPR_CM_STATE_INQUEUE;

	if (sc->atomic_desc_capable) {
		rd.u.low = cm->cm_desc.Words.Low;
		mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
		    rd.u.low);
	} else {
		rd.u.low = cm->cm_desc.Words.Low;
		rd.u.high = cm->cm_desc.Words.High;
		rd.word = htole64(rd.word);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
		    rd.u.low);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
		    rd.u.high);
	}
}
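/*
 * A note on the two descriptor paths above: IOCs that advertise
 * MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (checked in mpr_iocfacts_allocate)
 * accept a single 32-bit atomic request descriptor write.  Older IOCs take
 * the full 64-bit descriptor as two 32-bit writes; per the MPI spec the low
 * dword is written first and the write of the high dword is what hands the
 * request to the firmware, which is why the ordering matters here.
 */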
/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
	MPI2_DEFAULT_REPLY *reply;
	MPI2_IOC_FACTS_REQUEST request;
	int error, req_sz, reply_sz;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
	reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
	reply = (MPI2_DEFAULT_REPLY *)facts;

	bzero(&request, req_sz);
	request.Function = MPI2_FUNCTION_IOC_FACTS;
	error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}

static int
mpr_send_iocinit(struct mpr_softc *sc)
{
	MPI2_IOC_INIT_REQUEST init;
	MPI2_DEFAULT_REPLY reply;
	int req_sz, reply_sz, error;
	struct timeval now;
	uint64_t time_in_msec;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Do a quick sanity check on proper initialization */
	if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
	    || (sc->replyframesz == 0)) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Driver not fully initialized for IOCInit\n");
		return (EINVAL);
	}

	req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
	reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
	bzero(&init, req_sz);
	bzero(&reply, reply_sz);

	/*
	 * Fill in the init block.  Note that most addresses are
	 * deliberately in the lower 32bits of memory.  This is a micro-
	 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
	 */
	init.Function = MPI2_FUNCTION_IOC_INIT;
	init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	init.MsgVersion = htole16(MPI2_VERSION);
	init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
	init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
	init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
	init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
	init.SenseBufferAddressHigh = 0;
	init.SystemReplyAddressHigh = 0;
	init.SystemRequestFrameBaseAddress.High = 0;
	init.SystemRequestFrameBaseAddress.Low =
	    htole32((uint32_t)sc->req_busaddr);
	init.ReplyDescriptorPostQueueAddress.High = 0;
	init.ReplyDescriptorPostQueueAddress.Low =
	    htole32((uint32_t)sc->post_busaddr);
	init.ReplyFreeQueueAddress.High = 0;
	init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
	getmicrotime(&now);
	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
	init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
	init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
	init.HostPageSize = HOST_PAGE_SIZE_4K;

	error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
	if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		error = ENXIO;

	mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}

void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

void
mpr_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpr_busdma_context *ctx;
	int need_unload, need_free;

	ctx = (struct mpr_busdma_context *)arg;
	need_unload = 0;
	need_free = 0;

	mpr_lock(ctx->softc);
	ctx->error = error;
	ctx->completed = 1;
	if ((error == 0) && (ctx->abandoned == 0)) {
		*ctx->addr = segs[0].ds_addr;
	} else {
		if (nsegs != 0)
			need_unload = 1;
		if (ctx->abandoned != 0)
			need_free = 1;
	}
	if (need_free == 0)
		wakeup(ctx);

	mpr_unlock(ctx->softc);

	if (need_unload != 0) {
		bus_dmamap_unload(ctx->buffer_dmat,
		    ctx->buffer_dmamap);
		*ctx->addr = 0;
	}

	if (need_free != 0)
		free(ctx, M_MPR);
}

static int
mpr_alloc_queues(struct mpr_softc *sc)
{
	struct mpr_queue *q;
	int nq, i;

	nq = sc->msi_msgs;
	mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);

	sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR,
	    M_NOWAIT|M_ZERO);
	if (sc->queues == NULL)
		return (ENOMEM);

	for (i = 0; i < nq; i++) {
		q = &sc->queues[i];
		mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
		q->sc = sc;
		q->qnum = i;
	}
	return (0);
}
static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
	bus_dma_tag_template_t t;
	bus_addr_t queues_busaddr;
	uint8_t *queues;
	int qsize, fqsize, pqsize;

	/*
	 * The reply free queue contains 4 byte entries in multiples of 16 and
	 * aligned on a 16 byte boundary.  There must always be an unused entry.
	 * This queue supplies fresh reply frames for the firmware to use.
	 *
	 * The reply descriptor post queue contains 8 byte entries in
	 * multiples of 16 and aligned on a 16 byte boundary.  This queue
	 * contains filled-in reply frames sent from the firmware to the host.
	 *
	 * These two queues are allocated together for simplicity.
	 */
	sc->fqdepth = roundup2(sc->num_replies + 1, 16);
	sc->pqdepth = roundup2(sc->num_replies + 1, 16);
	fqsize = sc->fqdepth * 4;
	pqsize = sc->pqdepth * 8;
	qsize = fqsize + pqsize;

	bus_dma_template_init(&t, sc->mpr_parent_dmat);
	t.alignment = 16;
	t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
	t.maxsize = t.maxsegsize = qsize;
	t.nsegments = 1;
	if (bus_dma_template_tag(&t, &sc->queues_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
	    &sc->queues_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
		return (ENOMEM);
	}
	bzero(queues, qsize);
	bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
	    mpr_memaddr_cb, &queues_busaddr, 0);

	sc->free_queue = (uint32_t *)queues;
	sc->free_busaddr = queues_busaddr;
	sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
	sc->post_busaddr = queues_busaddr + fqsize;
	mpr_dprint(sc, MPR_INIT, "free queue busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->free_busaddr, fqsize);
	mpr_dprint(sc, MPR_INIT, "reply queue busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->post_busaddr, pqsize);

	return (0);
}
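/*
 * Example of the rounding above (illustrative numbers): with
 * sc->num_replies = 2047, both depths become roundup2(2048, 16) = 2048,
 * giving fqsize = 2048 * 4 = 8KB and pqsize = 2048 * 8 = 16KB, so a single
 * 24KB DMA allocation covers both queues.
 */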
static int
mpr_alloc_replies(struct mpr_softc *sc)
{
	bus_dma_tag_template_t t;
	int rsize, num_replies;

	/* Store the reply frame size in bytes rather than as 32bit words */
	sc->replyframesz = sc->facts->ReplyFrameSize * 4;

	/*
	 * sc->num_replies should be one less than sc->fqdepth.  We need to
	 * allocate space for sc->fqdepth replies, but only sc->num_replies
	 * replies can be used at once.
	 */
	num_replies = max(sc->fqdepth, sc->num_replies);

	rsize = sc->replyframesz * num_replies;
	bus_dma_template_init(&t, sc->mpr_parent_dmat);
	t.alignment = 4;
	t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
	t.maxsize = t.maxsegsize = rsize;
	t.nsegments = 1;
	if (bus_dma_template_tag(&t, &sc->reply_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
	    BUS_DMA_NOWAIT, &sc->reply_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
		return (ENOMEM);
	}
	bzero(sc->reply_frames, rsize);
	bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
	    mpr_memaddr_cb, &sc->reply_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "reply frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->reply_busaddr, rsize);

	return (0);
}

static void
mpr_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpr_softc *sc = arg;
	struct mpr_chain *chain;
	bus_size_t bo;
	int i, o, s;

	if (error != 0)
		return;

	for (i = 0, o = 0, s = 0; s < nsegs; s++) {
		for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len;
		    bo += sc->chain_frame_size) {
			chain = &sc->chains[i++];
			chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + o);
			chain->chain_busaddr = segs[s].ds_addr + bo;
			o += sc->chain_frame_size;
			mpr_free_chain(sc, chain);
		}
		if (bo != segs[s].ds_len)
			o += segs[s].ds_len - bo;
	}
	sc->chain_free_lowwater = i;
}
static int
mpr_alloc_requests(struct mpr_softc *sc)
{
	bus_dma_tag_template_t t;
	struct mpr_command *cm;
	int i, rsize, nsegs;

	rsize = sc->reqframesz * sc->num_reqs;
	bus_dma_template_init(&t, sc->mpr_parent_dmat);
	t.alignment = 16;
	t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
	t.maxsize = t.maxsegsize = rsize;
	t.nsegments = 1;
	if (bus_dma_template_tag(&t, &sc->req_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
	    BUS_DMA_NOWAIT, &sc->req_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
		return (ENOMEM);
	}
	bzero(sc->req_frames, rsize);
	bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
	    mpr_memaddr_cb, &sc->req_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "request frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->req_busaddr, rsize);

	sc->chains = malloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR,
	    M_NOWAIT | M_ZERO);
	if (!sc->chains) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
		return (ENOMEM);
	}
	rsize = sc->chain_frame_size * sc->num_chains;
	bus_dma_template_init(&t, sc->mpr_parent_dmat);
	t.alignment = 16;
	t.maxsize = t.maxsegsize = rsize;
	t.nsegments = howmany(rsize, PAGE_SIZE);
	if (bus_dma_template_tag(&t, &sc->chain_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
	    rsize, mpr_load_chains_cb, sc, BUS_DMA_NOWAIT)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot load chain memory\n");
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
		return (ENOMEM);
	}

	rsize = MPR_SENSE_LEN * sc->num_reqs;
	bus_dma_template_clone(&t, sc->req_dmat);
	t.maxsize = t.maxsegsize = rsize;
	if (bus_dma_template_tag(&t, &sc->sense_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
	    BUS_DMA_NOWAIT, &sc->sense_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bzero(sc->sense_frames, rsize);
	bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
	    mpr_memaddr_cb, &sc->sense_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "sense frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->sense_busaddr, rsize);

	/*
	 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
	 * these devices.
	 */
	if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
	    (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
		if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
			return (ENOMEM);
	}

	nsegs = (sc->maxio / PAGE_SIZE) + 1;
	bus_dma_template_init(&t, sc->mpr_parent_dmat);
	t.nsegments = nsegs;
	t.flags = BUS_DMA_ALLOCNOW;
	t.lockfunc = busdma_lock_mutex;
	t.lockfuncarg = &sc->mpr_mtx;
	if (bus_dma_template_tag(&t, &sc->buffer_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * SMID 0 cannot be used as a free command per the firmware spec.
	 * Just drop that command instead of risking accounting bugs.
	 */
	sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
	    M_MPR, M_WAITOK | M_ZERO);
	if (!sc->commands) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n");
		return (ENOMEM);
	}
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_req = sc->req_frames + i * sc->reqframesz;
		cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
		cm->cm_sense = &sc->sense_frames[i];
		cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
		cm->cm_desc.Default.SMID = i;
		cm->cm_sc = sc;
		cm->cm_state = MPR_CM_STATE_BUSY;
		TAILQ_INIT(&cm->cm_chain_list);
		TAILQ_INIT(&cm->cm_prp_page_list);
		callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);

		/* XXX Is a failure here a critical problem? */
		if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
		    == 0) {
			if (i <= sc->num_prireqs)
				mpr_free_high_priority_command(sc, cm);
			else
				mpr_free_command(sc, cm);
		} else {
			panic("failed to allocate command %d\n", i);
			sc->num_reqs = i;
			break;
		}
	}

	return (0);
}
/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
	bus_dma_tag_template_t t;
	struct mpr_prp_page *prp_page;
	int PRPs_per_page, PRPs_required, pages_required;
	int rsize, i;

	/*
	 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
	 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
	 * MAX_IO_SIZE / PAGE_SIZE = 256
	 *
	 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
	 * required for the remainder of the 1MB I/O.  512 PRPs can fit into one
	 * page (4096 / 8 = 512), so only one page is required for each I/O.
	 *
	 * Each of these buffers will need to be contiguous.  For simplicity,
	 * only one buffer is allocated here, which has all of the space
	 * required for the NVMe Queue Depth.  If there are problems allocating
	 * this one buffer, this function will need to change to allocate
	 * individual, contiguous NVME_QDEPTH buffers.
	 *
	 * The real calculation will use the real max io size.  Above is just an
	 * example.
	 */
	PRPs_required = sc->maxio / PAGE_SIZE;
	PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
	pages_required = (PRPs_required / PRPs_per_page) + 1;

	sc->prp_buffer_size = PAGE_SIZE * pages_required;
	rsize = sc->prp_buffer_size * NVME_QDEPTH;
	bus_dma_template_init(&t, sc->mpr_parent_dmat);
	t.alignment = 4;
	t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
	t.maxsize = t.maxsegsize = rsize;
	t.nsegments = 1;
	if (bus_dma_template_tag(&t, &sc->prp_page_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
		    "tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
	    BUS_DMA_NOWAIT, &sc->prp_page_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
		return (ENOMEM);
	}
	bzero(sc->prp_pages, rsize);
	bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
	    rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

	sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < NVME_QDEPTH; i++) {
		prp_page = &sc->prps[i];
		prp_page->prp_page = (uint64_t *)(sc->prp_pages +
		    i * sc->prp_buffer_size);
		prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
		    i * sc->prp_buffer_size);
		mpr_free_prp_page(sc, prp_page);
		sc->prp_pages_free_lowwater++;
	}

	return (0);
}

static int
mpr_init_queues(struct mpr_softc *sc)
{
	int i;

	memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);

	/*
	 * According to the spec, we need to use one less reply than we
	 * have space for on the queue.  So sc->num_replies (the number we
	 * use) should be less than sc->fqdepth (allocated size).
	 */
	if (sc->num_replies >= sc->fqdepth)
		return (EINVAL);

	/*
	 * Initialize all of the free queue entries.
	 */
	for (i = 0; i < sc->fqdepth; i++) {
		sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
	}
	sc->replyfreeindex = sc->num_replies;

	return (0);
}
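/*
 * Example loader.conf(5) settings for the tunables fetched below
 * (illustrative values only):
 *
 *	hw.mpr.debug_level="info,fault"
 *	hw.mpr.max_chains="4096"
 *	dev.mpr.0.disable_msix="1"
 */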
/* Get the driver parameter tunables.  Lowest priority are the driver defaults.
 * Next are the global settings, if they exist.  Highest are the per-unit
 * settings, if they exist.
 */
void
mpr_get_tunables(struct mpr_softc *sc)
{
	char tmpstr[80], mpr_debug[80];

	/* XXX default to some debugging for now */
	sc->mpr_debug = MPR_INFO | MPR_FAULT;
	sc->disable_msix = 0;
	sc->disable_msi = 0;
	sc->max_msix = MPR_MSIX_MAX;
	sc->max_chains = MPR_CHAIN_FRAMES;
	sc->max_io_pages = MPR_MAXIO_PAGES;
	sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
	sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
	sc->use_phynum = 1;
	sc->max_reqframes = MPR_REQ_FRAMES;
	sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
	sc->max_replyframes = MPR_REPLY_FRAMES;
	sc->max_evtframes = MPR_EVT_REPLY_FRAMES;

	/*
	 * Grab the global variables.
	 */
	bzero(mpr_debug, 80);
	if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0)
		mpr_parse_debug(sc, mpr_debug);
	TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
	TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
	TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix);
	TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
	TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
	TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
	TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
	TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
	TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
	TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
	TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
	TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
	    device_get_unit(sc->mpr_dev));
	bzero(mpr_debug, 80);
	if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0)
		mpr_parse_debug(sc, mpr_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);

	bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
&sc->max_reqframes); 1766 1767 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes", 1768 device_get_unit(sc->mpr_dev)); 1769 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); 1770 1771 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes", 1772 device_get_unit(sc->mpr_dev)); 1773 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); 1774 1775 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes", 1776 device_get_unit(sc->mpr_dev)); 1777 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); 1778 } 1779 1780 static void 1781 mpr_setup_sysctl(struct mpr_softc *sc) 1782 { 1783 struct sysctl_ctx_list *sysctl_ctx = NULL; 1784 struct sysctl_oid *sysctl_tree = NULL; 1785 char tmpstr[80], tmpstr2[80]; 1786 1787 /* 1788 * Setup the sysctl variable so the user can change the debug level 1789 * on the fly. 1790 */ 1791 snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d", 1792 device_get_unit(sc->mpr_dev)); 1793 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); 1794 1795 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); 1796 if (sysctl_ctx != NULL) 1797 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); 1798 1799 if (sysctl_tree == NULL) { 1800 sysctl_ctx_init(&sc->sysctl_ctx); 1801 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 1802 SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2, 1803 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr); 1804 if (sc->sysctl_tree == NULL) 1805 return; 1806 sysctl_ctx = &sc->sysctl_ctx; 1807 sysctl_tree = sc->sysctl_tree; 1808 } 1809 1810 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1811 OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1812 sc, 0, mpr_debug_sysctl, "A", "mpr debug level"); 1813 1814 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1815 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, 1816 "Disable the use of MSI-X interrupts"); 1817 1818 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1819 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, 1820 "User-defined maximum number of MSIX queues"); 1821 1822 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1823 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, 1824 "Negotiated number of MSIX queues"); 1825 1826 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1827 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, 1828 "Total number of allocated request frames"); 1829 1830 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1831 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, 1832 "Total number of allocated high priority request frames"); 1833 1834 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1835 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, 1836 "Total number of allocated reply frames"); 1837 1838 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1839 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, 1840 "Total number of event frames allocated"); 1841 1842 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1843 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 1844 strlen(sc->fw_version), "firmware version"); 1845 1846 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1847 OID_AUTO, "driver_version", CTLFLAG_RD, MPR_DRIVER_VERSION, 1848 strlen(MPR_DRIVER_VERSION), "driver version"); 1849 1850 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1851 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version, 1852 strlen(sc->msg_version), "message interface version"); 1853 1854 
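/* The sysctls that follow are mostly runtime state and statistics. For example, on a hypothetical unit 0, "sysctl dev.mpr.0.io_cmds_highwater" reports the peak number of simultaneously active commands seen since attach. */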
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1855 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 1856 &sc->io_cmds_active, 0, "number of currently active commands"); 1857 1858 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1859 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1860 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1861 1862 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1863 OID_AUTO, "chain_free", CTLFLAG_RD, 1864 &sc->chain_free, 0, "number of free chain elements"); 1865 1866 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1867 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1868 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1869 1870 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1871 OID_AUTO, "max_chains", CTLFLAG_RD, 1872 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1873 1874 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1875 OID_AUTO, "max_io_pages", CTLFLAG_RD, 1876 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use " 1877 "IOCFacts)"); 1878 1879 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1880 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, 1881 "enable SSU to SATA SSD/HDD at shutdown"); 1882 1883 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1884 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1885 &sc->chain_alloc_fail, "chain allocation failures"); 1886 1887 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1888 OID_AUTO, "spinup_wait_time", CTLFLAG_RD, 1889 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " 1890 "spinup after SATA ID error"); 1891 1892 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1893 OID_AUTO, "dump_reqs", 1894 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_NEEDGIANT, 1895 sc, 0, mpr_dump_reqs, "I", "Dump Active Requests"); 1896 1897 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1898 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, 1899 "Use the phy number for enumeration"); 1900 1901 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1902 OID_AUTO, "prp_pages_free", CTLFLAG_RD, 1903 &sc->prp_pages_free, 0, "number of free PRP pages"); 1904 1905 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1906 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD, 1907 &sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages"); 1908 1909 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1910 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD, 1911 &sc->prp_page_alloc_fail, "PRP page allocation failures"); 1912 } 1913 1914 static struct mpr_debug_string { 1915 char *name; 1916 int flag; 1917 } mpr_debug_strings[] = { 1918 {"info", MPR_INFO}, 1919 {"fault", MPR_FAULT}, 1920 {"event", MPR_EVENT}, 1921 {"log", MPR_LOG}, 1922 {"recovery", MPR_RECOVERY}, 1923 {"error", MPR_ERROR}, 1924 {"init", MPR_INIT}, 1925 {"xinfo", MPR_XINFO}, 1926 {"user", MPR_USER}, 1927 {"mapping", MPR_MAPPING}, 1928 {"trace", MPR_TRACE} 1929 }; 1930 1931 enum mpr_debug_level_combiner { 1932 COMB_NONE, 1933 COMB_ADD, 1934 COMB_SUB 1935 }; 1936 1937 static int 1938 mpr_debug_sysctl(SYSCTL_HANDLER_ARGS) 1939 { 1940 struct mpr_softc *sc; 1941 struct mpr_debug_string *string; 1942 struct sbuf *sbuf; 1943 char *buffer; 1944 size_t sz; 1945 int i, len, debug, error; 1946 1947 sc = (struct mpr_softc *)arg1; 1948 1949 error = sysctl_wire_old_buffer(req, 0); 1950 if (error != 0) 1951 return (error); 1952 1953 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 1954 debug = 
sc->mpr_debug; 1955 1956 sbuf_printf(sbuf, "%#x", debug); 1957 1958 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); 1959 for (i = 0; i < sz; i++) { 1960 string = &mpr_debug_strings[i]; 1961 if (debug & string->flag) 1962 sbuf_printf(sbuf, ",%s", string->name); 1963 } 1964 1965 error = sbuf_finish(sbuf); 1966 sbuf_delete(sbuf); 1967 1968 if (error || req->newptr == NULL) 1969 return (error); 1970 1971 len = req->newlen - req->newidx; 1972 if (len == 0) 1973 return (0); 1974 1975 buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK); 1976 error = SYSCTL_IN(req, buffer, len); 1977 1978 mpr_parse_debug(sc, buffer); 1979 1980 free(buffer, M_MPR); 1981 return (error); 1982 } 1983 1984 static void 1985 mpr_parse_debug(struct mpr_softc *sc, char *list) 1986 { 1987 struct mpr_debug_string *string; 1988 enum mpr_debug_level_combiner op; 1989 char *token, *endtoken; 1990 size_t sz; 1991 int flags, i; 1992 1993 if (list == NULL || *list == '\0') 1994 return; 1995 1996 if (*list == '+') { 1997 op = COMB_ADD; 1998 list++; 1999 } else if (*list == '-') { 2000 op = COMB_SUB; 2001 list++; 2002 } else 2003 op = COMB_NONE; 2004 if (*list == '\0') 2005 return; 2006 2007 flags = 0; 2008 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); 2009 while ((token = strsep(&list, ":,")) != NULL) { 2010 2011 /* Handle integer flags */ 2012 flags |= strtol(token, &endtoken, 0); 2013 if (token != endtoken) 2014 continue; 2015 2016 /* Handle text flags */ 2017 for (i = 0; i < sz; i++) { 2018 string = &mpr_debug_strings[i]; 2019 if (strcasecmp(token, string->name) == 0) { 2020 flags |= string->flag; 2021 break; 2022 } 2023 } 2024 } 2025 2026 switch (op) { 2027 case COMB_NONE: 2028 sc->mpr_debug = flags; 2029 break; 2030 case COMB_ADD: 2031 sc->mpr_debug |= flags; 2032 break; 2033 case COMB_SUB: 2034 sc->mpr_debug &= (~flags); 2035 break; 2036 } 2037 return; 2038 } 2039 2040 struct mpr_dumpreq_hdr { 2041 uint32_t smid; 2042 uint32_t state; 2043 uint32_t numframes; 2044 uint32_t deschi; 2045 uint32_t desclo; 2046 }; 2047 2048 static int 2049 mpr_dump_reqs(SYSCTL_HANDLER_ARGS) 2050 { 2051 struct mpr_softc *sc; 2052 struct mpr_chain *chain, *chain1; 2053 struct mpr_command *cm; 2054 struct mpr_dumpreq_hdr hdr; 2055 struct sbuf *sb; 2056 uint32_t smid, state; 2057 int i, numreqs, error = 0; 2058 2059 sc = (struct mpr_softc *)arg1; 2060 2061 if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) { 2062 printf("priv check error %d\n", error); 2063 return (error); 2064 } 2065 2066 state = MPR_CM_STATE_INQUEUE; 2067 smid = 1; 2068 numreqs = sc->num_reqs; 2069 2070 if (req->newptr != NULL) 2071 return (EINVAL); 2072 2073 if (smid == 0 || smid > sc->num_reqs) 2074 return (EINVAL); 2075 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs)) 2076 numreqs = sc->num_reqs; 2077 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 2078 2079 /* Best effort, no locking */ 2080 for (i = smid; i < numreqs; i++) { 2081 cm = &sc->commands[i]; 2082 if (cm->cm_state != state) 2083 continue; 2084 hdr.smid = i; 2085 hdr.state = cm->cm_state; 2086 hdr.numframes = 1; 2087 hdr.deschi = cm->cm_desc.Words.High; 2088 hdr.desclo = cm->cm_desc.Words.Low; 2089 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, 2090 chain1) 2091 hdr.numframes++; 2092 sbuf_bcat(sb, &hdr, sizeof(hdr)); 2093 sbuf_bcat(sb, cm->cm_req, 128); 2094 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, 2095 chain1) 2096 sbuf_bcat(sb, chain->chain, 128); 2097 } 2098 2099 error = sbuf_finish(sb); 2100 sbuf_delete(sb); 2101 return (error); 2102 } 2103 2104 int 2105 
mpr_attach(struct mpr_softc *sc) 2106 { 2107 int error; 2108 2109 MPR_FUNCTRACE(sc); 2110 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2111 2112 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); 2113 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); 2114 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0); 2115 TAILQ_INIT(&sc->event_list); 2116 timevalclear(&sc->lastfail); 2117 2118 if ((error = mpr_transition_ready(sc)) != 0) { 2119 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 2120 "Failed to transition ready\n"); 2121 return (error); 2122 } 2123 2124 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, 2125 M_ZERO|M_NOWAIT); 2126 if (!sc->facts) { 2127 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 2128 "Cannot allocate memory, exit\n"); 2129 return (ENOMEM); 2130 } 2131 2132 /* 2133 * Get IOC Facts and allocate all structures based on this information. 2134 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC 2135 * Facts. If relevant values have changed in IOC Facts, this function 2136 * will free all of the memory based on IOC Facts and reallocate that 2137 * memory. If this fails, any allocated memory should already be freed. 2138 */ 2139 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) { 2140 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation " 2141 "failed with error %d\n", error); 2142 return (error); 2143 } 2144 2145 /* Start the periodic watchdog check on the IOC Doorbell */ 2146 mpr_periodic(sc); 2147 2148 /* 2149 * The portenable will kick off discovery events that will drive the 2150 * rest of the initialization process. The CAM/SAS module will 2151 * hold up the boot sequence until discovery is complete. 2152 */ 2153 sc->mpr_ich.ich_func = mpr_startup; 2154 sc->mpr_ich.ich_arg = sc; 2155 if (config_intrhook_establish(&sc->mpr_ich) != 0) { 2156 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2157 "Cannot establish MPR config hook\n"); 2158 error = EINVAL; 2159 } 2160 2161 /* 2162 * Allow IR to shutdown gracefully when shutdown occurs. 2163 */ 2164 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 2165 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 2166 2167 if (sc->shutdown_eh == NULL) 2168 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2169 "shutdown event registration failed\n"); 2170 2171 mpr_setup_sysctl(sc); 2172 2173 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; 2174 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error); 2175 2176 return (error); 2177 } 2178 2179 /* Run through any late-start handlers. */ 2180 static void 2181 mpr_startup(void *arg) 2182 { 2183 struct mpr_softc *sc; 2184 2185 sc = (struct mpr_softc *)arg; 2186 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2187 2188 mpr_lock(sc); 2189 mpr_unmask_intr(sc); 2190 2191 /* initialize device mapping tables */ 2192 mpr_base_static_config_pages(sc); 2193 mpr_mapping_initialize(sc); 2194 mprsas_startup(sc); 2195 mpr_unlock(sc); 2196 2197 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n"); 2198 config_intrhook_disestablish(&sc->mpr_ich); 2199 sc->mpr_ich.ich_arg = NULL; 2200 2201 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2202 } 2203 2204 /* Periodic watchdog. Is called with the driver lock already held. 
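 * Reads the IOC doorbell and, if the IOC is in the fault state, either panics (on an over-temperature fault) or forces a reinit; the check is then rescheduled unless the driver is shutting down.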
*/ 2205 static void 2206 mpr_periodic(void *arg) 2207 { 2208 struct mpr_softc *sc; 2209 uint32_t db; 2210 2211 sc = (struct mpr_softc *)arg; 2212 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) 2213 return; 2214 2215 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 2216 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 2217 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) == 2218 IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) { 2219 panic("TEMPERATURE FAULT: STOPPING."); 2220 } 2221 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db); 2222 mpr_reinit(sc); 2223 } 2224 2225 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc); 2226 } 2227 2228 static void 2229 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data, 2230 MPI2_EVENT_NOTIFICATION_REPLY *event) 2231 { 2232 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 2233 2234 MPR_DPRINT_EVENT(sc, generic, event); 2235 2236 switch (event->Event) { 2237 case MPI2_EVENT_LOG_DATA: 2238 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n"); 2239 if (sc->mpr_debug & MPR_EVENT) 2240 hexdump(event->EventData, event->EventDataLength, NULL, 2241 0); 2242 break; 2243 case MPI2_EVENT_LOG_ENTRY_ADDED: 2244 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 2245 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " 2246 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 2247 entry->LogSequence); 2248 break; 2249 default: 2250 break; 2251 } 2252 return; 2253 } 2254 2255 static int 2256 mpr_attach_log(struct mpr_softc *sc) 2257 { 2258 uint8_t events[16]; 2259 2260 bzero(events, 16); 2261 setbit(events, MPI2_EVENT_LOG_DATA); 2262 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 2263 2264 mpr_register_events(sc, events, mpr_log_evt_handler, NULL, 2265 &sc->mpr_log_eh); 2266 2267 return (0); 2268 } 2269 2270 static int 2271 mpr_detach_log(struct mpr_softc *sc) 2272 { 2273 2274 if (sc->mpr_log_eh != NULL) 2275 mpr_deregister_events(sc, sc->mpr_log_eh); 2276 return (0); 2277 } 2278 2279 /* 2280 * Free all of the driver resources and detach submodules. Should be called 2281 * without the lock held. 2282 */ 2283 int 2284 mpr_free(struct mpr_softc *sc) 2285 { 2286 int error; 2287 2288 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2289 /* Turn off the watchdog */ 2290 mpr_lock(sc); 2291 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; 2292 mpr_unlock(sc); 2293 /* Lock must not be held for this */ 2294 callout_drain(&sc->periodic); 2295 callout_drain(&sc->device_check_callout); 2296 2297 if (((error = mpr_detach_log(sc)) != 0) || 2298 ((error = mpr_detach_sas(sc)) != 0)) { 2299 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach " 2300 "subsystems, error= %d, exit\n", error); 2301 return (error); 2302 } 2303 2304 mpr_detach_user(sc); 2305 2306 /* Put the IOC back in the READY state. */ 2307 mpr_lock(sc); 2308 if ((error = mpr_transition_ready(sc)) != 0) { 2309 mpr_unlock(sc); 2310 return (error); 2311 } 2312 mpr_unlock(sc); 2313 2314 if (sc->facts != NULL) 2315 free(sc->facts, M_MPR); 2316 2317 /* 2318 * Free all buffers that are based on IOC Facts. A Diag Reset may need 2319 * to free these buffers too. 
2320 */ 2321 mpr_iocfacts_free(sc); 2322 2323 if (sc->sysctl_tree != NULL) 2324 sysctl_ctx_free(&sc->sysctl_ctx); 2325 2326 /* Deregister the shutdown function */ 2327 if (sc->shutdown_eh != NULL) 2328 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 2329 2330 mtx_destroy(&sc->mpr_mtx); 2331 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2332 2333 return (0); 2334 } 2335 2336 static __inline void 2337 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) 2338 { 2339 MPR_FUNCTRACE(sc); 2340 2341 if (cm == NULL) { 2342 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); 2343 return; 2344 } 2345 2346 cm->cm_state = MPR_CM_STATE_BUSY; 2347 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 2348 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 2349 2350 if (cm->cm_complete != NULL) { 2351 mpr_dprint(sc, MPR_TRACE, 2352 "%s cm %p calling cm_complete %p data %p reply %p\n", 2353 __func__, cm, cm->cm_complete, cm->cm_complete_data, 2354 cm->cm_reply); 2355 cm->cm_complete(sc, cm); 2356 } 2357 2358 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 2359 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); 2360 wakeup(cm); 2361 } 2362 2363 if (sc->io_cmds_active != 0) { 2364 sc->io_cmds_active--; 2365 } else { 2366 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " 2367 "out of sync - resynching to 0\n"); 2368 } 2369 } 2370 2371 static void 2372 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info) 2373 { 2374 union loginfo_type { 2375 u32 loginfo; 2376 struct { 2377 u32 subcode:16; 2378 u32 code:8; 2379 u32 originator:4; 2380 u32 bus_type:4; 2381 } dw; 2382 }; 2383 union loginfo_type sas_loginfo; 2384 char *originator_str = NULL; 2385 2386 sas_loginfo.loginfo = log_info; 2387 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 2388 return; 2389 2390 /* each nexus loss loginfo */ 2391 if (log_info == 0x31170000) 2392 return; 2393 2394 /* eat the loginfos associated with task aborts */ 2395 if ((log_info == 0x30050000) || (log_info == 0x31140000) || 2396 (log_info == 0x31130000)) 2397 return; 2398 2399 switch (sas_loginfo.dw.originator) { 2400 case 0: 2401 originator_str = "IOP"; 2402 break; 2403 case 1: 2404 originator_str = "PL"; 2405 break; 2406 case 2: 2407 originator_str = "IR"; 2408 break; 2409 } 2410 2411 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), " 2412 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, 2413 sas_loginfo.dw.code, sas_loginfo.dw.subcode); 2414 } 2415 2416 static void 2417 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) 2418 { 2419 MPI2DefaultReply_t *mpi_reply; 2420 u16 sc_status; 2421 2422 mpi_reply = (MPI2DefaultReply_t*)reply; 2423 sc_status = le16toh(mpi_reply->IOCStatus); 2424 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 2425 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); 2426 } 2427 2428 void 2429 mpr_intr(void *data) 2430 { 2431 struct mpr_softc *sc; 2432 uint32_t status; 2433 2434 sc = (struct mpr_softc *)data; 2435 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2436 2437 /* 2438 * Check interrupt status register to flush the bus. This is 2439 * needed for both INTx interrupts and driver-driven polling. 2440 */ 2441 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 2442 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 2443 return; 2444 2445 mpr_lock(sc); 2446 mpr_intr_locked(data); 2447 mpr_unlock(sc); 2448 return; 2449 } 2450 2451 /* 2452 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the 2453 * chip. Hopefully this theory is correct.
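 * (An MSI/MSI-X message is itself a posted memory write that arrives behind the DMA writes of the reply descriptors it signals, so no flushing register read should be necessary.)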
2454 */ 2455 void 2456 mpr_intr_msi(void *data) 2457 { 2458 struct mpr_softc *sc; 2459 2460 sc = (struct mpr_softc *)data; 2461 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2462 mpr_lock(sc); 2463 mpr_intr_locked(data); 2464 mpr_unlock(sc); 2465 return; 2466 } 2467 2468 /* 2469 * The locking is overly broad and simplistic, but easy to deal with for now. 2470 */ 2471 void 2472 mpr_intr_locked(void *data) 2473 { 2474 MPI2_REPLY_DESCRIPTORS_UNION *desc; 2475 MPI2_DIAG_RELEASE_REPLY *rel_rep; 2476 mpr_fw_diagnostic_buffer_t *pBuffer; 2477 struct mpr_softc *sc; 2478 uint64_t tdesc; 2479 struct mpr_command *cm = NULL; 2480 uint8_t flags; 2481 u_int pq; 2482 2483 sc = (struct mpr_softc *)data; 2484 2485 pq = sc->replypostindex; 2486 mpr_dprint(sc, MPR_TRACE, 2487 "%s sc %p starting with replypostindex %u\n", 2488 __func__, sc, sc->replypostindex); 2489 2490 for ( ;; ) { 2491 cm = NULL; 2492 desc = &sc->post_queue[sc->replypostindex]; 2493 2494 /* 2495 * Copy and clear out the descriptor so that any reentry will 2496 * immediately know that this descriptor has already been 2497 * looked at. There is unfortunate casting magic because the 2498 * MPI API doesn't have a cardinal 64-bit type. 2499 */ 2500 tdesc = 0xffffffffffffffff; 2501 tdesc = atomic_swap_64((uint64_t *)desc, tdesc); 2502 desc = (MPI2_REPLY_DESCRIPTORS_UNION *)&tdesc; 2503 2504 flags = desc->Default.ReplyFlags & 2505 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2506 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || 2507 (le32toh(desc->Words.High) == 0xffffffff)) 2508 break; 2509 2510 /* increment the replypostindex now, so that event handlers 2511 * and cm completion handlers which decide to do a diag 2512 * reset can zero it without it getting incremented again 2513 * afterwards, and we break out of this loop on the next 2514 * iteration since the reply post queue has been cleared to 2515 * 0xFF and all descriptors look unused (which they are). 2516 */ 2517 if (++sc->replypostindex >= sc->pqdepth) 2518 sc->replypostindex = 0; 2519 2520 switch (flags) { 2521 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 2522 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: 2523 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS: 2524 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; 2525 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, 2526 ("command not inqueue\n")); 2527 cm->cm_state = MPR_CM_STATE_BUSY; 2528 cm->cm_reply = NULL; 2529 break; 2530 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 2531 { 2532 uint32_t baddr; 2533 uint8_t *reply; 2534 2535 /* 2536 * Re-compose the reply address from the address 2537 * sent back from the chip. The ReplyFrameAddress 2538 * is the lower 32 bits of the physical address of 2539 * a particular reply frame. Convert that address to 2540 * host format, and then use that to provide the 2541 * offset against the virtual address base 2542 * (sc->reply_frames). 2543 */ 2544 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 2545 reply = sc->reply_frames + 2546 (baddr - ((uint32_t)sc->reply_busaddr)); 2547 /* 2548 * Make sure the reply we got back is in a valid 2549 * range. If not, go ahead and panic here, since 2550 * we'll probably panic as soon as we dereference the 2551 * reply pointer anyway.
2552 */ 2553 if ((reply < sc->reply_frames) 2554 || (reply > (sc->reply_frames + 2555 (sc->fqdepth * sc->replyframesz)))) { 2556 printf("%s: WARNING: reply %p out of range!\n", 2557 __func__, reply); 2558 printf("%s: reply_frames %p, fqdepth %d, " 2559 "frame size %d\n", __func__, 2560 sc->reply_frames, sc->fqdepth, 2561 sc->replyframesz); 2562 printf("%s: baddr %#x,\n", __func__, baddr); 2563 /* LSI-TODO. See Linux Code for Graceful exit */ 2564 panic("Reply address out of range"); 2565 } 2566 if (le16toh(desc->AddressReply.SMID) == 0) { 2567 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 2568 MPI2_FUNCTION_DIAG_BUFFER_POST) { 2569 /* 2570 * If SMID is 0 for Diag Buffer Post, 2571 * this implies that the reply is due to 2572 * a release function with a status that 2573 * the buffer has been released. Set 2574 * the buffer flags accordingly. 2575 */ 2576 rel_rep = 2577 (MPI2_DIAG_RELEASE_REPLY *)reply; 2578 if ((le16toh(rel_rep->IOCStatus) & 2579 MPI2_IOCSTATUS_MASK) == 2580 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 2581 { 2582 pBuffer = 2583 &sc->fw_diag_buffer_list[ 2584 rel_rep->BufferType]; 2585 pBuffer->valid_data = TRUE; 2586 pBuffer->owned_by_firmware = 2587 FALSE; 2588 pBuffer->immediate = FALSE; 2589 } 2590 } else 2591 mpr_dispatch_event(sc, baddr, 2592 (MPI2_EVENT_NOTIFICATION_REPLY *) 2593 reply); 2594 } else { 2595 cm = &sc->commands[ 2596 le16toh(desc->AddressReply.SMID)]; 2597 if (cm->cm_state == MPR_CM_STATE_INQUEUE) { 2598 cm->cm_reply = reply; 2599 cm->cm_reply_data = 2600 le32toh(desc->AddressReply. 2601 ReplyFrameAddress); 2602 } else { 2603 mpr_dprint(sc, MPR_RECOVERY, 2604 "Bad state for ADDRESS_REPLY status," 2605 " ignoring state %d cm %p\n", 2606 cm->cm_state, cm); 2607 } 2608 } 2609 break; 2610 } 2611 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 2612 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 2613 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 2614 default: 2615 /* Unhandled */ 2616 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n", 2617 desc->Default.ReplyFlags); 2618 cm = NULL; 2619 break; 2620 } 2621 2622 if (cm != NULL) { 2623 /* Print error reply frame */ 2624 if (cm->cm_reply) 2625 mpr_display_reply_info(sc, cm->cm_reply); 2626 mpr_complete_command(sc, cm); 2627 } 2628 } 2629 2630 if (pq != sc->replypostindex) { 2631 mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n", 2632 __func__, sc, sc->replypostindex); 2633 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 2634 sc->replypostindex); 2635 } 2636 2637 return; 2638 } 2639 2640 static void 2641 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, 2642 MPI2_EVENT_NOTIFICATION_REPLY *reply) 2643 { 2644 struct mpr_event_handle *eh; 2645 int event, handled = 0; 2646 2647 event = le16toh(reply->Event); 2648 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2649 if (isset(eh->mask, event)) { 2650 eh->callback(sc, data, reply); 2651 handled++; 2652 } 2653 } 2654 2655 if (handled == 0) 2656 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", 2657 event); 2658 2659 /* 2660 * This is the only place that the event/reply should be freed. 2661 * Anything wanting to hold onto the event data should have 2662 * already copied it into their own storage.
2663 */ 2664 mpr_free_reply(sc, data); 2665 } 2666 2667 static void 2668 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) 2669 { 2670 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2671 2672 if (cm->cm_reply) 2673 MPR_DPRINT_EVENT(sc, generic, 2674 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 2675 2676 mpr_free_command(sc, cm); 2677 2678 /* next, send a port enable */ 2679 mprsas_startup(sc); 2680 } 2681 2682 /* 2683 * For both register_events and update_events, the caller supplies a bitmap 2684 * of events that it _wants_. These functions then turn that into a bitmask 2685 * suitable for the controller. 2686 */ 2687 int 2688 mpr_register_events(struct mpr_softc *sc, uint8_t *mask, 2689 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) 2690 { 2691 struct mpr_event_handle *eh; 2692 int error = 0; 2693 2694 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); 2695 if (!eh) { 2696 mpr_dprint(sc, MPR_EVENT|MPR_ERROR, 2697 "Cannot allocate event memory\n"); 2698 return (ENOMEM); 2699 } 2700 eh->callback = cb; 2701 eh->data = data; 2702 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 2703 if (mask != NULL) 2704 error = mpr_update_events(sc, eh, mask); 2705 *handle = eh; 2706 2707 return (error); 2708 } 2709 2710 int 2711 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, 2712 uint8_t *mask) 2713 { 2714 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2715 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL; 2716 struct mpr_command *cm = NULL; 2717 struct mpr_event_handle *eh; 2718 int error, i; 2719 2720 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2721 2722 if ((mask != NULL) && (handle != NULL)) 2723 bcopy(mask, &handle->mask[0], 16); 2724 memset(sc->event_mask, 0xff, 16); 2725 2726 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2727 for (i = 0; i < 16; i++) 2728 sc->event_mask[i] &= ~eh->mask[i]; 2729 } 2730 2731 if ((cm = mpr_alloc_command(sc)) == NULL) 2732 return (EBUSY); 2733 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2734 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2735 evtreq->MsgFlags = 0; 2736 evtreq->SASBroadcastPrimitiveMasks = 0; 2737 #ifdef MPR_DEBUG_ALL_EVENTS 2738 { 2739 u_char fullmask[16]; 2740 memset(fullmask, 0x00, 16); 2741 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2742 } 2743 #else 2744 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2745 #endif 2746 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2747 cm->cm_data = NULL; 2748 2749 error = mpr_request_polled(sc, &cm); 2750 if (cm != NULL) 2751 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 2752 if ((reply == NULL) || 2753 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 2754 error = ENXIO; 2755 2756 if (reply) 2757 MPR_DPRINT_EVENT(sc, generic, reply); 2758 2759 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); 2760 2761 if (cm != NULL) 2762 mpr_free_command(sc, cm); 2763 return (error); 2764 } 2765 2766 static int 2767 mpr_reregister_events(struct mpr_softc *sc) 2768 { 2769 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2770 struct mpr_command *cm; 2771 struct mpr_event_handle *eh; 2772 int error, i; 2773 2774 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2775 2776 /* first, reregister events */ 2777 2778 memset(sc->event_mask, 0xff, 16); 2779 2780 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2781 for (i = 0; i < 16; i++) 2782 sc->event_mask[i] &= ~eh->mask[i]; 2783 } 2784 2785 if ((cm = mpr_alloc_command(sc)) == NULL) 2786 return (EBUSY); 
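/* Note that a set bit in sc->event_mask masks (disables) the corresponding event: the mask starts as all ones and the loop above clears the bit for every event some registered handler wants. */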
2787 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2788 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2789 evtreq->MsgFlags = 0; 2790 evtreq->SASBroadcastPrimitiveMasks = 0; 2791 #ifdef MPR_DEBUG_ALL_EVENTS 2792 { 2793 u_char fullmask[16]; 2794 memset(fullmask, 0x00, 16); 2795 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2796 } 2797 #else 2798 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2799 #endif 2800 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2801 cm->cm_data = NULL; 2802 cm->cm_complete = mpr_reregister_events_complete; 2803 2804 error = mpr_map_command(sc, cm); 2805 2806 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__, 2807 error); 2808 return (error); 2809 } 2810 2811 int 2812 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle) 2813 { 2814 2815 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 2816 free(handle, M_MPR); 2817 return (mpr_update_events(sc, NULL, NULL)); 2818 } 2819 2820 /** 2821 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a 2822 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry 2823 * of the NVMe message (PRP1). If the data buffer is small enough to be described 2824 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to 2825 * describe a larger data buffer. If the data buffer is too large to describe 2826 * using the two PRP entries inside the NVMe message, then PRP1 describes the 2827 * first data memory segment, and PRP2 contains a pointer to a PRP list located 2828 * elsewhere in memory to describe the remaining data memory segments. The PRP 2829 * list will be contiguous. 2830 * 2831 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP 2832 * consists of a list of PRP entries to describe a number of noncontiguous 2833 * physical memory segments as a single memory buffer, just as a SGL does. Note 2834 * however, that this function is only used by the IOCTL call, so the memory 2835 * given will be guaranteed to be contiguous. There is no need to translate 2836 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous 2837 * space that is one page size each. 2838 * 2839 * Each NVMe message contains two PRP entries. The first (PRP1) either contains 2840 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains 2841 * the second PRP element if the memory being described fits within 2 PRP 2842 * entries, or a PRP list pointer if the PRP spans more than two entries. 2843 * 2844 * A PRP list pointer contains the address of a PRP list, structured as a linear 2845 * array of PRP entries. Each PRP entry in this list describes a segment of 2846 * physical memory. 2847 * 2848 * Each 64-bit PRP entry comprises an address and an offset field. The address 2849 * always points to the beginning of a PAGE_SIZE physical memory page, and the 2850 * offset describes where within that page the memory segment begins. Only the 2851 * first element in a PRP list may contain a non-zero offset, implying that all 2852 * memory segments following the first begin at the start of a PAGE_SIZE page. 2853 * 2854 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory, 2855 * with exceptions for the first and last elements in the list.
If the memory 2856 * being described by the list begins at a non-zero offset within the first page, 2857 * then the first PRP element will contain a non-zero offset indicating where the 2858 * region begins within the page. The last memory segment may end before the end 2859 * of the PAGE_SIZE segment, depending upon the overall size of the memory being 2860 * described by the PRP list. 2861 * 2862 * Since PRP entries lack any indication of size, the overall data buffer length 2863 * is used to determine where the end of the data memory buffer is located, and 2864 * how many PRP entries are required to describe it. 2865 * 2866 * Returns nothing. 2867 */ 2868 void 2869 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, 2870 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data, 2871 uint32_t data_in_sz, uint32_t data_out_sz) 2872 { 2873 int prp_size = PRP_ENTRY_SIZE; 2874 uint64_t *prp_entry, *prp1_entry, *prp2_entry; 2875 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys; 2876 uint32_t offset, entry_len, page_mask_result, page_mask; 2877 bus_addr_t paddr; 2878 size_t length; 2879 struct mpr_prp_page *prp_page_info = NULL; 2880 2881 /* 2882 * Not all commands require a data transfer. If no data, just return 2883 * without constructing any PRP. 2884 */ 2885 if (!data_in_sz && !data_out_sz) 2886 return; 2887 2888 /* 2889 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is 2890 * located at a 24-byte offset from the start of the NVMe command. Then 2891 * set the current PRP entry pointer to PRP1. 2892 */ 2893 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2894 NVME_CMD_PRP1_OFFSET); 2895 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2896 NVME_CMD_PRP2_OFFSET); 2897 prp_entry = prp1_entry; 2898 2899 /* 2900 * For the PRP entries, use the specially allocated buffer of 2901 * contiguous memory. PRP Page allocation failures should not happen 2902 * because there should be enough PRP page buffers to account for the 2903 * possible NVMe QDepth. 2904 */ 2905 prp_page_info = mpr_alloc_prp_page(sc); 2906 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2907 "used for building a native NVMe SGL.\n", __func__)); 2908 prp_page = (uint64_t *)prp_page_info->prp_page; 2909 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 2910 2911 /* 2912 * Insert the allocated PRP page into the command's PRP page list. This 2913 * will be freed when the command is freed. 2914 */ 2915 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 2916 2917 /* 2918 * Check if we are within 1 entry of a page boundary; we don't want our 2919 * first entry to be a PRP List entry. 2920 */ 2921 page_mask = PAGE_SIZE - 1; 2922 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) & 2923 page_mask; 2924 if (!page_mask_result) 2925 { 2926 /* Bump up to next page boundary. */ 2927 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size); 2928 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys + 2929 prp_size); 2930 } 2931 2932 /* 2933 * Set PRP physical pointer, which initially points to the current PRP 2934 * DMA memory page. 2935 */ 2936 prp_entry_phys = prp_page_phys; 2937 2938 /* Get physical address and length of the data buffer. */ 2939 paddr = (bus_addr_t)(uintptr_t)data; 2940 if (data_in_sz) 2941 length = data_in_sz; 2942 else 2943 length = data_out_sz; 2944 2945 /* Loop while the length is not zero.
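 * Each pass emits one PRP entry covering at most one page of the buffer, and a PRP list pointer is inserted whenever the last slot of a page in the PRP list is reached.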
*/ 2946 while (length) 2947 { 2948 /* 2949 * Check if we need to put a list pointer here: we do if we are 2950 * at page boundary - prp_size (8 bytes). 2951 */ 2952 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys + 2953 prp_size) & page_mask; 2954 if (!page_mask_result) 2955 { 2956 /* 2957 * This is the last entry in a PRP List, so we need to 2958 * put a PRP list pointer here. What this does is: 2959 * - bump the current memory pointer to the next 2960 * address, which will be the next full page. 2961 * - set the PRP Entry to point to that page. This is 2962 * now the PRP List pointer. 2963 * - bump the PRP Entry pointer to the start of the next 2964 * page. Since all of this PRP memory is contiguous, 2965 * no need to get a new page - it's just the next 2966 * address. 2967 */ 2968 prp_entry_phys++; 2969 *prp_entry = 2970 htole64((uint64_t)(uintptr_t)prp_entry_phys); 2971 prp_entry++; 2972 } 2973 2974 /* Need to handle if entry will be part of a page. */ 2975 offset = (uint32_t)paddr & page_mask; 2976 entry_len = PAGE_SIZE - offset; 2977 2978 if (prp_entry == prp1_entry) 2979 { 2980 /* 2981 * Must fill in the first PRP pointer (PRP1) before 2982 * moving on. 2983 */ 2984 *prp1_entry = htole64((uint64_t)paddr); 2985 2986 /* 2987 * Now point to the second PRP entry within the 2988 * command (PRP2). 2989 */ 2990 prp_entry = prp2_entry; 2991 } 2992 else if (prp_entry == prp2_entry) 2993 { 2994 /* 2995 * Should the PRP2 entry be a PRP List pointer or just a 2996 * regular PRP pointer? If there is more than one more 2997 * page of data, must use a PRP List pointer. 2998 */ 2999 if (length > PAGE_SIZE) 3000 { 3001 /* 3002 * PRP2 will contain a PRP List pointer because 3003 * more PRP's are needed with this command. The 3004 * list will start at the beginning of the 3005 * contiguous buffer. 3006 */ 3007 *prp2_entry = 3008 htole64( 3009 (uint64_t)(uintptr_t)prp_entry_phys); 3010 3011 /* 3012 * The next PRP Entry will be the start of the 3013 * first PRP List. 3014 */ 3015 prp_entry = prp_page; 3016 } 3017 else 3018 { 3019 /* 3020 * After this, the PRP Entries are complete. 3021 * This command uses 2 PRP's and no PRP list. 3022 */ 3023 *prp2_entry = htole64((uint64_t)paddr); 3024 } 3025 } 3026 else 3027 { 3028 /* 3029 * Put entry in list and bump the addresses. 3030 * 3031 * After PRP1 and PRP2 are filled in, this will fill in 3032 * all remaining PRP entries in a PRP List, one each 3033 * time through the loop. 3034 */ 3035 *prp_entry = htole64((uint64_t)paddr); 3036 prp_entry++; 3037 prp_entry_phys++; 3038 } 3039 3040 /* 3041 * Bump the phys address of the command's data buffer by the 3042 * entry_len. 3043 */ 3044 paddr += entry_len; 3045 3046 /* Decrement length accounting for last partial page. */ 3047 if (entry_len > length) 3048 length = 0; 3049 else 3050 length -= entry_len; 3051 } 3052 } 3053 3054 /* 3055 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to 3056 * determine if the driver needs to build a native SGL. If so, that native SGL 3057 * is built in the contiguous buffers allocated especially for PCIe SGL 3058 * creation. If the driver will not build a native SGL, return TRUE and a 3059 * normal IEEE SGL will be built. Currently this routine supports NVMe devices 3060 * only. 3061 * 3062 * Returns FALSE (0) if a native SGL was built, TRUE (1) if no native SGL was built.
3063 */ 3064 static int 3065 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, 3066 bus_dma_segment_t *segs, int segs_left) 3067 { 3068 uint32_t i, sge_dwords, length, offset, entry_len; 3069 uint32_t num_entries, buff_len = 0, sges_in_segment; 3070 uint32_t page_mask, page_mask_result, *curr_buff; 3071 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; 3072 uint32_t first_page_data_size, end_residual; 3073 uint64_t *msg_phys; 3074 bus_addr_t paddr; 3075 int build_native_sgl = 0, first_prp_entry; 3076 int prp_size = PRP_ENTRY_SIZE; 3077 Mpi25IeeeSgeChain64_t *main_chain_element = NULL; 3078 struct mpr_prp_page *prp_page_info = NULL; 3079 3080 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 3081 3082 /* 3083 * Add up the sizes of each segment length to get the total transfer 3084 * size, which will be checked against the Maximum Data Transfer Size. 3085 * If the data transfer length exceeds the MDTS for this device, just 3086 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O 3087 * up into multiple I/O's. [nvme_mdts = 0 means unlimited] 3088 */ 3089 for (i = 0; i < segs_left; i++) 3090 buff_len += htole32(segs[i].ds_len); 3091 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) 3092 return 1; 3093 3094 /* Create page_mask (to get offset within page) */ 3095 page_mask = PAGE_SIZE - 1; 3096 3097 /* 3098 * Check if the number of elements exceeds the max number that can be 3099 * put in the main message frame (H/W can only translate an SGL that 3100 * is contained entirely in the main message frame). 3101 */ 3102 sges_in_segment = (sc->reqframesz - 3103 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); 3104 if (segs_left > sges_in_segment) 3105 build_native_sgl = 1; 3106 else 3107 { 3108 /* 3109 * NVMe uses one PRP for each physical page (or part of physical 3110 * page). 3111 * if 4 pages or less then IEEE is OK 3112 * if > 5 pages then we need to build a native SGL 3113 * if > 4 and <= 5 pages, then check the physical address of 3114 * the first SG entry, then if this first size in the page 3115 * is >= the residual beyond 4 pages then use IEEE, 3116 * otherwise use native SGL 3117 */ 3118 if (buff_len > (PAGE_SIZE * 5)) 3119 build_native_sgl = 1; 3120 else if ((buff_len > (PAGE_SIZE * 4)) && 3121 (buff_len <= (PAGE_SIZE * 5)) ) 3122 { 3123 msg_phys = (uint64_t *)(uintptr_t)segs[0].ds_addr; 3124 first_page_offset = 3125 ((uint32_t)(uint64_t)(uintptr_t)msg_phys & 3126 page_mask); 3127 first_page_data_size = PAGE_SIZE - first_page_offset; 3128 end_residual = buff_len % PAGE_SIZE; 3129 3130 /* 3131 * If offset into first page pushes the end of the data 3132 * beyond end of the 5th page, we need the extra PRP 3133 * list. 3134 */ 3135 if (first_page_data_size < end_residual) 3136 build_native_sgl = 1; 3137 3138 /* 3139 * Check if first SG entry size is < residual beyond 4 3140 * pages. 3141 */ 3142 if (htole32(segs[0].ds_len) < 3143 (buff_len - (PAGE_SIZE * 4))) 3144 build_native_sgl = 1; 3145 } 3146 } 3147 3148 /* check if native SGL is needed */ 3149 if (!build_native_sgl) 3150 return 1; 3151 3152 /* 3153 * Native SGL is needed. 3154 * Put a chain element in main message frame that points to the first 3155 * chain buffer. 3156 * 3157 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 3158 * a native SGL. 
3159 */ 3160 3161 /* Set main message chain element pointer */ 3162 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; 3163 3164 /* 3165 * For NVMe the chain element needs to be the 2nd SGL entry in the main 3166 * message. 3167 */ 3168 main_chain_element = (Mpi25IeeeSgeChain64_t *) 3169 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); 3170 3171 /* 3172 * For the PRP entries, use the specially allocated buffer of 3173 * contiguous memory. PRP Page allocation failures should not happen 3174 * because there should be enough PRP page buffers to account for the 3175 * possible NVMe QDepth. 3176 */ 3177 prp_page_info = mpr_alloc_prp_page(sc); 3178 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 3179 "used for building a native NVMe SGL.\n", __func__)); 3180 curr_buff = (uint32_t *)prp_page_info->prp_page; 3181 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 3182 3183 /* 3184 * Insert the allocated PRP page into the command's PRP page list. This 3185 * will be freed when the command is freed. 3186 */ 3187 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 3188 3189 /* 3190 * Check if we are within 1 entry of a page boundary we don't want our 3191 * first entry to be a PRP List entry. 3192 */ 3193 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) & 3194 page_mask; 3195 if (!page_mask_result) { 3196 /* Bump up to next page boundary. */ 3197 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size); 3198 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); 3199 } 3200 3201 /* Fill in the chain element and make it an NVMe segment type. */ 3202 main_chain_element->Address.High = 3203 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32)); 3204 main_chain_element->Address.Low = 3205 htole32((uint32_t)(uintptr_t)msg_phys); 3206 main_chain_element->NextChainOffset = 0; 3207 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3208 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3209 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 3210 3211 /* Set SGL pointer to start of contiguous PCIe buffer. */ 3212 ptr_sgl = curr_buff; 3213 sge_dwords = 2; 3214 num_entries = 0; 3215 3216 /* 3217 * NVMe has a very convoluted PRP format. One PRP is required for each 3218 * page or partial page. We need to split up OS SG entries if they are 3219 * longer than one page or cross a page boundary. We also have to insert 3220 * a PRP list pointer entry as the last entry in each physical page of 3221 * the PRP list. 3222 * 3223 * NOTE: The first PRP "entry" is actually placed in the first SGL entry 3224 * in the main message in IEEE 64 format. The 2nd entry in the main 3225 * message is the chain element, and the rest of the PRP entries are 3226 * built in the contiguous PCIe buffer. 3227 */ 3228 first_prp_entry = 1; 3229 ptr_first_sgl = (uint32_t *)cm->cm_sge; 3230 3231 for (i = 0; i < segs_left; i++) { 3232 /* Get physical address and length of this SG entry. */ 3233 paddr = segs[i].ds_addr; 3234 length = segs[i].ds_len; 3235 3236 /* 3237 * Check whether a given SGE buffer lies on a non-PAGED 3238 * boundary if this is not the first page. If so, this is not 3239 * expected so have FW build the SGL. 3240 */ 3241 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) { 3242 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while " 3243 "building NVMe PRPs, low address is 0x%x\n", 3244 (uint32_t)paddr); 3245 return 1; 3246 } 3247 3248 /* Apart from last SGE, if any other SGE boundary is not page 3249 * aligned then it means that hole exists. 
Such a hole 3250 * leads to data corruption, so fall back to IEEE SGEs. 3251 */ 3252 if (i != (segs_left - 1)) { 3253 if (((uint32_t)paddr + length) & page_mask) { 3254 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE " 3255 "boundary while building NVMe PRPs, low " 3256 "address: 0x%x and length: %u\n", 3257 (uint32_t)paddr, length); 3258 return 1; 3259 } 3260 } 3261 3262 /* Loop while the length is not zero. */ 3263 while (length) { 3264 /* 3265 * Check if we need to put a list pointer here: we do if 3266 * we are at page boundary - prp_size. 3267 */ 3268 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl + 3269 prp_size) & page_mask; 3270 if (!page_mask_result) { 3271 /* 3272 * Need to put a PRP list pointer here. 3273 */ 3274 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3275 prp_size); 3276 *ptr_sgl = htole32((uintptr_t)msg_phys); 3277 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t) 3278 msg_phys >> 32); 3279 ptr_sgl += sge_dwords; 3280 num_entries++; 3281 } 3282 3283 /* Need to handle if entry will be part of a page. */ 3284 offset = (uint32_t)paddr & page_mask; 3285 entry_len = PAGE_SIZE - offset; 3286 if (first_prp_entry) { 3287 /* 3288 * Put IEEE entry in first SGE in main message. 3289 * (Simple element, System addr, not end of 3290 * list.) 3291 */ 3292 *ptr_first_sgl = htole32((uint32_t)paddr); 3293 *(ptr_first_sgl + 1) = 3294 htole32((uint32_t)((uint64_t)paddr >> 32)); 3295 *(ptr_first_sgl + 2) = htole32(entry_len); 3296 *(ptr_first_sgl + 3) = 0; 3297 3298 /* No longer the first PRP entry. */ 3299 first_prp_entry = 0; 3300 } else { 3301 /* Put entry in list. */ 3302 *ptr_sgl = htole32((uint32_t)paddr); 3303 *(ptr_sgl + 1) = 3304 htole32((uint32_t)((uint64_t)paddr >> 32)); 3305 3306 /* Bump ptr_sgl, msg_phys, and num_entries. */ 3307 ptr_sgl += sge_dwords; 3308 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3309 prp_size); 3310 num_entries++; 3311 } 3312 3313 /* Bump the phys address by the entry_len. */ 3314 paddr += entry_len; 3315 3316 /* Decrement length accounting for last partial page. */ 3317 if (entry_len > length) 3318 length = 0; 3319 else 3320 length -= entry_len; 3321 } 3322 } 3323 3324 /* Set chain element Length. */ 3325 main_chain_element->Length = htole32(num_entries * prp_size); 3326 3327 /* Return 0, indicating we built a native SGL. */ 3328 return 0; 3329 } 3330 3331 /* 3332 * Add a chain element as the next SGE for the specified command. 3333 * Reset cm_sge and cm_sgesize to indicate all the available space. Chains are 3334 * only required for IEEE commands. Therefore there is no code for commands 3335 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands 3336 * shouldn't be requesting chains). 3337 */ 3338 static int 3339 mpr_add_chain(struct mpr_command *cm, int segsleft) 3340 { 3341 struct mpr_softc *sc = cm->cm_sc; 3342 MPI2_REQUEST_HEADER *req; 3343 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; 3344 struct mpr_chain *chain; 3345 int sgc_size, current_segs, rem_segs, segs_per_frame; 3346 uint8_t next_chain_offset = 0; 3347 3348 /* 3349 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3 3350 * only IEEE commands should be requesting chains. Return some error 3351 * code other than 0.
*/ 3353 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { 3354 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " 3355 "an MPI SGL.\n"); 3356 return (ENOBUFS); 3357 } 3358 3359 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); 3360 if (cm->cm_sglsize < sgc_size) 3361 panic("MPR: Need SGE Error Code\n"); 3362 3363 chain = mpr_alloc_chain(cm->cm_sc); 3364 if (chain == NULL) 3365 return (ENOBUFS); 3366 3367 /* 3368 * Note: a doubly-linked list is used to make it easier to walk for 3369 * debugging. 3370 */ 3371 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 3372 3373 /* 3374 * Need to know if the number of frames left is more than 1 or not. If 3375 * more than 1 frame is required, NextChainOffset will need to be set, 3376 * which will just be the last segment of the frame. 3377 */ 3378 rem_segs = 0; 3379 if (cm->cm_sglsize < (sgc_size * segsleft)) { 3380 /* 3381 * rem_segs is the number of segments remaining after the 3382 * segments that will go into the current frame. Since it is 3383 * known that at least one more frame is required, account for 3384 * the chain element. To know if more than one more frame is 3385 * required, just check if there will be a remainder after using 3386 * the current frame (with this chain) and the next frame. If 3387 * so the NextChainOffset must be the last element of the next 3388 * frame. 3389 */ 3390 current_segs = (cm->cm_sglsize / sgc_size) - 1; 3391 rem_segs = segsleft - current_segs; 3392 segs_per_frame = sc->chain_frame_size / sgc_size; 3393 if (rem_segs > segs_per_frame) { 3394 next_chain_offset = segs_per_frame - 1; 3395 } 3396 } 3397 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; 3398 ieee_sgc->Length = next_chain_offset ? 3399 htole32((uint32_t)sc->chain_frame_size) : 3400 htole32((uint32_t)rem_segs * (uint32_t)sgc_size); 3401 ieee_sgc->NextChainOffset = next_chain_offset; 3402 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3403 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3404 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); 3405 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); 3406 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; 3407 req = (MPI2_REQUEST_HEADER *)cm->cm_req; 3408 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; 3409 3410 cm->cm_sglsize = sc->chain_frame_size; 3411 return (0); 3412 } 3413 3414 /* 3415 * Add one scatter-gather element to the scatter-gather list for a command. 3416 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the 3417 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a 3418 * chain, so don't consider any chain additions. 3419 */ 3420 int 3421 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, 3422 int segsleft) 3423 { 3424 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3425 u32 sge_flags; 3426 3427 /* 3428 * case 1: >=1 more segment, no room for anything (error) 3429 * case 2: 1 more segment and enough room for it 3430 */ 3431 3432 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { 3433 mpr_dprint(cm->cm_sc, MPR_ERROR, 3434 "%s: warning: Not enough room for MPI SGL in frame.\n", 3435 __func__); 3436 return (ENOBUFS); 3437 } 3438 3439 KASSERT(segsleft == 1, 3440 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", 3441 segsleft)); 3442 3443 /* 3444 * There is one more segment left to add for the MPI SGL and there is 3445 * enough room in the frame to add it.
This is the normal case because 3446 * MPI SGL's don't have chains, otherwise something is wrong. 3447 * 3448 * If this is a bi-directional request, need to account for that 3449 * here. Save the pre-filled sge values. These will be used 3450 * either for the 2nd SGL or for a single direction SGL. If 3451 * cm_out_len is non-zero, this is a bi-directional request, so 3452 * fill in the OUT SGL first, then the IN SGL, otherwise just 3453 * fill in the IN SGL. Note that at this time, when filling in 3454 * 2 SGL's for a bi-directional request, they both use the same 3455 * DMA buffer (same cm command). 3456 */ 3457 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 3458 saved_address_low = sge->Address.Low; 3459 saved_address_high = sge->Address.High; 3460 if (cm->cm_out_len) { 3461 sge->FlagsLength = cm->cm_out_len | 3462 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3463 MPI2_SGE_FLAGS_END_OF_BUFFER | 3464 MPI2_SGE_FLAGS_HOST_TO_IOC | 3465 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3466 MPI2_SGE_FLAGS_SHIFT); 3467 cm->cm_sglsize -= len; 3468 /* Endian Safe code */ 3469 sge_flags = sge->FlagsLength; 3470 sge->FlagsLength = htole32(sge_flags); 3471 sge->Address.High = htole32(sge->Address.High); 3472 sge->Address.Low = htole32(sge->Address.Low); 3473 bcopy(sge, cm->cm_sge, len); 3474 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3475 } 3476 sge->FlagsLength = saved_buf_len | 3477 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3478 MPI2_SGE_FLAGS_END_OF_BUFFER | 3479 MPI2_SGE_FLAGS_LAST_ELEMENT | 3480 MPI2_SGE_FLAGS_END_OF_LIST | 3481 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3482 MPI2_SGE_FLAGS_SHIFT); 3483 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { 3484 sge->FlagsLength |= 3485 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 3486 MPI2_SGE_FLAGS_SHIFT); 3487 } else { 3488 sge->FlagsLength |= 3489 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 3490 MPI2_SGE_FLAGS_SHIFT); 3491 } 3492 sge->Address.Low = saved_address_low; 3493 sge->Address.High = saved_address_high; 3494 3495 cm->cm_sglsize -= len; 3496 /* Endian Safe code */ 3497 sge_flags = sge->FlagsLength; 3498 sge->FlagsLength = htole32(sge_flags); 3499 sge->Address.High = htole32(sge->Address.High); 3500 sge->Address.Low = htole32(sge->Address.Low); 3501 bcopy(sge, cm->cm_sge, len); 3502 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3503 return (0); 3504 } 3505 3506 /* 3507 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- 3508 * gather list for a command. Maintain cm_sglsize and cm_sge as the 3509 * remaining size and pointer to the next SGE to fill in, respectively. 3510 */ 3511 int 3512 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) 3513 { 3514 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; 3515 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); 3516 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3517 uint32_t sge_length; 3518 3519 /* 3520 * case 1: No room for chain or segment (error). 3521 * case 2: Two or more segments left but only room for chain. 3522 * case 3: Last segment and room for it, so set flags. 3523 */ 3524 3525 /* 3526 * There should be room for at least one element, or there is a big 3527 * problem. 
3528 */ 3529 if (cm->cm_sglsize < ieee_sge_size) 3530 panic("MPR: Need SGE Error Code\n"); 3531 3532 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { 3533 if ((error = mpr_add_chain(cm, segsleft)) != 0) 3534 return (error); 3535 } 3536 3537 if (segsleft == 1) { 3538 /* 3539 * If this is a bi-directional request, it must be accounted for 3540 * here. Save the pre-filled sge values. These will be used 3541 * either for the 2nd SGL or for a single-direction SGL. If 3542 * cm_out_len is non-zero, this is a bi-directional request, so 3543 * fill in the OUT SGL first, then the IN SGL; otherwise just 3544 * fill in the IN SGL. Note that at this time, when filling in 3545 * two SGLs for a bi-directional request, they both use the same 3546 * DMA buffer (same cm command). 3547 */ 3548 saved_buf_len = sge->Length; 3549 saved_address_low = sge->Address.Low; 3550 saved_address_high = sge->Address.High; 3551 if (cm->cm_out_len) { 3552 sge->Length = cm->cm_out_len; 3553 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3554 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3555 cm->cm_sglsize -= ieee_sge_size; 3556 /* Endian safe code */ 3557 sge_length = sge->Length; 3558 sge->Length = htole32(sge_length); 3559 sge->Address.High = htole32(sge->Address.High); 3560 sge->Address.Low = htole32(sge->Address.Low); 3561 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3562 cm->cm_sge = 3563 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3564 ieee_sge_size); 3565 } 3566 sge->Length = saved_buf_len; 3567 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3568 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3569 MPI25_IEEE_SGE_FLAGS_END_OF_LIST); 3570 sge->Address.Low = saved_address_low; 3571 sge->Address.High = saved_address_high; 3572 } 3573 3574 cm->cm_sglsize -= ieee_sge_size; 3575 /* Endian safe code */ 3576 sge_length = sge->Length; 3577 sge->Length = htole32(sge_length); 3578 sge->Address.High = htole32(sge->Address.High); 3579 sge->Address.Low = htole32(sge->Address.Low); 3580 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3581 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3582 ieee_sge_size); 3583 return (0); 3584 } 3585 3586 /* 3587 * Add one DMA segment to the scatter-gather list for a command. 3588 */ 3589 int 3590 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, 3591 int segsleft) 3592 { 3593 MPI2_SGE_SIMPLE64 sge; 3594 MPI2_IEEE_SGE_SIMPLE64 ieee_sge; 3595 3596 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { 3597 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3598 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3599 ieee_sge.Length = len; 3600 mpr_from_u64(pa, &ieee_sge.Address); 3601 3602 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); 3603 } else { 3604 /* 3605 * This driver always uses 64-bit address elements for 3606 * simplicity. 3607 */ 3608 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3609 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 3610 /* Endian conversion is handled in mpr_push_sge */ 3611 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); 3612 mpr_from_u64(pa, &sge.Address); 3613 3614 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); 3615 } 3616 } 3617 3618 static void 3619 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3620 { 3621 struct mpr_softc *sc; 3622 struct mpr_command *cm; 3623 u_int i, dir, sflags; 3624 3625 cm = (struct mpr_command *)arg; 3626 sc = cm->cm_sc; 3627 3628 /* 3629 * If busdma returned more segments than allowed, just print out a 3630 * warning and let the chip tell the user they did the wrong thing.
3631 */ 3632 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { 3633 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d " 3634 "segments, more than the %d allowed\n", __func__, nsegs, 3635 cm->cm_max_segs); 3636 } 3637 3638 /* 3639 * Set up DMA direction flags. Bi-directional requests are also handled 3640 * here. In that case, both direction flags will be set. 3641 */ 3642 sflags = 0; 3643 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { 3644 /* 3645 * We have to add a special case for SMP passthrough; there 3646 * is no easy way to handle it generically. The first 3647 * S/G element is used for the command (therefore the 3648 * direction bit needs to be set). The second one is used 3649 * for the reply. We'll leave it to the caller to make 3650 * sure we only have two buffers. 3651 */ 3652 /* 3653 * Even though the busdma man page says it doesn't make 3654 * sense to have both direction flags, it does in this case. 3655 * We have one s/g element being accessed in each direction. 3656 */ 3657 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; 3658 3659 /* 3660 * Set the direction flag on the first buffer in the SMP 3661 * passthrough request. We'll clear it for the second one. 3662 */ 3663 sflags |= MPI2_SGE_FLAGS_DIRECTION | 3664 MPI2_SGE_FLAGS_END_OF_BUFFER; 3665 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { 3666 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 3667 dir = BUS_DMASYNC_PREWRITE; 3668 } else 3669 dir = BUS_DMASYNC_PREREAD; 3670 3671 /* Check if a native SG list is needed for an NVMe PCIe device. */ 3672 if (cm->cm_targ && cm->cm_targ->is_nvme && 3673 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { 3674 /* A native SG list was built, skip to end. */ 3675 goto out; 3676 } 3677 3678 for (i = 0; i < nsegs; i++) { 3679 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { 3680 sflags &= ~MPI2_SGE_FLAGS_DIRECTION; 3681 } 3682 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, 3683 sflags, nsegs - i); 3684 if (error != 0) { 3685 /* Resource shortage, roll back! */ 3686 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) 3687 mpr_dprint(sc, MPR_INFO, "Out of chain frames, " 3688 "consider increasing hw.mpr.max_chains.\n"); 3689 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; 3690 mpr_complete_command(sc, cm); 3691 return; 3692 } 3693 } 3694 3695 out: 3696 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 3697 mpr_enqueue_request(sc, cm); 3698 3699 return; 3700 } 3701 3702 static void 3703 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, 3704 int error) 3705 { 3706 mpr_data_cb(arg, segs, nsegs, error); 3707 } 3708 3709 /* 3710 * This is the routine to enqueue commands asynchronously. 3711 * Note that the only error path here is from bus_dmamap_load(), which can 3712 * return EINPROGRESS if it is waiting for resources. Other than this, it's 3713 * assumed that if you have a command in-hand, then you have enough credits 3714 * to use it.
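 *
 * An illustrative sketch with hypothetical names (buf, len, done_cb),
 * patterned on how mpr_read_config_page() below fills in a command:
 *
 *	cm->cm_data = buf;			data buffer, or NULL for none
 *	cm->cm_length = len;
 *	cm->cm_sge = &req->PageBufferSGE;	first SGE slot in the request
 *	cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
 *	cm->cm_complete = done_cb;		completion callback
 *	error = mpr_map_command(sc, cm);
 *
 * Per the note above, an EINPROGRESS return must be treated as success,
 * since the callback fires once busdma resources become available.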
3715 */ 3716 int 3717 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) 3718 { 3719 int error = 0; 3720 3721 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { 3722 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, 3723 &cm->cm_uio, mpr_data_cb2, cm, 0); 3724 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { 3725 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, 3726 cm->cm_data, mpr_data_cb, cm, 0); 3727 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { 3728 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, 3729 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); 3730 } else { 3731 /* Add a zero-length element as needed */ 3732 if (cm->cm_sge != NULL) 3733 mpr_add_dmaseg(cm, 0, 0, 0, 1); 3734 mpr_enqueue_request(sc, cm); 3735 } 3736 3737 return (error); 3738 } 3739 3740 /* 3741 * This is the routine to enqueue commands synchronously. An error of 3742 * EINPROGRESS from mpr_map_command() is ignored since the command will 3743 * be executed and enqueued automatically. Other errors come from msleep(). 3744 */ 3745 int 3746 mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout, 3747 int sleep_flag) 3748 { 3749 int error, rc; 3750 struct timeval cur_time, start_time; 3751 struct mpr_command *cm = *cmp; 3752 3753 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) 3754 return (EBUSY); 3755 3756 cm->cm_complete = NULL; 3757 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED); 3758 error = mpr_map_command(sc, cm); 3759 if ((error != 0) && (error != EINPROGRESS)) 3760 return (error); 3761 3762 // Check for context and wait for 50 ms at a time until time has 3763 // expired or the command has finished. If msleep can't be used, need 3764 // to poll. 3765 #if __FreeBSD_version >= 1000029 3766 if (curthread->td_no_sleeping) 3767 #else //__FreeBSD_version < 1000029 3768 if (curthread->td_pflags & TDP_NOSLEEPING) 3769 #endif //__FreeBSD_version >= 1000029 3770 sleep_flag = NO_SLEEP; 3771 getmicrouptime(&start_time); 3772 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { 3773 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); 3774 if (error == EWOULDBLOCK) { 3775 /* 3776 * Record the actual elapsed time in the case of a 3777 * timeout for the message below. 3778 */ 3779 getmicrouptime(&cur_time); 3780 timevalsub(&cur_time, &start_time); 3781 } 3782 } else { 3783 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { 3784 mpr_intr_locked(sc); 3785 if (sleep_flag == CAN_SLEEP) 3786 pause("mprwait", hz/20); 3787 else 3788 DELAY(50000); 3789 3790 getmicrouptime(&cur_time); 3791 timevalsub(&cur_time, &start_time); 3792 if (cur_time.tv_sec > timeout) { 3793 error = EWOULDBLOCK; 3794 break; 3795 } 3796 } 3797 } 3798 3799 if (error == EWOULDBLOCK) { 3800 if (cm->cm_timeout_handler == NULL) { 3801 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d," 3802 " elapsed=%jd\n", __func__, timeout, 3803 (intmax_t)cur_time.tv_sec); 3804 rc = mpr_reinit(sc); 3805 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : 3806 "failed"); 3807 } else 3808 cm->cm_timeout_handler(sc, cm); 3809 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { 3810 /* 3811 * Tell the caller that we freed the command in a 3812 * reinit. 3813 */ 3814 *cmp = NULL; 3815 } 3816 error = ETIMEDOUT; 3817 } 3818 return (error); 3819 } 3820 3821 /* 3822 * This is the routine to enqueue a command synchronously and poll for 3823 * completion. Its use should be rare.
3824 */ 3825 int 3826 mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp) 3827 { 3828 int error, rc; 3829 struct timeval cur_time, start_time; 3830 struct mpr_command *cm = *cmp; 3831 3832 error = 0; 3833 3834 cm->cm_flags |= MPR_CM_FLAGS_POLLED; 3835 cm->cm_complete = NULL; 3836 mpr_map_command(sc, cm); 3837 3838 getmicrouptime(&start_time); 3839 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { 3840 mpr_intr_locked(sc); 3841 3842 if (mtx_owned(&sc->mpr_mtx)) 3843 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, 3844 "mprpoll", hz/20); 3845 else 3846 pause("mprpoll", hz/20); 3847 3848 /* 3849 * Check for real-time timeout and fail if more than 60 seconds. 3850 */ 3851 getmicrouptime(&cur_time); 3852 timevalsub(&cur_time, &start_time); 3853 if (cur_time.tv_sec > 60) { 3854 mpr_dprint(sc, MPR_FAULT, "polling failed\n"); 3855 error = ETIMEDOUT; 3856 break; 3857 } 3858 } 3859 cm->cm_state = MPR_CM_STATE_BUSY; 3860 if (error) { 3861 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__); 3862 rc = mpr_reinit(sc); 3863 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : 3864 "failed"); 3865 3866 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { 3867 /* 3868 * Tell the caller that we freed the command in a 3869 * reinit. 3870 */ 3871 *cmp = NULL; 3872 } 3873 } 3874 return (error); 3875 } 3876 3877 /* 3878 * The MPT driver had a verbose interface for config pages. This driver 3879 * reduces it to much simpler terms, similar to the Linux driver. 3880 */ 3881 int 3882 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params) 3883 { 3884 MPI2_CONFIG_REQUEST *req; 3885 struct mpr_command *cm; 3886 int error; 3887 3888 if (sc->mpr_flags & MPR_FLAGS_BUSY) { 3889 return (EBUSY); 3890 } 3891 3892 cm = mpr_alloc_command(sc); 3893 if (cm == NULL) { 3894 return (EBUSY); 3895 } 3896 3897 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; 3898 req->Function = MPI2_FUNCTION_CONFIG; 3899 req->Action = params->action; 3900 req->SGLFlags = 0; 3901 req->ChainOffset = 0; 3902 req->PageAddress = params->page_address; 3903 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3904 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; 3905 3906 hdr = &params->hdr.Ext; 3907 req->ExtPageType = hdr->ExtPageType; 3908 req->ExtPageLength = hdr->ExtPageLength; 3909 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 3910 req->Header.PageLength = 0; /* Must be set to zero */ 3911 req->Header.PageNumber = hdr->PageNumber; 3912 req->Header.PageVersion = hdr->PageVersion; 3913 } else { 3914 MPI2_CONFIG_PAGE_HEADER *hdr; 3915 3916 hdr = &params->hdr.Struct; 3917 req->Header.PageType = hdr->PageType; 3918 req->Header.PageNumber = hdr->PageNumber; 3919 req->Header.PageLength = hdr->PageLength; 3920 req->Header.PageVersion = hdr->PageVersion; 3921 } 3922 3923 cm->cm_data = params->buffer; 3924 cm->cm_length = params->length; 3925 if (cm->cm_data != NULL) { 3926 cm->cm_sge = &req->PageBufferSGE; 3927 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); 3928 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; 3929 } else 3930 cm->cm_sge = NULL; 3931 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3932 3933 cm->cm_complete_data = params; 3934 if (params->callback != NULL) { 3935 cm->cm_complete = mpr_config_complete; 3936 return (mpr_map_command(sc, cm)); 3937 } else { 3938 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP); 3939 if (error) { 3940 mpr_dprint(sc, MPR_FAULT, 3941 "Error %d reading config page\n", error); 3942 if (cm != NULL) 3943 mpr_free_command(sc, cm);
3944 return (error); 3945 } 3946 mpr_config_complete(sc, cm); 3947 } 3948 3949 return (0); 3950 } 3951 3952 int 3953 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params) 3954 { 3955 return (EINVAL); 3956 } 3957 3958 static void 3959 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) 3960 { 3961 MPI2_CONFIG_REPLY *reply; 3962 struct mpr_config_params *params; 3963 3964 MPR_FUNCTRACE(sc); 3965 params = cm->cm_complete_data; 3966 3967 if (cm->cm_data != NULL) { 3968 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, 3969 BUS_DMASYNC_POSTREAD); 3970 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 3971 } 3972 3973 /* 3974 * XXX KDM need to do more error recovery? This results in the 3975 * device in question not getting probed. 3976 */ 3977 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3978 params->status = MPI2_IOCSTATUS_BUSY; 3979 goto done; 3980 } 3981 3982 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 3983 if (reply == NULL) { 3984 params->status = MPI2_IOCSTATUS_BUSY; 3985 goto done; 3986 } 3987 params->status = reply->IOCStatus; 3988 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3989 params->hdr.Ext.ExtPageType = reply->ExtPageType; 3990 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 3991 params->hdr.Ext.PageType = reply->Header.PageType; 3992 params->hdr.Ext.PageNumber = reply->Header.PageNumber; 3993 params->hdr.Ext.PageVersion = reply->Header.PageVersion; 3994 } else { 3995 params->hdr.Struct.PageType = reply->Header.PageType; 3996 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 3997 params->hdr.Struct.PageLength = reply->Header.PageLength; 3998 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 3999 } 4000 4001 done: 4002 mpr_free_command(sc, cm); 4003 if (params->callback != NULL) 4004 params->callback(sc, params); 4005 4006 return; 4007 } 4008
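
/*
 * Worked example for the mpr_add_chain() arithmetic above, assuming a
 * 128-byte chain frame (chain_frame_size is IOC-dependent):
 * sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64) = 16, so segs_per_frame = 8.
 * With cm_sglsize = 64 and segsleft = 20: current_segs = 64/16 - 1 = 3,
 * rem_segs = 20 - 3 = 17 > 8, so next_chain_offset = 7 and the chain
 * element's Length covers the full 128-byte frame.
 */

/*
 * Illustrative sketch, compiled out of the build: one plausible way a
 * caller could drive mpr_read_config_page() synchronously, following the
 * usual MPI two-step of fetching the page header first and then the page
 * body. The page choice (Manufacturing Page 0) and the function name are
 * hypothetical, not part of this driver; error handling is abbreviated.
 */
#if 0
static int
example_read_man_page0(struct mpr_softc *sc)
{
	struct mpr_config_params params;
	Mpi2ManufacturingPage0_t *page;
	int error;

	bzero(&params, sizeof(params));

	/* Step 1: ask the IOC for the page header (fills in PageLength). */
	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	params.hdr.Struct.PageNumber = 0;
	params.page_address = 0;
	params.buffer = NULL;
	params.length = 0;
	params.callback = NULL;	/* NULL callback selects the synchronous path */
	if ((error = mpr_read_config_page(sc, &params)) != 0)
		return (error);
	if (params.status != MPI2_IOCSTATUS_SUCCESS)
		return (EIO);

	/* Step 2: read the page body using the header returned above. */
	page = malloc(sizeof(*page), M_MPR, M_ZERO | M_NOWAIT);
	if (page == NULL)
		return (ENOMEM);
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.buffer = page;
	params.length = sizeof(*page);
	if ((error = mpr_read_config_page(sc, &params)) != 0 ||
	    params.status != MPI2_IOCSTATUS_SUCCESS) {
		free(page, M_MPR);
		return (error != 0 ? error : EIO);
	}

	/* ... consume the page contents here ... */
	free(page, M_MPR);
	return (0);
}
#endif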