/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright 2000-2020 Broadcom Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <sys/priv.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>

static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static void mpr_resize_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS);
static int mpr_dump_reqs(SYSCTL_HANDLER_ARGS);
static void mpr_parse_debug(struct mpr_softc *sc, char *list);

SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
 * any state and back to its initialization state machine.
 */
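/*
 * The sequence below appears to correspond to the MPI2_WRSEQ_*_KEY_VALUE
 * constants (the flush key followed by the 1st through 6th keys) defined
 * in mpi2.h; writing them in order unlocks the host diagnostic register,
 * observed as MPI2_DIAG_DIAG_WRITE_ENABLE in the loop below.
 */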
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };

/*
 * Added this union to smoothly convert le64toh(cm->cm_desc.Words).
 * The compiler only supports a uint64_t being passed as an argument;
 * otherwise it throws this error:
 * "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
        u64 word;
        struct {
                u32 low;
                u32 high;
        } u;
} reply_descriptor, request_descriptor;

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context it can sleep without
 * harm, but if it is called from an interrupt handler it must not sleep,
 * so the caller passes NO_SLEEP.  Based on the sleep flag the driver calls
 * either msleep, pause, or DELAY.  msleep and pause are variants of the
 * same thing; pause is used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
        uint32_t reg;
        int i, error, tries = 0;
        uint8_t first_wait_done = FALSE;

        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        /* Clear any pending interrupts */
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        /*
         * Force NO_SLEEP for threads that are prohibited from sleeping,
         * e.g. threads running in an interrupt handler.
         */
#if __FreeBSD_version >= 1000029
        if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
        if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
                sleep_flag = NO_SLEEP;

        mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
        /* Push the magic sequence */
        error = ETIMEDOUT;
        while (tries++ < 20) {
                for (i = 0; i < sizeof(mpt2_reset_magic); i++)
                        mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
                            mpt2_reset_magic[i]);

                /* wait 100 msec */
                if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
                        msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
                            "mprdiag", hz/10);
                else if (sleep_flag == CAN_SLEEP)
                        pause("mprdiag", hz/10);
                else
                        DELAY(100 * 1000);

                reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
                if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
                        error = 0;
                        break;
                }
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
                    error);
                return (error);
        }

        /* Send the actual reset.  XXX need to refresh the reg? */
        reg |= MPI2_DIAG_RESET_ADAPTER;
        mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
            reg);
        mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);

        /* Wait up to 300 seconds in 50ms intervals */
        error = ETIMEDOUT;
        for (i = 0; i < 6000; i++) {
                /*
                 * Wait 50 msec.  If this is the first time through, wait 256
                 * msec to satisfy Diag Reset timing requirements.
                 */
                if (first_wait_done) {
                        if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
                                msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
                                    "mprdiag", hz/20);
                        else if (sleep_flag == CAN_SLEEP)
                                pause("mprdiag", hz/20);
                        else
                                DELAY(50 * 1000);
                } else {
                        DELAY(256 * 1000);
                        first_wait_done = TRUE;
                }
                /*
                 * Check for the RESET_ADAPTER bit to be cleared first, then
                 * wait for the RESET state to be cleared, which takes a little
                 * longer.
                 */
                reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
                if (reg & MPI2_DIAG_RESET_ADAPTER) {
                        continue;
                }
                reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
                        error = 0;
                        break;
                }
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
                    error);
                return (error);
        }

        mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
        mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");

        return (0);
}

static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{
        int error;

        MPR_FUNCTRACE(sc);

        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        error = 0;
        mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
            MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
            MPI2_DOORBELL_FUNCTION_SHIFT);

        if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
                mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                    "Doorbell handshake failed\n");
                error = ETIMEDOUT;
        }

        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
        return (error);
}

static int
mpr_transition_ready(struct mpr_softc *sc)
{
        uint32_t reg, state;
        int error, tries = 0;
        int sleep_flags;

        MPR_FUNCTRACE(sc);
        /* If we are in attach call, do not sleep */
        sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
            ? CAN_SLEEP : NO_SLEEP;

        error = 0;

        mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
            __func__, sleep_flags);

        while (tries++ < 1200) {
                reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg);

                /*
                 * Ensure the IOC is ready to talk.  If it's not, try
                 * resetting it.
                 */
                if (reg & MPI2_DOORBELL_USED) {
                        mpr_dprint(sc, MPR_INIT, " Not ready, sending diag "
                            "reset\n");
                        mpr_diag_reset(sc, sleep_flags);
                        DELAY(50000);
                        continue;
                }

                /* Is the adapter owned by another peer? */
                if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
                    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
                            "control of another peer host, aborting "
                            "initialization.\n");
                        error = ENXIO;
                        break;
                }

                state = reg & MPI2_IOC_STATE_MASK;
                if (state == MPI2_IOC_STATE_READY) {
                        /* Ready to go! */
                        error = 0;
                        break;
                } else if (state == MPI2_IOC_STATE_FAULT) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
                            "state 0x%x, resetting\n",
                            state & MPI2_DOORBELL_FAULT_CODE_MASK);
                        mpr_diag_reset(sc, sleep_flags);
                } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
                        /* Need to take ownership */
                        mpr_message_unit_reset(sc, sleep_flags);
                } else if (state == MPI2_IOC_STATE_RESET) {
                        /* Wait a bit, IOC might be in transition */
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                            "IOC in unexpected reset state\n");
                } else {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                            "IOC in unknown state 0x%x\n", state);
                        error = EINVAL;
                        break;
                }

                /* Wait 50ms for things to settle down. */
                DELAY(50000);
        }

        if (error)
                mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                    "Cannot transition IOC to ready\n");
        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
        return (error);
}

static int
mpr_transition_operational(struct mpr_softc *sc)
{
        uint32_t reg, state;
        int error;

        MPR_FUNCTRACE(sc);

        error = 0;
        reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
        mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);

        state = reg & MPI2_IOC_STATE_MASK;
        if (state != MPI2_IOC_STATE_READY) {
                mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
                if ((error = mpr_transition_ready(sc)) != 0) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT,
                            "failed to transition ready, exit\n");
                        return (error);
                }
        }

        error = mpr_send_iocinit(sc);
        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);

        return (error);
}

static void
mpr_resize_queues(struct mpr_softc *sc)
{
        u_int reqcr, prireqcr, maxio, sges_per_frame, chain_seg_size;

        /*
         * Size the queues.  Since the reply queues always need one free
         * entry, we'll deduct one reply message here.  The LSI documents
         * suggest instead to add a count to the request queue, but I think
         * that it's better to deduct from reply queue.
         */
        prireqcr = MAX(1, sc->max_prireqframes);
        prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);

        reqcr = MAX(2, sc->max_reqframes);
        reqcr = MIN(reqcr, sc->facts->RequestCredit);

        sc->num_reqs = prireqcr + reqcr;
        sc->num_prireqs = prireqcr;
        sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
            sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;

        /* Store the request frame size in bytes rather than as 32bit words */
        sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;

        /*
         * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
         * get the size of a Chain Frame.  Previous versions use the size of a
         * Request Frame for the Chain Frame size.  If IOCMaxChainSegmentSize
         * is 0, use the default value.  The IOCMaxChainSegmentSize is the
         * number of 16-byte elements that can fit in a Chain Frame, 16 bytes
         * being the size of an IEEE Simple SGE.
         */
        if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
                chain_seg_size = htole16(sc->facts->IOCMaxChainSegmentSize);
                if (chain_seg_size == 0)
                        chain_seg_size = MPR_DEFAULT_CHAIN_SEG_SIZE;
                sc->chain_frame_size = chain_seg_size *
                    MPR_MAX_CHAIN_ELEMENT_SIZE;
        } else {
                sc->chain_frame_size = sc->reqframesz;
        }

        /*
         * Max IO Size is Page Size * the following:
         * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
         * + 1 for no chain needed in last frame
         *
         * If the user suggests a Max IO size, use the smaller of the user's
         * value and the calculated value, as long as the user's value is
         * larger than 0.  The user's value is in pages.
         */
        sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1;
        maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;
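
        /*
         * Illustrative arithmetic with hypothetical values: a 256-byte
         * chain frame holds 256 / sizeof(MPI2_IEEE_SGE_SIMPLE64) = 16
         * SGEs, minus 1 for the chain element, so sges_per_frame = 15.
         * With MaxChainDepth = 128 that yields maxio = (15 * 128 + 1) *
         * 4KB, roughly 7.5MB, before the clamping below.
         */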

        /*
         * If an I/O size limitation was requested, use it and pass it up to
         * CAM.  If not, use MAXPHYS as an optimization hint, but report the
         * HW limit.
         */
        if (sc->max_io_pages > 0) {
                maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
                sc->maxio = maxio;
        } else {
                sc->maxio = maxio;
                maxio = min(maxio, MAXPHYS);
        }

        sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
            sges_per_frame * reqcr;
        if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
                sc->num_chains = sc->max_chains;

        /*
         * Figure out the number of MSIx-based queues.  If the firmware or
         * user has done something crazy and not allowed enough credit for
         * the queues to be useful then don't enable multi-queue.
         */
        if (sc->facts->MaxMSIxVectors < 2)
                sc->msi_msgs = 1;

        if (sc->msi_msgs > 1) {
                sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
                sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
                if (sc->num_reqs / sc->msi_msgs < 2)
                        sc->msi_msgs = 1;
        }

        mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
            sc->msi_msgs, sc->num_reqs, sc->num_replies);
}

/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and re-
 * allocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
        int error;
        Mpi2IOCFactsReply_t saved_facts;
        uint8_t saved_mode, reallocating;

        mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);

        /* Save old IOC Facts and then only reallocate if Facts have changed */
        if (!attaching) {
                bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
        }

        /*
         * Get IOC Facts.  In all cases throughout this function, panic if doing
         * a re-initialization and only return the error if attaching so the OS
         * can handle it.
         */
        if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
                if (attaching) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
                            "IOC Facts with error %d, exit\n", error);
                        return (error);
                } else {
                        panic("%s failed to get IOC Facts with error %d\n",
                            __func__, error);
                }
        }

        MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);

        snprintf(sc->fw_version, sizeof(sc->fw_version),
            "%02d.%02d.%02d.%02d",
            sc->facts->FWVersion.Struct.Major,
            sc->facts->FWVersion.Struct.Minor,
            sc->facts->FWVersion.Struct.Unit,
            sc->facts->FWVersion.Struct.Dev);

        snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d",
            (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >>
            MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT,
            (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >>
            MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT);

        mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
            MPR_DRIVER_VERSION);
        mpr_dprint(sc, MPR_INFO,
            "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
            "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
            "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
            "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
            "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");

        /*
         * If the chip doesn't support event replay then a hard reset will be
         * required to trigger a full discovery.  Do the reset here then
         * retransition to Ready.  A hard reset might have already been done,
         * but it doesn't hurt to do it again.  Only do this if attaching, not
         * for a Diag Reset.
         */
        if (attaching && ((sc->facts->IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
                mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
                mpr_diag_reset(sc, NO_SLEEP);
                if ((error = mpr_transition_ready(sc)) != 0) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
                            "transition to ready with error %d, exit\n",
                            error);
                        return (error);
                }
        }

        /*
         * Set flag if IR Firmware is loaded.  If the RAID Capability has
         * changed from the previous IOC Facts, log a warning, but only if
         * checking this after a Diag Reset and not during attach.
         */
        saved_mode = sc->ir_firmware;
        if (sc->facts->IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
                sc->ir_firmware = 1;
        if (!attaching) {
                if (sc->ir_firmware != saved_mode) {
                        mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
                            "in IOC Facts does not match previous mode\n");
                }
        }

        /* Only deallocate and reallocate if relevant IOC Facts have changed */
        reallocating = FALSE;
        sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;

        if ((!attaching) &&
            ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
            (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
            (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
            (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
            (saved_facts.ProductID != sc->facts->ProductID) ||
            (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
            (saved_facts.IOCRequestFrameSize !=
            sc->facts->IOCRequestFrameSize) ||
            (saved_facts.IOCMaxChainSegmentSize !=
            sc->facts->IOCMaxChainSegmentSize) ||
            (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
            (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
            (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
            (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
            (saved_facts.MaxReplyDescriptorPostQueueDepth !=
            sc->facts->MaxReplyDescriptorPostQueueDepth) ||
            (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
            (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
            (saved_facts.MaxPersistentEntries !=
            sc->facts->MaxPersistentEntries))) {
                reallocating = TRUE;

                /* Record that we reallocated everything */
                sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
        }

        /*
         * Some things should be done if attaching or re-allocating after a Diag
         * Reset, but are not needed after a Diag Reset if the FW has not
         * changed.
         */
        if (attaching || reallocating) {
                /*
                 * Check if controller supports FW diag buffers and set flag to
                 * enable each type.
                 */
                if (sc->facts->IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
                        sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
                            enabled = TRUE;
                if (sc->facts->IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
                        sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
                            enabled = TRUE;
                if (sc->facts->IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
                        sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
                            enabled = TRUE;

                /*
                 * Set flags for some supported items.
                 */
                if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
                        sc->eedp_enabled = TRUE;
                if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
                        sc->control_TLR = TRUE;
                if ((sc->facts->IOCCapabilities &
                    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) &&
                    (sc->mpr_flags & MPR_FLAGS_SEA_IOC))
                        sc->atomic_desc_capable = TRUE;

                mpr_resize_queues(sc);

                /*
                 * Initialize all Tail Queues
                 */
                TAILQ_INIT(&sc->req_list);
                TAILQ_INIT(&sc->high_priority_req_list);
                TAILQ_INIT(&sc->chain_list);
                TAILQ_INIT(&sc->prp_page_list);
                TAILQ_INIT(&sc->tm_list);
        }

        /*
         * If doing a Diag Reset and the FW is significantly different
         * (reallocating will be set above in IOC Facts comparison), then all
         * buffers based on the IOC Facts will need to be freed before they are
         * reallocated.
         */
        if (reallocating) {
                mpr_iocfacts_free(sc);
                mprsas_realloc_targets(sc, saved_facts.MaxTargets +
                    saved_facts.MaxVolumes);
        }

        /*
         * Any deallocation has been completed.  Now start reallocating
         * if needed.  Will only need to reallocate if attaching or if the new
         * IOC Facts are different from the previous IOC Facts after a Diag
         * Reset.  Targets have already been allocated above if needed.
         */
        error = 0;
        while (attaching || reallocating) {
                if ((error = mpr_alloc_hw_queues(sc)) != 0)
                        break;
                if ((error = mpr_alloc_replies(sc)) != 0)
                        break;
                if ((error = mpr_alloc_requests(sc)) != 0)
                        break;
                if ((error = mpr_alloc_queues(sc)) != 0)
                        break;
                break;
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Failed to alloc queues with error %d\n", error);
                mpr_free(sc);
                return (error);
        }

        /* Always initialize the queues */
        bzero(sc->free_queue, sc->fqdepth * 4);
        mpr_init_queues(sc);

        /*
         * Always get the chip out of the reset state, but only panic if not
         * attaching.  If attaching and there is an error, that is handled by
         * the OS.
         */
        error = mpr_transition_operational(sc);
        if (error != 0) {
                mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
                    "transition to operational with error %d\n", error);
                mpr_free(sc);
                return (error);
        }

        /*
         * Finish the queue initialization.
         * These are set here instead of in mpr_init_queues() because the
         * IOC resets these values during the state transition in
         * mpr_transition_operational().  The free index is set to 1
         * because the corresponding index in the IOC is set to 0, and the
         * IOC treats the queues as full if both are set to the same value.
         * Hence the reason that the queue can't hold all of the possible
         * replies.
         */
        sc->replypostindex = 0;
        mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
        mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);

        /*
         * Attach the subsystems so they can prepare their event masks.
         * XXX Should be dynamic so that IM/IR and user modules can attach
         */
        error = 0;
        while (attaching) {
                mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
                if ((error = mpr_attach_log(sc)) != 0)
                        break;
                if ((error = mpr_attach_sas(sc)) != 0)
                        break;
                if ((error = mpr_attach_user(sc)) != 0)
                        break;
                break;
        }
        if (error) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Failed to attach all subsystems: error %d\n", error);
                mpr_free(sc);
                return (error);
        }

        /*
         * XXX If the number of MSI-X vectors changes during re-init, this
         * won't see it and adjust.
         */
        if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Failed to setup interrupts\n");
                mpr_free(sc);
                return (error);
        }

        return (error);
}

/*
 * This is called if memory is being freed (during detach, for example) and
 * when buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
        struct mpr_command *cm;
        int i;

        mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

        if (sc->free_busaddr != 0)
                bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
        if (sc->free_queue != NULL)
                bus_dmamem_free(sc->queues_dmat, sc->free_queue,
                    sc->queues_map);
        if (sc->queues_dmat != NULL)
                bus_dma_tag_destroy(sc->queues_dmat);

        if (sc->chain_frames != NULL) {
                bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
                bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
                    sc->chain_map);
        }
        if (sc->chain_dmat != NULL)
                bus_dma_tag_destroy(sc->chain_dmat);

        if (sc->sense_busaddr != 0)
                bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
        if (sc->sense_frames != NULL)
                bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
                    sc->sense_map);
        if (sc->sense_dmat != NULL)
                bus_dma_tag_destroy(sc->sense_dmat);

        if (sc->prp_page_busaddr != 0)
                bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
        if (sc->prp_pages != NULL)
                bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
                    sc->prp_page_map);
        if (sc->prp_page_dmat != NULL)
                bus_dma_tag_destroy(sc->prp_page_dmat);

        if (sc->reply_busaddr != 0)
                bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
        if (sc->reply_frames != NULL)
                bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
                    sc->reply_map);
        if (sc->reply_dmat != NULL)
                bus_dma_tag_destroy(sc->reply_dmat);

        if (sc->req_busaddr != 0)
                bus_dmamap_unload(sc->req_dmat, sc->req_map);
        if (sc->req_frames != NULL)
                bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
        if (sc->req_dmat != NULL)
                bus_dma_tag_destroy(sc->req_dmat);

        if (sc->chains != NULL)
                free(sc->chains, M_MPR);
        if (sc->prps != NULL)
                free(sc->prps, M_MPR);
        if (sc->commands != NULL) {
                for (i = 1; i < sc->num_reqs; i++) {
                        cm = &sc->commands[i];
                        bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
                }
                free(sc->commands, M_MPR);
        }
        if (sc->buffer_dmat != NULL)
                bus_dma_tag_destroy(sc->buffer_dmat);

        mpr_pci_free_interrupts(sc);
        free(sc->queues, M_MPR);
        sc->queues = NULL;
}

/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip.  In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip.  This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
        int error;
        struct mprsas_softc *sassc;

        sassc = sc->sassc;

        MPR_FUNCTRACE(sc);

        mtx_assert(&sc->mpr_mtx, MA_OWNED);

        mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
        if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
                mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
                return 0;
        }

        /*
         * Make sure the completion callbacks can recognize they're getting
         * a NULL cm_reply due to a reset.
         */
        sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

        /*
         * Mask interrupts here.
         */
        mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
        mpr_mask_intr(sc);

        error = mpr_diag_reset(sc, CAN_SLEEP);
        if (error != 0) {
                panic("%s hard reset failed with error %d\n", __func__, error);
        }

        /* Restore the PCI state, including the MSI-X registers */
        mpr_pci_restore(sc);

        /* Give the I/O subsystem special priority to get itself prepared */
        mprsas_handle_reinit(sc);

        /*
         * Get IOC Facts and allocate all structures based on this information.
         * The attach function will also call mpr_iocfacts_allocate at startup.
         * If relevant values have changed in IOC Facts, this function will free
         * all of the memory based on IOC Facts and reallocate that memory.
         */
        if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
                panic("%s IOC Facts based allocation failed with error %d\n",
                    __func__, error);
        }

        /*
         * Mapping structures will be re-allocated after getting IOC Page8, so
         * free these structures here.
         */
        mpr_mapping_exit(sc);

        /*
         * The static page function currently read is IOC Page8.  Others can be
         * added in future.  It's possible that the values in IOC Page8 have
         * changed after a Diag Reset due to user modification, so always read
         * these.  Interrupts are masked, so unmask them before getting config
         * pages.
         */
        mpr_unmask_intr(sc);
        sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
        mpr_base_static_config_pages(sc);

        /*
         * Some mapping info is based in IOC Page8 data, so re-initialize the
         * mapping tables.
         */
        mpr_mapping_initialize(sc);

        /*
         * Restart will reload the event masks clobbered by the reset, and
         * then enable the port.
         */
        mpr_reregister_events(sc);

        /* the end of discovery will release the simq, so we're done. */
        mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
            sc, sc->replypostindex, sc->replyfreeindex);
        mprsas_release_simq_reinit(sassc);
        mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);

        return 0;
}

/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait up to <timeout> seconds.  Each loop iteration busy-waits for 500
 * microseconds (NO_SLEEP) or sleeps for about one millisecond (CAN_SLEEP);
 * cntdn is scaled accordingly so the total wait time is the same.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
        u32 cntdn, count;
        u32 int_status;
        u32 doorbell;

        count = 0;
        cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
        do {
                int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
                        mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
                            "timeout(%d)\n", __func__, count, timeout);
                        return 0;
                } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
                        doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
                            MPI2_IOC_STATE_FAULT) {
                                mpr_dprint(sc, MPR_FAULT,
                                    "fault_state(0x%04x)!\n", doorbell);
                                return (EFAULT);
                        }
                } else if (int_status == 0xFFFFFFFF)
                        goto out;

                /*
                 * If it can sleep, sleep for 1 millisecond, else busy loop
                 * for 0.5 millisecond.
                 */
                if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
                        msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
                            hz/1000);
                else if (sleep_flag == CAN_SLEEP)
                        pause("mprdba", hz/1000);
                else
                        DELAY(500);
                count++;
        } while (--cntdn);

out:
        mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
            "int_status(%x)!\n", __func__, count, int_status);
        return (ETIMEDOUT);
}

/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
        int retry;

        for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
                if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
                    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
                        return (0);
                DELAY(2000);
        }
        return (ETIMEDOUT);
}

/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
        uint32_t *data32;
        uint16_t *data16;
        int i, count, ioc_sz, residual;
        int sleep_flags = CAN_SLEEP;

#if __FreeBSD_version >= 1000029
        if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
        if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
                sleep_flags = NO_SLEEP;

        /* Step 1 */
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        /* Step 2 */
        if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
                return (EBUSY);

        /*
         * Step 3
         * Announce that a message is coming through the doorbell.  Messages
         * are pushed as 32-bit words, so round up if needed.
         */
        count = (req_sz + 3) / 4;
        mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
            (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
            (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));

        /* Step 4 */
        if (mpr_wait_db_int(sc) ||
            (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
                mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
                return (ENXIO);
        }
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
                return (ENXIO);
        }

        /* Step 5 */
        /* Clock out the message data synchronously in 32-bit dwords */
        data32 = (uint32_t *)req;
        for (i = 0; i < count; i++) {
                mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
                if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
                        mpr_dprint(sc, MPR_FAULT,
                            "Timeout while writing doorbell\n");
                        return (ENXIO);
                }
        }

        /* Step 6 */
        /*
         * Clock in the reply in 16-bit words.  The total length of the
         * message is always in the 4th byte, so clock in the first 2 words
         * manually, then loop the rest.
         */
        data16 = (uint16_t *)reply;
        if (mpr_wait_db_int(sc) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
                return (ENXIO);
        }
        data16[0] =
            mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        if (mpr_wait_db_int(sc) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
                return (ENXIO);
        }
        data16[1] =
            mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        /* Number of 32bit words in the message */
        ioc_sz = reply->MsgLength;

        /*
         * Figure out how many 16bit words to clock in without overrunning.
         * The precision loss with dividing reply_sz can safely be
         * ignored because the messages can only be multiples of 32bits.
         */
        residual = 0;
        count = MIN((reply_sz / 4), ioc_sz) * 2;
        if (count < ioc_sz * 2) {
                residual = ioc_sz * 2 - count;
                mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
                    "residual message words\n", residual);
        }

        for (i = 2; i < count; i++) {
                if (mpr_wait_db_int(sc) != 0) {
                        mpr_dprint(sc, MPR_FAULT,
                            "Timeout reading doorbell %d\n", i);
                        return (ENXIO);
                }
                data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
                    MPI2_DOORBELL_DATA_MASK;
                mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        }

        /*
         * Pull out residual words that won't fit into the provided buffer.
         * This keeps the chip from hanging due to a driver programming
         * error.
         */
        while (residual--) {
                if (mpr_wait_db_int(sc) != 0) {
                        mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
                        return (ENXIO);
                }
                (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
                mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
        }

        /* Step 7 */
        if (mpr_wait_db_int(sc) != 0) {
                mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
                return (ENXIO);
        }
        if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
                mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        return (0);
}
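
/*
 * Post a request descriptor to the IOC.  Atomic-capable IOCs (Sea and
 * later, per the flags set in mpr_iocfacts_allocate) take a single 32-bit
 * write; other IOCs take the full 64-bit descriptor as two 32-bit register
 * writes, low dword first.  (Presumably the IOC consumes the descriptor on
 * the high-dword write; the MPI spec is the authority on the exact
 * ordering requirement.)
 */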
static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
        request_descriptor rd;

        MPR_FUNCTRACE(sc);
        mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
            cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

        if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
            MPR_FLAGS_SHUTDOWN))
                mtx_assert(&sc->mpr_mtx, MA_OWNED);

        if (++sc->io_cmds_active > sc->io_cmds_highwater)
                sc->io_cmds_highwater++;

        KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, ("command not busy\n"));
        cm->cm_state = MPR_CM_STATE_INQUEUE;

        if (sc->atomic_desc_capable) {
                rd.u.low = cm->cm_desc.Words.Low;
                mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
                    rd.u.low);
        } else {
                rd.u.low = cm->cm_desc.Words.Low;
                rd.u.high = cm->cm_desc.Words.High;
                rd.word = htole64(rd.word);
                mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
                    rd.u.low);
                mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
                    rd.u.high);
        }
}

/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
        MPI2_DEFAULT_REPLY *reply;
        MPI2_IOC_FACTS_REQUEST request;
        int error, req_sz, reply_sz;

        MPR_FUNCTRACE(sc);
        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
        reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
        reply = (MPI2_DEFAULT_REPLY *)facts;

        bzero(&request, req_sz);
        request.Function = MPI2_FUNCTION_IOC_FACTS;
        error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

        mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
        return (error);
}

static int
mpr_send_iocinit(struct mpr_softc *sc)
{
        MPI2_IOC_INIT_REQUEST init;
        MPI2_DEFAULT_REPLY reply;
        int req_sz, reply_sz, error;
        struct timeval now;
        uint64_t time_in_msec;

        MPR_FUNCTRACE(sc);
        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        /* Do a quick sanity check on proper initialization */
        if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
            || (sc->replyframesz == 0)) {
                mpr_dprint(sc, MPR_INIT|MPR_ERROR,
                    "Driver not fully initialized for IOCInit\n");
                return (EINVAL);
        }

        req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
        reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
        bzero(&init, req_sz);
        bzero(&reply, reply_sz);

        /*
         * Fill in the init block.  Note that most addresses are
         * deliberately in the lower 32bits of memory.  This is a micro-
         * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
         */
        init.Function = MPI2_FUNCTION_IOC_INIT;
        init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
        init.MsgVersion = htole16(MPI2_VERSION);
        init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
        init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
        init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
        init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
        init.SenseBufferAddressHigh = 0;
        init.SystemReplyAddressHigh = 0;
        init.SystemRequestFrameBaseAddress.High = 0;
        init.SystemRequestFrameBaseAddress.Low =
            htole32((uint32_t)sc->req_busaddr);
        init.ReplyDescriptorPostQueueAddress.High = 0;
        init.ReplyDescriptorPostQueueAddress.Low =
            htole32((uint32_t)sc->post_busaddr);
        init.ReplyFreeQueueAddress.High = 0;
        init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
        getmicrotime(&now);
        time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
        init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
        init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
        init.HostPageSize = HOST_PAGE_SIZE_4K;

        error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
        if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
                error = ENXIO;

        mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
        mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
        return (error);
}

void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        bus_addr_t *addr;

        addr = arg;
        *addr = segs[0].ds_addr;
}
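
/*
 * Variant of mpr_memaddr_cb for callers that sleep waiting for the mapping
 * to complete: it records the result and wakes the waiter.  If the waiter
 * has already timed out and marked the context abandoned, the callback
 * instead unloads the map and frees the context itself.
 */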
void
mpr_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct mpr_busdma_context *ctx;
        int need_unload, need_free;

        ctx = (struct mpr_busdma_context *)arg;
        need_unload = 0;
        need_free = 0;

        mpr_lock(ctx->softc);
        ctx->error = error;
        ctx->completed = 1;
        if ((error == 0) && (ctx->abandoned == 0)) {
                *ctx->addr = segs[0].ds_addr;
        } else {
                if (nsegs != 0)
                        need_unload = 1;
                if (ctx->abandoned != 0)
                        need_free = 1;
        }
        if (need_free == 0)
                wakeup(ctx);

        mpr_unlock(ctx->softc);

        if (need_unload != 0) {
                bus_dmamap_unload(ctx->buffer_dmat,
                    ctx->buffer_dmamap);
                *ctx->addr = 0;
        }

        if (need_free != 0)
                free(ctx, M_MPR);
}

static int
mpr_alloc_queues(struct mpr_softc *sc)
{
        struct mpr_queue *q;
        int nq, i;

        nq = sc->msi_msgs;
        mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);

        sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR,
            M_NOWAIT|M_ZERO);
        if (sc->queues == NULL)
                return (ENOMEM);

        for (i = 0; i < nq; i++) {
                q = &sc->queues[i];
                mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
                q->sc = sc;
                q->qnum = i;
        }
        return (0);
}

static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
        bus_dma_tag_template_t t;
        bus_addr_t queues_busaddr;
        uint8_t *queues;
        int qsize, fqsize, pqsize;

        /*
         * The reply free queue contains 4 byte entries in multiples of 16 and
         * aligned on a 16 byte boundary.  There must always be an unused entry.
         * This queue supplies fresh reply frames for the firmware to use.
         *
         * The reply descriptor post queue contains 8 byte entries in
         * multiples of 16 and aligned on a 16 byte boundary.  This queue
         * contains filled-in reply frames sent from the firmware to the host.
         *
         * These two queues are allocated together for simplicity.
         */
        sc->fqdepth = roundup2(sc->num_replies + 1, 16);
        sc->pqdepth = roundup2(sc->num_replies + 1, 16);
        fqsize = sc->fqdepth * 4;
        pqsize = sc->pqdepth * 8;
        qsize = fqsize + pqsize;
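
        /*
         * Worked example with a hypothetical depth: num_replies = 1023
         * gives fqdepth = pqdepth = roundup2(1024, 16) = 1024, i.e. a 4KB
         * free queue followed by an 8KB post queue in the single shared
         * allocation below.
         */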
        bus_dma_template_init(&t, sc->mpr_parent_dmat);
        t.alignment = 16;
        t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
        t.maxsize = t.maxsegsize = qsize;
        t.nsegments = 1;
        if (bus_dma_template_tag(&t, &sc->queues_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
            &sc->queues_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
                return (ENOMEM);
        }
        bzero(queues, qsize);
        bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
            mpr_memaddr_cb, &queues_busaddr, 0);

        sc->free_queue = (uint32_t *)queues;
        sc->free_busaddr = queues_busaddr;
        sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
        sc->post_busaddr = queues_busaddr + fqsize;
        mpr_dprint(sc, MPR_INIT, "free queue busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->free_busaddr, fqsize);
        mpr_dprint(sc, MPR_INIT, "reply queue busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->post_busaddr, pqsize);

        return (0);
}

static int
mpr_alloc_replies(struct mpr_softc *sc)
{
        bus_dma_tag_template_t t;
        int rsize, num_replies;

        /* Store the reply frame size in bytes rather than as 32bit words */
        sc->replyframesz = sc->facts->ReplyFrameSize * 4;

        /*
         * sc->num_replies should be one less than sc->fqdepth.  We need to
         * allocate space for sc->fqdepth replies, but only sc->num_replies
         * replies can be used at once.
         */
        num_replies = max(sc->fqdepth, sc->num_replies);

        rsize = sc->replyframesz * num_replies;
        bus_dma_template_init(&t, sc->mpr_parent_dmat);
        t.alignment = 4;
        t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
        t.maxsize = t.maxsegsize = rsize;
        t.nsegments = 1;
        if (bus_dma_template_tag(&t, &sc->reply_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
            BUS_DMA_NOWAIT, &sc->reply_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
                return (ENOMEM);
        }
        bzero(sc->reply_frames, rsize);
        bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
            mpr_memaddr_cb, &sc->reply_busaddr, 0);
        mpr_dprint(sc, MPR_INIT, "reply frames busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->reply_busaddr, rsize);

        return (0);
}
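
/*
 * busdma callback that carves the loaded chain-frame memory into
 * chain_frame_size pieces and returns each one to the free chain list.
 * A chain frame is never allowed to straddle a DMA segment boundary, so
 * any residue at the end of a segment is skipped over.
 */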
static void
mpr_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct mpr_softc *sc = arg;
        struct mpr_chain *chain;
        bus_size_t bo;
        int i, o, s;

        if (error != 0)
                return;

        for (i = 0, o = 0, s = 0; s < nsegs; s++) {
                for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len;
                    bo += sc->chain_frame_size) {
                        chain = &sc->chains[i++];
                        chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + o);
                        chain->chain_busaddr = segs[s].ds_addr + bo;
                        o += sc->chain_frame_size;
                        mpr_free_chain(sc, chain);
                }
                if (bo != segs[s].ds_len)
                        o += segs[s].ds_len - bo;
        }
        sc->chain_free_lowwater = i;
}

static int
mpr_alloc_requests(struct mpr_softc *sc)
{
        bus_dma_tag_template_t t;
        struct mpr_command *cm;
        int i, rsize, nsegs;

        rsize = sc->reqframesz * sc->num_reqs;
        bus_dma_template_init(&t, sc->mpr_parent_dmat);
        t.alignment = 16;
        t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
        t.maxsize = t.maxsegsize = rsize;
        t.nsegments = 1;
        if (bus_dma_template_tag(&t, &sc->req_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
            BUS_DMA_NOWAIT, &sc->req_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
                return (ENOMEM);
        }
        bzero(sc->req_frames, rsize);
        bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
            mpr_memaddr_cb, &sc->req_busaddr, 0);
        mpr_dprint(sc, MPR_INIT, "request frames busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->req_busaddr, rsize);

        sc->chains = malloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR,
            M_NOWAIT | M_ZERO);
        if (!sc->chains) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
                return (ENOMEM);
        }
        rsize = sc->chain_frame_size * sc->num_chains;
        bus_dma_template_init(&t, sc->mpr_parent_dmat);
        t.alignment = 16;
        t.maxsize = t.maxsegsize = rsize;
        t.nsegments = howmany(rsize, PAGE_SIZE);
        if (bus_dma_template_tag(&t, &sc->chain_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n");
                return (ENOMEM);
        }
        if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
            rsize, mpr_load_chains_cb, sc, BUS_DMA_NOWAIT)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot load chain memory\n");
                bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
                    sc->chain_map);
                return (ENOMEM);
        }

        rsize = MPR_SENSE_LEN * sc->num_reqs;
        bus_dma_template_clone(&t, sc->req_dmat);
        t.maxsize = t.maxsegsize = rsize;
        if (bus_dma_template_tag(&t, &sc->sense_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
            BUS_DMA_NOWAIT, &sc->sense_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n");
                return (ENOMEM);
        }
        bzero(sc->sense_frames, rsize);
        bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
            mpr_memaddr_cb, &sc->sense_busaddr, 0);
        mpr_dprint(sc, MPR_INIT, "sense frames busaddr= %#016jx size= %d\n",
            (uintmax_t)sc->sense_busaddr, rsize);

        /*
         * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
         * these devices.
         */
        if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
            (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
                if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
                        return (ENOMEM);
        }

        nsegs = (sc->maxio / PAGE_SIZE) + 1;
        bus_dma_template_init(&t, sc->mpr_parent_dmat);
        t.nsegments = nsegs;
        t.flags = BUS_DMA_ALLOCNOW;
        t.lockfunc = busdma_lock_mutex;
        t.lockfuncarg = &sc->mpr_mtx;
        if (bus_dma_template_tag(&t, &sc->buffer_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
                return (ENOMEM);
        }

        /*
         * SMID 0 cannot be used as a free command per the firmware spec.
         * Just drop that command instead of risking accounting bugs.
         */
        sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
            M_MPR, M_WAITOK | M_ZERO);
        if (!sc->commands) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n");
                return (ENOMEM);
        }
        for (i = 1; i < sc->num_reqs; i++) {
                cm = &sc->commands[i];
                cm->cm_req = sc->req_frames + i * sc->reqframesz;
                cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
                cm->cm_sense = &sc->sense_frames[i];
                cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
                cm->cm_desc.Default.SMID = i;
                cm->cm_sc = sc;
                cm->cm_state = MPR_CM_STATE_BUSY;
                TAILQ_INIT(&cm->cm_chain_list);
                TAILQ_INIT(&cm->cm_prp_page_list);
                callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);

                /* XXX Is a failure here a critical problem? */
                if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
                    == 0) {
                        if (i <= sc->num_prireqs)
                                mpr_free_high_priority_command(sc, cm);
                        else
                                mpr_free_command(sc, cm);
                } else {
                        panic("failed to allocate command %d\n", i);
                        sc->num_reqs = i;
                        break;
                }
        }

        return (0);
}

/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
        bus_dma_tag_template_t t;
        struct mpr_prp_page *prp_page;
        int PRPs_per_page, PRPs_required, pages_required;
        int rsize, i;

        /*
         * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
         * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
         * MAX_IO_SIZE / PAGE_SIZE = 256
         *
         * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
         * required for the remainder of the 1MB I/O.  512 PRPs can fit into one
         * page (4096 / 8 = 512), so only one page is required for each I/O.
         *
         * Each of these buffers will need to be contiguous.  For simplicity,
         * only one buffer is allocated here, which has all of the space
         * required for the NVMe Queue Depth.  If there are problems allocating
         * this one buffer, this function will need to change to allocate
         * individual, contiguous NVME_QDEPTH buffers.
         *
         * The real calculation will use the real max io size.  Above is just an
         * example.
         */
        PRPs_required = sc->maxio / PAGE_SIZE;
        PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
        pages_required = (PRPs_required / PRPs_per_page) + 1;

        sc->prp_buffer_size = PAGE_SIZE * pages_required;
        rsize = sc->prp_buffer_size * NVME_QDEPTH;
        bus_dma_template_init(&t, sc->mpr_parent_dmat);
        t.alignment = 4;
        t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
        t.maxsize = t.maxsegsize = rsize;
        t.nsegments = 1;
        if (bus_dma_template_tag(&t, &sc->prp_page_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
                    "tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
            BUS_DMA_NOWAIT, &sc->prp_page_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
                return (ENOMEM);
        }
        bzero(sc->prp_pages, rsize);
        bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
            rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

        sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
            M_WAITOK | M_ZERO);
        for (i = 0; i < NVME_QDEPTH; i++) {
                prp_page = &sc->prps[i];
                prp_page->prp_page = (uint64_t *)(sc->prp_pages +
                    i * sc->prp_buffer_size);
                prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
                    i * sc->prp_buffer_size);
                mpr_free_prp_page(sc, prp_page);
                sc->prp_pages_free_lowwater++;
        }

        return (0);
}

static int
mpr_init_queues(struct mpr_softc *sc)
{
        int i;

        memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);

        /*
         * According to the spec, we need to use one less reply than we
         * have space for on the queue.  So sc->num_replies (the number we
         * use) should be less than sc->fqdepth (allocated size).
         */
        if (sc->num_replies >= sc->fqdepth)
                return (EINVAL);

        /*
         * Initialize all of the free queue entries.
         */
        for (i = 0; i < sc->fqdepth; i++) {
                sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
        }
        sc->replyfreeindex = sc->num_replies;

        return (0);
}

/*
 * Get the driver parameter tunables.  Lowest priority are the driver defaults.
 * Next are the global settings, if they exist.  Highest are the per-unit
 * settings, if they exist.
 */
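/*
 * For example (hypothetical values), in /boot/loader.conf:
 *
 *	hw.mpr.max_chains="4096"
 *	dev.mpr.0.debug_level="info,fault"
 *
 * The debug_level string is parsed by mpr_parse_debug() using the flag
 * names in mpr_debug_strings, defined later in this file.
 */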
void
mpr_get_tunables(struct mpr_softc *sc)
{
        char tmpstr[80], mpr_debug[80];

        /* XXX default to some debugging for now */
        sc->mpr_debug = MPR_INFO | MPR_FAULT;
        sc->disable_msix = 0;
        sc->disable_msi = 0;
        sc->max_msix = MPR_MSIX_MAX;
        sc->max_chains = MPR_CHAIN_FRAMES;
        sc->max_io_pages = MPR_MAXIO_PAGES;
        sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
        sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
        sc->use_phynum = 1;
        sc->max_reqframes = MPR_REQ_FRAMES;
        sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
        sc->max_replyframes = MPR_REPLY_FRAMES;
        sc->max_evtframes = MPR_EVT_REPLY_FRAMES;

        /*
         * Grab the global variables.
         */
        bzero(mpr_debug, 80);
        if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0)
                mpr_parse_debug(sc, mpr_debug);
        TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
        TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
        TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix);
        TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
        TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
        TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
        TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
        TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
        TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
        TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
        TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
        TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);

        /* Grab the unit-instance variables */
        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
            device_get_unit(sc->mpr_dev));
        bzero(mpr_debug, 80);
        if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0)
                mpr_parse_debug(sc, mpr_debug);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);

        bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
            device_get_unit(sc->mpr_dev));
        TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);

        snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes",
            device_get_unit(sc->mpr_dev));
        TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
}
&sc->max_reqframes); 1765 1766 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes", 1767 device_get_unit(sc->mpr_dev)); 1768 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); 1769 1770 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes", 1771 device_get_unit(sc->mpr_dev)); 1772 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); 1773 1774 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes", 1775 device_get_unit(sc->mpr_dev)); 1776 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); 1777 } 1778 1779 static void 1780 mpr_setup_sysctl(struct mpr_softc *sc) 1781 { 1782 struct sysctl_ctx_list *sysctl_ctx = NULL; 1783 struct sysctl_oid *sysctl_tree = NULL; 1784 char tmpstr[80], tmpstr2[80]; 1785 1786 /* 1787 * Setup the sysctl variable so the user can change the debug level 1788 * on the fly. 1789 */ 1790 snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d", 1791 device_get_unit(sc->mpr_dev)); 1792 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); 1793 1794 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); 1795 if (sysctl_ctx != NULL) 1796 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); 1797 1798 if (sysctl_tree == NULL) { 1799 sysctl_ctx_init(&sc->sysctl_ctx); 1800 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 1801 SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2, 1802 CTLFLAG_RD, 0, tmpstr); 1803 if (sc->sysctl_tree == NULL) 1804 return; 1805 sysctl_ctx = &sc->sysctl_ctx; 1806 sysctl_tree = sc->sysctl_tree; 1807 } 1808 1809 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1810 OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1811 sc, 0, mpr_debug_sysctl, "A", "mpr debug level"); 1812 1813 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1814 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, 1815 "Disable the use of MSI-X interrupts"); 1816 1817 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1818 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, 1819 "User-defined maximum number of MSIX queues"); 1820 1821 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1822 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, 1823 "Negotiated number of MSIX queues"); 1824 1825 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1826 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, 1827 "Total number of allocated request frames"); 1828 1829 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1830 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, 1831 "Total number of allocated high priority request frames"); 1832 1833 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1834 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, 1835 "Total number of allocated reply frames"); 1836 1837 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1838 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, 1839 "Total number of event frames allocated"); 1840 1841 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1842 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 1843 strlen(sc->fw_version), "firmware version"); 1844 1845 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1846 OID_AUTO, "driver_version", CTLFLAG_RD, MPR_DRIVER_VERSION, 1847 strlen(MPR_DRIVER_VERSION), "driver version"); 1848 1849 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1850 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version, 1851 strlen(sc->msg_version), "message interface version"); 1852 1853 
SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1854 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 1855 &sc->io_cmds_active, 0, "number of currently active commands"); 1856 1857 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1858 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1859 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1860 1861 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1862 OID_AUTO, "chain_free", CTLFLAG_RD, 1863 &sc->chain_free, 0, "number of free chain elements"); 1864 1865 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1866 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1867 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1868 1869 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1870 OID_AUTO, "max_chains", CTLFLAG_RD, 1871 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1872 1873 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1874 OID_AUTO, "max_io_pages", CTLFLAG_RD, 1875 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use " 1876 "IOCFacts)"); 1877 1878 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1879 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, 1880 "enable SSU to SATA SSD/HDD at shutdown"); 1881 1882 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1883 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1884 &sc->chain_alloc_fail, "chain allocation failures"); 1885 1886 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1887 OID_AUTO, "spinup_wait_time", CTLFLAG_RD, 1888 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " 1889 "spinup after SATA ID error"); 1890 1891 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1892 OID_AUTO, "dump_reqs", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP, sc, 0, 1893 mpr_dump_reqs, "I", "Dump Active Requests"); 1894 1895 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1896 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, 1897 "Use the phy number for enumeration"); 1898 1899 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1900 OID_AUTO, "prp_pages_free", CTLFLAG_RD, 1901 &sc->prp_pages_free, 0, "number of free PRP pages"); 1902 1903 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1904 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD, 1905 &sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages"); 1906 1907 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1908 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD, 1909 &sc->prp_page_alloc_fail, "PRP page allocation failures"); 1910 } 1911 1912 static struct mpr_debug_string { 1913 char *name; 1914 int flag; 1915 } mpr_debug_strings[] = { 1916 {"info", MPR_INFO}, 1917 {"fault", MPR_FAULT}, 1918 {"event", MPR_EVENT}, 1919 {"log", MPR_LOG}, 1920 {"recovery", MPR_RECOVERY}, 1921 {"error", MPR_ERROR}, 1922 {"init", MPR_INIT}, 1923 {"xinfo", MPR_XINFO}, 1924 {"user", MPR_USER}, 1925 {"mapping", MPR_MAPPING}, 1926 {"trace", MPR_TRACE} 1927 }; 1928 1929 enum mpr_debug_level_combiner { 1930 COMB_NONE, 1931 COMB_ADD, 1932 COMB_SUB 1933 }; 1934 1935 static int 1936 mpr_debug_sysctl(SYSCTL_HANDLER_ARGS) 1937 { 1938 struct mpr_softc *sc; 1939 struct mpr_debug_string *string; 1940 struct sbuf *sbuf; 1941 char *buffer; 1942 size_t sz; 1943 int i, len, debug, error; 1944 1945 sc = (struct mpr_softc *)arg1; 1946 1947 error = sysctl_wire_old_buffer(req, 0); 1948 if (error != 0) 1949 return (error); 1950 1951 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 1952 debug = sc->mpr_debug; 1953 1954 
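	/* Report the current mask in hex first, then append the name of each flag that is set; a mask with only the info and fault bits set would read back as its hex value followed by ",info,fault" (illustrative). */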
sbuf_printf(sbuf, "%#x", debug); 1955 1956 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); 1957 for (i = 0; i < sz; i++) { 1958 string = &mpr_debug_strings[i]; 1959 if (debug & string->flag) 1960 sbuf_printf(sbuf, ",%s", string->name); 1961 } 1962 1963 error = sbuf_finish(sbuf); 1964 sbuf_delete(sbuf); 1965 1966 if (error || req->newptr == NULL) 1967 return (error); 1968 1969 len = req->newlen - req->newidx; 1970 if (len == 0) 1971 return (0); 1972 1973 buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK); 1974 error = SYSCTL_IN(req, buffer, len); 1975 1976 mpr_parse_debug(sc, buffer); 1977 1978 free(buffer, M_MPR); 1979 return (error); 1980 } 1981 1982 static void 1983 mpr_parse_debug(struct mpr_softc *sc, char *list) 1984 { 1985 struct mpr_debug_string *string; 1986 enum mpr_debug_level_combiner op; 1987 char *token, *endtoken; 1988 size_t sz; 1989 int flags, i; 1990 1991 if (list == NULL || *list == '\0') 1992 return; 1993 1994 if (*list == '+') { 1995 op = COMB_ADD; 1996 list++; 1997 } else if (*list == '-') { 1998 op = COMB_SUB; 1999 list++; 2000 } else 2001 op = COMB_NONE; 2002 if (*list == '\0') 2003 return; 2004 2005 flags = 0; 2006 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); 2007 while ((token = strsep(&list, ":,")) != NULL) { 2008 2009 /* Handle integer flags */ 2010 flags |= strtol(token, &endtoken, 0); 2011 if (token != endtoken) 2012 continue; 2013 2014 /* Handle text flags */ 2015 for (i = 0; i < sz; i++) { 2016 string = &mpr_debug_strings[i]; 2017 if (strcasecmp(token, string->name) == 0) { 2018 flags |= string->flag; 2019 break; 2020 } 2021 } 2022 } 2023 2024 switch (op) { 2025 case COMB_NONE: 2026 sc->mpr_debug = flags; 2027 break; 2028 case COMB_ADD: 2029 sc->mpr_debug |= flags; 2030 break; 2031 case COMB_SUB: 2032 sc->mpr_debug &= (~flags); 2033 break; 2034 } 2035 return; 2036 } 2037 2038 struct mpr_dumpreq_hdr { 2039 uint32_t smid; 2040 uint32_t state; 2041 uint32_t numframes; 2042 uint32_t deschi; 2043 uint32_t desclo; 2044 }; 2045 2046 static int 2047 mpr_dump_reqs(SYSCTL_HANDLER_ARGS) 2048 { 2049 struct mpr_softc *sc; 2050 struct mpr_chain *chain, *chain1; 2051 struct mpr_command *cm; 2052 struct mpr_dumpreq_hdr hdr; 2053 struct sbuf *sb; 2054 uint32_t smid, state; 2055 int i, numreqs, error = 0; 2056 2057 sc = (struct mpr_softc *)arg1; 2058 2059 if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) { 2060 printf("priv check error %d\n", error); 2061 return (error); 2062 } 2063 2064 state = MPR_CM_STATE_INQUEUE; 2065 smid = 1; 2066 numreqs = sc->num_reqs; 2067 2068 if (req->newptr != NULL) 2069 return (EINVAL); 2070 2071 if (smid == 0 || smid > sc->num_reqs) 2072 return (EINVAL); 2073 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs)) 2074 numreqs = sc->num_reqs; 2075 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 2076 2077 /* Best effort, no locking */ 2078 for (i = smid; i < numreqs; i++) { 2079 cm = &sc->commands[i]; 2080 if (cm->cm_state != state) 2081 continue; 2082 hdr.smid = i; 2083 hdr.state = cm->cm_state; 2084 hdr.numframes = 1; 2085 hdr.deschi = cm->cm_desc.Words.High; 2086 hdr.desclo = cm->cm_desc.Words.Low; 2087 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, 2088 chain1) 2089 hdr.numframes++; 2090 sbuf_bcat(sb, &hdr, sizeof(hdr)); 2091 sbuf_bcat(sb, cm->cm_req, 128); 2092 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, 2093 chain1) 2094 sbuf_bcat(sb, chain->chain, 128); 2095 } 2096 2097 error = sbuf_finish(sb); 2098 sbuf_delete(sb); 2099 return (error); 2100 } 2101 2102 int 2103 mpr_attach(struct 
mpr_softc *sc) 2104 { 2105 int error; 2106 2107 MPR_FUNCTRACE(sc); 2108 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2109 2110 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); 2111 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); 2112 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0); 2113 TAILQ_INIT(&sc->event_list); 2114 timevalclear(&sc->lastfail); 2115 2116 if ((error = mpr_transition_ready(sc)) != 0) { 2117 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 2118 "Failed to transition ready\n"); 2119 return (error); 2120 } 2121 2122 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, 2123 M_ZERO|M_NOWAIT); 2124 if (!sc->facts) { 2125 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 2126 "Cannot allocate memory, exit\n"); 2127 return (ENOMEM); 2128 } 2129 2130 /* 2131 * Get IOC Facts and allocate all structures based on this information. 2132 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC 2133 * Facts. If relevant values have changed in IOC Facts, this function 2134 * will free all of the memory based on IOC Facts and reallocate that 2135 * memory. If this fails, any allocated memory should already be freed. 2136 */ 2137 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) { 2138 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation " 2139 "failed with error %d\n", error); 2140 return (error); 2141 } 2142 2143 /* Start the periodic watchdog check on the IOC Doorbell */ 2144 mpr_periodic(sc); 2145 2146 /* 2147 * The portenable will kick off discovery events that will drive the 2148 * rest of the initialization process. The CAM/SAS module will 2149 * hold up the boot sequence until discovery is complete. 2150 */ 2151 sc->mpr_ich.ich_func = mpr_startup; 2152 sc->mpr_ich.ich_arg = sc; 2153 if (config_intrhook_establish(&sc->mpr_ich) != 0) { 2154 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2155 "Cannot establish MPR config hook\n"); 2156 error = EINVAL; 2157 } 2158 2159 /* 2160 * Allow IR to shutdown gracefully when shutdown occurs. 2161 */ 2162 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 2163 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 2164 2165 if (sc->shutdown_eh == NULL) 2166 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2167 "shutdown event registration failed\n"); 2168 2169 mpr_setup_sysctl(sc); 2170 2171 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; 2172 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error); 2173 2174 return (error); 2175 } 2176 2177 /* Run through any late-start handlers. */ 2178 static void 2179 mpr_startup(void *arg) 2180 { 2181 struct mpr_softc *sc; 2182 2183 sc = (struct mpr_softc *)arg; 2184 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2185 2186 mpr_lock(sc); 2187 mpr_unmask_intr(sc); 2188 2189 /* initialize device mapping tables */ 2190 mpr_base_static_config_pages(sc); 2191 mpr_mapping_initialize(sc); 2192 mprsas_startup(sc); 2193 mpr_unlock(sc); 2194 2195 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n"); 2196 config_intrhook_disestablish(&sc->mpr_ich); 2197 sc->mpr_ich.ich_arg = NULL; 2198 2199 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2200 } 2201 2202 /* Periodic watchdog. Is called with the driver lock already held. 
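 * Reads the doorbell and, on an IOC fault, either panics (for an over-temperature fault) or reinitializes the controller; it then reschedules itself every MPR_PERIODIC_DELAY seconds.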
*/ 2203 static void 2204 mpr_periodic(void *arg) 2205 { 2206 struct mpr_softc *sc; 2207 uint32_t db; 2208 2209 sc = (struct mpr_softc *)arg; 2210 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) 2211 return; 2212 2213 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 2214 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 2215 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) == 2216 MPI2_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) { 2217 panic("TEMPERATURE FAULT: STOPPING."); 2218 } 2219 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db); 2220 mpr_reinit(sc); 2221 } 2222 2223 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc); 2224 } 2225 2226 static void 2227 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data, 2228 MPI2_EVENT_NOTIFICATION_REPLY *event) 2229 { 2230 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 2231 2232 MPR_DPRINT_EVENT(sc, generic, event); 2233 2234 switch (event->Event) { 2235 case MPI2_EVENT_LOG_DATA: 2236 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n"); 2237 if (sc->mpr_debug & MPR_EVENT) 2238 hexdump(event->EventData, event->EventDataLength, NULL, 2239 0); 2240 break; 2241 case MPI2_EVENT_LOG_ENTRY_ADDED: 2242 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 2243 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " 2244 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 2245 entry->LogSequence); 2246 break; 2247 default: 2248 break; 2249 } 2250 return; 2251 } 2252 2253 static int 2254 mpr_attach_log(struct mpr_softc *sc) 2255 { 2256 uint8_t events[16]; 2257 2258 bzero(events, 16); 2259 setbit(events, MPI2_EVENT_LOG_DATA); 2260 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 2261 2262 mpr_register_events(sc, events, mpr_log_evt_handler, NULL, 2263 &sc->mpr_log_eh); 2264 2265 return (0); 2266 } 2267 2268 static int 2269 mpr_detach_log(struct mpr_softc *sc) 2270 { 2271 2272 if (sc->mpr_log_eh != NULL) 2273 mpr_deregister_events(sc, sc->mpr_log_eh); 2274 return (0); 2275 } 2276 2277 /* 2278 * Free all of the driver resources and detach submodules. Should be called 2279 * without the lock held. 2280 */ 2281 int 2282 mpr_free(struct mpr_softc *sc) 2283 { 2284 int error; 2285 2286 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2287 /* Turn off the watchdog */ 2288 mpr_lock(sc); 2289 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; 2290 mpr_unlock(sc); 2291 /* Lock must not be held for this */ 2292 callout_drain(&sc->periodic); 2293 callout_drain(&sc->device_check_callout); 2294 2295 if (((error = mpr_detach_log(sc)) != 0) || 2296 ((error = mpr_detach_sas(sc)) != 0)) { 2297 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach " 2298 "subsystems, error= %d, exit\n", error); 2299 return (error); 2300 } 2301 2302 mpr_detach_user(sc); 2303 2304 /* Put the IOC back in the READY state. */ 2305 mpr_lock(sc); 2306 if ((error = mpr_transition_ready(sc)) != 0) { 2307 mpr_unlock(sc); 2308 return (error); 2309 } 2310 mpr_unlock(sc); 2311 2312 if (sc->facts != NULL) 2313 free(sc->facts, M_MPR); 2314 2315 /* 2316 * Free all buffers that are based on IOC Facts. A Diag Reset may need 2317 * to free these buffers too.
2318 */ 2319 mpr_iocfacts_free(sc); 2320 2321 if (sc->sysctl_tree != NULL) 2322 sysctl_ctx_free(&sc->sysctl_ctx); 2323 2324 /* Deregister the shutdown function */ 2325 if (sc->shutdown_eh != NULL) 2326 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 2327 2328 mtx_destroy(&sc->mpr_mtx); 2329 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2330 2331 return (0); 2332 } 2333 2334 static __inline void 2335 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) 2336 { 2337 MPR_FUNCTRACE(sc); 2338 2339 if (cm == NULL) { 2340 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); 2341 return; 2342 } 2343 2344 cm->cm_state = MPR_CM_STATE_BUSY; 2345 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 2346 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 2347 2348 if (cm->cm_complete != NULL) { 2349 mpr_dprint(sc, MPR_TRACE, 2350 "%s cm %p calling cm_complete %p data %p reply %p\n", 2351 __func__, cm, cm->cm_complete, cm->cm_complete_data, 2352 cm->cm_reply); 2353 cm->cm_complete(sc, cm); 2354 } 2355 2356 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 2357 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); 2358 wakeup(cm); 2359 } 2360 2361 if (sc->io_cmds_active != 0) { 2362 sc->io_cmds_active--; 2363 } else { 2364 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " 2365 "out of sync - resynching to 0\n"); 2366 } 2367 } 2368 2369 static void 2370 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info) 2371 { 2372 union loginfo_type { 2373 u32 loginfo; 2374 struct { 2375 u32 subcode:16; 2376 u32 code:8; 2377 u32 originator:4; 2378 u32 bus_type:4; 2379 } dw; 2380 }; 2381 union loginfo_type sas_loginfo; 2382 char *originator_str = NULL; 2383 2384 sas_loginfo.loginfo = log_info; 2385 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 2386 return; 2387 2388 /* each nexus loss loginfo */ 2389 if (log_info == 0x31170000) 2390 return; 2391 2392 /* eat the loginfos associated with task aborts */ 2393 if ((log_info == 0x30050000) || (log_info == 0x31140000) || 2394 (log_info == 0x31130000)) 2395 return; 2396 2397 switch (sas_loginfo.dw.originator) { 2398 case 0: 2399 originator_str = "IOP"; 2400 break; 2401 case 1: 2402 originator_str = "PL"; 2403 break; 2404 case 2: 2405 originator_str = "IR"; 2406 break; 2407 } 2408 2409 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), " 2410 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, 2411 sas_loginfo.dw.code, sas_loginfo.dw.subcode); 2412 } 2413 2414 static void 2415 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) 2416 { 2417 MPI2DefaultReply_t *mpi_reply; 2418 u16 sc_status; 2419 2420 mpi_reply = (MPI2DefaultReply_t*)reply; 2421 sc_status = le16toh(mpi_reply->IOCStatus); 2422 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 2423 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); 2424 } 2425 2426 void 2427 mpr_intr(void *data) 2428 { 2429 struct mpr_softc *sc; 2430 uint32_t status; 2431 2432 sc = (struct mpr_softc *)data; 2433 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2434 2435 /* 2436 * Check the interrupt status register to flush the bus. This is 2437 * needed for both INTx interrupts and driver-driven polling. 2438 */ 2439 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 2440 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 2441 return; 2442 2443 mpr_lock(sc); 2444 mpr_intr_locked(data); 2445 mpr_unlock(sc); 2446 return; 2447 } 2448 2449 /* 2450 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the 2451 * chip. Hopefully this theory is correct.
2452 */ 2453 void 2454 mpr_intr_msi(void *data) 2455 { 2456 struct mpr_softc *sc; 2457 2458 sc = (struct mpr_softc *)data; 2459 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2460 mpr_lock(sc); 2461 mpr_intr_locked(data); 2462 mpr_unlock(sc); 2463 return; 2464 } 2465 2466 /* 2467 * The locking is overly broad and simplistic, but easy to deal with for now. 2468 */ 2469 void 2470 mpr_intr_locked(void *data) 2471 { 2472 MPI2_REPLY_DESCRIPTORS_UNION *desc; 2473 MPI2_DIAG_RELEASE_REPLY *rel_rep; 2474 mpr_fw_diagnostic_buffer_t *pBuffer; 2475 struct mpr_softc *sc; 2476 uint64_t tdesc; 2477 struct mpr_command *cm = NULL; 2478 uint8_t flags; 2479 u_int pq; 2480 2481 sc = (struct mpr_softc *)data; 2482 2483 pq = sc->replypostindex; 2484 mpr_dprint(sc, MPR_TRACE, 2485 "%s sc %p starting with replypostindex %u\n", 2486 __func__, sc, sc->replypostindex); 2487 2488 for ( ;; ) { 2489 cm = NULL; 2490 desc = &sc->post_queue[sc->replypostindex]; 2491 2492 /* 2493 * Copy and clear out the descriptor so that any reentry will 2494 * immediately know that this descriptor has already been 2495 * looked at. There is unfortunate casting magic because the 2496 * MPI API doesn't have a cardinal 64bit type. 2497 */ 2498 tdesc = 0xffffffffffffffff; 2499 tdesc = atomic_swap_64((uint64_t *)desc, tdesc); 2500 desc = (MPI2_REPLY_DESCRIPTORS_UNION *)&tdesc; 2501 2502 flags = desc->Default.ReplyFlags & 2503 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2504 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || 2505 (le32toh(desc->Words.High) == 0xffffffff)) 2506 break; 2507 2508 /* increment the replypostindex now, so that event handlers 2509 * and cm completion handlers which decide to do a diag 2510 * reset can zero it without it getting incremented again 2511 * afterwards, and we break out of this loop on the next 2512 * iteration since the reply post queue has been cleared to 2513 * 0xFF and all descriptors look unused (which they are). 2514 */ 2515 if (++sc->replypostindex >= sc->pqdepth) 2516 sc->replypostindex = 0; 2517 2518 switch (flags) { 2519 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 2520 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: 2521 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS: 2522 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; 2523 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, 2524 ("command not inqueue\n")); 2525 cm->cm_state = MPR_CM_STATE_BUSY; 2526 cm->cm_reply = NULL; 2527 break; 2528 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 2529 { 2530 uint32_t baddr; 2531 uint8_t *reply; 2532 2533 /* 2534 * Re-compose the reply address from the address 2535 * sent back from the chip. The ReplyFrameAddress 2536 * is the lower 32 bits of the physical address of 2537 * the particular reply frame. Convert that address to 2538 * host format, and then use that to provide the 2539 * offset against the virtual address base 2540 * (sc->reply_frames). 2541 */ 2542 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 2543 reply = sc->reply_frames + 2544 (baddr - ((uint32_t)sc->reply_busaddr)); 2545 /* 2546 * Make sure the reply we got back is in a valid 2547 * range. If not, go ahead and panic here, since 2548 * we'll probably panic as soon as we dereference the 2549 * reply pointer anyway.
2550 */ 2551 if ((reply < sc->reply_frames) 2552 || (reply > (sc->reply_frames + 2553 (sc->fqdepth * sc->replyframesz)))) { 2554 printf("%s: WARNING: reply %p out of range!\n", 2555 __func__, reply); 2556 printf("%s: reply_frames %p, fqdepth %d, " 2557 "frame size %d\n", __func__, 2558 sc->reply_frames, sc->fqdepth, 2559 sc->replyframesz); 2560 printf("%s: baddr %#x,\n", __func__, baddr); 2561 /* LSI-TODO. See Linux Code for Graceful exit */ 2562 panic("Reply address out of range"); 2563 } 2564 if (le16toh(desc->AddressReply.SMID) == 0) { 2565 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 2566 MPI2_FUNCTION_DIAG_BUFFER_POST) { 2567 /* 2568 * If SMID is 0 for Diag Buffer Post, 2569 * this implies that the reply is due to 2570 * a release function with a status that 2571 * the buffer has been released. Set 2572 * the buffer flags accordingly. 2573 */ 2574 rel_rep = 2575 (MPI2_DIAG_RELEASE_REPLY *)reply; 2576 if ((le16toh(rel_rep->IOCStatus) & 2577 MPI2_IOCSTATUS_MASK) == 2578 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 2579 { 2580 pBuffer = 2581 &sc->fw_diag_buffer_list[ 2582 rel_rep->BufferType]; 2583 pBuffer->valid_data = TRUE; 2584 pBuffer->owned_by_firmware = 2585 FALSE; 2586 pBuffer->immediate = FALSE; 2587 } 2588 } else 2589 mpr_dispatch_event(sc, baddr, 2590 (MPI2_EVENT_NOTIFICATION_REPLY *) 2591 reply); 2592 } else { 2593 cm = &sc->commands[ 2594 le16toh(desc->AddressReply.SMID)]; 2595 if (cm->cm_state == MPR_CM_STATE_INQUEUE) { 2596 cm->cm_reply = reply; 2597 cm->cm_reply_data = 2598 le32toh(desc->AddressReply. 2599 ReplyFrameAddress); 2600 } else { 2601 mpr_dprint(sc, MPR_RECOVERY, 2602 "Bad state for ADDRESS_REPLY status," 2603 " ignoring state %d cm %p\n", 2604 cm->cm_state, cm); 2605 } 2606 } 2607 break; 2608 } 2609 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 2610 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 2611 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 2612 default: 2613 /* Unhandled */ 2614 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n", 2615 desc->Default.ReplyFlags); 2616 cm = NULL; 2617 break; 2618 } 2619 2620 if (cm != NULL) { 2621 // Print Error reply frame 2622 if (cm->cm_reply) 2623 mpr_display_reply_info(sc,cm->cm_reply); 2624 mpr_complete_command(sc, cm); 2625 } 2626 } 2627 2628 if (pq != sc->replypostindex) { 2629 mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n", 2630 __func__, sc, sc->replypostindex); 2631 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 2632 sc->replypostindex); 2633 } 2634 2635 return; 2636 } 2637 2638 static void 2639 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, 2640 MPI2_EVENT_NOTIFICATION_REPLY *reply) 2641 { 2642 struct mpr_event_handle *eh; 2643 int event, handled = 0; 2644 2645 event = le16toh(reply->Event); 2646 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2647 if (isset(eh->mask, event)) { 2648 eh->callback(sc, data, reply); 2649 handled++; 2650 } 2651 } 2652 2653 if (handled == 0) 2654 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", 2655 le16toh(event)); 2656 2657 /* 2658 * This is the only place that the event/reply should be freed. 2659 * Anything wanting to hold onto the event data should have 2660 * already copied it into their own storage. 
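 * Note that several handlers can see the same event: every handler on event_list whose mask includes the event type is called, in the order the handlers were registered.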
2661 */ 2662 mpr_free_reply(sc, data); 2663 } 2664 2665 static void 2666 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) 2667 { 2668 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2669 2670 if (cm->cm_reply) 2671 MPR_DPRINT_EVENT(sc, generic, 2672 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 2673 2674 mpr_free_command(sc, cm); 2675 2676 /* next, send a port enable */ 2677 mprsas_startup(sc); 2678 } 2679 2680 /* 2681 * For both register_events and update_events, the caller supplies a bitmap 2682 * of events that it _wants_. These functions then turn that into a bitmask 2683 * suitable for the controller. 2684 */ 2685 int 2686 mpr_register_events(struct mpr_softc *sc, uint8_t *mask, 2687 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) 2688 { 2689 struct mpr_event_handle *eh; 2690 int error = 0; 2691 2692 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); 2693 if (!eh) { 2694 mpr_dprint(sc, MPR_EVENT|MPR_ERROR, 2695 "Cannot allocate event memory\n"); 2696 return (ENOMEM); 2697 } 2698 eh->callback = cb; 2699 eh->data = data; 2700 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 2701 if (mask != NULL) 2702 error = mpr_update_events(sc, eh, mask); 2703 *handle = eh; 2704 2705 return (error); 2706 } 2707 2708 int 2709 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, 2710 uint8_t *mask) 2711 { 2712 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2713 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL; 2714 struct mpr_command *cm = NULL; 2715 struct mpr_event_handle *eh; 2716 int error, i; 2717 2718 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2719 2720 if ((mask != NULL) && (handle != NULL)) 2721 bcopy(mask, &handle->mask[0], 16); 2722 memset(sc->event_mask, 0xff, 16); 2723 2724 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2725 for (i = 0; i < 16; i++) 2726 sc->event_mask[i] &= ~eh->mask[i]; 2727 } 2728 2729 if ((cm = mpr_alloc_command(sc)) == NULL) 2730 return (EBUSY); 2731 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2732 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2733 evtreq->MsgFlags = 0; 2734 evtreq->SASBroadcastPrimitiveMasks = 0; 2735 #ifdef MPR_DEBUG_ALL_EVENTS 2736 { 2737 u_char fullmask[16]; 2738 memset(fullmask, 0x00, 16); 2739 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2740 } 2741 #else 2742 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2743 #endif 2744 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2745 cm->cm_data = NULL; 2746 2747 error = mpr_request_polled(sc, &cm); 2748 if (cm != NULL) 2749 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 2750 if ((reply == NULL) || 2751 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 2752 error = ENXIO; 2753 2754 if (reply) 2755 MPR_DPRINT_EVENT(sc, generic, reply); 2756 2757 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); 2758 2759 if (cm != NULL) 2760 mpr_free_command(sc, cm); 2761 return (error); 2762 } 2763 2764 static int 2765 mpr_reregister_events(struct mpr_softc *sc) 2766 { 2767 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2768 struct mpr_command *cm; 2769 struct mpr_event_handle *eh; 2770 int error, i; 2771 2772 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2773 2774 /* first, reregister events */ 2775 2776 memset(sc->event_mask, 0xff, 16); 2777 2778 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2779 for (i = 0; i < 16; i++) 2780 sc->event_mask[i] &= ~eh->mask[i]; 2781 } 2782 2783 if ((cm = mpr_alloc_command(sc)) == NULL) 2784 return (EBUSY); 
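	/* Unlike mpr_update_events(), which polls for its reply, this request completes asynchronously through mpr_reregister_events_complete(), which then kicks off a port enable. */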
2785 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2786 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2787 evtreq->MsgFlags = 0; 2788 evtreq->SASBroadcastPrimitiveMasks = 0; 2789 #ifdef MPR_DEBUG_ALL_EVENTS 2790 { 2791 u_char fullmask[16]; 2792 memset(fullmask, 0x00, 16); 2793 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2794 } 2795 #else 2796 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2797 #endif 2798 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2799 cm->cm_data = NULL; 2800 cm->cm_complete = mpr_reregister_events_complete; 2801 2802 error = mpr_map_command(sc, cm); 2803 2804 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__, 2805 error); 2806 return (error); 2807 } 2808 2809 int 2810 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle) 2811 { 2812 2813 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 2814 free(handle, M_MPR); 2815 return (mpr_update_events(sc, NULL, NULL)); 2816 } 2817 2818 /** 2819 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a 2820 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry 2821 * of the NVMe message (PRP1). If the data buffer is small enough to be described 2822 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to 2823 * describe a larger data buffer. If the data buffer is too large to describe 2824 * using the two PRP entries inside the NVMe message, then PRP1 describes the 2825 * first data memory segment, and PRP2 contains a pointer to a PRP list located 2826 * elsewhere in memory to describe the remaining data memory segments. The PRP 2827 * list will be contiguous. 2828 * 2829 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP 2830 * consists of a list of PRP entries to describe a number of noncontiguous 2831 * physical memory segments as a single memory buffer, just as a SGL does. Note 2832 * however, that this function is only used by the IOCTL call, so the memory 2833 * given will be guaranteed to be contiguous. There is no need to translate 2834 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous 2835 * space that is one page size each. 2836 * 2837 * Each NVMe message contains two PRP entries. The first (PRP1) either contains 2838 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains 2839 * the second PRP element if the memory being described fits within 2 PRP 2840 * entries, or a PRP list pointer if the PRP spans more than two entries. 2841 * 2842 * A PRP list pointer contains the address of a PRP list, structured as a linear 2843 * array of PRP entries. Each PRP entry in this list describes a segment of 2844 * physical memory. 2845 * 2846 * Each 64-bit PRP entry comprises an address and an offset field. The address 2847 * always points to the beginning of a PAGE_SIZE physical memory page, and the 2848 * offset describes where within that page the memory segment begins. Only the 2849 * first element in a PRP list may contain a non-zero offset, implying that all 2850 * memory segments following the first begin at the start of a PAGE_SIZE page. 2851 * 2852 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory, 2853 * with exceptions for the first and last elements in the list.
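 * As a concrete sketch (illustrative values, assuming PAGE_SIZE = 4096): a page-aligned 8KiB buffer needs no list, just PRP1 = page 0 and PRP2 = page 1; a page-aligned 12KiB buffer needs PRP1 = page 0 and PRP2 pointing to a two-entry PRP list holding pages 1 and 2.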
If the memory 2854 * being described by the list begins at a non-zero offset within the first page, 2855 * then the first PRP element will contain a non-zero offset indicating where the 2856 * region begins within the page. The last memory segment may end before the end 2857 * of the PAGE_SIZE segment, depending upon the overall size of the memory being 2858 * described by the PRP list. 2859 * 2860 * Since PRP entries lack any indication of size, the overall data buffer length 2861 * is used to determine where the end of the data memory buffer is located, and 2862 * how many PRP entries are required to describe it. 2863 * 2864 * Returns nothing. 2865 */ 2866 void 2867 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, 2868 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data, 2869 uint32_t data_in_sz, uint32_t data_out_sz) 2870 { 2871 int prp_size = PRP_ENTRY_SIZE; 2872 uint64_t *prp_entry, *prp1_entry, *prp2_entry; 2873 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys; 2874 uint32_t offset, entry_len, page_mask_result, page_mask; 2875 bus_addr_t paddr; 2876 size_t length; 2877 struct mpr_prp_page *prp_page_info = NULL; 2878 2879 /* 2880 * Not all commands require a data transfer. If no data, just return 2881 * without constructing any PRP. 2882 */ 2883 if (!data_in_sz && !data_out_sz) 2884 return; 2885 2886 /* 2887 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is 2888 * located at a 24 byte offset from the start of the NVMe command. Then 2889 * set the current PRP entry pointer to PRP1. 2890 */ 2891 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2892 NVME_CMD_PRP1_OFFSET); 2893 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2894 NVME_CMD_PRP2_OFFSET); 2895 prp_entry = prp1_entry; 2896 2897 /* 2898 * For the PRP entries, use the specially allocated buffer of 2899 * contiguous memory. PRP Page allocation failures should not happen 2900 * because there should be enough PRP page buffers to account for the 2901 * possible NVMe QDepth. 2902 */ 2903 prp_page_info = mpr_alloc_prp_page(sc); 2904 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2905 "used for building a native NVMe SGL.\n", __func__)); 2906 prp_page = (uint64_t *)prp_page_info->prp_page; 2907 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 2908 2909 /* 2910 * Insert the allocated PRP page into the command's PRP page list. This 2911 * will be freed when the command is freed. 2912 */ 2913 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 2914 2915 /* 2916 * Check if we are within 1 entry of a page boundary; we don't want our 2917 * first entry to be a PRP List entry. 2918 */ 2919 page_mask = PAGE_SIZE - 1; 2920 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) & 2921 page_mask; 2922 if (!page_mask_result) 2923 { 2924 /* Bump up to next page boundary. */ 2925 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size); 2926 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys + 2927 prp_size); 2928 } 2929 2930 /* 2931 * Set PRP physical pointer, which initially points to the current PRP 2932 * DMA memory page. 2933 */ 2934 prp_entry_phys = prp_page_phys; 2935 2936 /* Get physical address and length of the data buffer. */ 2937 paddr = (bus_addr_t)(uintptr_t)data; 2938 if (data_in_sz) 2939 length = data_in_sz; 2940 else 2941 length = data_out_sz; 2942 2943 /* Loop while the length is not zero.
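Each pass emits one PRP entry covering at most the remainder of a single page; when the current slot is the last one in a PRP page, it holds a pointer to the next part of the list instead of data.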
*/ 2944 while (length) 2945 { 2946 /* 2947 * Check if we need to put a list pointer here if we are at page 2948 * boundary - prp_size (8 bytes). 2949 */ 2950 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys + 2951 prp_size) & page_mask; 2952 if (!page_mask_result) 2953 { 2954 /* 2955 * This is the last entry in a PRP List, so we need to 2956 * put a PRP list pointer here. What this does is: 2957 * - bump the current memory pointer to the next 2958 * address, which will be the next full page. 2959 * - set the PRP Entry to point to that page. This is 2960 * now the PRP List pointer. 2961 * - bump the PRP Entry pointer to the start of the next 2962 * page. Since all of this PRP memory is contiguous, 2963 * no need to get a new page - it's just the next 2964 * address. 2965 */ 2966 prp_entry_phys++; 2967 *prp_entry = 2968 htole64((uint64_t)(uintptr_t)prp_entry_phys); 2969 prp_entry++; 2970 } 2971 2972 /* Need to handle if entry will be part of a page. */ 2973 offset = (uint32_t)paddr & page_mask; 2974 entry_len = PAGE_SIZE - offset; 2975 2976 if (prp_entry == prp1_entry) 2977 { 2978 /* 2979 * Must fill in the first PRP pointer (PRP1) before 2980 * moving on. 2981 */ 2982 *prp1_entry = htole64((uint64_t)paddr); 2983 2984 /* 2985 * Now point to the second PRP entry within the 2986 * command (PRP2). 2987 */ 2988 prp_entry = prp2_entry; 2989 } 2990 else if (prp_entry == prp2_entry) 2991 { 2992 /* 2993 * Should the PRP2 entry be a PRP List pointer or just a 2994 * regular PRP pointer? If there is more than one more 2995 * page of data, must use a PRP List pointer. 2996 */ 2997 if (length > PAGE_SIZE) 2998 { 2999 /* 3000 * PRP2 will contain a PRP List pointer because 3001 * more PRP's are needed with this command. The 3002 * list will start at the beginning of the 3003 * contiguous buffer. 3004 */ 3005 *prp2_entry = 3006 htole64( 3007 (uint64_t)(uintptr_t)prp_entry_phys); 3008 3009 /* 3010 * The next PRP Entry will be the start of the 3011 * first PRP List. 3012 */ 3013 prp_entry = prp_page; 3014 } 3015 else 3016 { 3017 /* 3018 * After this, the PRP Entries are complete. 3019 * This command uses 2 PRP's and no PRP list. 3020 */ 3021 *prp2_entry = htole64((uint64_t)paddr); 3022 } 3023 } 3024 else 3025 { 3026 /* 3027 * Put entry in list and bump the addresses. 3028 * 3029 * After PRP1 and PRP2 are filled in, this will fill in 3030 * all remaining PRP entries in a PRP List, one per pass 3031 * through the loop. 3032 */ 3033 *prp_entry = htole64((uint64_t)paddr); 3034 prp_entry++; 3035 prp_entry_phys++; 3036 } 3037 3038 /* 3039 * Bump the phys address of the command's data buffer by the 3040 * entry_len. 3041 */ 3042 paddr += entry_len; 3043 3044 /* Decrement length accounting for last partial page. */ 3045 if (entry_len > length) 3046 length = 0; 3047 else 3048 length -= entry_len; 3049 } 3050 } 3051 3052 /* 3053 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to 3054 * determine if the driver needs to build a native SGL. If so, that native SGL 3055 * is built in the contiguous buffers allocated especially for PCIe SGL 3056 * creation. If the driver will not build a native SGL, return TRUE and a 3057 * normal IEEE SGL will be built. Currently this routine supports NVMe devices 3058 * only. 3059 * 3060 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built.
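 * As an illustration (hypothetical numbers, 4KiB pages): an 18KiB transfer falls between four and five pages. If its first segment starts 2KiB into a page, the 2KiB available in that first page equals the 2KiB residual beyond four pages, so an IEEE SGL still suffices; a larger starting offset, or a first segment shorter than 2KiB, would force a native SGL instead.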
3061 */ 3062 static int 3063 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, 3064 bus_dma_segment_t *segs, int segs_left) 3065 { 3066 uint32_t i, sge_dwords, length, offset, entry_len; 3067 uint32_t num_entries, buff_len = 0, sges_in_segment; 3068 uint32_t page_mask, page_mask_result, *curr_buff; 3069 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; 3070 uint32_t first_page_data_size, end_residual; 3071 uint64_t *msg_phys; 3072 bus_addr_t paddr; 3073 int build_native_sgl = 0, first_prp_entry; 3074 int prp_size = PRP_ENTRY_SIZE; 3075 Mpi25IeeeSgeChain64_t *main_chain_element = NULL; 3076 struct mpr_prp_page *prp_page_info = NULL; 3077 3078 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 3079 3080 /* 3081 * Add up the sizes of each segment length to get the total transfer 3082 * size, which will be checked against the Maximum Data Transfer Size. 3083 * If the data transfer length exceeds the MDTS for this device, just 3084 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O 3085 * up into multiple I/O's. [nvme_mdts = 0 means unlimited] 3086 */ 3087 for (i = 0; i < segs_left; i++) 3088 buff_len += htole32(segs[i].ds_len); 3089 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) 3090 return 1; 3091 3092 /* Create page_mask (to get offset within page) */ 3093 page_mask = PAGE_SIZE - 1; 3094 3095 /* 3096 * Check if the number of elements exceeds the max number that can be 3097 * put in the main message frame (H/W can only translate an SGL that 3098 * is contained entirely in the main message frame). 3099 */ 3100 sges_in_segment = (sc->reqframesz - 3101 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); 3102 if (segs_left > sges_in_segment) 3103 build_native_sgl = 1; 3104 else 3105 { 3106 /* 3107 * NVMe uses one PRP for each physical page (or part of physical 3108 * page). 3109 * if 4 pages or less then IEEE is OK 3110 * if > 5 pages then we need to build a native SGL 3111 * if > 4 and <= 5 pages, then check the physical address of 3112 * the first SG entry, then if this first size in the page 3113 * is >= the residual beyond 4 pages then use IEEE, 3114 * otherwise use native SGL 3115 */ 3116 if (buff_len > (PAGE_SIZE * 5)) 3117 build_native_sgl = 1; 3118 else if ((buff_len > (PAGE_SIZE * 4)) && 3119 (buff_len <= (PAGE_SIZE * 5)) ) 3120 { 3121 msg_phys = (uint64_t *)(uintptr_t)segs[0].ds_addr; 3122 first_page_offset = 3123 ((uint32_t)(uint64_t)(uintptr_t)msg_phys & 3124 page_mask); 3125 first_page_data_size = PAGE_SIZE - first_page_offset; 3126 end_residual = buff_len % PAGE_SIZE; 3127 3128 /* 3129 * If offset into first page pushes the end of the data 3130 * beyond end of the 5th page, we need the extra PRP 3131 * list. 3132 */ 3133 if (first_page_data_size < end_residual) 3134 build_native_sgl = 1; 3135 3136 /* 3137 * Check if first SG entry size is < residual beyond 4 3138 * pages. 3139 */ 3140 if (htole32(segs[0].ds_len) < 3141 (buff_len - (PAGE_SIZE * 4))) 3142 build_native_sgl = 1; 3143 } 3144 } 3145 3146 /* check if native SGL is needed */ 3147 if (!build_native_sgl) 3148 return 1; 3149 3150 /* 3151 * Native SGL is needed. 3152 * Put a chain element in main message frame that points to the first 3153 * chain buffer. 3154 * 3155 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 3156 * a native SGL. 
3157 */ 3158 3159 /* Set main message chain element pointer */ 3160 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; 3161 3162 /* 3163 * For NVMe the chain element needs to be the 2nd SGL entry in the main 3164 * message. 3165 */ 3166 main_chain_element = (Mpi25IeeeSgeChain64_t *) 3167 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); 3168 3169 /* 3170 * For the PRP entries, use the specially allocated buffer of 3171 * contiguous memory. PRP Page allocation failures should not happen 3172 * because there should be enough PRP page buffers to account for the 3173 * possible NVMe QDepth. 3174 */ 3175 prp_page_info = mpr_alloc_prp_page(sc); 3176 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 3177 "used for building a native NVMe SGL.\n", __func__)); 3178 curr_buff = (uint32_t *)prp_page_info->prp_page; 3179 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 3180 3181 /* 3182 * Insert the allocated PRP page into the command's PRP page list. This 3183 * will be freed when the command is freed. 3184 */ 3185 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 3186 3187 /* 3188 * Check if we are within 1 entry of a page boundary; we don't want our 3189 * first entry to be a PRP List entry. 3190 */ 3191 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) & 3192 page_mask; 3193 if (!page_mask_result) { 3194 /* Bump up to next page boundary. */ 3195 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size); 3196 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); 3197 } 3198 3199 /* Fill in the chain element and make it an NVMe segment type. */ 3200 main_chain_element->Address.High = 3201 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32)); 3202 main_chain_element->Address.Low = 3203 htole32((uint32_t)(uintptr_t)msg_phys); 3204 main_chain_element->NextChainOffset = 0; 3205 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3206 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3207 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 3208 3209 /* Set SGL pointer to start of contiguous PCIe buffer. */ 3210 ptr_sgl = curr_buff; 3211 sge_dwords = 2; 3212 num_entries = 0; 3213 3214 /* 3215 * NVMe has a very convoluted PRP format. One PRP is required for each 3216 * page or partial page. We need to split up OS SG entries if they are 3217 * longer than one page or cross a page boundary. We also have to insert 3218 * a PRP list pointer entry as the last entry in each physical page of 3219 * the PRP list. 3220 * 3221 * NOTE: The first PRP "entry" is actually placed in the first SGL entry 3222 * in the main message in IEEE 64 format. The 2nd entry in the main 3223 * message is the chain element, and the rest of the PRP entries are 3224 * built in the contiguous PCIe buffer. 3225 */ 3226 first_prp_entry = 1; 3227 ptr_first_sgl = (uint32_t *)cm->cm_sge; 3228 3229 for (i = 0; i < segs_left; i++) { 3230 /* Get physical address and length of this SG entry. */ 3231 paddr = segs[i].ds_addr; 3232 length = segs[i].ds_len; 3233 3234 /* 3235 * Check whether a given SGE buffer lies on a non-PAGED 3236 * boundary if this is not the first page. If so, this is not 3237 * expected, so have the FW build the SGL. 3238 */ 3239 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) { 3240 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while " 3241 "building NVMe PRPs, low address is 0x%x\n", 3242 (uint32_t)paddr); 3243 return 1; 3244 } 3245 3246 /* Apart from the last SGE, if any other SGE boundary is not page 3247 * aligned then it means that a hole exists.
Such a hole 3248 * leads to data corruption, so fall back to IEEE SGEs. 3249 */ 3250 if (i != (segs_left - 1)) { 3251 if (((uint32_t)paddr + length) & page_mask) { 3252 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE " 3253 "boundary while building NVMe PRPs, low " 3254 "address: 0x%x and length: %u\n", 3255 (uint32_t)paddr, length); 3256 return 1; 3257 } 3258 } 3259 3260 /* Loop while the length is not zero. */ 3261 while (length) { 3262 /* 3263 * Check if we need to put a list pointer here if we are 3264 * at page boundary - prp_size. 3265 */ 3266 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl + 3267 prp_size) & page_mask; 3268 if (!page_mask_result) { 3269 /* 3270 * Need to put a PRP list pointer here. 3271 */ 3272 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3273 prp_size); 3274 *ptr_sgl = htole32((uintptr_t)msg_phys); 3275 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t) 3276 msg_phys >> 32); 3277 ptr_sgl += sge_dwords; 3278 num_entries++; 3279 } 3280 3281 /* Need to handle if entry will be part of a page. */ 3282 offset = (uint32_t)paddr & page_mask; 3283 entry_len = PAGE_SIZE - offset; 3284 if (first_prp_entry) { 3285 /* 3286 * Put IEEE entry in first SGE in main message. 3287 * (Simple element, System addr, not end of 3288 * list.) 3289 */ 3290 *ptr_first_sgl = htole32((uint32_t)paddr); 3291 *(ptr_first_sgl + 1) = 3292 htole32((uint32_t)((uint64_t)paddr >> 32)); 3293 *(ptr_first_sgl + 2) = htole32(entry_len); 3294 *(ptr_first_sgl + 3) = 0; 3295 3296 /* No longer the first PRP entry. */ 3297 first_prp_entry = 0; 3298 } else { 3299 /* Put entry in list. */ 3300 *ptr_sgl = htole32((uint32_t)paddr); 3301 *(ptr_sgl + 1) = 3302 htole32((uint32_t)((uint64_t)paddr >> 32)); 3303 3304 /* Bump ptr_sgl, msg_phys, and num_entries. */ 3305 ptr_sgl += sge_dwords; 3306 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3307 prp_size); 3308 num_entries++; 3309 } 3310 3311 /* Bump the phys address by the entry_len. */ 3312 paddr += entry_len; 3313 3314 /* Decrement length accounting for last partial page. */ 3315 if (entry_len > length) 3316 length = 0; 3317 else 3318 length -= entry_len; 3319 } 3320 } 3321 3322 /* Set chain element Length. */ 3323 main_chain_element->Length = htole32(num_entries * prp_size); 3324 3325 /* Return 0, indicating we built a native SGL. */ 3326 return 0; 3327 } 3328 3329 /* 3330 * Add a chain element as the next SGE for the specified command. 3331 * Reset cm_sge and cm_sglsize to indicate all the available space. Chains are 3332 * only required for IEEE commands. Therefore there is no code for commands 3333 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands 3334 * shouldn't be requesting chains). 3335 */ 3336 static int 3337 mpr_add_chain(struct mpr_command *cm, int segsleft) 3338 { 3339 struct mpr_softc *sc = cm->cm_sc; 3340 MPI2_REQUEST_HEADER *req; 3341 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; 3342 struct mpr_chain *chain; 3343 int sgc_size, current_segs, rem_segs, segs_per_frame; 3344 uint8_t next_chain_offset = 0; 3345 3346 /* 3347 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3 3348 * only IEEE commands should be requesting chains. Return some error 3349 * code other than 0.
3350 */ 3351 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { 3352 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " 3353 "an MPI SGL.\n"); 3354 return(ENOBUFS); 3355 } 3356 3357 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); 3358 if (cm->cm_sglsize < sgc_size) 3359 panic("MPR: Need SGE Error Code\n"); 3360 3361 chain = mpr_alloc_chain(cm->cm_sc); 3362 if (chain == NULL) 3363 return (ENOBUFS); 3364 3365 /* 3366 * Note: a doubly-linked list is used to make it easier to walk for 3367 * debugging. 3368 */ 3369 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 3370 3371 /* 3372 * Need to know if the number of frames left is more than 1 or not. If 3373 * more than 1 frame is required, NextChainOffset will need to be set, 3374 * which will just be the last segment of the frame. 3375 */ 3376 rem_segs = 0; 3377 if (cm->cm_sglsize < (sgc_size * segsleft)) { 3378 /* 3379 * rem_segs is the number of segments remaining after the 3380 * segments that will go into the current frame. Since it is 3381 * known that at least one more frame is required, account for 3382 * the chain element. To know if more than one more frame is 3383 * required, just check if there will be a remainder after using 3384 * the current frame (with this chain) and the next frame. If 3385 * so, the NextChainOffset must be the last element of the next 3386 * frame. 3387 */ 3388 current_segs = (cm->cm_sglsize / sgc_size) - 1; 3389 rem_segs = segsleft - current_segs; 3390 segs_per_frame = sc->chain_frame_size / sgc_size; 3391 if (rem_segs > segs_per_frame) { 3392 next_chain_offset = segs_per_frame - 1; 3393 } 3394 } 3395 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; 3396 ieee_sgc->Length = next_chain_offset ? 3397 htole32((uint32_t)sc->chain_frame_size) : 3398 htole32((uint32_t)rem_segs * (uint32_t)sgc_size); 3399 ieee_sgc->NextChainOffset = next_chain_offset; 3400 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3401 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3402 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); 3403 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); 3404 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; 3405 req = (MPI2_REQUEST_HEADER *)cm->cm_req; 3406 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; 3407 3408 cm->cm_sglsize = sc->chain_frame_size; 3409 return (0); 3410 } 3411 3412 /* 3413 * Add one scatter-gather element to the scatter-gather list for a command. 3414 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the 3415 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a 3416 * chain, so don't consider any chain additions. 3417 */ 3418 int 3419 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, 3420 int segsleft) 3421 { 3422 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3423 u32 sge_flags; 3424 3425 /* 3426 * case 1: >=1 more segment, no room for anything (error) 3427 * case 2: 1 more segment and enough room for it 3428 */ 3429 3430 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { 3431 mpr_dprint(cm->cm_sc, MPR_ERROR, 3432 "%s: warning: Not enough room for MPI SGL in frame.\n", 3433 __func__); 3434 return(ENOBUFS); 3435 } 3436 3437 KASSERT(segsleft == 1, 3438 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", 3439 segsleft)); 3440 3441 /* 3442 * There is one more segment left to add for the MPI SGL and there is 3443 * enough room in the frame to add it.
This is the normal case because 3444 * MPI SGL's don't have chains, otherwise something is wrong. 3445 * 3446 * If this is a bi-directional request, need to account for that 3447 * here. Save the pre-filled sge values. These will be used 3448 * either for the 2nd SGL or for a single direction SGL. If 3449 * cm_out_len is non-zero, this is a bi-directional request, so 3450 * fill in the OUT SGL first, then the IN SGL, otherwise just 3451 * fill in the IN SGL. Note that at this time, when filling in 3452 * 2 SGL's for a bi-directional request, they both use the same 3453 * DMA buffer (same cm command). 3454 */ 3455 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 3456 saved_address_low = sge->Address.Low; 3457 saved_address_high = sge->Address.High; 3458 if (cm->cm_out_len) { 3459 sge->FlagsLength = cm->cm_out_len | 3460 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3461 MPI2_SGE_FLAGS_END_OF_BUFFER | 3462 MPI2_SGE_FLAGS_HOST_TO_IOC | 3463 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3464 MPI2_SGE_FLAGS_SHIFT); 3465 cm->cm_sglsize -= len; 3466 /* Endian Safe code */ 3467 sge_flags = sge->FlagsLength; 3468 sge->FlagsLength = htole32(sge_flags); 3469 sge->Address.High = htole32(sge->Address.High); 3470 sge->Address.Low = htole32(sge->Address.Low); 3471 bcopy(sge, cm->cm_sge, len); 3472 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3473 } 3474 sge->FlagsLength = saved_buf_len | 3475 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3476 MPI2_SGE_FLAGS_END_OF_BUFFER | 3477 MPI2_SGE_FLAGS_LAST_ELEMENT | 3478 MPI2_SGE_FLAGS_END_OF_LIST | 3479 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3480 MPI2_SGE_FLAGS_SHIFT); 3481 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { 3482 sge->FlagsLength |= 3483 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 3484 MPI2_SGE_FLAGS_SHIFT); 3485 } else { 3486 sge->FlagsLength |= 3487 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 3488 MPI2_SGE_FLAGS_SHIFT); 3489 } 3490 sge->Address.Low = saved_address_low; 3491 sge->Address.High = saved_address_high; 3492 3493 cm->cm_sglsize -= len; 3494 /* Endian Safe code */ 3495 sge_flags = sge->FlagsLength; 3496 sge->FlagsLength = htole32(sge_flags); 3497 sge->Address.High = htole32(sge->Address.High); 3498 sge->Address.Low = htole32(sge->Address.Low); 3499 bcopy(sge, cm->cm_sge, len); 3500 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3501 return (0); 3502 } 3503 3504 /* 3505 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- 3506 * gather list for a command. Maintain cm_sglsize and cm_sge as the 3507 * remaining size and pointer to the next SGE to fill in, respectively. 3508 */ 3509 int 3510 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) 3511 { 3512 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; 3513 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); 3514 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3515 uint32_t sge_length; 3516 3517 /* 3518 * case 1: No room for chain or segment (error). 3519 * case 2: Two or more segments left but only room for chain. 3520 * case 3: Last segment and room for it, so set flags. 3521 */ 3522 3523 /* 3524 * There should be room for at least one element, or there is a big 3525 * problem. 
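 * For example (hypothetical sizes): IEEE simple and chain elements are nominally 16 bytes each, so with 16 bytes of frame space left and two or more segments remaining, case 2 above applies: a chain element is emitted first and this SGE then lands in the fresh chain frame.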
/*
 * Add one IEEE scatter-gather element (chain or simple) to the IEEE
 * scatter-gather list for a command.  Maintain cm_sglsize and cm_sge as
 * the remaining size and pointer to the next SGE to fill in, respectively.
 */
int
mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
{
	MPI2_IEEE_SGE_SIMPLE64 *sge = sgep;
	int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION);
	uint32_t saved_buf_len, saved_address_low, saved_address_high;
	uint32_t sge_length;

	/*
	 * case 1: No room for chain or segment (error).
	 * case 2: Two or more segments left but only room for chain.
	 * case 3: Last segment and room for it, so set flags.
	 */

	/*
	 * There should be room for at least one element, or there is a big
	 * problem.
	 */
	if (cm->cm_sglsize < ieee_sge_size)
		panic("MPR: Need SGE Error Code\n");

	if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
		if ((error = mpr_add_chain(cm, segsleft)) != 0)
			return (error);
	}

	if (segsleft == 1) {
		/*
		 * If this is a bi-directional request, it needs to be
		 * accounted for here.  Save the pre-filled sge values.
		 * These will be used either for the 2nd SGL or for a single
		 * direction SGL.  If cm_out_len is non-zero, this is a
		 * bi-directional request, so fill in the OUT SGL first, then
		 * the IN SGL, otherwise just fill in the IN SGL.  Note that
		 * at this time, when filling in 2 SGLs for a bi-directional
		 * request, they both use the same DMA buffer (same cm
		 * command).
		 */
		saved_buf_len = sge->Length;
		saved_address_low = sge->Address.Low;
		saved_address_high = sge->Address.High;
		if (cm->cm_out_len) {
			sge->Length = cm->cm_out_len;
			sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
			cm->cm_sglsize -= ieee_sge_size;
			/* Endian Safe code */
			sge_length = sge->Length;
			sge->Length = htole32(sge_length);
			sge->Address.High = htole32(sge->Address.High);
			sge->Address.Low = htole32(sge->Address.Low);
			bcopy(sgep, cm->cm_sge, ieee_sge_size);
			cm->cm_sge =
			    (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
			    ieee_sge_size);
		}
		sge->Length = saved_buf_len;
		sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
		sge->Address.Low = saved_address_low;
		sge->Address.High = saved_address_high;
	}

	cm->cm_sglsize -= ieee_sge_size;
	/* Endian Safe code */
	sge_length = sge->Length;
	sge->Length = htole32(sge_length);
	sge->Address.High = htole32(sge->Address.High);
	sge->Address.Low = htole32(sge->Address.Low);
	bcopy(sgep, cm->cm_sge, ieee_sge_size);
	cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
	    ieee_sge_size);
	return (0);
}

/*
 * Add one DMA segment to the scatter-gather list for a command.
 */
int
mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
    int segsleft)
{
	MPI2_SGE_SIMPLE64 sge;
	MPI2_IEEE_SGE_SIMPLE64 ieee_sge;

	if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
		ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
		ieee_sge.Length = len;
		mpr_from_u64(pa, &ieee_sge.Address);

		return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
	} else {
		/*
		 * This driver always uses 64-bit address elements for
		 * simplicity.
		 */
		flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
		/* Set Endian safe macro in mpr_push_sge */
		sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
		mpr_from_u64(pa, &sge.Address);

		return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
	}
}
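/*
 * For reference, mpr_from_u64() (used above) stores a 64-bit bus address
 * into the Low/High halves of an MPI U64.  Conceptually it does the
 * following (sketch only; see mprvar.h for the real helper, including its
 * endianness handling):
 */
#if 0
	dest->High = (uint32_t)(pa >> 32);	/* upper 32 bits */
	dest->Low = (uint32_t)pa;		/* lower 32 bits */
#endif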
static void
mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	u_int i, dir, sflags;

	cm = (struct mpr_command *)arg;
	sc = cm->cm_sc;

	/*
	 * In this case, just print out a warning and let the chip tell the
	 * user they did the wrong thing.
	 */
	if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
		mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d "
		    "segments, more than the %d allowed\n", __func__, nsegs,
		    cm->cm_max_segs);
	}

	/*
	 * Set up DMA direction flags.  Bi-directional requests are also
	 * handled here.  In that case, both direction flags will be set.
	 */
	sflags = 0;
	if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
		/*
		 * We have to add a special case for SMP passthrough; there
		 * is no easy way to generically handle it.  The first
		 * S/G element is used for the command (therefore the
		 * direction bit needs to be set).  The second one is used
		 * for the reply.  We'll leave it to the caller to make
		 * sure we only have two buffers.
		 */
		/*
		 * Even though the busdma man page says it doesn't make
		 * sense to have both direction flags, it does in this case.
		 * We have one S/G element being accessed in each direction.
		 */
		dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;

		/*
		 * Set the direction flag on the first buffer in the SMP
		 * passthrough request.  We'll clear it for the second one.
		 */
		sflags |= MPI2_SGE_FLAGS_DIRECTION |
		    MPI2_SGE_FLAGS_END_OF_BUFFER;
	} else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
		sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
		dir = BUS_DMASYNC_PREWRITE;
	} else
		dir = BUS_DMASYNC_PREREAD;

	/* Check if a native SG list is needed for an NVMe PCIe device. */
	if (cm->cm_targ && cm->cm_targ->is_nvme &&
	    mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
		/* A native SG list was built, skip to end. */
		goto out;
	}

	for (i = 0; i < nsegs; i++) {
		if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
			sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
		}
		error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
		    sflags, nsegs - i);
		if (error != 0) {
			/* Resource shortage, roll back! */
			if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
				mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
				    "consider increasing hw.mpr.max_chains.\n");
			cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
			mpr_complete_command(sc, cm);
			return;
		}
	}

out:
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
	mpr_enqueue_request(sc, cm);

	return;
}
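/*
 * bus_dmamap_load_uio() requires a bus_dmamap_callback2_t, which is handed
 * the total mapped size as an extra argument.  That size is not needed
 * here, so the wrapper below simply drops it and chains to mpr_data_cb().
 */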
static void
mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
    int error)
{
	mpr_data_cb(arg, segs, nsegs, error);
}

/*
 * This is the routine to enqueue commands asynchronously.
 * Note that the only error path here is from bus_dmamap_load(), which can
 * return EINPROGRESS if it is waiting for resources.  Other than this,
 * it's assumed that if you have a command in-hand, then you have enough
 * credits to use it.
 */
int
mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
{
	int error = 0;

	if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
		error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
		    &cm->cm_uio, mpr_data_cb2, cm, 0);
	} else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
		error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, mpr_data_cb, cm, 0);
	} else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
		error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
	} else {
		/* Add a zero-length element as needed */
		if (cm->cm_sge != NULL)
			mpr_add_dmaseg(cm, 0, 0, 0, 1);
		mpr_enqueue_request(sc, cm);
	}

	return (error);
}

/*
 * This is the routine to enqueue commands synchronously.  An error of
 * EINPROGRESS from mpr_map_command() is ignored since the command will
 * be executed and enqueued automatically.  Other errors come from msleep().
 */
int
mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout,
    int sleep_flag)
{
	int error, rc;
	struct timeval cur_time, start_time;
	struct mpr_command *cm = *cmp;

	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
		return (EBUSY);

	cm->cm_complete = NULL;
	cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS))
		return (error);

	/*
	 * Check for context and wait for 50ms at a time until time has
	 * expired or the command has finished.  If msleep can't be used,
	 * need to poll.
	 */
#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else /* __FreeBSD_version < 1000029 */
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif /* __FreeBSD_version >= 1000029 */
		sleep_flag = NO_SLEEP;
	getmicrouptime(&start_time);
	if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) {
		error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout * hz);
		if (error == EWOULDBLOCK) {
			/*
			 * Record the actual elapsed time in the case of a
			 * timeout for the message below.
			 */
			getmicrouptime(&cur_time);
			timevalsub(&cur_time, &start_time);
		}
	} else {
		while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
			mpr_intr_locked(sc);
			if (sleep_flag == CAN_SLEEP)
				pause("mprwait", hz/20);
			else
				DELAY(50000);

			getmicrouptime(&cur_time);
			timevalsub(&cur_time, &start_time);
			if (cur_time.tv_sec > timeout) {
				error = EWOULDBLOCK;
				break;
			}
		}
	}

	if (error == EWOULDBLOCK) {
		if (cm->cm_timeout_handler == NULL) {
			mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, "
			    "timeout=%d, elapsed=%jd\n", __func__, timeout,
			    (intmax_t)cur_time.tv_sec);
			rc = mpr_reinit(sc);
			mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ?
			    "success" : "failed");
		} else
			cm->cm_timeout_handler(sc, cm);
		if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
			/*
			 * Tell the caller that we freed the command in a
			 * reinit.
			 */
			*cmp = NULL;
		}
		error = ETIMEDOUT;
	}
	return (error);
}
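/*
 * Typical synchronous usage of mpr_wait_command() (sketch only; the
 * request setup is abridged and the 60-second timeout is an arbitrary
 * example value):
 */
#if 0
	struct mpr_command *cm;
	int error;

	if ((cm = mpr_alloc_command(sc)) == NULL)
		return (EBUSY);
	/* ... fill in cm->cm_req, cm->cm_sge, cm->cm_desc ... */
	error = mpr_wait_command(sc, &cm, 60, CAN_SLEEP);
	if (error == ETIMEDOUT && cm == NULL) {
		/* A reinit freed the command; don't touch it again. */
	}
#endif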
/*
 * This is the routine to enqueue a command synchronously and poll for
 * completion.  Its use should be rare.
 */
int
mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp)
{
	int error, rc;
	struct timeval cur_time, start_time;
	struct mpr_command *cm = *cmp;

	error = 0;

	cm->cm_flags |= MPR_CM_FLAGS_POLLED;
	cm->cm_complete = NULL;
	mpr_map_command(sc, cm);

	getmicrouptime(&start_time);
	while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
		mpr_intr_locked(sc);

		if (mtx_owned(&sc->mpr_mtx))
			/*
			 * Sleep on a channel nobody signals, so msleep()
			 * acts as a mutex-aware pause().
			 */
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprpoll", hz/20);
		else
			pause("mprpoll", hz/20);

		/*
		 * Check for real-time timeout and fail if more than 60
		 * seconds.
		 */
		getmicrouptime(&cur_time);
		timevalsub(&cur_time, &start_time);
		if (cur_time.tv_sec > 60) {
			mpr_dprint(sc, MPR_FAULT, "polling failed\n");
			error = ETIMEDOUT;
			break;
		}
	}
	cm->cm_state = MPR_CM_STATE_BUSY;
	if (error) {
		mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n",
		    __func__);
		rc = mpr_reinit(sc);
		mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ?
		    "success" : "failed");

		if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
			/*
			 * Tell the caller that we freed the command in a
			 * reinit.
			 */
			*cmp = NULL;
		}
	}
	return (error);
}
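/*
 * Summary of the three submission paths above: mpr_map_command() enqueues
 * asynchronously, with completion reported through cm_complete;
 * mpr_wait_command() sleeps (or polls when sleeping is not allowed) until
 * the command completes or times out; mpr_request_polled() drives
 * completion by calling the interrupt handler directly, for the rare
 * contexts where interrupt delivery cannot be relied upon.
 */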
/*
 * The MPT driver had a verbose interface for config pages.  In this driver,
 * reduce it to much simpler terms, similar to the Linux driver.
 */
int
mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
{
	MPI2_CONFIG_REQUEST *req;
	struct mpr_command *cm;
	int error;

	if (sc->mpr_flags & MPR_FLAGS_BUSY) {
		return (EBUSY);
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		return (EBUSY);
	}

	req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
	req->Function = MPI2_FUNCTION_CONFIG;
	req->Action = params->action;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->PageAddress = params->page_address;
	if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
		MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;

		hdr = &params->hdr.Ext;
		req->ExtPageType = hdr->ExtPageType;
		req->ExtPageLength = hdr->ExtPageLength;
		req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
		req->Header.PageLength = 0; /* Must be set to zero */
		req->Header.PageNumber = hdr->PageNumber;
		req->Header.PageVersion = hdr->PageVersion;
	} else {
		MPI2_CONFIG_PAGE_HEADER *hdr;

		hdr = &params->hdr.Struct;
		req->Header.PageType = hdr->PageType;
		req->Header.PageNumber = hdr->PageNumber;
		req->Header.PageLength = hdr->PageLength;
		req->Header.PageVersion = hdr->PageVersion;
	}

	cm->cm_data = params->buffer;
	cm->cm_length = params->length;
	if (cm->cm_data != NULL) {
		cm->cm_sge = &req->PageBufferSGE;
		cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
		cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
	} else
		cm->cm_sge = NULL;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	cm->cm_complete_data = params;
	if (params->callback != NULL) {
		cm->cm_complete = mpr_config_complete;
		return (mpr_map_command(sc, cm));
	} else {
		error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
		if (error) {
			mpr_dprint(sc, MPR_FAULT,
			    "Error %d reading config page\n", error);
			if (cm != NULL)
				mpr_free_command(sc, cm);
			return (error);
		}
		mpr_config_complete(sc, cm);
	}

	return (0);
}

int
mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
{
	/* Config page writes are not implemented; reject them. */
	return (EINVAL);
}

static void
mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_CONFIG_REPLY *reply;
	struct mpr_config_params *params;

	MPR_FUNCTRACE(sc);
	params = cm->cm_complete_data;

	if (cm->cm_data != NULL) {
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/*
	 * XXX KDM need to do more error recovery?  This results in the
	 * device in question not getting probed.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		params->status = MPI2_IOCSTATUS_BUSY;
		goto done;
	}

	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
	if (reply == NULL) {
		params->status = MPI2_IOCSTATUS_BUSY;
		goto done;
	}
	params->status = reply->IOCStatus;
	if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
		params->hdr.Ext.ExtPageType = reply->ExtPageType;
		params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
		params->hdr.Ext.PageType = reply->Header.PageType;
		params->hdr.Ext.PageNumber = reply->Header.PageNumber;
		params->hdr.Ext.PageVersion = reply->Header.PageVersion;
	} else {
		params->hdr.Struct.PageType = reply->Header.PageType;
		params->hdr.Struct.PageNumber = reply->Header.PageNumber;
		params->hdr.Struct.PageLength = reply->Header.PageLength;
		params->hdr.Struct.PageVersion = reply->Header.PageVersion;
	}

done:
	mpr_free_command(sc, cm);
	if (params->callback != NULL)
		params->callback(sc, params);

	return;
}
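/*
 * Usage sketch for the simplified config page interface above
 * (illustrative only; real callers typically fetch the page header first,
 * and the buf/len locals here are assumed to be caller-supplied):
 */
#if 0
	struct mpr_config_params params;
	int error;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
	params.hdr.Struct.PageNumber = 0;
	params.page_address = 0;
	params.buffer = buf;		/* caller-supplied buffer (assumed) */
	params.length = len;		/* buffer length (assumed) */
	params.callback = NULL;		/* NULL waits synchronously */
	error = mpr_read_config_page(sc, &params);
#endif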