/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 * $FreeBSD: head/sys/dev/mpr/mpr.c 330789 2018-03-12 05:02:22Z scottl $
 */

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/sbuf.h>
#include <sys/priv.h>

#include <sys/rman.h>
#include <sys/proc.h>

#include <bus/pci/pcivar.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/scsi/scsi_all.h>

#include <dev/raid/mpr/mpi/mpi2_type.h>
#include <dev/raid/mpr/mpi/mpi2.h>
#include <dev/raid/mpr/mpi/mpi2_ioc.h>
#include <dev/raid/mpr/mpi/mpi2_sas.h>
#include <dev/raid/mpr/mpi/mpi2_pci.h>
#include <dev/raid/mpr/mpi/mpi2_cnfg.h>
#include <dev/raid/mpr/mpi/mpi2_init.h>
#include <dev/raid/mpr/mpi/mpi2_tool.h>
#include <dev/raid/mpr/mpr_ioctl.h>
#include <dev/raid/mpr/mprvar.h>
#include <dev/raid/mpr/mpr_table.h>
#include <dev/raid/mpr/mpr_sas.h>

static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static void mpr_resize_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_dump_reqs(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
/*
 * This union exists to smoothly convert cm->cm_desc.Words through le64toh().
 * The compiler only accepts a plain uint64_t as the argument; passing the
 * aggregate directly produces the error
 * "aggregate value used where an integer was expected".
 */
typedef union _reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
} reply_descriptor, request_descriptor;

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep without
 * harm; if it is called from an interrupt handler, it cannot sleep and the
 * NO_SLEEP flag must be set.  Based on the sleep flag, the driver calls
 * either msleep, pause, or DELAY.  msleep and pause are variants of the
 * same primitive, but pause is used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
	uint32_t reg;
	int i, error, tries = 0;
	uint8_t first_wait_done = FALSE;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Clear any pending interrupts */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
	/* Push the magic sequence */
	error = ETIMEDOUT;
	while (tries++ < 20) {
		for (i = 0; i < sizeof(mpt2_reset_magic); i++)
			mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
			    mpt2_reset_magic[i]);

		/* wait 100 msec */
		if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP)
			lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
			    "mprdiag", hz < 10 ? 1 : hz / 10);
		else if (sleep_flag == CAN_SLEEP)
			tsleep(mpr_diag_reset, 0, "mprdiag",
			    hz < 10 ? 1 : hz / 10);
		else
			DELAY(100 * 1000);

		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
		    error);
		return (error);
	}

	/* Send the actual reset.  XXX need to refresh the reg? */
	reg |= MPI2_DIAG_RESET_ADAPTER;
	mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
	    reg);
	mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);

	/* Wait up to 300 seconds in 50ms intervals */
	error = ETIMEDOUT;
	for (i = 0; i < 6000; i++) {
		/*
		 * Wait 50 msec.  If this is the first time through, wait 256
		 * msec to satisfy Diag Reset timing requirements.
		 */
		if (first_wait_done) {
			if (lockowned(&sc->mpr_lock) &&
			    sleep_flag == CAN_SLEEP)
				lksleep(&sc->msleep_fake_chan, &sc->mpr_lock,
				    0, "mprdiag", hz < 20 ? 1 : hz / 20);
			else if (sleep_flag == CAN_SLEEP)
				tsleep(mpr_diag_reset, 0, "mprdiag",
				    hz < 20 ? 1 : hz / 20);
			else
				DELAY(50 * 1000);
		} else {
			DELAY(256 * 1000);
			first_wait_done = TRUE;
		}
		/*
		 * Check for the RESET_ADAPTER bit to be cleared first, then
		 * wait for the RESET state to be cleared, which takes a little
		 * longer.
		 */
		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_RESET_ADAPTER) {
			continue;
		}
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n",
		    error);
		return (error);
	}

	mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
	mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n");

	return (0);
}
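
/*
 * Descriptive note (added): unlike the diagnostic reset above, a Message
 * Unit Reset is requested through the doorbell and asks the IOC to drop
 * back to the Ready state without rerunning the full hard-reset sequence;
 * mpr_transition_ready() uses it to take ownership of an IOC that is
 * already Operational.
 */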
static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{
	int error;

	MPR_FUNCTRACE(sc);

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	error = 0;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
	    MPI2_DOORBELL_FUNCTION_SHIFT);

	if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_FAULT,
		    "Doorbell handshake failed\n");
		error = ETIMEDOUT;
	}

	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}

static int
mpr_transition_ready(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error, tries = 0;
	int sleep_flags;

	MPR_FUNCTRACE(sc);
	/* If we are in attach call, do not sleep */
	sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
	    ? CAN_SLEEP : NO_SLEEP;

	error = 0;

	mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n",
	    __func__, sleep_flags);

	while (tries++ < 1200) {
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg);

		/*
		 * Ensure the IOC is ready to talk.  If it's not, try
		 * resetting it.
		 */
		if (reg & MPI2_DOORBELL_USED) {
			mpr_dprint(sc, MPR_INIT, " Not ready, sending diag "
			    "reset\n");
			mpr_diag_reset(sc, sleep_flags);
			DELAY(50000);
			continue;
		}

		/* Is the adapter owned by another peer? */
		if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
		    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the "
			    "control of another peer host, aborting "
			    "initialization.\n");
			error = ENXIO;
			break;
		}

		state = reg & MPI2_IOC_STATE_MASK;
		if (state == MPI2_IOC_STATE_READY) {
			/* Ready to go! */
			error = 0;
			break;
		} else if (state == MPI2_IOC_STATE_FAULT) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault "
			    "state 0x%x, resetting\n",
			    state & MPI2_DOORBELL_FAULT_CODE_MASK);
			mpr_diag_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
			/* Need to take ownership */
			mpr_message_unit_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_RESET) {
			/* Wait a bit, IOC might be in transition */
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "IOC in unexpected reset state\n");
		} else {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "IOC in unknown state 0x%x\n", state);
			error = EINVAL;
			break;
		}

		/* Wait 50ms for things to settle down. */
		DELAY(50000);
	}

	if (error)
		mpr_dprint(sc, MPR_INIT|MPR_FAULT,
		    "Cannot transition IOC to ready\n");
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
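
/*
 * Descriptive note (added): once the IOC reports Ready, the only step left
 * to reach the Operational state is a successful IOCInit handshake, so
 * this function retransitions to Ready if needed and then sends IOCInit
 * via mpr_send_iocinit().
 */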
static int
mpr_transition_operational(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error;

	MPR_FUNCTRACE(sc);

	error = 0;
	reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
	mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);

	state = reg & MPI2_IOC_STATE_MASK;
	if (state != MPI2_IOC_STATE_READY) {
		mpr_dprint(sc, MPR_INIT, "IOC not ready\n");
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT,
			    "failed to transition ready, exit\n");
			return (error);
		}
	}

	error = mpr_send_iocinit(sc);
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);

	return (error);
}

static void
mpr_resize_queues(struct mpr_softc *sc)
{
	u_int reqcr, prireqcr, maxio, sges_per_frame, chain_seg_size;

	/*
	 * Size the queues.  Since the reply queues always need one free
	 * entry, we'll deduct one reply message here.  The LSI documents
	 * suggest instead to add a count to the request queue, but I think
	 * that it's better to deduct from reply queue.
	 */
	prireqcr = MAX(1, sc->max_prireqframes);
	prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);

	reqcr = MAX(2, sc->max_reqframes);
	reqcr = MIN(reqcr, sc->facts->RequestCredit);

	sc->num_reqs = prireqcr + reqcr;
	sc->num_prireqs = prireqcr;
	sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
	    sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;

	/* Store the request frame size in bytes rather than as 32bit words */
	sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;

	/*
	 * Gen3 and beyond use the IOCMaxChainSegmentSize from IOC Facts to
	 * get the size of a Chain Frame.  Previous versions use the size of
	 * a Request Frame for the Chain Frame size.  If IOCMaxChainSegmentSize
	 * is 0, use the default value.  The IOCMaxChainSegmentSize is the
	 * number of 16-byte elements that can fit in a Chain Frame, where 16
	 * bytes is the size of an IEEE Simple SGE.
	 */
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
		chain_seg_size = htole16(sc->facts->IOCMaxChainSegmentSize);
		if (chain_seg_size == 0)
			chain_seg_size = MPR_DEFAULT_CHAIN_SEG_SIZE;
		sc->chain_frame_size = chain_seg_size *
		    MPR_MAX_CHAIN_ELEMENT_SIZE;
	} else {
		sc->chain_frame_size = sc->reqframesz;
	}

	/*
	 * Max IO Size is Page Size * the following:
	 * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
	 * + 1 for no chain needed in last frame
	 *
	 * If user suggests a Max IO size to use, use the smaller of the
	 * user's value and the calculated value as long as the user's
	 * value is larger than 0. The user's value is in pages.
	 */
	sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1;
	maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;

	/*
	 * If I/O size limitation requested then use it and pass up to CAM.
	 * If not, use MAXPHYS as an optimization hint, but report HW limit.
	 */
	if (sc->max_io_pages > 0) {
		maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
		sc->maxio = maxio;
	} else {
		sc->maxio = maxio;
		maxio = min(maxio, MAXPHYS);
	}

	sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
	    sges_per_frame * reqcr;
	if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
		sc->num_chains = sc->max_chains;

	/*
	 * Figure out the number of MSIx-based queues.  If the firmware or
	 * user has done something crazy and not allowed enough credit for
	 * the queues to be useful then don't enable multi-queue.
	 */
	if (sc->facts->MaxMSIxVectors < 2)
		sc->msi_msgs = 1;

	if (sc->msi_msgs > 1) {
		sc->msi_msgs = MIN(sc->msi_msgs, ncpus);
		sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
		if (sc->num_reqs / sc->msi_msgs < 2)
			sc->msi_msgs = 1;
	}

	mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
	    sc->msi_msgs, sc->num_reqs, sc->num_replies);
}
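
/*
 * Worked example for the sizing above, with assumed (not universal)
 * firmware values: a 128-byte chain frame holds 128 / 16 = 8 IEEE simple
 * SGEs, leaving sges_per_frame = 7 once one slot is reserved for the
 * chain element.  With MaxChainDepth = 128 this gives
 * maxio = (7 * 128 + 1) * PAGE_SIZE = 897 4K pages (about 3.5MB) before
 * the max_io_pages and MAXPHYS caps are applied.
 */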

/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and re-
 * allocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
	int error;
	Mpi2IOCFactsReply_t saved_facts;
	uint8_t saved_mode, reallocating;

	mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__);

	/* Save old IOC Facts and then only reallocate if Facts have changed */
	if (!attaching) {
		bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
	}

	/*
	 * Get IOC Facts.  In all cases throughout this function, panic if
	 * doing a re-initialization and only return the error if attaching
	 * so the OS can handle it.
	 */
	if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
		if (attaching) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get "
			    "IOC Facts with error %d, exit\n", error);
			return (error);
		} else {
			panic("%s failed to get IOC Facts with error %d\n",
			    __func__, error);
		}
	}

	MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);

	ksnprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%02d.%02d.%02d.%02d",
	    sc->facts->FWVersion.Struct.Major,
	    sc->facts->FWVersion.Struct.Minor,
	    sc->facts->FWVersion.Struct.Unit,
	    sc->facts->FWVersion.Struct.Dev);

	mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
	    MPR_DRIVER_VERSION);
	mpr_dprint(sc, MPR_INFO,
	    "IOCCapabilities: %pb%i\n",
	    "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
	    "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
	    "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
	    "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV",
	    sc->facts->IOCCapabilities);

	/*
	 * If the chip doesn't support event replay then a hard reset will be
	 * required to trigger a full discovery.  Do the reset here then
	 * retransition to Ready.  A hard reset might have already been done,
	 * but it doesn't hurt to do it again.  Only do this if attaching, not
	 * for a Diag Reset.
	 */
	if (attaching && ((sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
		mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n");
		mpr_diag_reset(sc, NO_SLEEP);
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
			    "transition to ready with error %d, exit\n",
			    error);
			return (error);
		}
	}

	/*
	 * Set flag if IR Firmware is loaded.  If the RAID Capability has
	 * changed from the previous IOC Facts, log a warning, but only if
	 * checking this after a Diag Reset and not during attach.
	 */
	saved_mode = sc->ir_firmware;
	if (sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
		sc->ir_firmware = 1;
	if (!attaching) {
		if (sc->ir_firmware != saved_mode) {
			mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode "
			    "in IOC Facts does not match previous mode\n");
		}
	}

	/* Only deallocate and reallocate if relevant IOC Facts have changed */
	reallocating = FALSE;
	sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED;

	if ((!attaching) &&
	    ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
	    (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
	    (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
	    (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
	    (saved_facts.ProductID != sc->facts->ProductID) ||
	    (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
	    (saved_facts.IOCRequestFrameSize !=
	    sc->facts->IOCRequestFrameSize) ||
	    (saved_facts.IOCMaxChainSegmentSize !=
	    sc->facts->IOCMaxChainSegmentSize) ||
	    (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
	    (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
	    (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
	    (saved_facts.HighPriorityCredit !=
	    sc->facts->HighPriorityCredit) ||
	    (saved_facts.MaxReplyDescriptorPostQueueDepth !=
	    sc->facts->MaxReplyDescriptorPostQueueDepth) ||
	    (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
	    (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
	    (saved_facts.MaxPersistentEntries !=
	    sc->facts->MaxPersistentEntries))) {
		reallocating = TRUE;

		/* Record that we reallocated everything */
		sc->mpr_flags |= MPR_FLAGS_REALLOCATED;
	}

	/*
	 * Some things should be done if attaching or re-allocating after a
	 * Diag Reset, but are not needed after a Diag Reset if the FW has
	 * not changed.
	 */
	if (attaching || reallocating) {
		/*
		 * Check if controller supports FW diag buffers and set flag to
		 * enable each type.
		 */
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
			    enabled = TRUE;

		/*
		 * Set flags for some supported items.
		 */
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
			sc->eedp_enabled = TRUE;
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
			sc->control_TLR = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
			sc->atomic_desc_capable = TRUE;

		mpr_resize_queues(sc);

		/*
		 * Initialize all Tail Queues
		 */
		TAILQ_INIT(&sc->req_list);
		TAILQ_INIT(&sc->high_priority_req_list);
		TAILQ_INIT(&sc->chain_list);
		TAILQ_INIT(&sc->prp_page_list);
		TAILQ_INIT(&sc->tm_list);
	}

	/*
	 * If doing a Diag Reset and the FW is significantly different
	 * (reallocating will be set above in IOC Facts comparison), then all
	 * buffers based on the IOC Facts will need to be freed before they
	 * are reallocated.
	 */
	if (reallocating) {
		mpr_iocfacts_free(sc);
		mprsas_realloc_targets(sc, saved_facts.MaxTargets +
		    saved_facts.MaxVolumes);
	}

	/*
	 * Any deallocation has been completed.  Now start reallocating
	 * if needed.  Will only need to reallocate if attaching or if the new
	 * IOC Facts are different from the previous IOC Facts after a Diag
	 * Reset.  Targets have already been allocated above if needed.
	 */
	error = 0;
	while (attaching || reallocating) {
		if ((error = mpr_alloc_hw_queues(sc)) != 0)
			break;
		if ((error = mpr_alloc_replies(sc)) != 0)
			break;
		if ((error = mpr_alloc_requests(sc)) != 0)
			break;
		if ((error = mpr_alloc_queues(sc)) != 0)
			break;
		break;
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to alloc queues with error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/* Always initialize the queues */
	bzero(sc->free_queue, sc->fqdepth * 4);
	mpr_init_queues(sc);

	/*
	 * Always get the chip out of the reset state, but only panic if not
	 * attaching.  If attaching and there is an error, that is handled by
	 * the OS.
	 */
	error = mpr_transition_operational(sc);
	if (error != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to "
		    "transition to operational with error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/*
	 * Finish the queue initialization.
	 * These are set here instead of in mpr_init_queues() because the
	 * IOC resets these values during the state transition in
	 * mpr_transition_operational().  The free index is set to 1
	 * because the corresponding index in the IOC is set to 0, and the
	 * IOC treats the queues as full if both are set to the same value.
	 * Hence the reason that the queue can't hold all of the possible
	 * replies.
	 */
	sc->replypostindex = 0;
	mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
	mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);

	/*
	 * Attach the subsystems so they can prepare their event masks.
	 * XXX Should be dynamic so that IM/IR and user modules can attach
	 */
	error = 0;
	while (attaching) {
		mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n");
		if ((error = mpr_attach_log(sc)) != 0)
			break;
		if ((error = mpr_attach_sas(sc)) != 0)
			break;
		if ((error = mpr_attach_user(sc)) != 0)
			break;
		break;
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to attach all subsystems: error %d\n", error);
		mpr_free(sc);
		return (error);
	}

	/*
	 * XXX If the number of MSI-X vectors changes during re-init, this
	 * won't see it and adjust.
	 */
	if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Failed to setup interrupts\n");
		mpr_free(sc);
		return (error);
	}

	return (error);
}
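
/*
 * Note (added): for each DMA area below, the teardown mirrors the setup
 * sequence used in the mpr_alloc_* routines: unload the map, free the
 * memory, then destroy the tag, matching the earlier
 * tag_create/dmamem_alloc/dmamap_load order.
 */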
/*
 * This is called if memory is being freed (during detach for example) and
 * when buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	if (sc->free_busaddr != 0)
		bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
	if (sc->free_queue != NULL)
		bus_dmamem_free(sc->queues_dmat, sc->free_queue,
		    sc->queues_map);
	if (sc->queues_dmat != NULL)
		bus_dma_tag_destroy(sc->queues_dmat);

	if (sc->chain_frames != NULL) {
		bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
	}
	if (sc->chain_dmat != NULL)
		bus_dma_tag_destroy(sc->chain_dmat);

	if (sc->sense_busaddr != 0)
		bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
	if (sc->sense_frames != NULL)
		bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
		    sc->sense_map);
	if (sc->sense_dmat != NULL)
		bus_dma_tag_destroy(sc->sense_dmat);

	if (sc->prp_page_busaddr != 0)
		bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
	if (sc->prp_pages != NULL)
		bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
		    sc->prp_page_map);
	if (sc->prp_page_dmat != NULL)
		bus_dma_tag_destroy(sc->prp_page_dmat);

	if (sc->reply_busaddr != 0)
		bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
	if (sc->reply_frames != NULL)
		bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
		    sc->reply_map);
	if (sc->reply_dmat != NULL)
		bus_dma_tag_destroy(sc->reply_dmat);

	if (sc->req_busaddr != 0)
		bus_dmamap_unload(sc->req_dmat, sc->req_map);
	if (sc->req_frames != NULL)
		bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
	if (sc->req_dmat != NULL)
		bus_dma_tag_destroy(sc->req_dmat);

	if (sc->chains != NULL)
		kfree(sc->chains, M_MPR);
	if (sc->prps != NULL)
		kfree(sc->prps, M_MPR);
	if (sc->commands != NULL) {
		for (i = 1; i < sc->num_reqs; i++) {
			cm = &sc->commands[i];
			bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
		}
		kfree(sc->commands, M_MPR);
	}
	if (sc->buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->buffer_dmat);

	mpr_pci_free_interrupts(sc);
	kfree(sc->queues, M_MPR);
	sc->queues = NULL;
}

/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip.  In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip.  This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
	int error;
	struct mprsas_softc *sassc;

	sassc = sc->sassc;

	MPR_FUNCTRACE(sc);

	KKASSERT(lockowned(&sc->mpr_lock));

	mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
		mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
		return 0;
	}

	/*
	 * Make sure the completion callbacks can recognize they're getting
	 * a NULL cm_reply due to a reset.
	 */
	sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

	/*
	 * Mask interrupts here.
	 */
	mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
	mpr_mask_intr(sc);

	error = mpr_diag_reset(sc, CAN_SLEEP);
	if (error != 0) {
		panic("%s hard reset failed with error %d\n", __func__, error);
	}

	/* Restore the PCI state, including the MSI-X registers */
	mpr_pci_restore(sc);

	/* Give the I/O subsystem special priority to get itself prepared */
	mprsas_handle_reinit(sc);

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * The attach function will also call mpr_iocfacts_allocate at startup.
	 * If relevant values have changed in IOC Facts, this function will
	 * free all of the memory based on IOC Facts and reallocate that
	 * memory.
	 */
	if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
		panic("%s IOC Facts based allocation failed with error %d\n",
		    __func__, error);
	}

	/*
	 * Mapping structures will be re-allocated after getting IOC Page8, so
	 * free these structures here.
	 */
	mpr_mapping_exit(sc);

	/*
	 * The static page function currently read is IOC Page8.  Others can be
	 * added in future.  It's possible that the values in IOC Page8 have
	 * changed after a Diag Reset due to user modification, so always read
	 * these.  Interrupts are masked, so unmask them before getting config
	 * pages.
	 */
	mpr_unmask_intr(sc);
	sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
	mpr_base_static_config_pages(sc);

	/*
	 * Some mapping info is based in IOC Page8 data, so re-initialize the
	 * mapping tables.
	 */
	mpr_mapping_initialize(sc);

	/*
	 * Restart will reload the event masks clobbered by the reset, and
	 * then enable the port.
	 */
	mpr_reregister_events(sc);

	/* the end of discovery will release the simq, so we're done. */
	mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
	    sc, sc->replypostindex, sc->replyfreeindex);
	mprsas_release_simq_reinit(sassc);
	mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);

	return 0;
}

/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait for up to <timeout> seconds.  Each pass of the busy loop delays
 * 500 microseconds, so the total is [ 0.5 * (2000 * <timeout>) ]
 * milliseconds, i.e. <timeout> seconds; the sleeping variant makes
 * 1000 * <timeout> passes of about 1 millisecond each.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = mpr_regread(sc,
		    MPI2_HOST_INTERRUPT_STATUS_OFFSET);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), "
			    "timeout(%d)\n", __func__, count, timeout);
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpr_dprint(sc, MPR_FAULT,
				    "fault_state(0x%04x)!\n", doorbell);
				return (EFAULT);
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		/*
		 * If it can sleep, sleep for 1 millisecond, else busy loop
		 * for 0.5 millisecond
		 */
		if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP)
			lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
			    "mprdba", hz < 1000 ? 1 : hz / 1000);
		else if (sleep_flag == CAN_SLEEP)
			tsleep(mpr_wait_db_ack, 0, "mprdba",
			    hz < 1000 ? 1 : hz / 1000);
		else
			DELAY(500);
		count++;
	} while (--cntdn);

out:
	mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
	    "int_status(%x)!\n", __func__, count, int_status);
	return (ETIMEDOUT);
}

/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
	int retry;

	for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
		if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
		    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
			return (0);
		DELAY(2000);
	}
	return (ETIMEDOUT);
}
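
/*
 * Summary of the handshake implemented below (added for clarity): the
 * request is clocked out through the doorbell register one 32-bit word at
 * a time, with mpr_wait_db_ack() confirming that the IOC consumed each
 * word, and the reply is clocked back in 16 bits at a time, with
 * mpr_wait_db_int() signaling when each half-word is ready to be read.
 */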

/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
	uint32_t *data32;
	uint16_t *data16;
	int i, count, ioc_sz, residual;
	int sleep_flags = CAN_SLEEP;

	/* Step 1 */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Step 2 */
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		return (EBUSY);

	/*
	 * Step 3
	 * Announce that a message is coming through the doorbell.  Messages
	 * are pushed at 32bit words, so round up if needed.
	 */
	count = (req_sz + 3) / 4;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
	    (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));

	/* Step 4 */
	if (mpr_wait_db_int(sc) ||
	    (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
		return (ENXIO);
	}
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
		return (ENXIO);
	}

	/* Step 5 */
	/* Clock out the message data synchronously in 32-bit dwords */
	data32 = (uint32_t *)req;
	for (i = 0; i < count; i++) {
		mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
		if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout while writing doorbell\n");
			return (ENXIO);
		}
	}

	/* Step 6 */
	/*
	 * Clock in the reply in 16-bit words.  The total length of the
	 * message is always in the 4th byte, so clock out the first 2 words
	 * manually, then loop the rest.
	 */
	data16 = (uint16_t *)reply;
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
		return (ENXIO);
	}
	data16[0] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
		return (ENXIO);
	}
	data16[1] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Number of 32bit words in the message */
	ioc_sz = reply->MsgLength;

	/*
	 * Figure out how many 16bit words to clock in without overrunning.
	 * The precision loss with dividing reply_sz can safely be
	 * ignored because the messages can only be multiples of 32bits.
	 */
	residual = 0;
	count = MIN((reply_sz / 4), ioc_sz) * 2;
	if (count < ioc_sz * 2) {
		residual = ioc_sz * 2 - count;
		mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
		    "residual message words\n", residual);
	}

	for (i = 2; i < count; i++) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout reading doorbell %d\n", i);
			return (ENXIO);
		}
		data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
		    MPI2_DOORBELL_DATA_MASK;
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/*
	 * Pull out residual words that won't fit into the provided buffer.
	 * This keeps the chip from hanging due to a driver programming
	 * error.
	 */
	while (residual--) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
			return (ENXIO);
		}
		(void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/* Step 7 */
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
		return (ENXIO);
	}
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	return (0);
}
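
/*
 * Descriptive note (added): hand a request descriptor to the IOC.
 * Atomic-capable controllers accept the descriptor through a single
 * 32-bit write of the low word; older controllers require both halves,
 * written low word first and high word last.
 */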
static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
	request_descriptor rd;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
	    cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

	if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
	    MPR_FLAGS_SHUTDOWN))
		KKASSERT(lockowned(&sc->mpr_lock));

	if (++sc->io_cmds_active > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, ("command not busy\n"));
	cm->cm_state = MPR_CM_STATE_INQUEUE;

	if (sc->atomic_desc_capable) {
		rd.u.low = cm->cm_desc.Words.Low;
		mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
		    rd.u.low);
	} else {
		rd.u.low = cm->cm_desc.Words.Low;
		rd.u.high = cm->cm_desc.Words.High;
		rd.word = htole64(rd.word);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
		    rd.u.low);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
		    rd.u.high);
	}
}

/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
	MPI2_DEFAULT_REPLY *reply;
	MPI2_IOC_FACTS_REQUEST request;
	int error, req_sz, reply_sz;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
	reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
	reply = (MPI2_DEFAULT_REPLY *)facts;

	bzero(&request, req_sz);
	request.Function = MPI2_FUNCTION_IOC_FACTS;
	error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
	return (error);
}

static int
mpr_send_iocinit(struct mpr_softc *sc)
{
	MPI2_IOC_INIT_REQUEST init;
	MPI2_DEFAULT_REPLY reply;
	int req_sz, reply_sz, error;
	struct timeval now;
	uint64_t time_in_msec;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Do a quick sanity check on proper initialization */
	if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
	    || (sc->replyframesz == 0)) {
		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
		    "Driver not fully initialized for IOCInit\n");
		return (EINVAL);
	}

	req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
	reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
	bzero(&init, req_sz);
	bzero(&reply, reply_sz);

	/*
	 * Fill in the init block.  Note that most addresses are
	 * deliberately in the lower 32bits of memory.  This is a micro-
	 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
	 */
	init.Function = MPI2_FUNCTION_IOC_INIT;
	init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	init.MsgVersion = htole16(MPI2_VERSION);
	init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
	init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
	init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
	init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
	init.SenseBufferAddressHigh = 0;
	init.SystemReplyAddressHigh = 0;
	init.SystemRequestFrameBaseAddress.High = 0;
	init.SystemRequestFrameBaseAddress.Low =
	    htole32((uint32_t)sc->req_busaddr);
	init.ReplyDescriptorPostQueueAddress.High = 0;
	init.ReplyDescriptorPostQueueAddress.Low =
	    htole32((uint32_t)sc->post_busaddr);
	init.ReplyFreeQueueAddress.High = 0;
	init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
	getmicrotime(&now);
	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
	init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
	init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
	init.HostPageSize = HOST_PAGE_SIZE_4K;

	error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
	if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		error = ENXIO;

	mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
	mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__);
	return (error);
}
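
/*
 * Descriptive note (added): the busdma loads that use this callback are
 * for single-segment, physically contiguous areas (their tags are created
 * with nsegments == 1), so it only needs to record the bus address of
 * segment 0.
 */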
void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

void
mpr_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mpr_busdma_context *ctx;
	int need_unload, need_free;

	ctx = (struct mpr_busdma_context *)arg;
	need_unload = 0;
	need_free = 0;

	mpr_lock(ctx->softc);
	ctx->error = error;
	ctx->completed = 1;
	if ((error == 0) && (ctx->abandoned == 0)) {
		*ctx->addr = segs[0].ds_addr;
	} else {
		if (nsegs != 0)
			need_unload = 1;
		if (ctx->abandoned != 0)
			need_free = 1;
	}
	if (need_free == 0)
		wakeup(ctx);

	mpr_unlock(ctx->softc);

	if (need_unload != 0) {
		bus_dmamap_unload(ctx->buffer_dmat,
		    ctx->buffer_dmamap);
		*ctx->addr = 0;
	}

	if (need_free != 0)
		kfree(ctx, M_MPR);
}

static int
mpr_alloc_queues(struct mpr_softc *sc)
{
	struct mpr_queue *q;
	int nq, i;

	nq = sc->msi_msgs;
	mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq);

	sc->queues = kmalloc(sizeof(struct mpr_queue) * nq, M_MPR,
	    M_NOWAIT|M_ZERO);
	if (sc->queues == NULL)
		return (ENOMEM);

	for (i = 0; i < nq; i++) {
		q = &sc->queues[i];
		mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q);
		q->sc = sc;
		q->qnum = i;
	}
	return (0);
}

static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
	bus_addr_t queues_busaddr;
	uint8_t *queues;
	int qsize, fqsize, pqsize;

	/*
	 * The reply free queue contains 4 byte entries in multiples of 16 and
	 * aligned on a 16 byte boundary.  There must always be an unused
	 * entry.  This queue supplies fresh reply frames for the firmware to
	 * use.
	 *
	 * The reply descriptor post queue contains 8 byte entries in
	 * multiples of 16 and aligned on a 16 byte boundary.  This queue
	 * contains filled-in reply frames sent from the firmware to the host.
	 *
	 * These two queues are allocated together for simplicity.
	 */
	sc->fqdepth = roundup2(sc->num_replies + 1, 16);
	sc->pqdepth = roundup2(sc->num_replies + 1, 16);
	fqsize = sc->fqdepth * 4;
	pqsize = sc->pqdepth * 8;
	qsize = fqsize + pqsize;

	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				qsize,			/* maxsize */
				1,			/* nsegments */
				qsize,			/* maxsegsize */
				0,			/* flags */
				&sc->queues_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
	    &sc->queues_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n");
		return (ENOMEM);
	}
	bzero(queues, qsize);
	bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
	    mpr_memaddr_cb, &queues_busaddr, 0);

	sc->free_queue = (uint32_t *)queues;
	sc->free_busaddr = queues_busaddr;
	sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
	sc->post_busaddr = queues_busaddr + fqsize;
	mpr_dprint(sc, MPR_INIT, "free queue busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->free_busaddr, fqsize);
	mpr_dprint(sc, MPR_INIT, "reply queue busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->post_busaddr, pqsize);

	return (0);
}
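
/*
 * Example of the rounding in mpr_alloc_hw_queues() above (illustrative
 * numbers only): with num_replies = 1023, both depths round up to 1024,
 * giving a 4KB reply free queue followed by an 8KB post queue in a
 * single 12KB contiguous allocation.
 */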

static int
mpr_alloc_replies(struct mpr_softc *sc)
{
	int rsize, num_replies;

	/* Store the reply frame size in bytes rather than as 32bit words */
	sc->replyframesz = sc->facts->ReplyFrameSize * 4;

	/*
	 * sc->num_replies should be one less than sc->fqdepth.  We need to
	 * allocate space for sc->fqdepth replies, but only sc->num_replies
	 * replies can be used at once.
	 */
	num_replies = max(sc->fqdepth, sc->num_replies);

	rsize = sc->replyframesz * num_replies;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				&sc->reply_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
	    BUS_DMA_NOWAIT, &sc->reply_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
		return (ENOMEM);
	}
	bzero(sc->reply_frames, rsize);
	bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
	    mpr_memaddr_cb, &sc->reply_busaddr, 0);
	mpr_dprint(sc, MPR_INIT, "reply frames busaddr= %#016jx size= %d\n",
	    (uintmax_t)sc->reply_busaddr, rsize);

	return (0);
}
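
/*
 * Descriptive note (added): busdma callback for the chain-frame area.
 * Walk each DMA segment, carve it into chain_frame_size pieces, hook each
 * piece onto the free chain list, and record the resulting total as the
 * initial chain_free_lowwater.
 */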
"Cannot allocate chain DMA tag\n"); 1459 return (ENOMEM); 1460 } 1461 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, 1462 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) { 1463 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n"); 1464 return (ENOMEM); 1465 } 1466 if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, 1467 rsize, mpr_load_chains_cb, sc, BUS_DMA_NOWAIT)) { 1468 mpr_dprint(sc, MPR_ERROR, "Cannot load chain memory\n"); 1469 bus_dmamem_free(sc->chain_dmat, sc->chain_frames, 1470 sc->chain_map); 1471 return (ENOMEM); 1472 } 1473 1474 rsize = MPR_SENSE_LEN * sc->num_reqs; 1475 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1476 1, 0, /* algnmnt, boundary */ 1477 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1478 BUS_SPACE_MAXADDR, /* highaddr */ 1479 NULL, NULL, /* filter, filterarg */ 1480 rsize, /* maxsize */ 1481 1, /* nsegments */ 1482 rsize, /* maxsegsize */ 1483 0, /* flags */ 1484 &sc->sense_dmat)) { 1485 mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n"); 1486 return (ENOMEM); 1487 } 1488 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, 1489 BUS_DMA_NOWAIT, &sc->sense_map)) { 1490 mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n"); 1491 return (ENOMEM); 1492 } 1493 bzero(sc->sense_frames, rsize); 1494 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, 1495 mpr_memaddr_cb, &sc->sense_busaddr, 0); 1496 mpr_dprint(sc, MPR_INIT, "sense frames busaddr= %#016jx size= %d\n", 1497 (uintmax_t)sc->sense_busaddr, rsize); 1498 1499 /* 1500 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports 1501 * these devices. 1502 */ 1503 if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) && 1504 (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) { 1505 if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM) 1506 return (ENOMEM); 1507 } 1508 1509 nsegs = (sc->maxio / PAGE_SIZE) + 1; 1510 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1511 1, 0, /* algnmnt, boundary */ 1512 BUS_SPACE_MAXADDR, /* lowaddr */ 1513 BUS_SPACE_MAXADDR, /* highaddr */ 1514 NULL, NULL, /* filter, filterarg */ 1515 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 1516 nsegs, /* nsegments */ 1517 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1518 BUS_DMA_ALLOCNOW, /* flags */ 1519 &sc->buffer_dmat)) { 1520 mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n"); 1521 return (ENOMEM); 1522 } 1523 1524 /* 1525 * SMID 0 cannot be used as a free command per the firmware spec. 1526 * Just drop that command instead of risking accounting bugs. 1527 */ 1528 sc->commands = kmalloc(sizeof(struct mpr_command) * sc->num_reqs, 1529 M_MPR, M_WAITOK | M_ZERO); 1530 if (!sc->commands) { 1531 mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n"); 1532 return (ENOMEM); 1533 } 1534 for (i = 1; i < sc->num_reqs; i++) { 1535 cm = &sc->commands[i]; 1536 cm->cm_req = sc->req_frames + i * sc->reqframesz; 1537 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz; 1538 cm->cm_sense = &sc->sense_frames[i]; 1539 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; 1540 cm->cm_desc.Default.SMID = i; 1541 cm->cm_sc = sc; 1542 cm->cm_state = MPR_CM_STATE_BUSY; 1543 TAILQ_INIT(&cm->cm_chain_list); 1544 TAILQ_INIT(&cm->cm_prp_page_list); 1545 callout_init_lk(&cm->cm_callout, &sc->mpr_lock); 1546 1547 /* XXX Is a failure here a critical problem? 

/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * Returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
	int PRPs_per_page, PRPs_required, pages_required;
	int rsize, i;
	struct mpr_prp_page *prp_page;

	/*
	 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
	 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
	 * MAX_IO_SIZE / PAGE_SIZE = 256
	 *
	 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
	 * required for the remainder of the 1MB I/O. 512 PRPs can fit into one
	 * page (4096 / 8 = 512), so only one page is required for each I/O.
	 *
	 * Each of these buffers will need to be contiguous. For simplicity,
	 * only one buffer is allocated here, which has all of the space
	 * required for the NVMe Queue Depth. If there are problems allocating
	 * this one buffer, this function will need to change to allocate
	 * individual, contiguous NVME_QDEPTH buffers.
	 *
	 * The real calculation will use the real max io size. Above is just an
	 * example.
	 */
	PRPs_required = sc->maxio / PAGE_SIZE;
	PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
	pages_required = (PRPs_required / PRPs_per_page) + 1;

	sc->prp_buffer_size = PAGE_SIZE * pages_required;
	rsize = sc->prp_buffer_size * NVME_QDEPTH;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				&sc->prp_page_dmat)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
		    "tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
	    BUS_DMA_NOWAIT, &sc->prp_page_map)) {
		mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n");
		return (ENOMEM);
	}
	bzero(sc->prp_pages, rsize);
	bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
	    rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

	sc->prps = kmalloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < NVME_QDEPTH; i++) {
		prp_page = &sc->prps[i];
		prp_page->prp_page = (uint64_t *)(sc->prp_pages +
		    i * sc->prp_buffer_size);
		prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
		    i * sc->prp_buffer_size);
		mpr_free_prp_page(sc, prp_page);
		sc->prp_pages_free_lowwater++;
	}

	return (0);
}
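
/*
 * Descriptive note (added): the post queue is filled with 0xff so that
 * every descriptor starts out looking unused; the interrupt handler can
 * then tell where the valid, firmware-written descriptors end.
 */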
static int
mpr_init_queues(struct mpr_softc *sc)
{
	int i;

	memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);

	/*
	 * According to the spec, we need to use one less reply than we
	 * have space for on the queue.  So sc->num_replies (the number we
	 * use) should be less than sc->fqdepth (allocated size).
	 */
	if (sc->num_replies >= sc->fqdepth)
		return (EINVAL);

	/*
	 * Initialize all of the free queue entries.
	 */
	for (i = 0; i < sc->fqdepth; i++) {
		sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
	}
	sc->replyfreeindex = sc->num_replies;

	return (0);
}

/*
 * Get the driver parameter tunables.  Lowest priority are the driver
 * defaults.  Next are the global settings, if they exist.  Highest are
 * the per-unit settings, if they exist.
 */
void
mpr_get_tunables(struct mpr_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mpr_debug = MPR_INFO | MPR_FAULT;
	sc->msi_enable = 1;
	sc->max_chains = MPR_CHAIN_FRAMES;
	sc->max_io_pages = MPR_MAXIO_PAGES;
	sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
	sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
	sc->use_phynum = 1;
	sc->max_reqframes = MPR_REQ_FRAMES;
	sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
	sc->max_replyframes = MPR_REPLY_FRAMES;
	sc->max_evtframes = MPR_EVT_REPLY_FRAMES;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mpr.debug_level", &sc->mpr_debug);
	TUNABLE_INT_FETCH("hw.mpr.msi_enable", &sc->msi_enable);
	TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
	TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
	TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
	TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
	TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
	TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
	TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
	TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
	TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes);

	/* Grab the unit-instance variables */
	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mpr_debug);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.msi_enable",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->msi_enable);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);

	bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
}
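
/*
 * Example (hypothetical values): setting
 *     hw.mpr.max_chains="4096"
 * in loader.conf overrides the MPR_CHAIN_FRAMES default for all
 * controllers, while
 *     dev.mpr.0.max_chains="4096"
 * overrides it for unit 0 only; the per-unit fetch runs last, so it takes
 * precedence over the global setting.
 */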
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); 1743 1744 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes", 1745 device_get_unit(sc->mpr_dev)); 1746 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); 1747 1748 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes", 1749 device_get_unit(sc->mpr_dev)); 1750 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); 1751 1752 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes", 1753 device_get_unit(sc->mpr_dev)); 1754 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); 1755 } 1756 1757 static void 1758 mpr_setup_sysctl(struct mpr_softc *sc) 1759 { 1760 struct sysctl_ctx_list *sysctl_ctx = NULL; 1761 struct sysctl_oid *sysctl_tree = NULL; 1762 char tmpstr[80], tmpstr2[80]; 1763 1764 /* 1765 * Setup the sysctl variable so the user can change the debug level 1766 * on the fly. 1767 */ 1768 ksnprintf(tmpstr, sizeof(tmpstr), "MPR controller %d", 1769 device_get_unit(sc->mpr_dev)); 1770 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); 1771 1772 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); 1773 if (sysctl_ctx != NULL) 1774 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); 1775 1776 if (sysctl_tree == NULL) { 1777 sysctl_ctx_init(&sc->sysctl_ctx); 1778 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 1779 SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2, 1780 CTLFLAG_RD, 0, tmpstr); 1781 if (sc->sysctl_tree == NULL) 1782 return; 1783 sysctl_ctx = &sc->sysctl_ctx; 1784 sysctl_tree = sc->sysctl_tree; 1785 } 1786 1787 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1788 OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mpr_debug, 0, 1789 "mpr debug level"); 1790 1791 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1792 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, 1793 "Total number of allocated request frames"); 1794 1795 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1796 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, 1797 "Total number of allocated high priority request frames"); 1798 1799 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1800 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, 1801 "Total number of allocated reply frames"); 1802 1803 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1804 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, 1805 "Total number of event frames allocated"); 1806 1807 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1808 OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version, 1809 strlen(sc->fw_version), "firmware version"); 1810 1811 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1812 OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION, 1813 strlen(MPR_DRIVER_VERSION), "driver version"); 1814 1815 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1816 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 1817 &sc->io_cmds_active, 0, "number of currently active commands"); 1818 1819 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1820 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1821 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1822 1823 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1824 OID_AUTO, "chain_free", CTLFLAG_RD, 1825 &sc->chain_free, 0, "number of free chain elements"); 1826 1827 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1828 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1829 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1830 1831 SYSCTL_ADD_INT(sysctl_ctx, 
SYSCTL_CHILDREN(sysctl_tree), 1832 OID_AUTO, "max_chains", CTLFLAG_RD, 1833 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1834 1835 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1836 OID_AUTO, "max_io_pages", CTLFLAG_RD, 1837 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use " 1838 "IOCFacts)"); 1839 1840 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1841 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, 1842 "enable SSU to SATA SSD/HDD at shutdown"); 1843 1844 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1845 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1846 &sc->chain_alloc_fail, 0, "chain allocation failures"); 1847 1848 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1849 OID_AUTO, "spinup_wait_time", CTLFLAG_RD, 1850 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " 1851 "spinup after SATA ID error"); 1852 1853 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1854 OID_AUTO, "dump_reqs", CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP, sc, 0, 1855 mpr_dump_reqs, "I", "Dump Active Requests"); 1856 1857 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1858 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, 1859 "Use the phy number for enumeration"); 1860 1861 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1862 OID_AUTO, "prp_pages_free", CTLFLAG_RD, 1863 &sc->prp_pages_free, 0, "number of free PRP pages"); 1864 1865 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1866 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD, 1867 &sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages"); 1868 1869 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1870 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD, 1871 &sc->prp_page_alloc_fail, 0, "PRP page allocation failures"); 1872 } 1873 1874 struct mpr_dumpreq_hdr { 1875 uint32_t smid; 1876 uint32_t state; 1877 uint32_t numframes; 1878 uint32_t deschi; 1879 uint32_t desclo; 1880 }; 1881 1882 static int 1883 mpr_dump_reqs(SYSCTL_HANDLER_ARGS) 1884 { 1885 struct mpr_softc *sc; 1886 struct mpr_chain *chain, *chain1; 1887 struct mpr_command *cm; 1888 struct mpr_dumpreq_hdr hdr; 1889 struct sbuf *sb; 1890 uint32_t smid, state; 1891 int i, numreqs, error = 0; 1892 1893 sc = (struct mpr_softc *)arg1; 1894 1895 if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) { 1896 kprintf("priv check error %d\n", error); 1897 return (error); 1898 } 1899 1900 state = MPR_CM_STATE_INQUEUE; 1901 smid = 1; 1902 numreqs = sc->num_reqs; 1903 1904 if (req->newptr != NULL) 1905 return (EINVAL); 1906 1907 if (smid == 0 || smid > sc->num_reqs) 1908 return (EINVAL); 1909 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs)) 1910 numreqs = sc->num_reqs; 1911 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 1912 1913 /* Best effort, no locking */ 1914 for (i = smid; i < numreqs; i++) { 1915 cm = &sc->commands[i]; 1916 if (cm->cm_state != state) 1917 continue; 1918 hdr.smid = i; 1919 hdr.state = cm->cm_state; 1920 hdr.numframes = 1; 1921 hdr.deschi = cm->cm_desc.Words.High; 1922 hdr.desclo = cm->cm_desc.Words.Low; 1923 TAILQ_FOREACH_MUTABLE(chain, &cm->cm_chain_list, chain_link, 1924 chain1) 1925 hdr.numframes++; 1926 sbuf_bcat(sb, &hdr, sizeof(hdr)); 1927 sbuf_bcat(sb, cm->cm_req, 128); 1928 TAILQ_FOREACH_MUTABLE(chain, &cm->cm_chain_list, chain_link, 1929 chain1) 1930 sbuf_bcat(sb, chain->chain, 128); 1931 } 1932 1933 error = sbuf_finish(sb); 1934 sbuf_delete(sb); 1935 return (error); 1936 } 1937 1938 int 1939 mpr_attach(struct mpr_softc 
*sc) 1940 { 1941 int error; 1942 1943 MPR_FUNCTRACE(sc); 1944 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 1945 1946 lockinit(&sc->mpr_lock, "MPR lock", 0, LK_CANRECURSE); 1947 callout_init_lk(&sc->periodic, &sc->mpr_lock); 1948 callout_init_lk(&sc->device_check_callout, &sc->mpr_lock); 1949 TAILQ_INIT(&sc->event_list); 1950 timevalclear(&sc->lastfail); 1951 1952 if ((error = mpr_transition_ready(sc)) != 0) { 1953 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 1954 "Failed to transition ready\n"); 1955 return (error); 1956 } 1957 1958 sc->facts = kmalloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, 1959 M_ZERO|M_NOWAIT); 1960 if (!sc->facts) { 1961 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 1962 "Cannot allocate memory, exit\n"); 1963 return (ENOMEM); 1964 } 1965 1966 /* 1967 * Get IOC Facts and allocate all structures based on this information. 1968 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC 1969 * Facts. If relevant values have changed in IOC Facts, this function 1970 * will free all of the memory based on IOC Facts and reallocate that 1971 * memory. If this fails, any allocated memory should already be freed. 1972 */ 1973 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) { 1974 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation " 1975 "failed with error %d\n", error); 1976 return (error); 1977 } 1978 1979 /* Start the periodic watchdog check on the IOC Doorbell */ 1980 mpr_periodic(sc); 1981 1982 /* 1983 * The portenable will kick off discovery events that will drive the 1984 * rest of the initialization process. The CAM/SAS module will 1985 * hold up the boot sequence until discovery is complete. 1986 */ 1987 sc->mpr_ich.ich_func = mpr_startup; 1988 sc->mpr_ich.ich_arg = sc; 1989 if (config_intrhook_establish(&sc->mpr_ich) != 0) { 1990 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 1991 "Cannot establish MPR config hook\n"); 1992 error = EINVAL; 1993 } 1994 1995 /* 1996 * Allow IR to shutdown gracefully when shutdown occurs. 1997 */ 1998 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 1999 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 2000 2001 if (sc->shutdown_eh == NULL) 2002 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2003 "shutdown event registration failed\n"); 2004 2005 mpr_setup_sysctl(sc); 2006 2007 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; 2008 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error); 2009 2010 return (error); 2011 } 2012 2013 /* Run through any late-start handlers. */ 2014 static void 2015 mpr_startup(void *arg) 2016 { 2017 struct mpr_softc *sc; 2018 2019 sc = (struct mpr_softc *)arg; 2020 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2021 2022 mpr_lock(sc); 2023 mpr_unmask_intr(sc); 2024 2025 /* initialize device mapping tables */ 2026 mpr_base_static_config_pages(sc); 2027 mpr_mapping_initialize(sc); 2028 mprsas_startup(sc); 2029 mpr_unlock(sc); 2030 2031 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n"); 2032 config_intrhook_disestablish(&sc->mpr_ich); 2033 sc->mpr_ich.ich_arg = NULL; 2034 2035 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2036 } 2037 2038 /* Periodic watchdog. Is called with the driver lock already held. 
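 * It polls the IOC Doorbell for a FAULT state every MPR_PERIODIC_DELAY
 * seconds; an over-temperature fault is treated as fatal, while any other
 * IOC fault triggers a diag reset via mpr_reinit().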
*/ 2039 static void 2040 mpr_periodic(void *arg) 2041 { 2042 struct mpr_softc *sc; 2043 uint32_t db; 2044 2045 sc = (struct mpr_softc *)arg; 2046 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) 2047 return; 2048 2049 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 2050 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 2051 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) == 2052 IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) { 2053 panic("TEMPERATURE FAULT: STOPPING."); 2054 } 2055 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db); 2056 mpr_reinit(sc); 2057 } 2058 2059 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc); 2060 } 2061 2062 static void 2063 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data, 2064 MPI2_EVENT_NOTIFICATION_REPLY *event) 2065 { 2066 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 2067 2068 MPR_DPRINT_EVENT(sc, generic, event); 2069 2070 switch (event->Event) { 2071 case MPI2_EVENT_LOG_DATA: 2072 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n"); 2073 if (sc->mpr_debug & MPR_EVENT) 2074 hexdump(event->EventData, event->EventDataLength, NULL, 2075 0); 2076 break; 2077 case MPI2_EVENT_LOG_ENTRY_ADDED: 2078 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 2079 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " 2080 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 2081 entry->LogSequence); 2082 break; 2083 default: 2084 break; 2085 } 2086 return; 2087 } 2088 2089 static int 2090 mpr_attach_log(struct mpr_softc *sc) 2091 { 2092 uint8_t events[16]; 2093 2094 bzero(events, 16); 2095 setbit(events, MPI2_EVENT_LOG_DATA); 2096 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 2097 2098 mpr_register_events(sc, events, mpr_log_evt_handler, NULL, 2099 &sc->mpr_log_eh); 2100 2101 return (0); 2102 } 2103 2104 static int 2105 mpr_detach_log(struct mpr_softc *sc) 2106 { 2107 2108 if (sc->mpr_log_eh != NULL) 2109 mpr_deregister_events(sc, sc->mpr_log_eh); 2110 return (0); 2111 } 2112 2113 /* 2114 * Free all of the driver resources and detach submodules. Should be called 2115 * without the lock held. 2116 */ 2117 int 2118 mpr_free(struct mpr_softc *sc) 2119 { 2120 int error; 2121 2122 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2123 /* Turn off the watchdog */ 2124 mpr_lock(sc); 2125 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; 2126 mpr_unlock(sc); 2127 /* Lock must not be held for this */ 2128 callout_drain(&sc->periodic); 2129 callout_drain(&sc->device_check_callout); 2130 2131 if (((error = mpr_detach_log(sc)) != 0) || 2132 ((error = mpr_detach_sas(sc)) != 0)) { 2133 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach " 2134 "subsystems, error= %d, exit\n", error); 2135 return (error); 2136 } 2137 2138 mpr_detach_user(sc); 2139 2140 /* Put the IOC back in the READY state. */ 2141 mpr_lock(sc); 2142 if ((error = mpr_transition_ready(sc)) != 0) { 2143 mpr_unlock(sc); 2144 return (error); 2145 } 2146 mpr_unlock(sc); 2147 2148 if (sc->facts != NULL) 2149 kfree(sc->facts, M_MPR); 2150 2151 /* 2152 * Free all buffers that are based on IOC Facts. A Diag Reset may need 2153 * to free these buffers too. 
2154 */ 2155 mpr_iocfacts_free(sc); 2156 2157 if (sc->sysctl_tree != NULL) 2158 sysctl_ctx_free(&sc->sysctl_ctx); 2159 2160 /* Deregister the shutdown function */ 2161 if (sc->shutdown_eh != NULL) 2162 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 2163 2164 lockuninit(&sc->mpr_lock); 2165 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2166 2167 return (0); 2168 } 2169 2170 static __inline void 2171 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) 2172 { 2173 MPR_FUNCTRACE(sc); 2174 2175 if (cm == NULL) { 2176 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); 2177 return; 2178 } 2179 2180 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 2181 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 2182 2183 if (cm->cm_complete != NULL) { 2184 mpr_dprint(sc, MPR_TRACE, 2185 "%s cm %p calling cm_complete %p data %p reply %p\n", 2186 __func__, cm, cm->cm_complete, cm->cm_complete_data, 2187 cm->cm_reply); 2188 cm->cm_complete(sc, cm); 2189 } 2190 2191 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 2192 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); 2193 wakeup(cm); 2194 } 2195 2196 if (sc->io_cmds_active != 0) { 2197 sc->io_cmds_active--; 2198 } else { 2199 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " 2200 "out of sync - resyncing to 0\n"); 2201 } 2202 } 2203 2204 static void 2205 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info) 2206 { 2207 union loginfo_type { 2208 u32 loginfo; 2209 struct { 2210 u32 subcode:16; 2211 u32 code:8; 2212 u32 originator:4; 2213 u32 bus_type:4; 2214 } dw; 2215 }; 2216 union loginfo_type sas_loginfo; 2217 char *originator_str = NULL; 2218 2219 sas_loginfo.loginfo = log_info; 2220 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 2221 return; 2222 2223 /* eat the nexus loss loginfo */ 2224 if (log_info == 0x31170000) 2225 return; 2226 2227 /* eat the loginfos associated with task aborts */ 2228 if ((log_info == 0x30050000) || (log_info == 0x31140000) || 2229 (log_info == 0x31130000)) 2230 return; 2231 2232 switch (sas_loginfo.dw.originator) { 2233 case 0: 2234 originator_str = "IOP"; 2235 break; 2236 case 1: 2237 originator_str = "PL"; 2238 break; 2239 case 2: 2240 originator_str = "IR"; 2241 break; 2242 } 2243 2244 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), " 2245 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, 2246 sas_loginfo.dw.code, sas_loginfo.dw.subcode); 2247 } 2248 2249 static void 2250 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) 2251 { 2252 MPI2DefaultReply_t *mpi_reply; 2253 u16 sc_status; 2254 2255 mpi_reply = (MPI2DefaultReply_t *)reply; 2256 sc_status = le16toh(mpi_reply->IOCStatus); 2257 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 2258 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); 2259 } 2260 2261 void 2262 mpr_intr(void *data) 2263 { 2264 struct mpr_softc *sc; 2265 uint32_t status; 2266 2267 sc = (struct mpr_softc *)data; 2268 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2269 2270 /* 2271 * Check the interrupt status register to flush the bus. This is 2272 * needed for both INTx interrupts and driver-driven polling. 2273 */ 2274 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 2275 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 2276 return; 2277 2278 mpr_lock(sc); 2279 mpr_intr_locked(data); 2280 mpr_unlock(sc); 2281 return; 2282 } 2283 2284 /* 2285 * In theory, MSI/MSI-X interrupts shouldn't need to read any registers on the 2286 * chip. Hopefully this theory is correct.
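 * If so, the MSI handler below can skip the interrupt-status read that
 * mpr_intr() performs for INTx and go straight to mpr_intr_locked().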
2287 */ 2288 void 2289 mpr_intr_msi(void *data) 2290 { 2291 struct mpr_softc *sc; 2292 2293 sc = (struct mpr_softc *)data; 2294 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2295 mpr_lock(sc); 2296 mpr_intr_locked(data); 2297 mpr_unlock(sc); 2298 return; 2299 } 2300 2301 /* 2302 * The locking is overly broad and simplistic, but easy to deal with for now. 2303 */ 2304 void 2305 mpr_intr_locked(void *data) 2306 { 2307 MPI2_REPLY_DESCRIPTORS_UNION *desc; 2308 struct mpr_softc *sc; 2309 struct mpr_command *cm = NULL; 2310 uint8_t flags; 2311 u_int pq; 2312 MPI2_DIAG_RELEASE_REPLY *rel_rep; 2313 mpr_fw_diagnostic_buffer_t *pBuffer; 2314 2315 sc = (struct mpr_softc *)data; 2316 2317 pq = sc->replypostindex; 2318 mpr_dprint(sc, MPR_TRACE, 2319 "%s sc %p starting with replypostindex %u\n", 2320 __func__, sc, sc->replypostindex); 2321 2322 for ( ;; ) { 2323 cm = NULL; 2324 desc = &sc->post_queue[sc->replypostindex]; 2325 flags = desc->Default.ReplyFlags & 2326 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2327 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || 2328 (le32toh(desc->Words.High) == 0xffffffff)) 2329 break; 2330 2331 /* increment the replypostindex now, so that event handlers 2332 * and cm completion handlers which decide to do a diag 2333 * reset can zero it without it getting incremented again 2334 * afterwards, and we break out of this loop on the next 2335 * iteration since the reply post queue has been cleared to 2336 * 0xFF and all descriptors look unused (which they are). 2337 */ 2338 if (++sc->replypostindex >= sc->pqdepth) 2339 sc->replypostindex = 0; 2340 2341 switch (flags) { 2342 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 2343 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: 2344 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS: 2345 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; 2346 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, 2347 ("command not inqueue\n")); 2348 cm->cm_state = MPR_CM_STATE_BUSY; 2349 cm->cm_reply = NULL; 2350 break; 2351 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 2352 { 2353 uint32_t baddr; 2354 uint8_t *reply; 2355 2356 /* 2357 * Re-compose the reply address from the address 2358 * sent back from the chip. The ReplyFrameAddress 2359 * is the lower 32 bits of the physical address of 2360 * a particular reply frame. Convert that address to 2361 * host format, and then use that to provide the 2362 * offset against the virtual address base 2363 * (sc->reply_frames). 2364 */ 2365 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 2366 reply = sc->reply_frames + 2367 (baddr - ((uint32_t)sc->reply_busaddr)); 2368 /* 2369 * Make sure the reply we got back is in a valid 2370 * range. If not, go ahead and panic here, since 2371 * we'll probably panic as soon as we dereference the 2372 * reply pointer anyway. 2373 */ 2374 if ((reply < sc->reply_frames) 2375 || (reply > (sc->reply_frames + 2376 (sc->fqdepth * sc->replyframesz)))) { 2377 kprintf("%s: WARNING: reply %p out of range!\n", 2378 __func__, reply); 2379 kprintf("%s: reply_frames %p, fqdepth %d, " 2380 "frame size %d\n", __func__, 2381 sc->reply_frames, sc->fqdepth, 2382 sc->replyframesz); 2383 kprintf("%s: baddr %#x,\n", __func__, baddr); 2384 /* LSI-TODO.
See Linux Code for Graceful exit */ 2385 panic("Reply address out of range"); 2386 } 2387 if (le16toh(desc->AddressReply.SMID) == 0) { 2388 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 2389 MPI2_FUNCTION_DIAG_BUFFER_POST) { 2390 /* 2391 * If SMID is 0 for Diag Buffer Post, 2392 * this implies that the reply is due to 2393 * a release function with a status that 2394 * the buffer has been released. Set 2395 * the buffer flags accordingly. 2396 */ 2397 rel_rep = 2398 (MPI2_DIAG_RELEASE_REPLY *)reply; 2399 if ((le16toh(rel_rep->IOCStatus) & 2400 MPI2_IOCSTATUS_MASK) == 2401 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 2402 { 2403 pBuffer = 2404 &sc->fw_diag_buffer_list[ 2405 rel_rep->BufferType]; 2406 pBuffer->valid_data = TRUE; 2407 pBuffer->owned_by_firmware = 2408 FALSE; 2409 pBuffer->immediate = FALSE; 2410 } 2411 } else 2412 mpr_dispatch_event(sc, baddr, 2413 (MPI2_EVENT_NOTIFICATION_REPLY *) 2414 reply); 2415 } else { 2416 cm = &sc->commands[ 2417 le16toh(desc->AddressReply.SMID)]; 2418 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, 2419 ("command not inqueue\n")); 2420 cm->cm_state = MPR_CM_STATE_BUSY; 2421 cm->cm_reply = reply; 2422 cm->cm_reply_data = 2423 le32toh(desc->AddressReply. 2424 ReplyFrameAddress); 2425 } 2426 break; 2427 } 2428 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 2429 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 2430 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 2431 default: 2432 /* Unhandled */ 2433 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n", 2434 desc->Default.ReplyFlags); 2435 cm = NULL; 2436 break; 2437 } 2438 2439 if (cm != NULL) { 2440 // Print Error reply frame 2441 if (cm->cm_reply) 2442 mpr_display_reply_info(sc,cm->cm_reply); 2443 mpr_complete_command(sc, cm); 2444 } 2445 2446 desc->Words.Low = 0xffffffff; 2447 desc->Words.High = 0xffffffff; 2448 } 2449 2450 if (pq != sc->replypostindex) { 2451 mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n", 2452 __func__, sc, sc->replypostindex); 2453 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 2454 sc->replypostindex); 2455 } 2456 2457 return; 2458 } 2459 2460 static void 2461 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, 2462 MPI2_EVENT_NOTIFICATION_REPLY *reply) 2463 { 2464 struct mpr_event_handle *eh; 2465 int event, handled = 0; 2466 2467 event = le16toh(reply->Event); 2468 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2469 if (isset(eh->mask, event)) { 2470 eh->callback(sc, data, reply); 2471 handled++; 2472 } 2473 } 2474 2475 if (handled == 0) 2476 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", 2477 le16toh(event)); 2478 2479 /* 2480 * This is the only place that the event/reply should be freed. 2481 * Anything wanting to hold onto the event data should have 2482 * already copied it into their own storage. 2483 */ 2484 mpr_free_reply(sc, data); 2485 } 2486 2487 static void 2488 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) 2489 { 2490 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2491 2492 if (cm->cm_reply) 2493 MPR_DPRINT_EVENT(sc, generic, 2494 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 2495 2496 mpr_free_command(sc, cm); 2497 2498 /* next, send a port enable */ 2499 mprsas_startup(sc); 2500 } 2501 2502 /* 2503 * For both register_events and update_events, the caller supplies a bitmap 2504 * of events that it _wants_. These functions then turn that into a bitmask 2505 * suitable for the controller. 
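 * For example (illustrative), a handler that only wants SAS topology events
 * sets just that event's bit in the mask it passes in; mpr_update_events()
 * then starts from an all-ones sc->event_mask (everything masked) and clears
 * the bit for every event that any registered handler wants before copying
 * the result into the request's EventMasks.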
2506 */ 2507 int 2508 mpr_register_events(struct mpr_softc *sc, uint8_t *mask, 2509 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) 2510 { 2511 struct mpr_event_handle *eh; 2512 int error = 0; 2513 2514 eh = kmalloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); 2515 if (!eh) { 2516 mpr_dprint(sc, MPR_EVENT|MPR_ERROR, 2517 "Cannot allocate event memory\n"); 2518 return (ENOMEM); 2519 } 2520 eh->callback = cb; 2521 eh->data = data; 2522 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 2523 if (mask != NULL) 2524 error = mpr_update_events(sc, eh, mask); 2525 *handle = eh; 2526 2527 return (error); 2528 } 2529 2530 int 2531 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, 2532 uint8_t *mask) 2533 { 2534 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2535 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL; 2536 struct mpr_command *cm = NULL; 2537 struct mpr_event_handle *eh; 2538 int error, i; 2539 2540 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2541 2542 if ((mask != NULL) && (handle != NULL)) 2543 bcopy(mask, &handle->mask[0], 16); 2544 memset(sc->event_mask, 0xff, 16); 2545 2546 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2547 for (i = 0; i < 16; i++) 2548 sc->event_mask[i] &= ~eh->mask[i]; 2549 } 2550 2551 if ((cm = mpr_alloc_command(sc)) == NULL) 2552 return (EBUSY); 2553 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2554 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2555 evtreq->MsgFlags = 0; 2556 evtreq->SASBroadcastPrimitiveMasks = 0; 2557 #ifdef MPR_DEBUG_ALL_EVENTS 2558 { 2559 u_char fullmask[16]; 2560 memset(fullmask, 0x00, 16); 2561 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2562 } 2563 #else 2564 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2565 #endif 2566 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2567 cm->cm_data = NULL; 2568 2569 error = mpr_request_polled(sc, &cm); 2570 if (cm != NULL) 2571 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 2572 if ((reply == NULL) || 2573 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 2574 error = ENXIO; 2575 2576 if (reply) 2577 MPR_DPRINT_EVENT(sc, generic, reply); 2578 2579 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); 2580 2581 if (cm != NULL) 2582 mpr_free_command(sc, cm); 2583 return (error); 2584 } 2585 2586 static int 2587 mpr_reregister_events(struct mpr_softc *sc) 2588 { 2589 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2590 struct mpr_command *cm; 2591 struct mpr_event_handle *eh; 2592 int error, i; 2593 2594 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2595 2596 /* first, reregister events */ 2597 2598 memset(sc->event_mask, 0xff, 16); 2599 2600 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2601 for (i = 0; i < 16; i++) 2602 sc->event_mask[i] &= ~eh->mask[i]; 2603 } 2604 2605 if ((cm = mpr_alloc_command(sc)) == NULL) 2606 return (EBUSY); 2607 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2608 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2609 evtreq->MsgFlags = 0; 2610 evtreq->SASBroadcastPrimitiveMasks = 0; 2611 #ifdef MPR_DEBUG_ALL_EVENTS 2612 { 2613 u_char fullmask[16]; 2614 memset(fullmask, 0x00, 16); 2615 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2616 } 2617 #else 2618 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2619 #endif 2620 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2621 cm->cm_data = NULL; 2622 cm->cm_complete = mpr_reregister_events_complete; 2623 2624 error = mpr_map_command(sc, cm); 
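	/*
	 * On success, completion is asynchronous:
	 * mpr_reregister_events_complete() frees the command and then kicks
	 * off a port enable via mprsas_startup().
	 */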
2625 2626 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__, 2627 error); 2628 return (error); 2629 } 2630 2631 int 2632 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle) 2633 { 2634 2635 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 2636 kfree(handle, M_MPR); 2637 return (mpr_update_events(sc, NULL, NULL)); 2638 } 2639 2640 /** 2641 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a 2642 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry 2643 * of the NVMe message (PRP1). If the data buffer is small enough to be described 2644 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to 2645 * describe a larger data buffer. If the data buffer is too large to describe 2646 * using the two PRP entries inside the NVMe message, then PRP1 describes the 2647 * first data memory segment, and PRP2 contains a pointer to a PRP list located 2648 * elsewhere in memory to describe the remaining data memory segments. The PRP 2649 * list will be contiguous. 2650 * 2651 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP 2652 * consists of a list of PRP entries to describe a number of noncontiguous 2653 * physical memory segments as a single memory buffer, just as an SGL does. Note, 2654 * however, that this function is only used by the IOCTL call, so the memory 2655 * given will be guaranteed to be contiguous. There is no need to translate 2656 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous 2657 * space that is one page size each. 2658 * 2659 * Each NVMe message contains two PRP entries. The first (PRP1) either contains 2660 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains 2661 * the second PRP element if the memory being described fits within 2 PRP 2662 * entries, or a PRP list pointer if the PRP spans more than two entries. 2663 * 2664 * A PRP list pointer contains the address of a PRP list, structured as a linear 2665 * array of PRP entries. Each PRP entry in this list describes a segment of 2666 * physical memory. 2667 * 2668 * Each 64-bit PRP entry comprises an address and an offset field. The address 2669 * always points to the beginning of a PAGE_SIZE physical memory page, and the 2670 * offset describes where within that page the memory segment begins. Only the 2671 * first element in a PRP list may contain a non-zero offset, implying that all 2672 * memory segments following the first begin at the start of a PAGE_SIZE page. 2673 * 2674 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory, 2675 * with exceptions for the first and last elements in the list. If the memory 2676 * being described by the list begins at a non-zero offset within the first page, 2677 * then the first PRP element will contain a non-zero offset indicating where the 2678 * region begins within the page. The last memory segment may end before the end 2679 * of the PAGE_SIZE segment, depending upon the overall size of the memory being 2680 * described by the PRP list. 2681 * 2682 * Since PRP entries lack any indication of size, the overall data buffer length 2683 * is used to determine where the end of the data memory buffer is located, and 2684 * how many PRP entries are required to describe it. 2685 * 2686 * Returns nothing.
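 *
 * Worked example (illustrative, assuming 4KB pages): a 12KB buffer that
 * starts 512 bytes into a page needs four data-describing PRP entries. PRP1
 * holds the buffer's exact start address and covers the first 3584 bytes;
 * since more than one page then remains, PRP2 holds a PRP list pointer, and
 * the list's three entries cover 4096, 4096, and 512 bytes respectively.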
2687 */ 2688 void 2689 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, 2690 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data, 2691 uint32_t data_in_sz, uint32_t data_out_sz) 2692 { 2693 int prp_size = PRP_ENTRY_SIZE; 2694 uint64_t *prp_entry, *prp1_entry, *prp2_entry; 2695 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys; 2696 uint32_t offset, entry_len, page_mask_result, page_mask; 2697 bus_addr_t paddr; 2698 size_t length; 2699 struct mpr_prp_page *prp_page_info = NULL; 2700 2701 /* 2702 * Not all commands require a data transfer. If no data, just return 2703 * without constructing any PRP. 2704 */ 2705 if (!data_in_sz && !data_out_sz) 2706 return; 2707 2708 /* 2709 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is 2710 * located at a 24 byte offset from the start of the NVMe command. Then 2711 * set the current PRP entry pointer to PRP1. 2712 */ 2713 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2714 NVME_CMD_PRP1_OFFSET); 2715 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2716 NVME_CMD_PRP2_OFFSET); 2717 prp_entry = prp1_entry; 2718 2719 /* 2720 * For the PRP entries, use the specially allocated buffer of 2721 * contiguous memory. PRP Page allocation failures should not happen 2722 * because there should be enough PRP page buffers to account for the 2723 * possible NVMe QDepth. 2724 */ 2725 prp_page_info = mpr_alloc_prp_page(sc); 2726 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2727 "used for building a native NVMe SGL.\n", __func__)); 2728 prp_page = (uint64_t *)prp_page_info->prp_page; 2729 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 2730 2731 /* 2732 * Insert the allocated PRP page into the command's PRP page list. This 2733 * will be freed when the command is freed. 2734 */ 2735 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 2736 2737 /* 2738 * Check if we are within 1 entry of a page boundary; we don't want our 2739 * first entry to be a PRP List entry. 2740 */ 2741 page_mask = PAGE_SIZE - 1; 2742 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) & 2743 page_mask; 2744 if (!page_mask_result) 2745 { 2746 /* Bump up to the next page boundary. */ 2747 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size); 2748 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys + 2749 prp_size); 2750 } 2751 2752 /* 2753 * Set PRP physical pointer, which initially points to the current PRP 2754 * DMA memory page. 2755 */ 2756 prp_entry_phys = prp_page_phys; 2757 2758 /* Get physical address and length of the data buffer. */ 2759 paddr = (bus_addr_t)(uintptr_t)data; 2760 if (data_in_sz) 2761 length = data_in_sz; 2762 else 2763 length = data_out_sz; 2764 2765 /* Loop while the length is not zero. */ 2766 while (length) 2767 { 2768 /* 2769 * Check if we need to put a list pointer here if we are at a page 2770 * boundary - prp_size (8 bytes). 2771 */ 2772 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys + 2773 prp_size) & page_mask; 2774 if (!page_mask_result) 2775 { 2776 /* 2777 * This is the last entry in a PRP List, so we need to 2778 * put a PRP list pointer here. What this does is: 2779 * - bump the current memory pointer to the next 2780 * address, which will be the next full page. 2781 * - set the PRP Entry to point to that page. This is 2782 * now the PRP List pointer. 2783 * - bump the PRP Entry pointer to the start of the next 2784 * page.
Since all of this PRP memory is contiguous, 2785 * no need to get a new page - it's just the next 2786 * address. 2787 */ 2788 prp_entry_phys++; 2789 *prp_entry = 2790 htole64((uint64_t)(uintptr_t)prp_entry_phys); 2791 prp_entry++; 2792 } 2793 2794 /* Need to handle if entry will be part of a page. */ 2795 offset = (uint32_t)paddr & page_mask; 2796 entry_len = PAGE_SIZE - offset; 2797 2798 if (prp_entry == prp1_entry) 2799 { 2800 /* 2801 * Must fill in the first PRP pointer (PRP1) before 2802 * moving on. 2803 */ 2804 *prp1_entry = htole64((uint64_t)paddr); 2805 2806 /* 2807 * Now point to the second PRP entry within the 2808 * command (PRP2). 2809 */ 2810 prp_entry = prp2_entry; 2811 } 2812 else if (prp_entry == prp2_entry) 2813 { 2814 /* 2815 * Should the PRP2 entry be a PRP List pointer or just a 2816 * regular PRP pointer? If there is more than one more 2817 * page of data, must use a PRP List pointer. 2818 */ 2819 if (length > PAGE_SIZE) 2820 { 2821 /* 2822 * PRP2 will contain a PRP List pointer because 2823 * more PRP's are needed with this command. The 2824 * list will start at the beginning of the 2825 * contiguous buffer. 2826 */ 2827 *prp2_entry = 2828 htole64( 2829 (uint64_t)(uintptr_t)prp_entry_phys); 2830 2831 /* 2832 * The next PRP Entry will be the start of the 2833 * first PRP List. 2834 */ 2835 prp_entry = prp_page; 2836 } 2837 else 2838 { 2839 /* 2840 * After this, the PRP Entries are complete. 2841 * This command uses 2 PRP's and no PRP list. 2842 */ 2843 *prp2_entry = htole64((uint64_t)paddr); 2844 } 2845 } 2846 else 2847 { 2848 /* 2849 * Put entry in list and bump the addresses. 2850 * 2851 * After PRP1 and PRP2 are filled in, this will fill in 2852 * all remaining PRP entries in a PRP List, one per each 2853 * time through the loop. 2854 */ 2855 *prp_entry = htole64((uint64_t)paddr); 2856 prp_entry++; 2857 prp_entry_phys++; 2858 } 2859 2860 /* 2861 * Bump the phys address of the command's data buffer by the 2862 * entry_len. 2863 */ 2864 paddr += entry_len; 2865 2866 /* Decrement length accounting for last partial page. */ 2867 if (entry_len > length) 2868 length = 0; 2869 else 2870 length -= entry_len; 2871 } 2872 } 2873 2874 /* 2875 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to 2876 * determine if the driver needs to build a native SGL. If so, that native SGL 2877 * is built in the contiguous buffers allocated especially for PCIe SGL 2878 * creation. If the driver will not build a native SGL, return TRUE and a 2879 * normal IEEE SGL will be built. Currently this routine supports NVMe devices 2880 * only. 2881 * 2882 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built. 
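 *
 * Illustrative example of the 4/5 page rule below (assuming 4KB pages and a
 * single busdma segment): an 18KB transfer leaves a 2KB residual beyond four
 * pages. If the buffer starts 1KB into a page, the first page carries 3KB
 * (>= the 2KB residual), so an IEEE SGL is still acceptable; if it starts
 * 3.5KB into a page, the first page carries only 512 bytes (< 2KB) and a
 * native SGL must be built.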
2883 */ 2884 static int 2885 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, 2886 bus_dma_segment_t *segs, int segs_left) 2887 { 2888 uint32_t i, sge_dwords, length, offset, entry_len; 2889 uint32_t num_entries, buff_len = 0, sges_in_segment; 2890 uint32_t page_mask, page_mask_result, *curr_buff; 2891 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; 2892 uint32_t first_page_data_size, end_residual; 2893 uint64_t *msg_phys; 2894 bus_addr_t paddr; 2895 int build_native_sgl = 0, first_prp_entry; 2896 int prp_size = PRP_ENTRY_SIZE; 2897 Mpi25IeeeSgeChain64_t *main_chain_element = NULL; 2898 struct mpr_prp_page *prp_page_info = NULL; 2899 2900 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2901 2902 /* 2903 * Add up the sizes of each segment length to get the total transfer 2904 * size, which will be checked against the Maximum Data Transfer Size. 2905 * If the data transfer length exceeds the MDTS for this device, just 2906 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O 2907 * up into multiple I/O's. [nvme_mdts = 0 means unlimited] 2908 */ 2909 for (i = 0; i < segs_left; i++) 2910 buff_len += htole32(segs[i].ds_len); 2911 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) 2912 return 1; 2913 2914 /* Create page_mask (to get offset within page) */ 2915 page_mask = PAGE_SIZE - 1; 2916 2917 /* 2918 * Check if the number of elements exceeds the max number that can be 2919 * put in the main message frame (H/W can only translate an SGL that 2920 * is contained entirely in the main message frame). 2921 */ 2922 sges_in_segment = (sc->reqframesz - 2923 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); 2924 if (segs_left > sges_in_segment) 2925 build_native_sgl = 1; 2926 else 2927 { 2928 /* 2929 * NVMe uses one PRP for each physical page (or part of physical 2930 * page). 2931 * if 4 pages or less then IEEE is OK 2932 * if > 5 pages then we need to build a native SGL 2933 * if > 4 and <= 5 pages, then check the physical address of 2934 * the first SG entry, then if this first size in the page 2935 * is >= the residual beyond 4 pages then use IEEE, 2936 * otherwise use native SGL 2937 */ 2938 if (buff_len > (PAGE_SIZE * 5)) 2939 build_native_sgl = 1; 2940 else if ((buff_len > (PAGE_SIZE * 4)) && 2941 (buff_len <= (PAGE_SIZE * 5)) ) 2942 { 2943 msg_phys = (uint64_t *)(uintptr_t)segs[0].ds_addr; 2944 first_page_offset = 2945 ((uint32_t)(uint64_t)(uintptr_t)msg_phys & 2946 page_mask); 2947 first_page_data_size = PAGE_SIZE - first_page_offset; 2948 end_residual = buff_len % PAGE_SIZE; 2949 2950 /* 2951 * If offset into first page pushes the end of the data 2952 * beyond end of the 5th page, we need the extra PRP 2953 * list. 2954 */ 2955 if (first_page_data_size < end_residual) 2956 build_native_sgl = 1; 2957 2958 /* 2959 * Check if first SG entry size is < residual beyond 4 2960 * pages. 2961 */ 2962 if (htole32(segs[0].ds_len) < 2963 (buff_len - (PAGE_SIZE * 4))) 2964 build_native_sgl = 1; 2965 } 2966 } 2967 2968 /* check if native SGL is needed */ 2969 if (!build_native_sgl) 2970 return 1; 2971 2972 /* 2973 * Native SGL is needed. 2974 * Put a chain element in main message frame that points to the first 2975 * chain buffer. 2976 * 2977 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 2978 * a native SGL. 
2979 */ 2980 2981 /* Set main message chain element pointer */ 2982 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; 2983 2984 /* 2985 * For NVMe the chain element needs to be the 2nd SGL entry in the main 2986 * message. 2987 */ 2988 main_chain_element = (Mpi25IeeeSgeChain64_t *) 2989 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); 2990 2991 /* 2992 * For the PRP entries, use the specially allocated buffer of 2993 * contiguous memory. PRP Page allocation failures should not happen 2994 * because there should be enough PRP page buffers to account for the 2995 * possible NVMe QDepth. 2996 */ 2997 prp_page_info = mpr_alloc_prp_page(sc); 2998 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2999 "used for building a native NVMe SGL.\n", __func__)); 3000 curr_buff = (uint32_t *)prp_page_info->prp_page; 3001 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 3002 3003 /* 3004 * Insert the allocated PRP page into the command's PRP page list. This 3005 * will be freed when the command is freed. 3006 */ 3007 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 3008 3009 /* 3010 * Check if we are within 1 entry of a page boundary we don't want our 3011 * first entry to be a PRP List entry. 3012 */ 3013 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) & 3014 page_mask; 3015 if (!page_mask_result) { 3016 /* Bump up to next page boundary. */ 3017 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size); 3018 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); 3019 } 3020 3021 /* Fill in the chain element and make it an NVMe segment type. */ 3022 main_chain_element->Address.High = 3023 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32)); 3024 main_chain_element->Address.Low = 3025 htole32((uint32_t)(uintptr_t)msg_phys); 3026 main_chain_element->NextChainOffset = 0; 3027 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3028 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3029 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 3030 3031 /* Set SGL pointer to start of contiguous PCIe buffer. */ 3032 ptr_sgl = curr_buff; 3033 sge_dwords = 2; 3034 num_entries = 0; 3035 3036 /* 3037 * NVMe has a very convoluted PRP format. One PRP is required for each 3038 * page or partial page. We need to split up OS SG entries if they are 3039 * longer than one page or cross a page boundary. We also have to insert 3040 * a PRP list pointer entry as the last entry in each physical page of 3041 * the PRP list. 3042 * 3043 * NOTE: The first PRP "entry" is actually placed in the first SGL entry 3044 * in the main message in IEEE 64 format. The 2nd entry in the main 3045 * message is the chain element, and the rest of the PRP entries are 3046 * built in the contiguous PCIe buffer. 3047 */ 3048 first_prp_entry = 1; 3049 ptr_first_sgl = (uint32_t *)cm->cm_sge; 3050 3051 for (i = 0; i < segs_left; i++) { 3052 /* Get physical address and length of this SG entry. */ 3053 paddr = segs[i].ds_addr; 3054 length = segs[i].ds_len; 3055 3056 /* 3057 * Check whether a given SGE buffer lies on a non-PAGED 3058 * boundary if this is not the first page. If so, this is not 3059 * expected so have FW build the SGL. 3060 */ 3061 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) { 3062 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while " 3063 "building NVMe PRPs, low address is 0x%x\n", 3064 (uint32_t)paddr); 3065 return 1; 3066 } 3067 3068 /* Apart from last SGE, if any other SGE boundary is not page 3069 * aligned then it means that hole exists. 
Existence of hole 3070 * leads to data corruption. So fallback to IEEE SGEs. 3071 */ 3072 if (i != (segs_left - 1)) { 3073 if (((uint32_t)paddr + length) & page_mask) { 3074 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE " 3075 "boundary while building NVMe PRPs, low " 3076 "address: 0x%x and length: %u\n", 3077 (uint32_t)paddr, length); 3078 return 1; 3079 } 3080 } 3081 3082 /* Loop while the length is not zero. */ 3083 while (length) { 3084 /* 3085 * Check if we need to put a list pointer here if we are 3086 * at page boundary - prp_size. 3087 */ 3088 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl + 3089 prp_size) & page_mask; 3090 if (!page_mask_result) { 3091 /* 3092 * Need to put a PRP list pointer here. 3093 */ 3094 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3095 prp_size); 3096 *ptr_sgl = htole32((uintptr_t)msg_phys); 3097 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t) 3098 msg_phys >> 32); 3099 ptr_sgl += sge_dwords; 3100 num_entries++; 3101 } 3102 3103 /* Need to handle if entry will be part of a page. */ 3104 offset = (uint32_t)paddr & page_mask; 3105 entry_len = PAGE_SIZE - offset; 3106 if (first_prp_entry) { 3107 /* 3108 * Put IEEE entry in first SGE in main message. 3109 * (Simple element, System addr, not end of 3110 * list.) 3111 */ 3112 *ptr_first_sgl = htole32((uint32_t)paddr); 3113 *(ptr_first_sgl + 1) = 3114 htole32((uint32_t)((uint64_t)paddr >> 32)); 3115 *(ptr_first_sgl + 2) = htole32(entry_len); 3116 *(ptr_first_sgl + 3) = 0; 3117 3118 /* No longer the first PRP entry. */ 3119 first_prp_entry = 0; 3120 } else { 3121 /* Put entry in list. */ 3122 *ptr_sgl = htole32((uint32_t)paddr); 3123 *(ptr_sgl + 1) = 3124 htole32((uint32_t)((uint64_t)paddr >> 32)); 3125 3126 /* Bump ptr_sgl, msg_phys, and num_entries. */ 3127 ptr_sgl += sge_dwords; 3128 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3129 prp_size); 3130 num_entries++; 3131 } 3132 3133 /* Bump the phys address by the entry_len. */ 3134 paddr += entry_len; 3135 3136 /* Decrement length accounting for last partial page. */ 3137 if (entry_len > length) 3138 length = 0; 3139 else 3140 length -= entry_len; 3141 } 3142 } 3143 3144 /* Set chain element Length. */ 3145 main_chain_element->Length = htole32(num_entries * prp_size); 3146 3147 /* Return 0, indicating we built a native SGL. */ 3148 return 0; 3149 } 3150 3151 /* 3152 * Add a chain element as the next SGE for the specified command. 3153 * Reset cm_sge and cm_sgesize to indicate all the available space. Chains are 3154 * only required for IEEE commands. Therefore there is no code for commands 3155 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands 3156 * shouldn't be requesting chains). 3157 */ 3158 static int 3159 mpr_add_chain(struct mpr_command *cm, int segsleft) 3160 { 3161 struct mpr_softc *sc = cm->cm_sc; 3162 MPI2_REQUEST_HEADER *req; 3163 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; 3164 struct mpr_chain *chain; 3165 int sgc_size, current_segs, rem_segs, segs_per_frame; 3166 uint8_t next_chain_offset = 0; 3167 3168 /* 3169 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3 3170 * only IEEE commands should be requesting chains. Return some error 3171 * code other than 0. 
3172 */ 3173 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { 3174 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " 3175 "an MPI SGL.\n"); 3176 return(ENOBUFS); 3177 } 3178 3179 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); 3180 if (cm->cm_sglsize < sgc_size) 3181 panic("MPR: Need SGE Error Code\n"); 3182 3183 chain = mpr_alloc_chain(cm->cm_sc); 3184 if (chain == NULL) 3185 return (ENOBUFS); 3186 3187 /* 3188 * Note: a double-linked list is used to make it easier to walk for 3189 * debugging. 3190 */ 3191 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 3192 3193 /* 3194 * Need to know if the number of frames left is more than 1 or not. If 3195 * more than 1 frame is required, NextChainOffset will need to be set, 3196 * which will just be the last segment of the frame. 3197 */ 3198 rem_segs = 0; 3199 if (cm->cm_sglsize < (sgc_size * segsleft)) { 3200 /* 3201 * rem_segs is the number of segments remaining after the 3202 * segments that will go into the current frame. Since it is 3203 * known that at least one more frame is required, account for 3204 * the chain element. To know if more than one more frame is 3205 * required, just check if there will be a remainder after using 3206 * the current frame (with this chain) and the next frame. If 3207 * so, the NextChainOffset must be the last element of the next 3208 * frame. 3209 */ 3210 current_segs = (cm->cm_sglsize / sgc_size) - 1; 3211 rem_segs = segsleft - current_segs; 3212 segs_per_frame = sc->chain_frame_size / sgc_size; 3213 if (rem_segs > segs_per_frame) { 3214 next_chain_offset = segs_per_frame - 1; 3215 } 3216 } 3217 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; 3218 ieee_sgc->Length = next_chain_offset ? 3219 htole32((uint32_t)sc->chain_frame_size) : 3220 htole32((uint32_t)rem_segs * (uint32_t)sgc_size); 3221 ieee_sgc->NextChainOffset = next_chain_offset; 3222 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3223 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3224 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); 3225 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); 3226 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; 3227 req = (MPI2_REQUEST_HEADER *)cm->cm_req; 3228 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; 3229 3230 cm->cm_sglsize = sc->chain_frame_size; 3231 return (0); 3232 } 3233 3234 /* 3235 * Add one scatter-gather element to the scatter-gather list for a command. 3236 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the 3237 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a 3238 * chain, so don't consider any chain additions. 3239 */ 3240 int 3241 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, 3242 int segsleft) 3243 { 3244 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3245 u32 sge_flags; 3246 3247 /* 3248 * case 1: >=1 more segment, no room for anything (error) 3249 * case 2: 1 more segment and enough room for it 3250 */ 3251 3252 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { 3253 mpr_dprint(cm->cm_sc, MPR_ERROR, 3254 "%s: warning: Not enough room for MPI SGL in frame.\n", 3255 __func__); 3256 return(ENOBUFS); 3257 } 3258 3259 KASSERT(segsleft == 1, 3260 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", 3261 segsleft)); 3262 3263 /* 3264 * There is one more segment left to add for the MPI SGL and there is 3265 * enough room in the frame to add it.
This is the normal case because 3266 * MPI SGL's don't have chains, otherwise something is wrong. 3267 * 3268 * If this is a bi-directional request, need to account for that 3269 * here. Save the pre-filled sge values. These will be used 3270 * either for the 2nd SGL or for a single direction SGL. If 3271 * cm_out_len is non-zero, this is a bi-directional request, so 3272 * fill in the OUT SGL first, then the IN SGL, otherwise just 3273 * fill in the IN SGL. Note that at this time, when filling in 3274 * 2 SGL's for a bi-directional request, they both use the same 3275 * DMA buffer (same cm command). 3276 */ 3277 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 3278 saved_address_low = sge->Address.Low; 3279 saved_address_high = sge->Address.High; 3280 if (cm->cm_out_len) { 3281 sge->FlagsLength = cm->cm_out_len | 3282 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3283 MPI2_SGE_FLAGS_END_OF_BUFFER | 3284 MPI2_SGE_FLAGS_HOST_TO_IOC | 3285 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3286 MPI2_SGE_FLAGS_SHIFT); 3287 cm->cm_sglsize -= len; 3288 /* Endian Safe code */ 3289 sge_flags = sge->FlagsLength; 3290 sge->FlagsLength = htole32(sge_flags); 3291 sge->Address.High = htole32(sge->Address.High); 3292 sge->Address.Low = htole32(sge->Address.Low); 3293 bcopy(sge, cm->cm_sge, len); 3294 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3295 } 3296 sge->FlagsLength = saved_buf_len | 3297 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3298 MPI2_SGE_FLAGS_END_OF_BUFFER | 3299 MPI2_SGE_FLAGS_LAST_ELEMENT | 3300 MPI2_SGE_FLAGS_END_OF_LIST | 3301 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3302 MPI2_SGE_FLAGS_SHIFT); 3303 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { 3304 sge->FlagsLength |= 3305 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 3306 MPI2_SGE_FLAGS_SHIFT); 3307 } else { 3308 sge->FlagsLength |= 3309 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 3310 MPI2_SGE_FLAGS_SHIFT); 3311 } 3312 sge->Address.Low = saved_address_low; 3313 sge->Address.High = saved_address_high; 3314 3315 cm->cm_sglsize -= len; 3316 /* Endian Safe code */ 3317 sge_flags = sge->FlagsLength; 3318 sge->FlagsLength = htole32(sge_flags); 3319 sge->Address.High = htole32(sge->Address.High); 3320 sge->Address.Low = htole32(sge->Address.Low); 3321 bcopy(sge, cm->cm_sge, len); 3322 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3323 return (0); 3324 } 3325 3326 /* 3327 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- 3328 * gather list for a command. Maintain cm_sglsize and cm_sge as the 3329 * remaining size and pointer to the next SGE to fill in, respectively. 3330 */ 3331 int 3332 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) 3333 { 3334 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; 3335 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); 3336 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3337 uint32_t sge_length; 3338 3339 /* 3340 * case 1: No room for chain or segment (error). 3341 * case 2: Two or more segments left but only room for chain. 3342 * case 3: Last segment and room for it, so set flags. 3343 */ 3344 3345 /* 3346 * There should be room for at least one element, or there is a big 3347 * problem. 
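 * Case 2 (illustrative): with two or more segments left but room for only
 * one more IEEE element in the current frame, mpr_add_chain() is called
 * first so that the remaining simple elements continue in a fresh chain
 * frame.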
3348 */ 3349 if (cm->cm_sglsize < ieee_sge_size) 3350 panic("MPR: Need SGE Error Code\n"); 3351 3352 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { 3353 if ((error = mpr_add_chain(cm, segsleft)) != 0) 3354 return (error); 3355 } 3356 3357 if (segsleft == 1) { 3358 /* 3359 * If this is a bi-directional request, need to account for that 3360 * here. Save the pre-filled sge values. These will be used 3361 * either for the 2nd SGL or for a single direction SGL. If 3362 * cm_out_len is non-zero, this is a bi-directional request, so 3363 * fill in the OUT SGL first, then the IN SGL, otherwise just 3364 * fill in the IN SGL. Note that at this time, when filling in 3365 * 2 SGL's for a bi-directional request, they both use the same 3366 * DMA buffer (same cm command). 3367 */ 3368 saved_buf_len = sge->Length; 3369 saved_address_low = sge->Address.Low; 3370 saved_address_high = sge->Address.High; 3371 if (cm->cm_out_len) { 3372 sge->Length = cm->cm_out_len; 3373 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3374 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3375 cm->cm_sglsize -= ieee_sge_size; 3376 /* Endian Safe code */ 3377 sge_length = sge->Length; 3378 sge->Length = htole32(sge_length); 3379 sge->Address.High = htole32(sge->Address.High); 3380 sge->Address.Low = htole32(sge->Address.Low); 3381 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3382 cm->cm_sge = 3383 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3384 ieee_sge_size); 3385 } 3386 sge->Length = saved_buf_len; 3387 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3388 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3389 MPI25_IEEE_SGE_FLAGS_END_OF_LIST); 3390 sge->Address.Low = saved_address_low; 3391 sge->Address.High = saved_address_high; 3392 } 3393 3394 cm->cm_sglsize -= ieee_sge_size; 3395 /* Endian Safe code */ 3396 sge_length = sge->Length; 3397 sge->Length = htole32(sge_length); 3398 sge->Address.High = htole32(sge->Address.High); 3399 sge->Address.Low = htole32(sge->Address.Low); 3400 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3401 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3402 ieee_sge_size); 3403 return (0); 3404 } 3405 3406 /* 3407 * Add one dma segment to the scatter-gather list for a command. 3408 */ 3409 int 3410 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, 3411 int segsleft) 3412 { 3413 MPI2_SGE_SIMPLE64 sge; 3414 MPI2_IEEE_SGE_SIMPLE64 ieee_sge; 3415 3416 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { 3417 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3418 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3419 ieee_sge.Length = len; 3420 mpr_from_u64(pa, &ieee_sge.Address); 3421 3422 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); 3423 } else { 3424 /* 3425 * This driver always uses 64-bit address elements for 3426 * simplicity. 3427 */ 3428 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3429 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 3430 /* Set Endian safe macro in mpr_push_sge */ 3431 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); 3432 mpr_from_u64(pa, &sge.Address); 3433 3434 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); 3435 } 3436 } 3437 3438 static void 3439 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3440 { 3441 struct mpr_softc *sc; 3442 struct mpr_command *cm; 3443 u_int i, dir, sflags; 3444 3445 cm = (struct mpr_command *)arg; 3446 sc = cm->cm_sc; 3447 3448 /* 3449 * In this case, just print out a warning and let the chip tell the 3450 * user they did the wrong thing. 
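 * (The request is still sent in that case; the IOC, not the driver, is
 * expected to fail it.)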
3451 */ 3452 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { 3453 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d " 3454 "segments, more than the %d allowed\n", __func__, nsegs, 3455 cm->cm_max_segs); 3456 } 3457 3458 /* 3459 * Set up DMA direction flags. Bi-directional requests are also handled 3460 * here. In that case, both direction flags will be set. 3461 */ 3462 sflags = 0; 3463 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { 3464 /* 3465 * We have to add a special case for SMP passthrough; there 3466 * is no easy way to generically handle it. The first 3467 * S/G element is used for the command (therefore the 3468 * direction bit needs to be set). The second one is used 3469 * for the reply. We'll leave it to the caller to make 3470 * sure we only have two buffers. 3471 */ 3472 /* 3473 * Even though the busdma man page says it doesn't make 3474 * sense to have both direction flags, it does in this case. 3475 * We have one s/g element being accessed in each direction. 3476 */ 3477 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; 3478 3479 /* 3480 * Set the direction flag on the first buffer in the SMP 3481 * passthrough request. We'll clear it for the second one. 3482 */ 3483 sflags |= MPI2_SGE_FLAGS_DIRECTION | 3484 MPI2_SGE_FLAGS_END_OF_BUFFER; 3485 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { 3486 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 3487 dir = BUS_DMASYNC_PREWRITE; 3488 } else 3489 dir = BUS_DMASYNC_PREREAD; 3490 3491 /* Check if a native SG list is needed for an NVMe PCIe device. */ 3492 if (cm->cm_targ && cm->cm_targ->is_nvme && 3493 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { 3494 /* A native SG list was built, skip to end. */ 3495 goto out; 3496 } 3497 3498 for (i = 0; i < nsegs; i++) { 3499 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { 3500 sflags &= ~MPI2_SGE_FLAGS_DIRECTION; 3501 } 3502 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, 3503 sflags, nsegs - i); 3504 if (error != 0) { 3505 /* Resource shortage, roll back! */ 3506 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) 3507 mpr_dprint(sc, MPR_INFO, "Out of chain frames, " 3508 "consider increasing hw.mpr.max_chains.\n"); 3509 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; 3510 mpr_complete_command(sc, cm); 3511 return; 3512 } 3513 } 3514 3515 out: 3516 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 3517 mpr_enqueue_request(sc, cm); 3518 3519 return; 3520 } 3521 3522 static void 3523 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, 3524 int error) 3525 { 3526 mpr_data_cb(arg, segs, nsegs, error); 3527 } 3528 3529 /* 3530 * This is the routine to enqueue commands asynchronously. 3531 * Note that the only error path here is from bus_dmamap_load(), which can 3532 * return EINPROGRESS if it is waiting for resources. Other than this, it's 3533 * assumed that if you have a command in-hand, then you have enough credits 3534 * to use it.
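 *
 * Typical usage (sketch): set cm->cm_data and cm->cm_length (or cm->cm_uio
 * with MPR_CM_FLAGS_USE_UIO, or a CCB with MPR_CM_FLAGS_USE_CCB) and call
 * mpr_map_command(); commands with no data payload get a zero-length SGE
 * as needed and are enqueued immediately.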
/*
 * This is the routine to enqueue commands asynchronously.
 * Note that the only error path here is from bus_dmamap_load(), which can
 * return EINPROGRESS if it is waiting for resources.  Other than this, it's
 * assumed that if you have a command in-hand, then you have enough credits
 * to use it.
 */
int
mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
{
	int error = 0;

	if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
		error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
		    &cm->cm_uio, mpr_data_cb2, cm, 0);
	} else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
		error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, mpr_data_cb, cm, 0);
	} else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
		error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
	} else {
		/* Add a zero-length element as needed */
		if (cm->cm_sge != NULL)
			mpr_add_dmaseg(cm, 0, 0, 0, 1);
		mpr_enqueue_request(sc, cm);
	}

	return (error);
}

/*
 * This is the routine to enqueue commands synchronously.  An error of
 * EINPROGRESS from mpr_map_command() is ignored since the command will
 * be executed and enqueued automatically.  Other errors come from lksleep().
 */
int
mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout,
    int sleep_flag)
{
	int error, rc;
	struct timeval cur_time, start_time;
	struct mpr_command *cm = *cmp;

	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
		return (EBUSY);

	cm->cm_complete = NULL;
	cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
	error = mpr_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS))
		return (error);

	/*
	 * Check for context and wait for 50 ms at a time until time has
	 * expired or the command has finished.  If sleeping can't be used,
	 * need to poll.
	 */
	getmicrouptime(&start_time);
	if (lockowned(&sc->mpr_lock) && sleep_flag == CAN_SLEEP) {
		error = lksleep(cm, &sc->mpr_lock, 0, "mprwait", timeout * hz);
		if (error == EWOULDBLOCK) {
			/*
			 * Record the actual elapsed time in the case of a
			 * timeout for the message below.
			 */
			getmicrouptime(&cur_time);
			timevalsub(&cur_time, &start_time);
		}
	} else {
		while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
			mpr_intr_locked(sc);
			if (sleep_flag == CAN_SLEEP)
				tsleep(mpr_wait_command, 0, "mprwait",
				    hz < 20 ? 1 : hz / 20);
			else
				DELAY(50000);

			getmicrouptime(&cur_time);
			timevalsub(&cur_time, &start_time);
			if (cur_time.tv_sec > timeout) {
				error = EWOULDBLOCK;
				break;
			}
		}
	}

	if (error == EWOULDBLOCK) {
		mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d,"
		    " elapsed=%jd\n", __func__, timeout,
		    (intmax_t)cur_time.tv_sec);
		rc = mpr_reinit(sc);
		mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
		    "failed");
		if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
			/*
			 * Tell the caller that we freed the command in a
			 * reinit.
			 */
			*cmp = NULL;
		}
		error = ETIMEDOUT;
	}
	return (error);
}
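/*
 * Typical synchronous usage, mirroring mpr_read_config_page() below
 * (hypothetical caller; request setup elided).  The command is passed by
 * reference because a timeout triggers a controller reinit that may free
 * it, in which case *cmp comes back NULL:
 *
 *	struct mpr_command *cm;
 *
 *	if ((cm = mpr_alloc_command(sc)) == NULL)
 *		return (EBUSY);
 *	... fill in cm->cm_req, cm->cm_data, cm->cm_desc ...
 *	error = mpr_wait_command(sc, &cm, 30, CAN_SLEEP);
 *	if (error != 0) {
 *		if (cm != NULL)
 *			mpr_free_command(sc, cm);
 *		return (error);
 *	}
 */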
/*
 * This is the routine to enqueue a command synchronously and poll for
 * completion.  Its use should be rare.
 */
int
mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp)
{
	int error, rc;
	struct timeval cur_time, start_time;
	struct mpr_command *cm = *cmp;

	error = 0;

	cm->cm_flags |= MPR_CM_FLAGS_POLLED;
	cm->cm_complete = NULL;
	mpr_map_command(sc, cm);

	getmicrouptime(&start_time);
	while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
		mpr_intr_locked(sc);

		if (lockowned(&sc->mpr_lock))
			lksleep(&sc->msleep_fake_chan, &sc->mpr_lock, 0,
			    "mprpoll", hz < 20 ? 1 : hz / 20);
		else
			tsleep(mpr_request_polled, 0, "mprpoll",
			    hz < 20 ? 1 : hz / 20);

		/*
		 * Check for real-time timeout and fail if more than 60
		 * seconds.
		 */
		getmicrouptime(&cur_time);
		timevalsub(&cur_time, &start_time);
		if (cur_time.tv_sec > 60) {
			mpr_dprint(sc, MPR_FAULT, "polling failed\n");
			error = ETIMEDOUT;
			break;
		}
	}

	if (error) {
		mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
		rc = mpr_reinit(sc);
		mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
		    "failed");

		if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) {
			/*
			 * Tell the caller that we freed the command in a
			 * reinit.
			 */
			*cmp = NULL;
		}
	}
	return (error);
}
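/*
 * A brief contrast of the three submission paths above (summary only):
 *
 *	mpr_map_command()	asynchronous; completion runs through
 *				cm->cm_complete from the interrupt path.
 *	mpr_wait_command()	synchronous; sleeps on the command (or polls
 *				when sleeping isn't possible) and reinits the
 *				controller on timeout.
 *	mpr_request_polled()	synchronous; always drives completion by
 *				calling mpr_intr_locked() directly, with a
 *				fixed 60 second cap.  Its use should be rare.
 */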
/*
 * The MPT driver had a verbose interface for config pages.  In this driver,
 * reduce it to much simpler terms, similar to the Linux driver.
 */
int
mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
{
	MPI2_CONFIG_REQUEST *req;
	struct mpr_command *cm;
	int error;

	if (sc->mpr_flags & MPR_FLAGS_BUSY) {
		return (EBUSY);
	}

	cm = mpr_alloc_command(sc);
	if (cm == NULL) {
		return (EBUSY);
	}

	req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
	req->Function = MPI2_FUNCTION_CONFIG;
	req->Action = params->action;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->PageAddress = params->page_address;
	if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
		MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;

		hdr = &params->hdr.Ext;
		req->ExtPageType = hdr->ExtPageType;
		req->ExtPageLength = hdr->ExtPageLength;
		req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
		req->Header.PageLength = 0; /* Must be set to zero */
		req->Header.PageNumber = hdr->PageNumber;
		req->Header.PageVersion = hdr->PageVersion;
	} else {
		MPI2_CONFIG_PAGE_HEADER *hdr;

		hdr = &params->hdr.Struct;
		req->Header.PageType = hdr->PageType;
		req->Header.PageNumber = hdr->PageNumber;
		req->Header.PageLength = hdr->PageLength;
		req->Header.PageVersion = hdr->PageVersion;
	}

	cm->cm_data = params->buffer;
	cm->cm_length = params->length;
	if (cm->cm_data != NULL) {
		cm->cm_sge = &req->PageBufferSGE;
		cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
		cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
	} else
		cm->cm_sge = NULL;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	cm->cm_complete_data = params;
	if (params->callback != NULL) {
		cm->cm_complete = mpr_config_complete;
		return (mpr_map_command(sc, cm));
	} else {
		error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
		if (error) {
			mpr_dprint(sc, MPR_FAULT,
			    "Error %d reading config page\n", error);
			if (cm != NULL)
				mpr_free_command(sc, cm);
			return (error);
		}
		mpr_config_complete(sc, cm);
	}

	return (0);
}

int
mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
{
	return (EINVAL);
}

static void
mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	MPI2_CONFIG_REPLY *reply;
	struct mpr_config_params *params;

	MPR_FUNCTRACE(sc);
	params = cm->cm_complete_data;

	if (cm->cm_data != NULL) {
		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	}

	/*
	 * XXX KDM need to do more error recovery?  This results in the
	 * device in question not getting probed.
	 */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		params->status = MPI2_IOCSTATUS_BUSY;
		goto done;
	}

	reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
	if (reply == NULL) {
		params->status = MPI2_IOCSTATUS_BUSY;
		goto done;
	}
	params->status = reply->IOCStatus;
	if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
		params->hdr.Ext.ExtPageType = reply->ExtPageType;
		params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
		params->hdr.Ext.PageType = reply->Header.PageType;
		params->hdr.Ext.PageNumber = reply->Header.PageNumber;
		params->hdr.Ext.PageVersion = reply->Header.PageVersion;
	} else {
		params->hdr.Struct.PageType = reply->Header.PageType;
		params->hdr.Struct.PageNumber = reply->Header.PageNumber;
		params->hdr.Struct.PageLength = reply->Header.PageLength;
		params->hdr.Struct.PageVersion = reply->Header.PageVersion;
	}

done:
	mpr_free_command(sc, cm);
	if (params->callback != NULL)
		params->callback(sc, params);

	return;
}
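/*
 * Example of the simplified config-page interface, as a hypothetical
 * synchronous caller might use it (the page type and number here are
 * illustrative, not driver defaults).  A NULL callback selects the
 * mpr_wait_command() path inside mpr_read_config_page():
 *
 *	struct mpr_config_params params;
 *	int error;
 *
 *	bzero(&params, sizeof(params));
 *	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
 *	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IOC;
 *	params.hdr.Struct.PageNumber = 8;
 *	params.page_address = 0;
 *	params.buffer = NULL;
 *	params.length = 0;
 *	params.callback = NULL;
 *	error = mpr_read_config_page(sc, &params);
 *	if (error == 0 &&
 *	    (params.status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) {
 *		allocate a buffer sized from the returned header, then
 *		repeat with MPI2_CONFIG_ACTION_PAGE_READ_CURRENT
 *	}
 */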