1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 /*- 28 * Copyright (c) 2011 LSI Corp. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 51 * 52 * LSI MPT-Fusion Host Adapter FreeBSD 53 * 54 * $FreeBSD: src/sys/dev/mps/mps.c,v 1.14 2012/01/26 18:17:21 ken Exp $ 55 */ 56 57 /* Communications core for LSI MPT2 */ 58 59 /* TODO Move headers to mpsvar */ 60 #include <sys/types.h> 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/kernel.h> 64 #include <sys/lock.h> 65 #include <sys/globaldata.h> 66 #include <sys/module.h> 67 #include <sys/bus.h> 68 #include <sys/conf.h> 69 #include <sys/bio.h> 70 #include <sys/malloc.h> 71 #include <sys/uio.h> 72 #include <sys/sysctl.h> 73 #include <sys/queue.h> 74 #include <sys/kthread.h> 75 #include <sys/endian.h> 76 #include <sys/eventhandler.h> 77 78 #include <sys/rman.h> 79 80 #include <bus/pci/pcivar.h> 81 82 #include <bus/cam/scsi/scsi_all.h> 83 84 #include <dev/raid/mps/mpi/mpi2_type.h> 85 #include <dev/raid/mps/mpi/mpi2.h> 86 #include <dev/raid/mps/mpi/mpi2_ioc.h> 87 #include <dev/raid/mps/mpi/mpi2_sas.h> 88 #include <dev/raid/mps/mpi/mpi2_cnfg.h> 89 #include <dev/raid/mps/mpi/mpi2_init.h> 90 #include <dev/raid/mps/mpi/mpi2_tool.h> 91 #include <dev/raid/mps/mps_ioctl.h> 92 #include <dev/raid/mps/mpsvar.h> 93 #include <dev/raid/mps/mps_table.h> 94 95 static int mps_diag_reset(struct mps_softc *sc); 96 static int mps_init_queues(struct mps_softc *sc); 97 static int mps_message_unit_reset(struct mps_softc *sc); 98 static int mps_transition_operational(struct mps_softc *sc); 99 static void mps_startup(void *arg); 100 static int mps_send_iocinit(struct mps_softc *sc); 101 static int mps_attach_log(struct mps_softc *sc); 102 static __inline void mps_complete_command(struct mps_command *cm); 103 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, 104 MPI2_EVENT_NOTIFICATION_REPLY *reply); 105 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm); 106 static void mps_periodic(void *); 107 static int mps_reregister_events(struct mps_softc *sc); 108 static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm); 109 110 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters"); 111 112 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory"); 113 114 /* 115 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of 116 * any state and back to its initialization state machine. 117 */ 118 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d }; 119 120 static int 121 mps_diag_reset(struct mps_softc *sc) 122 { 123 uint32_t reg; 124 int i, error, tries = 0; 125 126 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 127 128 /* Clear any pending interrupts */ 129 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 130 131 /* Push the magic sequence */ 132 error = ETIMEDOUT; 133 while (tries++ < 20) { 134 for (i = 0; i < sizeof(mpt2_reset_magic); i++) 135 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 136 mpt2_reset_magic[i]); 137 138 DELAY(100 * 1000); 139 140 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); 141 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) { 142 error = 0; 143 break; 144 } 145 } 146 if (error) 147 return (error); 148 149 /* Send the actual reset. XXX need to refresh the reg? */ 150 mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, 151 reg | MPI2_DIAG_RESET_ADAPTER); 152 153 /* Wait up to 300 seconds in 50ms intervals */ 154 error = ETIMEDOUT; 155 for (i = 0; i < 60000; i++) { 156 DELAY(50000); 157 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 158 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { 159 error = 0; 160 break; 161 } 162 } 163 if (error) 164 return (error); 165 166 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); 167 168 return (0); 169 } 170 171 static int 172 mps_message_unit_reset(struct mps_softc *sc) 173 { 174 175 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 176 177 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, 178 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << 179 MPI2_DOORBELL_FUNCTION_SHIFT); 180 DELAY(50000); 181 182 return (0); 183 } 184 185 static int 186 mps_transition_ready(struct mps_softc *sc) 187 { 188 uint32_t reg, state; 189 int error, tries = 0; 190 191 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 192 193 error = 0; 194 while (tries++ < 5) { 195 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 196 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg); 197 198 /* 199 * Ensure the IOC is ready to talk. If it's not, try 200 * resetting it. 201 */ 202 if (reg & MPI2_DOORBELL_USED) { 203 mps_diag_reset(sc); 204 DELAY(50000); 205 continue; 206 } 207 208 /* Is the adapter owned by another peer? */ 209 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == 210 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { 211 device_printf(sc->mps_dev, "IOC is under the control " 212 "of another peer host, aborting initialization.\n"); 213 return (ENXIO); 214 } 215 216 state = reg & MPI2_IOC_STATE_MASK; 217 if (state == MPI2_IOC_STATE_READY) { 218 /* Ready to go! */ 219 error = 0; 220 break; 221 } else if (state == MPI2_IOC_STATE_FAULT) { 222 mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n", 223 state & MPI2_DOORBELL_FAULT_CODE_MASK); 224 mps_diag_reset(sc); 225 } else if (state == MPI2_IOC_STATE_OPERATIONAL) { 226 /* Need to take ownership */ 227 mps_message_unit_reset(sc); 228 } else if (state == MPI2_IOC_STATE_RESET) { 229 /* Wait a bit, IOC might be in transition */ 230 mps_dprint(sc, MPS_FAULT, 231 "IOC in unexpected reset state\n"); 232 } else { 233 mps_dprint(sc, MPS_FAULT, 234 "IOC in unknown state 0x%x\n", state); 235 error = EINVAL; 236 break; 237 } 238 239 /* Wait 50ms for things to settle down. */ 240 DELAY(50000); 241 } 242 243 if (error) 244 device_printf(sc->mps_dev, "Cannot transition IOC to ready\n"); 245 246 return (error); 247 } 248 249 static int 250 mps_transition_operational(struct mps_softc *sc) 251 { 252 uint32_t reg, state; 253 int error; 254 255 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 256 257 error = 0; 258 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 259 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg); 260 261 state = reg & MPI2_IOC_STATE_MASK; 262 if (state != MPI2_IOC_STATE_READY) { 263 if ((error = mps_transition_ready(sc)) != 0) { 264 mps_dprint(sc, MPS_FAULT, 265 "%s failed to transition ready\n", __func__); 266 return (error); 267 } 268 } 269 270 error = mps_send_iocinit(sc); 271 return (error); 272 } 273 274 /* 275 * XXX Some of this should probably move to mps.c 276 * 277 * The terms diag reset and hard reset are used interchangeably in the MPI 278 * docs to mean resetting the controller chip. In this code diag reset 279 * cleans everything up, and the hard reset function just sends the reset 280 * sequence to the chip. This should probably be refactored so that every 281 * subsystem gets a reset notification of some sort, and can clean up 282 * appropriately. 283 */ 284 int 285 mps_reinit(struct mps_softc *sc) 286 { 287 int error; 288 uint32_t db; 289 290 mps_printf(sc, "%s sc %p\n", __func__, sc); 291 292 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0); 293 294 if (sc->mps_flags & MPS_FLAGS_DIAGRESET) { 295 mps_printf(sc, "%s reset already in progress\n", __func__); 296 return 0; 297 } 298 299 /* make sure the completion callbacks can recognize they're getting 300 * a NULL cm_reply due to a reset. 301 */ 302 sc->mps_flags |= MPS_FLAGS_DIAGRESET; 303 304 mps_printf(sc, "%s mask interrupts\n", __func__); 305 mps_mask_intr(sc); 306 307 error = mps_diag_reset(sc); 308 if (error != 0) { 309 panic("%s hard reset failed with error %d", 310 __func__, error); 311 } 312 313 /* Restore the PCI state, including the MSI-X registers */ 314 mps_pci_restore(sc); 315 316 /* Give the I/O subsystem special priority to get itself prepared */ 317 mpssas_handle_reinit(sc); 318 319 /* reinitialize queues after the reset */ 320 bzero(sc->free_queue, sc->fqdepth * 4); 321 mps_init_queues(sc); 322 323 /* get the chip out of the reset state */ 324 error = mps_transition_operational(sc); 325 if (error != 0) 326 panic("%s transition operational failed with error %d", 327 __func__, error); 328 329 /* Reinitialize the reply queue. This is delicate because this 330 * function is typically invoked by task mgmt completion callbacks, 331 * which are called by the interrupt thread. We need to make sure 332 * the interrupt handler loop will exit when we return to it, and 333 * that it will recognize the indexes we've changed. 334 */ 335 sc->replypostindex = 0; 336 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 337 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); 338 339 db = mps_regread(sc, MPI2_DOORBELL_OFFSET); 340 mps_printf(sc, "%s doorbell 0x%08x\n", __func__, db); 341 342 mps_printf(sc, "%s unmask interrupts post %u free %u\n", __func__, 343 sc->replypostindex, sc->replyfreeindex); 344 345 mps_unmask_intr(sc); 346 347 mps_printf(sc, "%s restarting post %u free %u\n", __func__, 348 sc->replypostindex, sc->replyfreeindex); 349 350 /* restart will reload the event masks clobbered by the reset, and 351 * then enable the port. 352 */ 353 mps_reregister_events(sc); 354 355 /* the end of discovery will release the simq, so we're done. */ 356 mps_printf(sc, "%s finished sc %p post %u free %u\n", 357 __func__, sc, 358 sc->replypostindex, sc->replyfreeindex); 359 360 sc->mps_flags &= ~MPS_FLAGS_DIAGRESET; 361 362 return 0; 363 } 364 365 /* Wait for the chip to ACK a word that we've put into its FIFO */ 366 static int 367 mps_wait_db_ack(struct mps_softc *sc) 368 { 369 int retry; 370 371 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { 372 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 373 MPI2_HIS_SYS2IOC_DB_STATUS) == 0) 374 return (0); 375 DELAY(2000); 376 } 377 return (ETIMEDOUT); 378 } 379 380 /* Wait for the chip to signal that the next word in its FIFO can be fetched */ 381 static int 382 mps_wait_db_int(struct mps_softc *sc) 383 { 384 int retry; 385 386 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { 387 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 388 MPI2_HIS_IOC2SYS_DB_STATUS) != 0) 389 return (0); 390 DELAY(2000); 391 } 392 return (ETIMEDOUT); 393 } 394 395 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */ 396 static int 397 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, 398 int req_sz, int reply_sz, int timeout) 399 { 400 uint32_t *data32; 401 uint16_t *data16; 402 int i, count, ioc_sz, residual; 403 404 /* Step 1 */ 405 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 406 407 /* Step 2 */ 408 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 409 return (EBUSY); 410 411 /* Step 3 412 * Announce that a message is coming through the doorbell. Messages 413 * are pushed at 32bit words, so round up if needed. 414 */ 415 count = (req_sz + 3) / 4; 416 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, 417 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | 418 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); 419 420 /* Step 4 */ 421 if (mps_wait_db_int(sc) || 422 (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { 423 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n"); 424 return (ENXIO); 425 } 426 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 427 if (mps_wait_db_ack(sc) != 0) { 428 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n"); 429 return (ENXIO); 430 } 431 432 /* Step 5 */ 433 /* Clock out the message data synchronously in 32-bit dwords*/ 434 data32 = (uint32_t *)req; 435 for (i = 0; i < count; i++) { 436 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, data32[i]); 437 if (mps_wait_db_ack(sc) != 0) { 438 mps_dprint(sc, MPS_FAULT, 439 "Timeout while writing doorbell\n"); 440 return (ENXIO); 441 } 442 } 443 444 /* Step 6 */ 445 /* Clock in the reply in 16-bit words. The total length of the 446 * message is always in the 4th byte, so clock out the first 2 words 447 * manually, then loop the rest. 448 */ 449 data16 = (uint16_t *)reply; 450 if (mps_wait_db_int(sc) != 0) { 451 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n"); 452 return (ENXIO); 453 } 454 data16[0] = 455 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 456 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 457 if (mps_wait_db_int(sc) != 0) { 458 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n"); 459 return (ENXIO); 460 } 461 data16[1] = 462 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 463 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 464 465 /* Number of 32bit words in the message */ 466 ioc_sz = reply->MsgLength; 467 468 /* 469 * Figure out how many 16bit words to clock in without overrunning. 470 * The precision loss with dividing reply_sz can safely be 471 * ignored because the messages can only be multiples of 32bits. 472 */ 473 residual = 0; 474 count = MIN((reply_sz / 4), ioc_sz) * 2; 475 if (count < ioc_sz * 2) { 476 residual = ioc_sz * 2 - count; 477 mps_dprint(sc, MPS_FAULT, "Driver error, throwing away %d " 478 "residual message words\n", residual); 479 } 480 481 for (i = 2; i < count; i++) { 482 if (mps_wait_db_int(sc) != 0) { 483 mps_dprint(sc, MPS_FAULT, 484 "Timeout reading doorbell %d\n", i); 485 return (ENXIO); 486 } 487 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) & 488 MPI2_DOORBELL_DATA_MASK; 489 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 490 } 491 492 /* 493 * Pull out residual words that won't fit into the provided buffer. 494 * This keeps the chip from hanging due to a driver programming 495 * error. 496 */ 497 while (residual--) { 498 if (mps_wait_db_int(sc) != 0) { 499 mps_dprint(sc, MPS_FAULT, 500 "Timeout reading doorbell\n"); 501 return (ENXIO); 502 } 503 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET); 504 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 505 } 506 507 /* Step 7 */ 508 if (mps_wait_db_int(sc) != 0) { 509 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n"); 510 return (ENXIO); 511 } 512 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 513 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n"); 514 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 515 516 return (0); 517 } 518 519 static void 520 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm) 521 { 522 523 mps_dprint(sc, MPS_TRACE, "%s SMID %u cm %p ccb %p\n", __func__, 524 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); 525 526 if ((sc->mps_flags & MPS_FLAGS_ATTACH_DONE) && 527 !(sc->mps_flags & MPS_FLAGS_SHUTDOWN)) { 528 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0); 529 } 530 531 if (++sc->io_cmds_active > sc->io_cmds_highwater) 532 sc->io_cmds_highwater++; 533 534 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, 535 cm->cm_desc.Words.Low); 536 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, 537 cm->cm_desc.Words.High); 538 } 539 540 /* 541 * Just the FACTS, ma'am. 542 */ 543 static int 544 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts) 545 { 546 MPI2_DEFAULT_REPLY *reply; 547 MPI2_IOC_FACTS_REQUEST request; 548 int error, req_sz, reply_sz; 549 550 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 551 552 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); 553 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); 554 reply = (MPI2_DEFAULT_REPLY *)facts; 555 556 bzero(&request, req_sz); 557 request.Function = MPI2_FUNCTION_IOC_FACTS; 558 error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5); 559 560 return (error); 561 } 562 563 static int 564 mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port) 565 { 566 MPI2_PORT_FACTS_REQUEST *request; 567 MPI2_PORT_FACTS_REPLY *reply; 568 struct mps_command *cm; 569 int error; 570 571 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 572 573 if ((cm = mps_alloc_command(sc)) == NULL) 574 return (EBUSY); 575 request = (MPI2_PORT_FACTS_REQUEST *)cm->cm_req; 576 request->Function = MPI2_FUNCTION_PORT_FACTS; 577 request->PortNumber = port; 578 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 579 cm->cm_data = NULL; 580 error = mps_request_polled(sc, cm); 581 reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply; 582 if (reply == NULL) { 583 mps_printf(sc, "%s NULL reply\n", __func__); 584 goto done; 585 } 586 if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { 587 mps_printf(sc, 588 "%s error %d iocstatus 0x%x iocloginfo 0x%x type 0x%x\n", 589 __func__, error, reply->IOCStatus, reply->IOCLogInfo, 590 reply->PortType); 591 error = ENXIO; 592 } 593 bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY)); 594 done: 595 mps_free_command(sc, cm); 596 597 return (error); 598 } 599 600 static int 601 mps_send_iocinit(struct mps_softc *sc) 602 { 603 MPI2_IOC_INIT_REQUEST init; 604 MPI2_DEFAULT_REPLY reply; 605 int req_sz, reply_sz, error; 606 607 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 608 609 req_sz = sizeof(MPI2_IOC_INIT_REQUEST); 610 reply_sz = sizeof(MPI2_IOC_INIT_REPLY); 611 bzero(&init, req_sz); 612 bzero(&reply, reply_sz); 613 614 /* 615 * Fill in the init block. Note that most addresses are 616 * deliberately in the lower 32bits of memory. This is a micro- 617 * optimzation for PCI/PCIX, though it's not clear if it helps PCIe. 618 */ 619 init.Function = MPI2_FUNCTION_IOC_INIT; 620 init.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 621 init.MsgVersion = MPI2_VERSION; 622 init.HeaderVersion = MPI2_HEADER_VERSION; 623 init.SystemRequestFrameSize = sc->facts->IOCRequestFrameSize; 624 init.ReplyDescriptorPostQueueDepth = sc->pqdepth; 625 init.ReplyFreeQueueDepth = sc->fqdepth; 626 init.SenseBufferAddressHigh = 0; 627 init.SystemReplyAddressHigh = 0; 628 init.SystemRequestFrameBaseAddress.High = 0; 629 init.SystemRequestFrameBaseAddress.Low = (uint32_t)sc->req_busaddr; 630 init.ReplyDescriptorPostQueueAddress.High = 0; 631 init.ReplyDescriptorPostQueueAddress.Low = (uint32_t)sc->post_busaddr; 632 init.ReplyFreeQueueAddress.High = 0; 633 init.ReplyFreeQueueAddress.Low = (uint32_t)sc->free_busaddr; 634 init.TimeStamp.High = 0; 635 init.TimeStamp.Low = (uint32_t)time_uptime; 636 637 error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5); 638 if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 639 error = ENXIO; 640 641 mps_dprint(sc, MPS_INFO, "IOCInit status= 0x%x\n", reply.IOCStatus); 642 return (error); 643 } 644 645 void 646 mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 647 { 648 bus_addr_t *addr; 649 650 addr = arg; 651 *addr = segs[0].ds_addr; 652 } 653 654 static int 655 mps_alloc_queues(struct mps_softc *sc) 656 { 657 bus_addr_t queues_busaddr; 658 uint8_t *queues; 659 int qsize, fqsize, pqsize; 660 661 /* 662 * The reply free queue contains 4 byte entries in multiples of 16 and 663 * aligned on a 16 byte boundary. There must always be an unused entry. 664 * This queue supplies fresh reply frames for the firmware to use. 665 * 666 * The reply descriptor post queue contains 8 byte entries in 667 * multiples of 16 and aligned on a 16 byte boundary. This queue 668 * contains filled-in reply frames sent from the firmware to the host. 669 * 670 * These two queues are allocated together for simplicity. 671 */ 672 sc->fqdepth = roundup2((sc->num_replies + 1), 16); 673 sc->pqdepth = roundup2((sc->num_replies + 1), 16); 674 fqsize= sc->fqdepth * 4; 675 pqsize = sc->pqdepth * 8; 676 qsize = fqsize + pqsize; 677 678 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 679 16, 0, /* algnmnt, boundary */ 680 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 681 BUS_SPACE_MAXADDR, /* highaddr */ 682 NULL, NULL, /* filter, filterarg */ 683 qsize, /* maxsize */ 684 1, /* nsegments */ 685 qsize, /* maxsegsize */ 686 0, /* flags */ 687 &sc->queues_dmat)) { 688 device_printf(sc->mps_dev, "Cannot allocate queues DMA tag\n"); 689 return (ENOMEM); 690 } 691 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, 692 &sc->queues_map)) { 693 device_printf(sc->mps_dev, "Cannot allocate queues memory\n"); 694 return (ENOMEM); 695 } 696 bzero(queues, qsize); 697 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, 698 mps_memaddr_cb, &queues_busaddr, 0); 699 700 sc->free_queue = (uint32_t *)queues; 701 sc->free_busaddr = queues_busaddr; 702 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); 703 sc->post_busaddr = queues_busaddr + fqsize; 704 705 return (0); 706 } 707 708 static int 709 mps_alloc_replies(struct mps_softc *sc) 710 { 711 int rsize, num_replies; 712 713 /* 714 * sc->num_replies should be one less than sc->fqdepth. We need to 715 * allocate space for sc->fqdepth replies, but only sc->num_replies 716 * replies can be used at once. 717 */ 718 num_replies = max(sc->fqdepth, sc->num_replies); 719 720 rsize = sc->facts->ReplyFrameSize * num_replies * 4; 721 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 722 4, 0, /* algnmnt, boundary */ 723 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 724 BUS_SPACE_MAXADDR, /* highaddr */ 725 NULL, NULL, /* filter, filterarg */ 726 rsize, /* maxsize */ 727 1, /* nsegments */ 728 rsize, /* maxsegsize */ 729 0, /* flags */ 730 &sc->reply_dmat)) { 731 device_printf(sc->mps_dev, "Cannot allocate replies DMA tag\n"); 732 return (ENOMEM); 733 } 734 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, 735 BUS_DMA_NOWAIT, &sc->reply_map)) { 736 device_printf(sc->mps_dev, "Cannot allocate replies memory\n"); 737 return (ENOMEM); 738 } 739 bzero(sc->reply_frames, rsize); 740 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, 741 mps_memaddr_cb, &sc->reply_busaddr, 0); 742 743 return (0); 744 } 745 746 static int 747 mps_alloc_requests(struct mps_softc *sc) 748 { 749 struct mps_command *cm; 750 struct mps_chain *chain; 751 int i, rsize, nsegs; 752 753 rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4; 754 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 755 16, 0, /* algnmnt, boundary */ 756 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 757 BUS_SPACE_MAXADDR, /* highaddr */ 758 NULL, NULL, /* filter, filterarg */ 759 rsize, /* maxsize */ 760 1, /* nsegments */ 761 rsize, /* maxsegsize */ 762 0, /* flags */ 763 &sc->req_dmat)) { 764 device_printf(sc->mps_dev, "Cannot allocate request DMA tag\n"); 765 return (ENOMEM); 766 } 767 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, 768 BUS_DMA_NOWAIT, &sc->req_map)) { 769 device_printf(sc->mps_dev, "Cannot allocate request memory\n"); 770 return (ENOMEM); 771 } 772 bzero(sc->req_frames, rsize); 773 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, 774 mps_memaddr_cb, &sc->req_busaddr, 0); 775 776 rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4; 777 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 778 16, 0, /* algnmnt, boundary */ 779 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 780 BUS_SPACE_MAXADDR, /* highaddr */ 781 NULL, NULL, /* filter, filterarg */ 782 rsize, /* maxsize */ 783 1, /* nsegments */ 784 rsize, /* maxsegsize */ 785 0, /* flags */ 786 &sc->chain_dmat)) { 787 device_printf(sc->mps_dev, "Cannot allocate chain DMA tag\n"); 788 return (ENOMEM); 789 } 790 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, 791 BUS_DMA_NOWAIT, &sc->chain_map)) { 792 device_printf(sc->mps_dev, "Cannot allocate chain memory\n"); 793 return (ENOMEM); 794 } 795 bzero(sc->chain_frames, rsize); 796 bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, 797 mps_memaddr_cb, &sc->chain_busaddr, 0); 798 799 rsize = MPS_SENSE_LEN * sc->num_reqs; 800 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 801 1, 0, /* algnmnt, boundary */ 802 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 803 BUS_SPACE_MAXADDR, /* highaddr */ 804 NULL, NULL, /* filter, filterarg */ 805 rsize, /* maxsize */ 806 1, /* nsegments */ 807 rsize, /* maxsegsize */ 808 0, /* flags */ 809 &sc->sense_dmat)) { 810 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n"); 811 return (ENOMEM); 812 } 813 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, 814 BUS_DMA_NOWAIT, &sc->sense_map)) { 815 device_printf(sc->mps_dev, "Cannot allocate sense memory\n"); 816 return (ENOMEM); 817 } 818 bzero(sc->sense_frames, rsize); 819 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, 820 mps_memaddr_cb, &sc->sense_busaddr, 0); 821 822 sc->chains = kmalloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2, 823 M_WAITOK | M_ZERO); 824 for (i = 0; i < sc->max_chains; i++) { 825 chain = &sc->chains[i]; 826 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + 827 i * sc->facts->IOCRequestFrameSize * 4); 828 chain->chain_busaddr = sc->chain_busaddr + 829 i * sc->facts->IOCRequestFrameSize * 4; 830 mps_free_chain(sc, chain); 831 sc->chain_free_lowwater++; 832 } 833 834 /* XXX Need to pick a more precise value */ 835 nsegs = (MAXPHYS / PAGE_SIZE) + 1; 836 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 837 1, 0, /* algnmnt, boundary */ 838 BUS_SPACE_MAXADDR, /* lowaddr */ 839 BUS_SPACE_MAXADDR, /* highaddr */ 840 NULL, NULL, /* filter, filterarg */ 841 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 842 nsegs, /* nsegments */ 843 BUS_SPACE_MAXSIZE_24BIT,/* maxsegsize */ 844 BUS_DMA_ALLOCNOW, /* flags */ 845 &sc->buffer_dmat)) { 846 device_printf(sc->mps_dev, "Cannot allocate buffer DMA tag\n"); 847 return (ENOMEM); 848 } 849 850 /* 851 * SMID 0 cannot be used as a free command per the firmware spec. 852 * Just drop that command instead of risking accounting bugs. 853 */ 854 sc->commands = kmalloc(sizeof(struct mps_command) * sc->num_reqs, 855 M_MPT2, M_WAITOK | M_ZERO); 856 for (i = 1; i < sc->num_reqs; i++) { 857 cm = &sc->commands[i]; 858 cm->cm_req = sc->req_frames + 859 i * sc->facts->IOCRequestFrameSize * 4; 860 cm->cm_req_busaddr = sc->req_busaddr + 861 i * sc->facts->IOCRequestFrameSize * 4; 862 cm->cm_sense = &sc->sense_frames[i]; 863 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN; 864 cm->cm_desc.Default.SMID = i; 865 cm->cm_sc = sc; 866 TAILQ_INIT(&cm->cm_chain_list); 867 callout_init(&cm->cm_callout); 868 869 /* XXX Is a failure here a critical problem? */ 870 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0) 871 if (i <= sc->facts->HighPriorityCredit) 872 mps_free_high_priority_command(sc, cm); 873 else 874 mps_free_command(sc, cm); 875 else { 876 panic("failed to allocate command %d", i); 877 sc->num_reqs = i; 878 break; 879 } 880 } 881 882 return (0); 883 } 884 885 static int 886 mps_init_queues(struct mps_softc *sc) 887 { 888 int i; 889 890 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); 891 892 /* 893 * According to the spec, we need to use one less reply than we 894 * have space for on the queue. So sc->num_replies (the number we 895 * use) should be less than sc->fqdepth (allocated size). 896 */ 897 if (sc->num_replies >= sc->fqdepth) 898 return (EINVAL); 899 900 /* 901 * Initialize all of the free queue entries. 902 */ 903 for (i = 0; i < sc->fqdepth; i++) 904 sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4); 905 sc->replyfreeindex = sc->num_replies; 906 907 return (0); 908 } 909 910 /* Get the driver parameter tunables. Lowest priority are the driver defaults. 911 * Next are the global settings, if they exist. Highest are the per-unit 912 * settings, if they exist. 913 */ 914 static void 915 mps_get_tunables(struct mps_softc *sc) 916 { 917 char tmpstr[80]; 918 919 /* XXX default to some debugging for now */ 920 sc->mps_debug = MPS_FAULT; 921 #if 0 /* XXX swildner */ 922 sc->disable_msix = 0; 923 #endif 924 sc->enable_msi = 1; 925 sc->max_chains = MPS_CHAIN_FRAMES; 926 927 /* 928 * Grab the global variables. 929 */ 930 TUNABLE_INT_FETCH("hw.mps.debug_level", &sc->mps_debug); 931 #if 0 /* XXX swildner */ 932 TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix); 933 #endif 934 TUNABLE_INT_FETCH("hw.mps.msi.enable", &sc->enable_msi); 935 TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains); 936 937 /* Grab the unit-instance variables */ 938 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level", 939 device_get_unit(sc->mps_dev)); 940 TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug); 941 942 #if 0 /* XXX swildner */ 943 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix", 944 device_get_unit(sc->mps_dev)); 945 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); 946 #endif 947 948 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.enable_msi", 949 device_get_unit(sc->mps_dev)); 950 TUNABLE_INT_FETCH(tmpstr, &sc->enable_msi); 951 952 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains", 953 device_get_unit(sc->mps_dev)); 954 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); 955 } 956 957 static void 958 mps_setup_sysctl(struct mps_softc *sc) 959 { 960 struct sysctl_ctx_list *sysctl_ctx = NULL; 961 struct sysctl_oid *sysctl_tree = NULL; 962 char tmpstr[80], tmpstr2[80]; 963 964 /* 965 * Setup the sysctl variable so the user can change the debug level 966 * on the fly. 967 */ 968 ksnprintf(tmpstr, sizeof(tmpstr), "MPS controller %d", 969 device_get_unit(sc->mps_dev)); 970 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev)); 971 972 sysctl_ctx_init(&sc->sysctl_ctx); 973 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 974 SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, 975 CTLFLAG_RD, 0, tmpstr); 976 if (sc->sysctl_tree == NULL) 977 return; 978 sysctl_ctx = &sc->sysctl_ctx; 979 sysctl_tree = sc->sysctl_tree; 980 981 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 982 OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0, 983 "mps debug level"); 984 985 #if 0 /* XXX swildner */ 986 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 987 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, 988 "Disable the use of MSI-X interrupts"); 989 #endif 990 991 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 992 OID_AUTO, "enable_msi", CTLFLAG_RD, &sc->enable_msi, 0, 993 "Enable the use of MSI interrupts"); 994 995 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 996 OID_AUTO, "firmware_version", CTLFLAG_RW, &sc->fw_version, 997 strlen(sc->fw_version), "firmware version"); 998 999 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1000 OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION, 1001 strlen(MPS_DRIVER_VERSION), "driver version"); 1002 1003 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1004 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 1005 &sc->io_cmds_active, 0, "number of currently active commands"); 1006 1007 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1008 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1009 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1010 1011 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1012 OID_AUTO, "chain_free", CTLFLAG_RD, 1013 &sc->chain_free, 0, "number of free chain elements"); 1014 1015 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1016 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1017 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1018 1019 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1020 OID_AUTO, "max_chains", CTLFLAG_RD, 1021 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1022 1023 #if __FreeBSD_version >= 900030 1024 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1025 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1026 &sc->chain_alloc_fail, "chain allocation failures"); 1027 #endif //FreeBSD_version >= 900030 1028 } 1029 1030 int 1031 mps_attach(struct mps_softc *sc) 1032 { 1033 int i, error; 1034 1035 mps_get_tunables(sc); 1036 1037 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1038 1039 lockinit(&sc->mps_lock, "MPT2SAS lock", 0, LK_CANRECURSE); 1040 callout_init(&sc->periodic); 1041 TAILQ_INIT(&sc->event_list); 1042 1043 if ((error = mps_transition_ready(sc)) != 0) { 1044 mps_printf(sc, "%s failed to transition ready\n", __func__); 1045 return (error); 1046 } 1047 1048 sc->facts = kmalloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2, 1049 M_ZERO|M_WAITOK); 1050 if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) 1051 return (error); 1052 1053 mps_print_iocfacts(sc, sc->facts); 1054 1055 ksnprintf(sc->fw_version, sizeof(sc->fw_version), 1056 "%02d.%02d.%02d.%02d", 1057 sc->facts->FWVersion.Struct.Major, 1058 sc->facts->FWVersion.Struct.Minor, 1059 sc->facts->FWVersion.Struct.Unit, 1060 sc->facts->FWVersion.Struct.Dev); 1061 1062 mps_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version, 1063 MPS_DRIVER_VERSION); 1064 mps_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, 1065 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" 1066 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" 1067 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"); 1068 1069 /* 1070 * If the chip doesn't support event replay then a hard reset will be 1071 * required to trigger a full discovery. Do the reset here then 1072 * retransition to Ready. A hard reset might have already been done, 1073 * but it doesn't hurt to do it again. 1074 */ 1075 if ((sc->facts->IOCCapabilities & 1076 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) { 1077 mps_diag_reset(sc); 1078 if ((error = mps_transition_ready(sc)) != 0) 1079 return (error); 1080 } 1081 1082 /* 1083 * Set flag if IR Firmware is loaded. 1084 */ 1085 if (sc->facts->IOCCapabilities & 1086 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) 1087 sc->ir_firmware = 1; 1088 1089 /* 1090 * Check if controller supports FW diag buffers and set flag to enable 1091 * each type. 1092 */ 1093 if (sc->facts->IOCCapabilities & 1094 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 1095 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].enabled = 1096 TRUE; 1097 if (sc->facts->IOCCapabilities & 1098 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) 1099 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].enabled = 1100 TRUE; 1101 if (sc->facts->IOCCapabilities & 1102 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) 1103 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].enabled = 1104 TRUE; 1105 1106 /* 1107 * Set flag if EEDP is supported and if TLR is supported. 1108 */ 1109 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) 1110 sc->eedp_enabled = TRUE; 1111 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) 1112 sc->control_TLR = TRUE; 1113 1114 /* 1115 * Size the queues. Since the reply queues always need one free entry, 1116 * we'll just deduct one reply message here. 1117 */ 1118 sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit); 1119 sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES, 1120 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; 1121 TAILQ_INIT(&sc->req_list); 1122 TAILQ_INIT(&sc->high_priority_req_list); 1123 TAILQ_INIT(&sc->chain_list); 1124 TAILQ_INIT(&sc->tm_list); 1125 1126 if (((error = mps_alloc_queues(sc)) != 0) || 1127 ((error = mps_alloc_replies(sc)) != 0) || 1128 ((error = mps_alloc_requests(sc)) != 0)) { 1129 mps_printf(sc, "%s failed to alloc\n", __func__); 1130 mps_free(sc); 1131 return (error); 1132 } 1133 1134 if (((error = mps_init_queues(sc)) != 0) || 1135 ((error = mps_transition_operational(sc)) != 0)) { 1136 mps_printf(sc, "%s failed to transition operational\n", __func__); 1137 mps_free(sc); 1138 return (error); 1139 } 1140 1141 /* 1142 * Finish the queue initialization. 1143 * These are set here instead of in mps_init_queues() because the 1144 * IOC resets these values during the state transition in 1145 * mps_transition_operational(). The free index is set to 1 1146 * because the corresponding index in the IOC is set to 0, and the 1147 * IOC treats the queues as full if both are set to the same value. 1148 * Hence the reason that the queue can't hold all of the possible 1149 * replies. 1150 */ 1151 sc->replypostindex = 0; 1152 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 1153 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); 1154 1155 sc->pfacts = kmalloc(sizeof(MPI2_PORT_FACTS_REPLY) * 1156 sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK); 1157 for (i = 0; i < sc->facts->NumberOfPorts; i++) { 1158 if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) { 1159 mps_printf(sc, "%s failed to get portfacts for port %d\n", 1160 __func__, i); 1161 mps_free(sc); 1162 return (error); 1163 } 1164 mps_print_portfacts(sc, &sc->pfacts[i]); 1165 } 1166 1167 /* Attach the subsystems so they can prepare their event masks. */ 1168 /* XXX Should be dynamic so that IM/IR and user modules can attach */ 1169 if (((error = mps_attach_log(sc)) != 0) || 1170 ((error = mps_attach_sas(sc)) != 0) || 1171 ((error = mps_attach_user(sc)) != 0)) { 1172 mps_printf(sc, "%s failed to attach all subsystems: error %d\n", 1173 __func__, error); 1174 mps_free(sc); 1175 return (error); 1176 } 1177 1178 if ((error = mps_pci_setup_interrupts(sc)) != 0) { 1179 mps_printf(sc, "%s failed to setup interrupts\n", __func__); 1180 mps_free(sc); 1181 return (error); 1182 } 1183 1184 /* 1185 * The static page function currently read is ioc page8. Others can be 1186 * added in future. 1187 */ 1188 mps_base_static_config_pages(sc); 1189 1190 /* Start the periodic watchdog check on the IOC Doorbell */ 1191 mps_periodic(sc); 1192 1193 /* 1194 * The portenable will kick off discovery events that will drive the 1195 * rest of the initialization process. The CAM/SAS module will 1196 * hold up the boot sequence until discovery is complete. 1197 */ 1198 sc->mps_ich.ich_func = mps_startup; 1199 sc->mps_ich.ich_arg = sc; 1200 sc->mps_ich.ich_desc = "mps"; 1201 if (config_intrhook_establish(&sc->mps_ich) != 0) { 1202 mps_dprint(sc, MPS_FAULT, "Cannot establish MPS config hook\n"); 1203 error = EINVAL; 1204 } 1205 1206 /* 1207 * Allow IR to shutdown gracefully when shutdown occurs. 1208 */ 1209 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 1210 mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 1211 1212 if (sc->shutdown_eh == NULL) 1213 mps_dprint(sc, MPS_FAULT, "shutdown event registration " 1214 "failed\n"); 1215 1216 mps_setup_sysctl(sc); 1217 1218 sc->mps_flags |= MPS_FLAGS_ATTACH_DONE; 1219 1220 return (error); 1221 } 1222 1223 /* Run through any late-start handlers. */ 1224 static void 1225 mps_startup(void *arg) 1226 { 1227 struct mps_softc *sc; 1228 1229 sc = (struct mps_softc *)arg; 1230 1231 mps_lock(sc); 1232 mps_unmask_intr(sc); 1233 /* initialize device mapping tables */ 1234 mps_mapping_initialize(sc); 1235 mpssas_startup(sc); 1236 mps_unlock(sc); 1237 } 1238 1239 /* Periodic watchdog. Is called with the driver lock already held. */ 1240 static void 1241 mps_periodic(void *arg) 1242 { 1243 struct mps_softc *sc; 1244 uint32_t db; 1245 1246 sc = (struct mps_softc *)arg; 1247 mps_lock(sc); 1248 if (sc->mps_flags & MPS_FLAGS_SHUTDOWN) { 1249 mps_unlock(sc); 1250 return; 1251 } 1252 1253 db = mps_regread(sc, MPI2_DOORBELL_OFFSET); 1254 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 1255 device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db); 1256 1257 mps_reinit(sc); 1258 } 1259 1260 callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc); 1261 mps_unlock(sc); 1262 } 1263 1264 static void 1265 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data, 1266 MPI2_EVENT_NOTIFICATION_REPLY *event) 1267 { 1268 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 1269 1270 mps_print_event(sc, event); 1271 1272 switch (event->Event) { 1273 case MPI2_EVENT_LOG_DATA: 1274 device_printf(sc->mps_dev, "MPI2_EVENT_LOG_DATA:\n"); 1275 hexdump(event->EventData, event->EventDataLength, NULL, 0); 1276 break; 1277 case MPI2_EVENT_LOG_ENTRY_ADDED: 1278 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 1279 mps_dprint(sc, MPS_INFO, "MPI2_EVENT_LOG_ENTRY_ADDED event " 1280 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 1281 entry->LogSequence); 1282 break; 1283 default: 1284 break; 1285 } 1286 return; 1287 } 1288 1289 static int 1290 mps_attach_log(struct mps_softc *sc) 1291 { 1292 uint8_t events[16]; 1293 1294 bzero(events, 16); 1295 setbit(events, MPI2_EVENT_LOG_DATA); 1296 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 1297 1298 mps_register_events(sc, events, mps_log_evt_handler, NULL, 1299 &sc->mps_log_eh); 1300 1301 return (0); 1302 } 1303 1304 static int 1305 mps_detach_log(struct mps_softc *sc) 1306 { 1307 1308 if (sc->mps_log_eh != NULL) 1309 mps_deregister_events(sc, sc->mps_log_eh); 1310 return (0); 1311 } 1312 1313 /* 1314 * Free all of the driver resources and detach submodules. Should be called 1315 * without the lock held. 1316 */ 1317 int 1318 mps_free(struct mps_softc *sc) 1319 { 1320 struct mps_command *cm; 1321 int i, error; 1322 1323 /* Turn off the watchdog */ 1324 mps_lock(sc); 1325 sc->mps_flags |= MPS_FLAGS_SHUTDOWN; 1326 mps_unlock(sc); 1327 callout_stop_sync(&sc->periodic); 1328 1329 if (((error = mps_detach_log(sc)) != 0) || 1330 ((error = mps_detach_sas(sc)) != 0)) 1331 return (error); 1332 1333 mps_detach_user(sc); 1334 1335 /* Put the IOC back in the READY state. */ 1336 mps_lock(sc); 1337 if ((error = mps_transition_ready(sc)) != 0) { 1338 mps_unlock(sc); 1339 return (error); 1340 } 1341 mps_unlock(sc); 1342 1343 if (sc->facts != NULL) 1344 kfree(sc->facts, M_MPT2); 1345 1346 if (sc->pfacts != NULL) 1347 kfree(sc->pfacts, M_MPT2); 1348 1349 if (sc->post_busaddr != 0) 1350 bus_dmamap_unload(sc->queues_dmat, sc->queues_map); 1351 if (sc->post_queue != NULL) 1352 bus_dmamem_free(sc->queues_dmat, sc->post_queue, 1353 sc->queues_map); 1354 if (sc->queues_dmat != NULL) 1355 bus_dma_tag_destroy(sc->queues_dmat); 1356 1357 if (sc->chain_busaddr != 0) 1358 bus_dmamap_unload(sc->chain_dmat, sc->chain_map); 1359 if (sc->chain_frames != NULL) 1360 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,sc->chain_map); 1361 if (sc->chain_dmat != NULL) 1362 bus_dma_tag_destroy(sc->chain_dmat); 1363 1364 if (sc->sense_busaddr != 0) 1365 bus_dmamap_unload(sc->sense_dmat, sc->sense_map); 1366 if (sc->sense_frames != NULL) 1367 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,sc->sense_map); 1368 if (sc->sense_dmat != NULL) 1369 bus_dma_tag_destroy(sc->sense_dmat); 1370 1371 if (sc->reply_busaddr != 0) 1372 bus_dmamap_unload(sc->reply_dmat, sc->reply_map); 1373 if (sc->reply_frames != NULL) 1374 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,sc->reply_map); 1375 if (sc->reply_dmat != NULL) 1376 bus_dma_tag_destroy(sc->reply_dmat); 1377 1378 if (sc->req_busaddr != 0) 1379 bus_dmamap_unload(sc->req_dmat, sc->req_map); 1380 if (sc->req_frames != NULL) 1381 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); 1382 if (sc->req_dmat != NULL) 1383 bus_dma_tag_destroy(sc->req_dmat); 1384 1385 if (sc->chains != NULL) 1386 kfree(sc->chains, M_MPT2); 1387 if (sc->commands != NULL) { 1388 for (i = 1; i < sc->num_reqs; i++) { 1389 cm = &sc->commands[i]; 1390 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); 1391 } 1392 kfree(sc->commands, M_MPT2); 1393 } 1394 if (sc->buffer_dmat != NULL) 1395 bus_dma_tag_destroy(sc->buffer_dmat); 1396 1397 if (sc->sysctl_tree != NULL) 1398 sysctl_ctx_free(&sc->sysctl_ctx); 1399 1400 mps_mapping_free_memory(sc); 1401 1402 /* Deregister the shutdown function */ 1403 if (sc->shutdown_eh != NULL) 1404 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 1405 1406 lockuninit(&sc->mps_lock); 1407 1408 return (0); 1409 } 1410 1411 static __inline void 1412 mps_complete_command(struct mps_command *cm) 1413 { 1414 if (cm->cm_flags & MPS_CM_FLAGS_POLLED) 1415 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; 1416 1417 if (cm->cm_complete != NULL) { 1418 mps_dprint(cm->cm_sc, MPS_TRACE, 1419 "%s cm %p calling cm_complete %p data %p reply %p\n", 1420 __func__, cm, cm->cm_complete, cm->cm_complete_data, 1421 cm->cm_reply); 1422 cm->cm_complete(cm->cm_sc, cm); 1423 } 1424 1425 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { 1426 mps_dprint(cm->cm_sc, MPS_TRACE, "%s: waking up %p\n", 1427 __func__, cm); 1428 wakeup(cm); 1429 } 1430 1431 if (cm->cm_sc->io_cmds_active != 0) { 1432 cm->cm_sc->io_cmds_active--; 1433 } else { 1434 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: io_cmds_active is " 1435 "out of sync - resynching to 0\n"); 1436 } 1437 } 1438 1439 void 1440 mps_intr(void *data) 1441 { 1442 struct mps_softc *sc; 1443 uint32_t status; 1444 1445 sc = (struct mps_softc *)data; 1446 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1447 1448 /* 1449 * Check interrupt status register to flush the bus. This is 1450 * needed for both INTx interrupts and driver-driven polling 1451 */ 1452 status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 1453 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 1454 return; 1455 1456 mps_lock(sc); 1457 mps_intr_locked(data); 1458 mps_unlock(sc); 1459 return; 1460 } 1461 1462 /* 1463 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the 1464 * chip. Hopefully this theory is correct. 1465 */ 1466 void 1467 mps_intr_msi(void *data) 1468 { 1469 struct mps_softc *sc; 1470 1471 sc = (struct mps_softc *)data; 1472 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1473 mps_lock(sc); 1474 mps_intr_locked(data); 1475 mps_unlock(sc); 1476 return; 1477 } 1478 1479 /* 1480 * The locking is overly broad and simplistic, but easy to deal with for now. 1481 */ 1482 void 1483 mps_intr_locked(void *data) 1484 { 1485 MPI2_REPLY_DESCRIPTORS_UNION *desc; 1486 struct mps_softc *sc; 1487 struct mps_command *cm = NULL; 1488 uint8_t flags; 1489 u_int pq; 1490 MPI2_DIAG_RELEASE_REPLY *rel_rep; 1491 mps_fw_diagnostic_buffer_t *pBuffer; 1492 1493 sc = (struct mps_softc *)data; 1494 1495 pq = sc->replypostindex; 1496 mps_dprint(sc, MPS_TRACE, 1497 "%s sc %p starting with replypostindex %u\n", 1498 __func__, sc, sc->replypostindex); 1499 1500 for ( ;; ) { 1501 cm = NULL; 1502 desc = &sc->post_queue[sc->replypostindex]; 1503 flags = desc->Default.ReplyFlags & 1504 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1505 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1506 || (desc->Words.High == 0xffffffff)) 1507 break; 1508 1509 /* increment the replypostindex now, so that event handlers 1510 * and cm completion handlers which decide to do a diag 1511 * reset can zero it without it getting incremented again 1512 * afterwards, and we break out of this loop on the next 1513 * iteration since the reply post queue has been cleared to 1514 * 0xFF and all descriptors look unused (which they are). 1515 */ 1516 if (++sc->replypostindex >= sc->pqdepth) 1517 sc->replypostindex = 0; 1518 1519 switch (flags) { 1520 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 1521 cm = &sc->commands[desc->SCSIIOSuccess.SMID]; 1522 cm->cm_reply = NULL; 1523 break; 1524 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 1525 { 1526 uint32_t baddr; 1527 uint8_t *reply; 1528 1529 /* 1530 * Re-compose the reply address from the address 1531 * sent back from the chip. The ReplyFrameAddress 1532 * is the lower 32 bits of the physical address of 1533 * particular reply frame. Convert that address to 1534 * host format, and then use that to provide the 1535 * offset against the virtual address base 1536 * (sc->reply_frames). 1537 */ 1538 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 1539 reply = sc->reply_frames + 1540 (baddr - ((uint32_t)sc->reply_busaddr)); 1541 /* 1542 * Make sure the reply we got back is in a valid 1543 * range. If not, go ahead and panic here, since 1544 * we'll probably panic as soon as we deference the 1545 * reply pointer anyway. 1546 */ 1547 if ((reply < sc->reply_frames) 1548 || (reply > (sc->reply_frames + 1549 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { 1550 kprintf("%s: WARNING: reply %p out of range!\n", 1551 __func__, reply); 1552 kprintf("%s: reply_frames %p, fqdepth %d, " 1553 "frame size %d\n", __func__, 1554 sc->reply_frames, sc->fqdepth, 1555 sc->facts->ReplyFrameSize * 4); 1556 kprintf("%s: baddr %#x,\n", __func__, baddr); 1557 panic("Reply address out of range"); 1558 } 1559 if (desc->AddressReply.SMID == 0) { 1560 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 1561 MPI2_FUNCTION_DIAG_BUFFER_POST) { 1562 /* 1563 * If SMID is 0 for Diag Buffer Post, 1564 * this implies that the reply is due to 1565 * a release function with a status that 1566 * the buffer has been released. Set 1567 * the buffer flags accordingly. 1568 */ 1569 rel_rep = 1570 (MPI2_DIAG_RELEASE_REPLY *)reply; 1571 if (rel_rep->IOCStatus == 1572 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 1573 { 1574 pBuffer = 1575 &sc->fw_diag_buffer_list[ 1576 rel_rep->BufferType]; 1577 pBuffer->valid_data = TRUE; 1578 pBuffer->owned_by_firmware = 1579 FALSE; 1580 pBuffer->immediate = FALSE; 1581 } 1582 } else 1583 mps_dispatch_event(sc, baddr, 1584 (MPI2_EVENT_NOTIFICATION_REPLY *) 1585 reply); 1586 } else { 1587 cm = &sc->commands[desc->AddressReply.SMID]; 1588 cm->cm_reply = reply; 1589 cm->cm_reply_data = 1590 desc->AddressReply.ReplyFrameAddress; 1591 } 1592 break; 1593 } 1594 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 1595 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 1596 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 1597 default: 1598 /* Unhandled */ 1599 device_printf(sc->mps_dev, "Unhandled reply 0x%x\n", 1600 desc->Default.ReplyFlags); 1601 cm = NULL; 1602 break; 1603 } 1604 1605 if (cm != NULL) 1606 mps_complete_command(cm); 1607 1608 desc->Words.Low = 0xffffffff; 1609 desc->Words.High = 0xffffffff; 1610 } 1611 1612 if (pq != sc->replypostindex) { 1613 mps_dprint(sc, MPS_TRACE, 1614 "%s sc %p writing postindex %d\n", 1615 __func__, sc, sc->replypostindex); 1616 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); 1617 } 1618 1619 return; 1620 } 1621 1622 static void 1623 mps_dispatch_event(struct mps_softc *sc, uintptr_t data, 1624 MPI2_EVENT_NOTIFICATION_REPLY *reply) 1625 { 1626 struct mps_event_handle *eh; 1627 int event, handled = 0; 1628 1629 event = reply->Event; 1630 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1631 if (isset(eh->mask, event)) { 1632 eh->callback(sc, data, reply); 1633 handled++; 1634 } 1635 } 1636 1637 if (handled == 0) 1638 device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event); 1639 1640 /* 1641 * This is the only place that the event/reply should be freed. 1642 * Anything wanting to hold onto the event data should have 1643 * already copied it into their own storage. 1644 */ 1645 mps_free_reply(sc, data); 1646 } 1647 1648 static void 1649 mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm) 1650 { 1651 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1652 1653 if (cm->cm_reply) 1654 mps_print_event(sc, 1655 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 1656 1657 mps_free_command(sc, cm); 1658 1659 /* next, send a port enable */ 1660 mpssas_startup(sc); 1661 } 1662 1663 /* 1664 * For both register_events and update_events, the caller supplies a bitmap 1665 * of events that it _wants_. These functions then turn that into a bitmask 1666 * suitable for the controller. 1667 */ 1668 int 1669 mps_register_events(struct mps_softc *sc, uint8_t *mask, 1670 mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle) 1671 { 1672 struct mps_event_handle *eh; 1673 int error = 0; 1674 1675 eh = kmalloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO); 1676 eh->callback = cb; 1677 eh->data = data; 1678 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 1679 if (mask != NULL) 1680 error = mps_update_events(sc, eh, mask); 1681 *handle = eh; 1682 1683 return (error); 1684 } 1685 1686 int 1687 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle, 1688 uint8_t *mask) 1689 { 1690 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 1691 MPI2_EVENT_NOTIFICATION_REPLY *reply; 1692 struct mps_command *cm; 1693 struct mps_event_handle *eh; 1694 int error, i; 1695 1696 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1697 1698 if ((mask != NULL) && (handle != NULL)) 1699 bcopy(mask, &handle->mask[0], 16); 1700 memset(sc->event_mask, 0xff, 16); 1701 1702 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1703 for (i = 0; i < 16; i++) 1704 sc->event_mask[i] &= ~eh->mask[i]; 1705 } 1706 1707 if ((cm = mps_alloc_command(sc)) == NULL) 1708 return (EBUSY); 1709 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 1710 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 1711 evtreq->MsgFlags = 0; 1712 evtreq->SASBroadcastPrimitiveMasks = 0; 1713 #ifdef MPS_DEBUG_ALL_EVENTS 1714 { 1715 u_char fullmask[16]; 1716 memset(fullmask, 0x00, 16); 1717 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 1718 } 1719 #else 1720 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 1721 #endif 1722 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1723 cm->cm_data = NULL; 1724 1725 error = mps_request_polled(sc, cm); 1726 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 1727 if ((reply == NULL) || 1728 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 1729 error = ENXIO; 1730 mps_print_event(sc, reply); 1731 mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error); 1732 1733 mps_free_command(sc, cm); 1734 return (error); 1735 } 1736 1737 static int 1738 mps_reregister_events(struct mps_softc *sc) 1739 { 1740 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 1741 struct mps_command *cm; 1742 struct mps_event_handle *eh; 1743 int error, i; 1744 1745 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1746 1747 /* first, reregister events */ 1748 1749 memset(sc->event_mask, 0xff, 16); 1750 1751 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1752 for (i = 0; i < 16; i++) 1753 sc->event_mask[i] &= ~eh->mask[i]; 1754 } 1755 1756 if ((cm = mps_alloc_command(sc)) == NULL) 1757 return (EBUSY); 1758 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 1759 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 1760 evtreq->MsgFlags = 0; 1761 evtreq->SASBroadcastPrimitiveMasks = 0; 1762 #ifdef MPS_DEBUG_ALL_EVENTS 1763 { 1764 u_char fullmask[16]; 1765 memset(fullmask, 0x00, 16); 1766 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 1767 } 1768 #else 1769 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 1770 #endif 1771 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1772 cm->cm_data = NULL; 1773 cm->cm_complete = mps_reregister_events_complete; 1774 1775 error = mps_map_command(sc, cm); 1776 1777 mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__, error); 1778 return (error); 1779 } 1780 1781 int 1782 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle) 1783 { 1784 1785 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 1786 kfree(handle, M_MPT2); 1787 return (mps_update_events(sc, NULL, NULL)); 1788 } 1789 1790 /* 1791 * Add a chain element as the next SGE for the specified command. 1792 * Reset cm_sge and cm_sgesize to indicate all the available space. 1793 */ 1794 static int 1795 mps_add_chain(struct mps_command *cm) 1796 { 1797 MPI2_SGE_CHAIN32 *sgc; 1798 struct mps_chain *chain; 1799 int space; 1800 1801 if (cm->cm_sglsize < MPS_SGC_SIZE) 1802 panic("MPS: Need SGE Error Code"); 1803 1804 chain = mps_alloc_chain(cm->cm_sc); 1805 if (chain == NULL) 1806 return (ENOBUFS); 1807 1808 space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; 1809 1810 /* 1811 * Note: a double-linked list is used to make it easier to 1812 * walk for debugging. 1813 */ 1814 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 1815 1816 sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain; 1817 sgc->Length = space; 1818 sgc->NextChainOffset = 0; 1819 sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT; 1820 sgc->Address = chain->chain_busaddr; 1821 1822 cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple; 1823 cm->cm_sglsize = space; 1824 return (0); 1825 } 1826 1827 /* 1828 * Add one scatter-gather element (chain, simple, transaction context) 1829 * to the scatter-gather list for a command. Maintain cm_sglsize and 1830 * cm_sge as the remaining size and pointer to the next SGE to fill 1831 * in, respectively. 1832 */ 1833 int 1834 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft) 1835 { 1836 MPI2_SGE_TRANSACTION_UNION *tc = sgep; 1837 MPI2_SGE_SIMPLE64 *sge = sgep; 1838 int error, type; 1839 uint32_t saved_buf_len, saved_address_low, saved_address_high; 1840 1841 type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK); 1842 1843 #ifdef INVARIANTS 1844 switch (type) { 1845 case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: { 1846 if (len != tc->DetailsLength + 4) 1847 panic("TC %p length %u or %zu?", tc, 1848 tc->DetailsLength + 4, len); 1849 } 1850 break; 1851 case MPI2_SGE_FLAGS_CHAIN_ELEMENT: 1852 /* Driver only uses 32-bit chain elements */ 1853 if (len != MPS_SGC_SIZE) 1854 panic("CHAIN %p length %u or %zu?", sgep, 1855 MPS_SGC_SIZE, len); 1856 break; 1857 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT: 1858 /* Driver only uses 64-bit SGE simple elements */ 1859 if (len != MPS_SGE64_SIZE) 1860 panic("SGE simple %p length %u or %zu?", sge, 1861 MPS_SGE64_SIZE, len); 1862 if (((le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT) & 1863 MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0) 1864 panic("SGE simple %p not marked 64-bit?", sge); 1865 1866 break; 1867 default: 1868 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags); 1869 } 1870 #endif 1871 1872 /* 1873 * case 1: 1 more segment, enough room for it 1874 * case 2: 2 more segments, enough room for both 1875 * case 3: >=2 more segments, only enough room for 1 and a chain 1876 * case 4: >=1 more segment, enough room for only a chain 1877 * case 5: >=1 more segment, no room for anything (error) 1878 */ 1879 1880 /* 1881 * There should be room for at least a chain element, or this 1882 * code is buggy. Case (5). 1883 */ 1884 if (cm->cm_sglsize < MPS_SGC_SIZE) 1885 panic("MPS: Need SGE Error Code"); 1886 1887 if (segsleft >= 2 && 1888 cm->cm_sglsize >= len + MPS_SGC_SIZE && 1889 cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) { 1890 /* 1891 * There are 2 or more segments left to add, and only 1892 * enough room for 1 and a chain. Case (3). 1893 * 1894 * Mark as last element in this chain if necessary. 1895 */ 1896 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { 1897 sge->FlagsLength |= htole32( 1898 MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT); 1899 } 1900 1901 /* 1902 * Add the item then a chain. Do the chain now, 1903 * rather than on the next iteration, to simplify 1904 * understanding the code. 1905 */ 1906 cm->cm_sglsize -= len; 1907 bcopy(sgep, cm->cm_sge, len); 1908 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 1909 return (mps_add_chain(cm)); 1910 } 1911 1912 if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) { 1913 /* 1914 * 1 or more segment, enough room for only a chain. 1915 * Hope the previous element wasn't a Simple entry 1916 * that needed to be marked with 1917 * MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4). 1918 */ 1919 if ((error = mps_add_chain(cm)) != 0) 1920 return (error); 1921 } 1922 1923 #ifdef INVARIANTS 1924 /* Case 1: 1 more segment, enough room for it. */ 1925 if (segsleft == 1 && cm->cm_sglsize < len) 1926 panic("1 seg left and no room? %u versus %zu", 1927 cm->cm_sglsize, len); 1928 1929 /* Case 2: 2 more segments, enough room for both */ 1930 if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE) 1931 panic("2 segs left and no room? %u versus %zu", 1932 cm->cm_sglsize, len); 1933 #endif 1934 1935 if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { 1936 /* 1937 * If this is a bi-directional request, need to account for that 1938 * here. Save the pre-filled sge values. These will be used 1939 * either for the 2nd SGL or for a single direction SGL. If 1940 * cm_out_len is non-zero, this is a bi-directional request, so 1941 * fill in the OUT SGL first, then the IN SGL, otherwise just 1942 * fill in the IN SGL. Note that at this time, when filling in 1943 * 2 SGL's for a bi-directional request, they both use the same 1944 * DMA buffer (same cm command). 1945 */ 1946 saved_buf_len = le32toh(sge->FlagsLength) & 0x00FFFFFF; 1947 saved_address_low = sge->Address.Low; 1948 saved_address_high = sge->Address.High; 1949 if (cm->cm_out_len) { 1950 sge->FlagsLength = htole32(cm->cm_out_len | 1951 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1952 MPI2_SGE_FLAGS_END_OF_BUFFER | 1953 MPI2_SGE_FLAGS_HOST_TO_IOC | 1954 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 1955 MPI2_SGE_FLAGS_SHIFT)); 1956 cm->cm_sglsize -= len; 1957 bcopy(sgep, cm->cm_sge, len); 1958 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge 1959 + len); 1960 } 1961 saved_buf_len |= 1962 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1963 MPI2_SGE_FLAGS_END_OF_BUFFER | 1964 MPI2_SGE_FLAGS_LAST_ELEMENT | 1965 MPI2_SGE_FLAGS_END_OF_LIST | 1966 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 1967 MPI2_SGE_FLAGS_SHIFT); 1968 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) { 1969 saved_buf_len |= 1970 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 1971 MPI2_SGE_FLAGS_SHIFT); 1972 } else { 1973 saved_buf_len |= 1974 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 1975 MPI2_SGE_FLAGS_SHIFT); 1976 } 1977 sge->FlagsLength = htole32(saved_buf_len); 1978 sge->Address.Low = saved_address_low; 1979 sge->Address.High = saved_address_high; 1980 } 1981 1982 cm->cm_sglsize -= len; 1983 bcopy(sgep, cm->cm_sge, len); 1984 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 1985 return (0); 1986 } 1987 1988 /* 1989 * Add one dma segment to the scatter-gather list for a command. 1990 */ 1991 int 1992 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags, 1993 int segsleft) 1994 { 1995 MPI2_SGE_SIMPLE64 sge; 1996 1997 /* 1998 * This driver always uses 64-bit address elements for simplicity. 1999 */ 2000 bzero(&sge, sizeof(sge)); 2001 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2002 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 2003 sge.FlagsLength = htole32(len | (flags << MPI2_SGE_FLAGS_SHIFT)); 2004 mps_from_u64(pa, &sge.Address); 2005 2006 return (mps_push_sge(cm, &sge, sizeof sge, segsleft)); 2007 } 2008 2009 static void 2010 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2011 { 2012 struct mps_softc *sc; 2013 struct mps_command *cm; 2014 u_int i, dir, sflags; 2015 2016 cm = (struct mps_command *)arg; 2017 sc = cm->cm_sc; 2018 2019 /* 2020 * In this case, just print out a warning and let the chip tell the 2021 * user they did the wrong thing. 2022 */ 2023 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { 2024 mps_printf(sc, "%s: warning: busdma returned %d segments, " 2025 "more than the %d allowed\n", __func__, nsegs, 2026 cm->cm_max_segs); 2027 } 2028 2029 /* 2030 * Set up DMA direction flags. Bi-directional requests are also handled 2031 * here. In that case, both direction flags will be set. 2032 */ 2033 sflags = 0; 2034 if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) { 2035 /* 2036 * We have to add a special case for SMP passthrough, there 2037 * is no easy way to generically handle it. The first 2038 * S/G element is used for the command (therefore the 2039 * direction bit needs to be set). The second one is used 2040 * for the reply. We'll leave it to the caller to make 2041 * sure we only have two buffers. 2042 */ 2043 /* 2044 * Even though the busdma man page says it doesn't make 2045 * sense to have both direction flags, it does in this case. 2046 * We have one s/g element being accessed in each direction. 2047 */ 2048 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; 2049 2050 /* 2051 * Set the direction flag on the first buffer in the SMP 2052 * passthrough request. We'll clear it for the second one. 2053 */ 2054 sflags |= MPI2_SGE_FLAGS_DIRECTION | 2055 MPI2_SGE_FLAGS_END_OF_BUFFER; 2056 } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) { 2057 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 2058 dir = BUS_DMASYNC_PREWRITE; 2059 } else 2060 dir = BUS_DMASYNC_PREREAD; 2061 2062 for (i = 0; i < nsegs; i++) { 2063 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) { 2064 sflags &= ~MPI2_SGE_FLAGS_DIRECTION; 2065 } 2066 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, 2067 sflags, nsegs - i); 2068 if (error != 0) { 2069 /* Resource shortage, roll back! */ 2070 mps_dprint(sc, MPS_INFO, "out of chain frames\n"); 2071 cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED; 2072 mps_complete_command(cm); 2073 return; 2074 } 2075 } 2076 2077 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 2078 mps_enqueue_request(sc, cm); 2079 2080 return; 2081 } 2082 2083 static void 2084 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, 2085 int error) 2086 { 2087 mps_data_cb(arg, segs, nsegs, error); 2088 } 2089 2090 /* 2091 * This is the routine to enqueue commands ansynchronously. 2092 * Note that the only error path here is from bus_dmamap_load(), which can 2093 * return EINPROGRESS if it is waiting for resources. Other than this, it's 2094 * assumed that if you have a command in-hand, then you have enough credits 2095 * to use it. 2096 */ 2097 int 2098 mps_map_command(struct mps_softc *sc, struct mps_command *cm) 2099 { 2100 int error = 0; 2101 2102 if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) { 2103 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, 2104 &cm->cm_uio, mps_data_cb2, cm, 0); 2105 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { 2106 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, 2107 cm->cm_data, cm->cm_length, mps_data_cb, cm, 0); 2108 } else { 2109 /* Add a zero-length element as needed */ 2110 if (cm->cm_sge != NULL) 2111 mps_add_dmaseg(cm, 0, 0, 0, 1); 2112 mps_enqueue_request(sc, cm); 2113 } 2114 2115 return (error); 2116 } 2117 2118 /* 2119 * This is the routine to enqueue commands synchronously. An error of 2120 * EINPROGRESS from mps_map_command() is ignored since the command will 2121 * be executed and enqueued automatically. Other errors come from msleep(). 2122 */ 2123 int 2124 mps_wait_command(struct mps_softc *sc, struct mps_command *cm, int timeout) 2125 { 2126 int error; 2127 2128 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0); 2129 2130 cm->cm_complete = NULL; 2131 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP; 2132 error = mps_map_command(sc, cm); 2133 if ((error != 0) && (error != EINPROGRESS)) 2134 return (error); 2135 error = lksleep(cm, &sc->mps_lock, 0, "mpswait", timeout*hz); 2136 if (error == EWOULDBLOCK) 2137 error = ETIMEDOUT; 2138 return (error); 2139 } 2140 2141 /* 2142 * This is the routine to enqueue a command synchonously and poll for 2143 * completion. Its use should be rare. 2144 */ 2145 int 2146 mps_request_polled(struct mps_softc *sc, struct mps_command *cm) 2147 { 2148 int error, timeout = 0; 2149 2150 error = 0; 2151 2152 cm->cm_flags |= MPS_CM_FLAGS_POLLED; 2153 cm->cm_complete = NULL; 2154 mps_map_command(sc, cm); 2155 2156 while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) { 2157 mps_intr_locked(sc); 2158 DELAY(50 * 1000); 2159 if (timeout++ > 1000) { 2160 mps_dprint(sc, MPS_FAULT, "polling failed\n"); 2161 error = ETIMEDOUT; 2162 break; 2163 } 2164 } 2165 2166 return (error); 2167 } 2168 2169 /* 2170 * The MPT driver had a verbose interface for config pages. In this driver, 2171 * reduce it to much simplier terms, similar to the Linux driver. 2172 */ 2173 int 2174 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params) 2175 { 2176 MPI2_CONFIG_REQUEST *req; 2177 struct mps_command *cm; 2178 int error; 2179 2180 if (sc->mps_flags & MPS_FLAGS_BUSY) { 2181 return (EBUSY); 2182 } 2183 2184 cm = mps_alloc_command(sc); 2185 if (cm == NULL) { 2186 return (EBUSY); 2187 } 2188 2189 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; 2190 req->Function = MPI2_FUNCTION_CONFIG; 2191 req->Action = params->action; 2192 req->SGLFlags = 0; 2193 req->ChainOffset = 0; 2194 req->PageAddress = params->page_address; 2195 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 2196 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; 2197 2198 hdr = ¶ms->hdr.Ext; 2199 req->ExtPageType = hdr->ExtPageType; 2200 req->ExtPageLength = hdr->ExtPageLength; 2201 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 2202 req->Header.PageLength = 0; /* Must be set to zero */ 2203 req->Header.PageNumber = hdr->PageNumber; 2204 req->Header.PageVersion = hdr->PageVersion; 2205 } else { 2206 MPI2_CONFIG_PAGE_HEADER *hdr; 2207 2208 hdr = ¶ms->hdr.Struct; 2209 req->Header.PageType = hdr->PageType; 2210 req->Header.PageNumber = hdr->PageNumber; 2211 req->Header.PageLength = hdr->PageLength; 2212 req->Header.PageVersion = hdr->PageVersion; 2213 } 2214 2215 cm->cm_data = params->buffer; 2216 cm->cm_length = params->length; 2217 cm->cm_sge = &req->PageBufferSGE; 2218 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); 2219 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN; 2220 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2221 2222 cm->cm_complete_data = params; 2223 if (params->callback != NULL) { 2224 cm->cm_complete = mps_config_complete; 2225 return (mps_map_command(sc, cm)); 2226 } else { 2227 error = mps_wait_command(sc, cm, 0); 2228 if (error) { 2229 mps_dprint(sc, MPS_FAULT, 2230 "Error %d reading config page\n", error); 2231 mps_free_command(sc, cm); 2232 return (error); 2233 } 2234 mps_config_complete(sc, cm); 2235 } 2236 2237 return (0); 2238 } 2239 2240 int 2241 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params) 2242 { 2243 return (EINVAL); 2244 } 2245 2246 static void 2247 mps_config_complete(struct mps_softc *sc, struct mps_command *cm) 2248 { 2249 MPI2_CONFIG_REPLY *reply; 2250 struct mps_config_params *params; 2251 2252 params = cm->cm_complete_data; 2253 2254 if (cm->cm_data != NULL) { 2255 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, 2256 BUS_DMASYNC_POSTREAD); 2257 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 2258 } 2259 2260 /* 2261 * XXX KDM need to do more error recovery? This results in the 2262 * device in question not getting probed. 2263 */ 2264 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 2265 params->status = MPI2_IOCSTATUS_BUSY; 2266 goto done; 2267 } 2268 2269 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 2270 if (reply == NULL) { 2271 params->status = MPI2_IOCSTATUS_BUSY; 2272 goto done; 2273 } 2274 params->status = reply->IOCStatus; 2275 if (params->hdr.Ext.ExtPageType != 0) { 2276 params->hdr.Ext.ExtPageType = reply->ExtPageType; 2277 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 2278 } else { 2279 params->hdr.Struct.PageType = reply->Header.PageType; 2280 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 2281 params->hdr.Struct.PageLength = reply->Header.PageLength; 2282 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 2283 } 2284 2285 done: 2286 mps_free_command(sc, cm); 2287 if (params->callback != NULL) 2288 params->callback(sc, params); 2289 2290 return; 2291 } 2292