1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 /*- 28 * Copyright (c) 2011 LSI Corp. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 51 * 52 * LSI MPT-Fusion Host Adapter FreeBSD 53 * 54 * $FreeBSD: src/sys/dev/mps/mps.c,v 1.14 2012/01/26 18:17:21 ken Exp $ 55 */ 56 57 /* Communications core for LSI MPT2 */ 58 59 /* TODO Move headers to mpsvar */ 60 #include <sys/types.h> 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/kernel.h> 64 #include <sys/lock.h> 65 #include <sys/globaldata.h> 66 #include <sys/module.h> 67 #include <sys/bus.h> 68 #include <sys/conf.h> 69 #include <sys/bio.h> 70 #include <sys/malloc.h> 71 #include <sys/uio.h> 72 #include <sys/sysctl.h> 73 #include <sys/queue.h> 74 #include <sys/kthread.h> 75 #include <sys/endian.h> 76 #include <sys/eventhandler.h> 77 78 #include <sys/rman.h> 79 80 #include <bus/pci/pcivar.h> 81 82 #include <bus/cam/scsi/scsi_all.h> 83 84 #include <dev/raid/mps/mpi/mpi2_type.h> 85 #include <dev/raid/mps/mpi/mpi2.h> 86 #include <dev/raid/mps/mpi/mpi2_ioc.h> 87 #include <dev/raid/mps/mpi/mpi2_sas.h> 88 #include <dev/raid/mps/mpi/mpi2_cnfg.h> 89 #include <dev/raid/mps/mpi/mpi2_init.h> 90 #include <dev/raid/mps/mpi/mpi2_tool.h> 91 #include <dev/raid/mps/mps_ioctl.h> 92 #include <dev/raid/mps/mpsvar.h> 93 #include <dev/raid/mps/mps_table.h> 94 95 static int mps_diag_reset(struct mps_softc *sc); 96 static int mps_init_queues(struct mps_softc *sc); 97 static int mps_message_unit_reset(struct mps_softc *sc); 98 static int mps_transition_operational(struct mps_softc *sc); 99 static void mps_startup(void *arg); 100 static int mps_send_iocinit(struct mps_softc *sc); 101 static int mps_attach_log(struct mps_softc *sc); 102 static __inline void mps_complete_command(struct mps_command *cm); 103 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, 104 MPI2_EVENT_NOTIFICATION_REPLY *reply); 105 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm); 106 static void mps_periodic(void *); 107 static int mps_reregister_events(struct mps_softc *sc); 108 static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm); 109 110 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters"); 111 112 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory"); 113 114 /* 115 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of 116 * any state and back to its initialization state machine. 117 */ 118 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d }; 119 120 static int 121 mps_diag_reset(struct mps_softc *sc) 122 { 123 uint32_t reg; 124 int i, error, tries = 0; 125 126 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 127 128 /* Clear any pending interrupts */ 129 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 130 131 /* Push the magic sequence */ 132 error = ETIMEDOUT; 133 while (tries++ < 20) { 134 for (i = 0; i < sizeof(mpt2_reset_magic); i++) 135 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 136 mpt2_reset_magic[i]); 137 138 DELAY(100 * 1000); 139 140 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); 141 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) { 142 error = 0; 143 break; 144 } 145 } 146 if (error) 147 return (error); 148 149 /* Send the actual reset. XXX need to refresh the reg? */ 150 mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, 151 reg | MPI2_DIAG_RESET_ADAPTER); 152 153 /* Wait up to 300 seconds in 50ms intervals */ 154 error = ETIMEDOUT; 155 for (i = 0; i < 60000; i++) { 156 DELAY(50000); 157 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 158 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { 159 error = 0; 160 break; 161 } 162 } 163 if (error) 164 return (error); 165 166 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); 167 168 return (0); 169 } 170 171 static int 172 mps_message_unit_reset(struct mps_softc *sc) 173 { 174 175 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 176 177 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, 178 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << 179 MPI2_DOORBELL_FUNCTION_SHIFT); 180 DELAY(50000); 181 182 return (0); 183 } 184 185 static int 186 mps_transition_ready(struct mps_softc *sc) 187 { 188 uint32_t reg, state; 189 int error, tries = 0; 190 191 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 192 193 error = 0; 194 while (tries++ < 5) { 195 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 196 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg); 197 198 /* 199 * Ensure the IOC is ready to talk. If it's not, try 200 * resetting it. 201 */ 202 if (reg & MPI2_DOORBELL_USED) { 203 mps_diag_reset(sc); 204 DELAY(50000); 205 continue; 206 } 207 208 /* Is the adapter owned by another peer? */ 209 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == 210 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { 211 device_printf(sc->mps_dev, "IOC is under the control " 212 "of another peer host, aborting initialization.\n"); 213 return (ENXIO); 214 } 215 216 state = reg & MPI2_IOC_STATE_MASK; 217 if (state == MPI2_IOC_STATE_READY) { 218 /* Ready to go! */ 219 error = 0; 220 break; 221 } else if (state == MPI2_IOC_STATE_FAULT) { 222 mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n", 223 state & MPI2_DOORBELL_FAULT_CODE_MASK); 224 mps_diag_reset(sc); 225 } else if (state == MPI2_IOC_STATE_OPERATIONAL) { 226 /* Need to take ownership */ 227 mps_message_unit_reset(sc); 228 } else if (state == MPI2_IOC_STATE_RESET) { 229 /* Wait a bit, IOC might be in transition */ 230 mps_dprint(sc, MPS_FAULT, 231 "IOC in unexpected reset state\n"); 232 } else { 233 mps_dprint(sc, MPS_FAULT, 234 "IOC in unknown state 0x%x\n", state); 235 error = EINVAL; 236 break; 237 } 238 239 /* Wait 50ms for things to settle down. */ 240 DELAY(50000); 241 } 242 243 if (error) 244 device_printf(sc->mps_dev, "Cannot transition IOC to ready\n"); 245 246 return (error); 247 } 248 249 static int 250 mps_transition_operational(struct mps_softc *sc) 251 { 252 uint32_t reg, state; 253 int error; 254 255 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 256 257 error = 0; 258 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 259 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg); 260 261 state = reg & MPI2_IOC_STATE_MASK; 262 if (state != MPI2_IOC_STATE_READY) { 263 if ((error = mps_transition_ready(sc)) != 0) { 264 mps_dprint(sc, MPS_FAULT, 265 "%s failed to transition ready\n", __func__); 266 return (error); 267 } 268 } 269 270 error = mps_send_iocinit(sc); 271 return (error); 272 } 273 274 /* 275 * XXX Some of this should probably move to mps.c 276 * 277 * The terms diag reset and hard reset are used interchangeably in the MPI 278 * docs to mean resetting the controller chip. In this code diag reset 279 * cleans everything up, and the hard reset function just sends the reset 280 * sequence to the chip. This should probably be refactored so that every 281 * subsystem gets a reset notification of some sort, and can clean up 282 * appropriately. 283 */ 284 int 285 mps_reinit(struct mps_softc *sc) 286 { 287 int error; 288 uint32_t db; 289 290 mps_printf(sc, "%s sc %p\n", __func__, sc); 291 292 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0); 293 294 if (sc->mps_flags & MPS_FLAGS_DIAGRESET) { 295 mps_printf(sc, "%s reset already in progress\n", __func__); 296 return 0; 297 } 298 299 /* make sure the completion callbacks can recognize they're getting 300 * a NULL cm_reply due to a reset. 301 */ 302 sc->mps_flags |= MPS_FLAGS_DIAGRESET; 303 304 mps_printf(sc, "%s mask interrupts\n", __func__); 305 mps_mask_intr(sc); 306 307 error = mps_diag_reset(sc); 308 if (error != 0) { 309 panic("%s hard reset failed with error %d", 310 __func__, error); 311 } 312 313 /* Restore the PCI state, including the MSI-X registers */ 314 mps_pci_restore(sc); 315 316 /* Give the I/O subsystem special priority to get itself prepared */ 317 mpssas_handle_reinit(sc); 318 319 /* reinitialize queues after the reset */ 320 bzero(sc->free_queue, sc->fqdepth * 4); 321 mps_init_queues(sc); 322 323 /* get the chip out of the reset state */ 324 error = mps_transition_operational(sc); 325 if (error != 0) 326 panic("%s transition operational failed with error %d", 327 __func__, error); 328 329 /* Reinitialize the reply queue. This is delicate because this 330 * function is typically invoked by task mgmt completion callbacks, 331 * which are called by the interrupt thread. We need to make sure 332 * the interrupt handler loop will exit when we return to it, and 333 * that it will recognize the indexes we've changed. 334 */ 335 sc->replypostindex = 0; 336 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 337 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); 338 339 db = mps_regread(sc, MPI2_DOORBELL_OFFSET); 340 mps_printf(sc, "%s doorbell 0x%08x\n", __func__, db); 341 342 mps_printf(sc, "%s unmask interrupts post %u free %u\n", __func__, 343 sc->replypostindex, sc->replyfreeindex); 344 345 mps_unmask_intr(sc); 346 347 mps_printf(sc, "%s restarting post %u free %u\n", __func__, 348 sc->replypostindex, sc->replyfreeindex); 349 350 /* restart will reload the event masks clobbered by the reset, and 351 * then enable the port. 352 */ 353 mps_reregister_events(sc); 354 355 /* the end of discovery will release the simq, so we're done. */ 356 mps_printf(sc, "%s finished sc %p post %u free %u\n", 357 __func__, sc, 358 sc->replypostindex, sc->replyfreeindex); 359 360 sc->mps_flags &= ~MPS_FLAGS_DIAGRESET; 361 362 return 0; 363 } 364 365 /* Wait for the chip to ACK a word that we've put into its FIFO */ 366 static int 367 mps_wait_db_ack(struct mps_softc *sc) 368 { 369 int retry; 370 371 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { 372 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 373 MPI2_HIS_SYS2IOC_DB_STATUS) == 0) 374 return (0); 375 DELAY(2000); 376 } 377 return (ETIMEDOUT); 378 } 379 380 /* Wait for the chip to signal that the next word in its FIFO can be fetched */ 381 static int 382 mps_wait_db_int(struct mps_softc *sc) 383 { 384 int retry; 385 386 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { 387 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 388 MPI2_HIS_IOC2SYS_DB_STATUS) != 0) 389 return (0); 390 DELAY(2000); 391 } 392 return (ETIMEDOUT); 393 } 394 395 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */ 396 static int 397 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, 398 int req_sz, int reply_sz, int timeout) 399 { 400 uint32_t *data32; 401 uint16_t *data16; 402 int i, count, ioc_sz, residual; 403 404 /* Step 1 */ 405 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 406 407 /* Step 2 */ 408 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 409 return (EBUSY); 410 411 /* Step 3 412 * Announce that a message is coming through the doorbell. Messages 413 * are pushed at 32bit words, so round up if needed. 414 */ 415 count = (req_sz + 3) / 4; 416 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, 417 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | 418 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); 419 420 /* Step 4 */ 421 if (mps_wait_db_int(sc) || 422 (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { 423 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n"); 424 return (ENXIO); 425 } 426 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 427 if (mps_wait_db_ack(sc) != 0) { 428 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n"); 429 return (ENXIO); 430 } 431 432 /* Step 5 */ 433 /* Clock out the message data synchronously in 32-bit dwords*/ 434 data32 = (uint32_t *)req; 435 for (i = 0; i < count; i++) { 436 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, data32[i]); 437 if (mps_wait_db_ack(sc) != 0) { 438 mps_dprint(sc, MPS_FAULT, 439 "Timeout while writing doorbell\n"); 440 return (ENXIO); 441 } 442 } 443 444 /* Step 6 */ 445 /* Clock in the reply in 16-bit words. The total length of the 446 * message is always in the 4th byte, so clock out the first 2 words 447 * manually, then loop the rest. 448 */ 449 data16 = (uint16_t *)reply; 450 if (mps_wait_db_int(sc) != 0) { 451 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n"); 452 return (ENXIO); 453 } 454 data16[0] = 455 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 456 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 457 if (mps_wait_db_int(sc) != 0) { 458 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n"); 459 return (ENXIO); 460 } 461 data16[1] = 462 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 463 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 464 465 /* Number of 32bit words in the message */ 466 ioc_sz = reply->MsgLength; 467 468 /* 469 * Figure out how many 16bit words to clock in without overrunning. 470 * The precision loss with dividing reply_sz can safely be 471 * ignored because the messages can only be multiples of 32bits. 472 */ 473 residual = 0; 474 count = MIN((reply_sz / 4), ioc_sz) * 2; 475 if (count < ioc_sz * 2) { 476 residual = ioc_sz * 2 - count; 477 mps_dprint(sc, MPS_FAULT, "Driver error, throwing away %d " 478 "residual message words\n", residual); 479 } 480 481 for (i = 2; i < count; i++) { 482 if (mps_wait_db_int(sc) != 0) { 483 mps_dprint(sc, MPS_FAULT, 484 "Timeout reading doorbell %d\n", i); 485 return (ENXIO); 486 } 487 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) & 488 MPI2_DOORBELL_DATA_MASK; 489 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 490 } 491 492 /* 493 * Pull out residual words that won't fit into the provided buffer. 494 * This keeps the chip from hanging due to a driver programming 495 * error. 496 */ 497 while (residual--) { 498 if (mps_wait_db_int(sc) != 0) { 499 mps_dprint(sc, MPS_FAULT, 500 "Timeout reading doorbell\n"); 501 return (ENXIO); 502 } 503 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET); 504 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 505 } 506 507 /* Step 7 */ 508 if (mps_wait_db_int(sc) != 0) { 509 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n"); 510 return (ENXIO); 511 } 512 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 513 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n"); 514 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 515 516 return (0); 517 } 518 519 static void 520 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm) 521 { 522 523 mps_dprint(sc, MPS_TRACE, "%s SMID %u cm %p ccb %p\n", __func__, 524 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); 525 526 if ((sc->mps_flags & MPS_FLAGS_ATTACH_DONE) && 527 !(sc->mps_flags & MPS_FLAGS_SHUTDOWN)) { 528 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0); 529 } 530 531 if (++sc->io_cmds_active > sc->io_cmds_highwater) 532 sc->io_cmds_highwater++; 533 534 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, 535 cm->cm_desc.Words.Low); 536 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, 537 cm->cm_desc.Words.High); 538 } 539 540 /* 541 * Just the FACTS, ma'am. 542 */ 543 static int 544 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts) 545 { 546 MPI2_DEFAULT_REPLY *reply; 547 MPI2_IOC_FACTS_REQUEST request; 548 int error, req_sz, reply_sz; 549 550 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 551 552 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); 553 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); 554 reply = (MPI2_DEFAULT_REPLY *)facts; 555 556 bzero(&request, req_sz); 557 request.Function = MPI2_FUNCTION_IOC_FACTS; 558 error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5); 559 560 return (error); 561 } 562 563 static int 564 mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port) 565 { 566 MPI2_PORT_FACTS_REQUEST *request; 567 MPI2_PORT_FACTS_REPLY *reply; 568 struct mps_command *cm; 569 int error; 570 571 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 572 573 if ((cm = mps_alloc_command(sc)) == NULL) 574 return (EBUSY); 575 request = (MPI2_PORT_FACTS_REQUEST *)cm->cm_req; 576 request->Function = MPI2_FUNCTION_PORT_FACTS; 577 request->PortNumber = port; 578 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 579 cm->cm_data = NULL; 580 error = mps_request_polled(sc, cm); 581 reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply; 582 if (reply == NULL) { 583 mps_printf(sc, "%s NULL reply\n", __func__); 584 goto done; 585 } 586 if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { 587 mps_printf(sc, 588 "%s error %d iocstatus 0x%x iocloginfo 0x%x type 0x%x\n", 589 __func__, error, reply->IOCStatus, reply->IOCLogInfo, 590 reply->PortType); 591 error = ENXIO; 592 } 593 bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY)); 594 done: 595 mps_free_command(sc, cm); 596 597 return (error); 598 } 599 600 static int 601 mps_send_iocinit(struct mps_softc *sc) 602 { 603 MPI2_IOC_INIT_REQUEST init; 604 MPI2_DEFAULT_REPLY reply; 605 int req_sz, reply_sz, error; 606 607 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 608 609 req_sz = sizeof(MPI2_IOC_INIT_REQUEST); 610 reply_sz = sizeof(MPI2_IOC_INIT_REPLY); 611 bzero(&init, req_sz); 612 bzero(&reply, reply_sz); 613 614 /* 615 * Fill in the init block. Note that most addresses are 616 * deliberately in the lower 32bits of memory. This is a micro- 617 * optimzation for PCI/PCIX, though it's not clear if it helps PCIe. 618 */ 619 init.Function = MPI2_FUNCTION_IOC_INIT; 620 init.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 621 init.MsgVersion = MPI2_VERSION; 622 init.HeaderVersion = MPI2_HEADER_VERSION; 623 init.SystemRequestFrameSize = sc->facts->IOCRequestFrameSize; 624 init.ReplyDescriptorPostQueueDepth = sc->pqdepth; 625 init.ReplyFreeQueueDepth = sc->fqdepth; 626 init.SenseBufferAddressHigh = 0; 627 init.SystemReplyAddressHigh = 0; 628 init.SystemRequestFrameBaseAddress.High = 0; 629 init.SystemRequestFrameBaseAddress.Low = (uint32_t)sc->req_busaddr; 630 init.ReplyDescriptorPostQueueAddress.High = 0; 631 init.ReplyDescriptorPostQueueAddress.Low = (uint32_t)sc->post_busaddr; 632 init.ReplyFreeQueueAddress.High = 0; 633 init.ReplyFreeQueueAddress.Low = (uint32_t)sc->free_busaddr; 634 init.TimeStamp.High = 0; 635 init.TimeStamp.Low = (uint32_t)time_uptime; 636 637 error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5); 638 if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 639 error = ENXIO; 640 641 mps_dprint(sc, MPS_INFO, "IOCInit status= 0x%x\n", reply.IOCStatus); 642 return (error); 643 } 644 645 void 646 mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 647 { 648 bus_addr_t *addr; 649 650 addr = arg; 651 *addr = segs[0].ds_addr; 652 } 653 654 static int 655 mps_alloc_queues(struct mps_softc *sc) 656 { 657 bus_addr_t queues_busaddr; 658 uint8_t *queues; 659 int qsize, fqsize, pqsize; 660 661 /* 662 * The reply free queue contains 4 byte entries in multiples of 16 and 663 * aligned on a 16 byte boundary. There must always be an unused entry. 664 * This queue supplies fresh reply frames for the firmware to use. 665 * 666 * The reply descriptor post queue contains 8 byte entries in 667 * multiples of 16 and aligned on a 16 byte boundary. This queue 668 * contains filled-in reply frames sent from the firmware to the host. 669 * 670 * These two queues are allocated together for simplicity. 671 */ 672 sc->fqdepth = roundup2((sc->num_replies + 1), 16); 673 sc->pqdepth = roundup2((sc->num_replies + 1), 16); 674 fqsize= sc->fqdepth * 4; 675 pqsize = sc->pqdepth * 8; 676 qsize = fqsize + pqsize; 677 678 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 679 16, 0, /* algnmnt, boundary */ 680 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 681 BUS_SPACE_MAXADDR, /* highaddr */ 682 qsize, /* maxsize */ 683 1, /* nsegments */ 684 qsize, /* maxsegsize */ 685 0, /* flags */ 686 &sc->queues_dmat)) { 687 device_printf(sc->mps_dev, "Cannot allocate queues DMA tag\n"); 688 return (ENOMEM); 689 } 690 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, 691 &sc->queues_map)) { 692 device_printf(sc->mps_dev, "Cannot allocate queues memory\n"); 693 return (ENOMEM); 694 } 695 bzero(queues, qsize); 696 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, 697 mps_memaddr_cb, &queues_busaddr, 0); 698 699 sc->free_queue = (uint32_t *)queues; 700 sc->free_busaddr = queues_busaddr; 701 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); 702 sc->post_busaddr = queues_busaddr + fqsize; 703 704 return (0); 705 } 706 707 static int 708 mps_alloc_replies(struct mps_softc *sc) 709 { 710 int rsize, num_replies; 711 712 /* 713 * sc->num_replies should be one less than sc->fqdepth. We need to 714 * allocate space for sc->fqdepth replies, but only sc->num_replies 715 * replies can be used at once. 716 */ 717 num_replies = max(sc->fqdepth, sc->num_replies); 718 719 rsize = sc->facts->ReplyFrameSize * num_replies * 4; 720 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 721 4, 0, /* algnmnt, boundary */ 722 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 723 BUS_SPACE_MAXADDR, /* highaddr */ 724 rsize, /* maxsize */ 725 1, /* nsegments */ 726 rsize, /* maxsegsize */ 727 0, /* flags */ 728 &sc->reply_dmat)) { 729 device_printf(sc->mps_dev, "Cannot allocate replies DMA tag\n"); 730 return (ENOMEM); 731 } 732 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, 733 BUS_DMA_NOWAIT, &sc->reply_map)) { 734 device_printf(sc->mps_dev, "Cannot allocate replies memory\n"); 735 return (ENOMEM); 736 } 737 bzero(sc->reply_frames, rsize); 738 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, 739 mps_memaddr_cb, &sc->reply_busaddr, 0); 740 741 return (0); 742 } 743 744 static int 745 mps_alloc_requests(struct mps_softc *sc) 746 { 747 struct mps_command *cm; 748 struct mps_chain *chain; 749 int i, rsize, nsegs; 750 751 rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4; 752 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 753 16, 0, /* algnmnt, boundary */ 754 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 755 BUS_SPACE_MAXADDR, /* highaddr */ 756 rsize, /* maxsize */ 757 1, /* nsegments */ 758 rsize, /* maxsegsize */ 759 0, /* flags */ 760 &sc->req_dmat)) { 761 device_printf(sc->mps_dev, "Cannot allocate request DMA tag\n"); 762 return (ENOMEM); 763 } 764 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, 765 BUS_DMA_NOWAIT, &sc->req_map)) { 766 device_printf(sc->mps_dev, "Cannot allocate request memory\n"); 767 return (ENOMEM); 768 } 769 bzero(sc->req_frames, rsize); 770 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, 771 mps_memaddr_cb, &sc->req_busaddr, 0); 772 773 rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4; 774 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 775 16, 0, /* algnmnt, boundary */ 776 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 777 BUS_SPACE_MAXADDR, /* highaddr */ 778 rsize, /* maxsize */ 779 1, /* nsegments */ 780 rsize, /* maxsegsize */ 781 0, /* flags */ 782 &sc->chain_dmat)) { 783 device_printf(sc->mps_dev, "Cannot allocate chain DMA tag\n"); 784 return (ENOMEM); 785 } 786 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, 787 BUS_DMA_NOWAIT, &sc->chain_map)) { 788 device_printf(sc->mps_dev, "Cannot allocate chain memory\n"); 789 return (ENOMEM); 790 } 791 bzero(sc->chain_frames, rsize); 792 bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, 793 mps_memaddr_cb, &sc->chain_busaddr, 0); 794 795 rsize = MPS_SENSE_LEN * sc->num_reqs; 796 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 797 1, 0, /* algnmnt, boundary */ 798 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 799 BUS_SPACE_MAXADDR, /* highaddr */ 800 rsize, /* maxsize */ 801 1, /* nsegments */ 802 rsize, /* maxsegsize */ 803 0, /* flags */ 804 &sc->sense_dmat)) { 805 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n"); 806 return (ENOMEM); 807 } 808 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, 809 BUS_DMA_NOWAIT, &sc->sense_map)) { 810 device_printf(sc->mps_dev, "Cannot allocate sense memory\n"); 811 return (ENOMEM); 812 } 813 bzero(sc->sense_frames, rsize); 814 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, 815 mps_memaddr_cb, &sc->sense_busaddr, 0); 816 817 sc->chains = kmalloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2, 818 M_WAITOK | M_ZERO); 819 for (i = 0; i < sc->max_chains; i++) { 820 chain = &sc->chains[i]; 821 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + 822 i * sc->facts->IOCRequestFrameSize * 4); 823 chain->chain_busaddr = sc->chain_busaddr + 824 i * sc->facts->IOCRequestFrameSize * 4; 825 mps_free_chain(sc, chain); 826 sc->chain_free_lowwater++; 827 } 828 829 /* XXX Need to pick a more precise value */ 830 nsegs = (MAXPHYS / PAGE_SIZE) + 1; 831 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 832 1, 0, /* algnmnt, boundary */ 833 BUS_SPACE_MAXADDR, /* lowaddr */ 834 BUS_SPACE_MAXADDR, /* highaddr */ 835 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 836 nsegs, /* nsegments */ 837 BUS_SPACE_MAXSIZE_24BIT,/* maxsegsize */ 838 BUS_DMA_ALLOCNOW, /* flags */ 839 &sc->buffer_dmat)) { 840 device_printf(sc->mps_dev, "Cannot allocate buffer DMA tag\n"); 841 return (ENOMEM); 842 } 843 844 /* 845 * SMID 0 cannot be used as a free command per the firmware spec. 846 * Just drop that command instead of risking accounting bugs. 847 */ 848 sc->commands = kmalloc(sizeof(struct mps_command) * sc->num_reqs, 849 M_MPT2, M_WAITOK | M_ZERO); 850 for (i = 1; i < sc->num_reqs; i++) { 851 cm = &sc->commands[i]; 852 cm->cm_req = sc->req_frames + 853 i * sc->facts->IOCRequestFrameSize * 4; 854 cm->cm_req_busaddr = sc->req_busaddr + 855 i * sc->facts->IOCRequestFrameSize * 4; 856 cm->cm_sense = &sc->sense_frames[i]; 857 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN; 858 cm->cm_desc.Default.SMID = i; 859 cm->cm_sc = sc; 860 TAILQ_INIT(&cm->cm_chain_list); 861 callout_init_mp(&cm->cm_callout); 862 863 /* XXX Is a failure here a critical problem? */ 864 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0) 865 if (i <= sc->facts->HighPriorityCredit) 866 mps_free_high_priority_command(sc, cm); 867 else 868 mps_free_command(sc, cm); 869 else { 870 panic("failed to allocate command %d", i); 871 sc->num_reqs = i; 872 break; 873 } 874 } 875 876 return (0); 877 } 878 879 static int 880 mps_init_queues(struct mps_softc *sc) 881 { 882 int i; 883 884 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); 885 886 /* 887 * According to the spec, we need to use one less reply than we 888 * have space for on the queue. So sc->num_replies (the number we 889 * use) should be less than sc->fqdepth (allocated size). 890 */ 891 if (sc->num_replies >= sc->fqdepth) 892 return (EINVAL); 893 894 /* 895 * Initialize all of the free queue entries. 896 */ 897 for (i = 0; i < sc->fqdepth; i++) 898 sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4); 899 sc->replyfreeindex = sc->num_replies; 900 901 return (0); 902 } 903 904 /* Get the driver parameter tunables. Lowest priority are the driver defaults. 905 * Next are the global settings, if they exist. Highest are the per-unit 906 * settings, if they exist. 907 */ 908 static void 909 mps_get_tunables(struct mps_softc *sc) 910 { 911 char tmpstr[80]; 912 913 /* XXX default to some debugging for now */ 914 sc->mps_debug = MPS_FAULT; 915 #if 0 /* XXX swildner */ 916 sc->disable_msix = 0; 917 #endif 918 sc->enable_msi = 1; 919 sc->max_chains = MPS_CHAIN_FRAMES; 920 921 /* 922 * Grab the global variables. 923 */ 924 TUNABLE_INT_FETCH("hw.mps.debug_level", &sc->mps_debug); 925 #if 0 /* XXX swildner */ 926 TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix); 927 #endif 928 TUNABLE_INT_FETCH("hw.mps.msi.enable", &sc->enable_msi); 929 TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains); 930 931 /* Grab the unit-instance variables */ 932 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level", 933 device_get_unit(sc->mps_dev)); 934 TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug); 935 936 #if 0 /* XXX swildner */ 937 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix", 938 device_get_unit(sc->mps_dev)); 939 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); 940 #endif 941 942 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.enable_msi", 943 device_get_unit(sc->mps_dev)); 944 TUNABLE_INT_FETCH(tmpstr, &sc->enable_msi); 945 946 ksnprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains", 947 device_get_unit(sc->mps_dev)); 948 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); 949 } 950 951 static void 952 mps_setup_sysctl(struct mps_softc *sc) 953 { 954 struct sysctl_ctx_list *sysctl_ctx = NULL; 955 struct sysctl_oid *sysctl_tree = NULL; 956 char tmpstr[80], tmpstr2[80]; 957 958 /* 959 * Setup the sysctl variable so the user can change the debug level 960 * on the fly. 961 */ 962 ksnprintf(tmpstr, sizeof(tmpstr), "MPS controller %d", 963 device_get_unit(sc->mps_dev)); 964 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev)); 965 966 sysctl_ctx_init(&sc->sysctl_ctx); 967 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 968 SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, 969 CTLFLAG_RD, 0, tmpstr); 970 if (sc->sysctl_tree == NULL) 971 return; 972 sysctl_ctx = &sc->sysctl_ctx; 973 sysctl_tree = sc->sysctl_tree; 974 975 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 976 OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0, 977 "mps debug level"); 978 979 #if 0 /* XXX swildner */ 980 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 981 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, 982 "Disable the use of MSI-X interrupts"); 983 #endif 984 985 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 986 OID_AUTO, "enable_msi", CTLFLAG_RD, &sc->enable_msi, 0, 987 "Enable the use of MSI interrupts"); 988 989 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 990 OID_AUTO, "firmware_version", CTLFLAG_RW, &sc->fw_version, 991 strlen(sc->fw_version), "firmware version"); 992 993 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 994 OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION, 995 strlen(MPS_DRIVER_VERSION), "driver version"); 996 997 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 998 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 999 &sc->io_cmds_active, 0, "number of currently active commands"); 1000 1001 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1002 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1003 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1004 1005 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1006 OID_AUTO, "chain_free", CTLFLAG_RD, 1007 &sc->chain_free, 0, "number of free chain elements"); 1008 1009 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1010 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1011 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1012 1013 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1014 OID_AUTO, "max_chains", CTLFLAG_RD, 1015 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1016 1017 #if 0 /* __FreeBSD_version >= 900030 */ 1018 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1019 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1020 &sc->chain_alloc_fail, "chain allocation failures"); 1021 #endif //FreeBSD_version >= 900030 1022 } 1023 1024 int 1025 mps_attach(struct mps_softc *sc) 1026 { 1027 int i, error; 1028 1029 mps_get_tunables(sc); 1030 1031 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1032 1033 lockinit(&sc->mps_lock, "MPT2SAS lock", 0, LK_CANRECURSE); 1034 callout_init_mp(&sc->periodic); 1035 TAILQ_INIT(&sc->event_list); 1036 1037 if ((error = mps_transition_ready(sc)) != 0) { 1038 mps_printf(sc, "%s failed to transition ready\n", __func__); 1039 return (error); 1040 } 1041 1042 sc->facts = kmalloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2, 1043 M_ZERO|M_WAITOK); 1044 if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) 1045 return (error); 1046 1047 mps_print_iocfacts(sc, sc->facts); 1048 1049 ksnprintf(sc->fw_version, sizeof(sc->fw_version), 1050 "%02d.%02d.%02d.%02d", 1051 sc->facts->FWVersion.Struct.Major, 1052 sc->facts->FWVersion.Struct.Minor, 1053 sc->facts->FWVersion.Struct.Unit, 1054 sc->facts->FWVersion.Struct.Dev); 1055 1056 mps_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version, 1057 MPS_DRIVER_VERSION); 1058 mps_printf(sc, "IOCCapabilities: %pb%i\n", 1059 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" 1060 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" 1061 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc", 1062 sc->facts->IOCCapabilities); 1063 1064 /* 1065 * If the chip doesn't support event replay then a hard reset will be 1066 * required to trigger a full discovery. Do the reset here then 1067 * retransition to Ready. A hard reset might have already been done, 1068 * but it doesn't hurt to do it again. 1069 */ 1070 if ((sc->facts->IOCCapabilities & 1071 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) { 1072 mps_diag_reset(sc); 1073 if ((error = mps_transition_ready(sc)) != 0) 1074 return (error); 1075 } 1076 1077 /* 1078 * Set flag if IR Firmware is loaded. 1079 */ 1080 if (sc->facts->IOCCapabilities & 1081 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) 1082 sc->ir_firmware = 1; 1083 1084 /* 1085 * Check if controller supports FW diag buffers and set flag to enable 1086 * each type. 1087 */ 1088 if (sc->facts->IOCCapabilities & 1089 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 1090 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].enabled = 1091 TRUE; 1092 if (sc->facts->IOCCapabilities & 1093 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) 1094 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].enabled = 1095 TRUE; 1096 if (sc->facts->IOCCapabilities & 1097 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) 1098 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].enabled = 1099 TRUE; 1100 1101 /* 1102 * Set flag if EEDP is supported and if TLR is supported. 1103 */ 1104 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) 1105 sc->eedp_enabled = TRUE; 1106 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) 1107 sc->control_TLR = TRUE; 1108 1109 /* 1110 * Size the queues. Since the reply queues always need one free entry, 1111 * we'll just deduct one reply message here. 1112 */ 1113 sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit); 1114 sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES, 1115 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; 1116 TAILQ_INIT(&sc->req_list); 1117 TAILQ_INIT(&sc->high_priority_req_list); 1118 TAILQ_INIT(&sc->chain_list); 1119 TAILQ_INIT(&sc->tm_list); 1120 1121 if (((error = mps_alloc_queues(sc)) != 0) || 1122 ((error = mps_alloc_replies(sc)) != 0) || 1123 ((error = mps_alloc_requests(sc)) != 0)) { 1124 mps_printf(sc, "%s failed to alloc\n", __func__); 1125 mps_free(sc); 1126 return (error); 1127 } 1128 1129 if (((error = mps_init_queues(sc)) != 0) || 1130 ((error = mps_transition_operational(sc)) != 0)) { 1131 mps_printf(sc, "%s failed to transition operational\n", __func__); 1132 mps_free(sc); 1133 return (error); 1134 } 1135 1136 /* 1137 * Finish the queue initialization. 1138 * These are set here instead of in mps_init_queues() because the 1139 * IOC resets these values during the state transition in 1140 * mps_transition_operational(). The free index is set to 1 1141 * because the corresponding index in the IOC is set to 0, and the 1142 * IOC treats the queues as full if both are set to the same value. 1143 * Hence the reason that the queue can't hold all of the possible 1144 * replies. 1145 */ 1146 sc->replypostindex = 0; 1147 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 1148 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); 1149 1150 sc->pfacts = kmalloc(sizeof(MPI2_PORT_FACTS_REPLY) * 1151 sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK); 1152 for (i = 0; i < sc->facts->NumberOfPorts; i++) { 1153 if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) { 1154 mps_printf(sc, "%s failed to get portfacts for port %d\n", 1155 __func__, i); 1156 mps_free(sc); 1157 return (error); 1158 } 1159 mps_print_portfacts(sc, &sc->pfacts[i]); 1160 } 1161 1162 /* Attach the subsystems so they can prepare their event masks. */ 1163 /* XXX Should be dynamic so that IM/IR and user modules can attach */ 1164 if (((error = mps_attach_log(sc)) != 0) || 1165 ((error = mps_attach_sas(sc)) != 0) || 1166 ((error = mps_attach_user(sc)) != 0)) { 1167 mps_printf(sc, "%s failed to attach all subsystems: error %d\n", 1168 __func__, error); 1169 mps_free(sc); 1170 return (error); 1171 } 1172 1173 if ((error = mps_pci_setup_interrupts(sc)) != 0) { 1174 mps_printf(sc, "%s failed to setup interrupts\n", __func__); 1175 mps_free(sc); 1176 return (error); 1177 } 1178 1179 /* 1180 * The static page function currently read is ioc page8. Others can be 1181 * added in future. 1182 */ 1183 mps_base_static_config_pages(sc); 1184 1185 /* Start the periodic watchdog check on the IOC Doorbell */ 1186 mps_periodic(sc); 1187 1188 /* 1189 * The portenable will kick off discovery events that will drive the 1190 * rest of the initialization process. The CAM/SAS module will 1191 * hold up the boot sequence until discovery is complete. 1192 */ 1193 sc->mps_ich.ich_func = mps_startup; 1194 sc->mps_ich.ich_arg = sc; 1195 sc->mps_ich.ich_desc = "mps"; 1196 if (config_intrhook_establish(&sc->mps_ich) != 0) { 1197 mps_dprint(sc, MPS_FAULT, "Cannot establish MPS config hook\n"); 1198 error = EINVAL; 1199 } 1200 1201 /* 1202 * Allow IR to shutdown gracefully when shutdown occurs. 1203 */ 1204 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 1205 mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 1206 1207 if (sc->shutdown_eh == NULL) 1208 mps_dprint(sc, MPS_FAULT, "shutdown event registration " 1209 "failed\n"); 1210 1211 mps_setup_sysctl(sc); 1212 1213 sc->mps_flags |= MPS_FLAGS_ATTACH_DONE; 1214 1215 return (error); 1216 } 1217 1218 /* Run through any late-start handlers. */ 1219 static void 1220 mps_startup(void *arg) 1221 { 1222 struct mps_softc *sc; 1223 1224 sc = (struct mps_softc *)arg; 1225 1226 mps_lock(sc); 1227 mps_unmask_intr(sc); 1228 /* initialize device mapping tables */ 1229 mps_mapping_initialize(sc); 1230 mpssas_startup(sc); 1231 mps_unlock(sc); 1232 } 1233 1234 /* Periodic watchdog. Is called with the driver lock already held. */ 1235 static void 1236 mps_periodic(void *arg) 1237 { 1238 struct mps_softc *sc; 1239 uint32_t db; 1240 1241 sc = (struct mps_softc *)arg; 1242 mps_lock(sc); 1243 if (sc->mps_flags & MPS_FLAGS_SHUTDOWN) { 1244 mps_unlock(sc); 1245 return; 1246 } 1247 1248 db = mps_regread(sc, MPI2_DOORBELL_OFFSET); 1249 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 1250 device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db); 1251 1252 mps_reinit(sc); 1253 } 1254 1255 callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc); 1256 mps_unlock(sc); 1257 } 1258 1259 static void 1260 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data, 1261 MPI2_EVENT_NOTIFICATION_REPLY *event) 1262 { 1263 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 1264 1265 mps_print_event(sc, event); 1266 1267 switch (event->Event) { 1268 case MPI2_EVENT_LOG_DATA: 1269 device_printf(sc->mps_dev, "MPI2_EVENT_LOG_DATA:\n"); 1270 hexdump(event->EventData, event->EventDataLength, NULL, 0); 1271 break; 1272 case MPI2_EVENT_LOG_ENTRY_ADDED: 1273 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 1274 mps_dprint(sc, MPS_INFO, "MPI2_EVENT_LOG_ENTRY_ADDED event " 1275 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 1276 entry->LogSequence); 1277 break; 1278 default: 1279 break; 1280 } 1281 return; 1282 } 1283 1284 static int 1285 mps_attach_log(struct mps_softc *sc) 1286 { 1287 uint8_t events[16]; 1288 1289 bzero(events, 16); 1290 setbit(events, MPI2_EVENT_LOG_DATA); 1291 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 1292 1293 mps_register_events(sc, events, mps_log_evt_handler, NULL, 1294 &sc->mps_log_eh); 1295 1296 return (0); 1297 } 1298 1299 static int 1300 mps_detach_log(struct mps_softc *sc) 1301 { 1302 1303 if (sc->mps_log_eh != NULL) 1304 mps_deregister_events(sc, sc->mps_log_eh); 1305 return (0); 1306 } 1307 1308 /* 1309 * Free all of the driver resources and detach submodules. Should be called 1310 * without the lock held. 1311 */ 1312 int 1313 mps_free(struct mps_softc *sc) 1314 { 1315 struct mps_command *cm; 1316 int i, error; 1317 1318 /* Turn off the watchdog */ 1319 mps_lock(sc); 1320 sc->mps_flags |= MPS_FLAGS_SHUTDOWN; 1321 mps_unlock(sc); 1322 callout_terminate(&sc->periodic); 1323 1324 if (((error = mps_detach_log(sc)) != 0) || 1325 ((error = mps_detach_sas(sc)) != 0)) 1326 return (error); 1327 1328 mps_detach_user(sc); 1329 1330 /* Put the IOC back in the READY state. */ 1331 mps_lock(sc); 1332 if ((error = mps_transition_ready(sc)) != 0) { 1333 mps_unlock(sc); 1334 return (error); 1335 } 1336 mps_unlock(sc); 1337 1338 if (sc->facts != NULL) 1339 kfree(sc->facts, M_MPT2); 1340 1341 if (sc->pfacts != NULL) 1342 kfree(sc->pfacts, M_MPT2); 1343 1344 if (sc->post_busaddr != 0) 1345 bus_dmamap_unload(sc->queues_dmat, sc->queues_map); 1346 if (sc->post_queue != NULL) 1347 bus_dmamem_free(sc->queues_dmat, sc->post_queue, 1348 sc->queues_map); 1349 if (sc->queues_dmat != NULL) 1350 bus_dma_tag_destroy(sc->queues_dmat); 1351 1352 if (sc->chain_busaddr != 0) 1353 bus_dmamap_unload(sc->chain_dmat, sc->chain_map); 1354 if (sc->chain_frames != NULL) 1355 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,sc->chain_map); 1356 if (sc->chain_dmat != NULL) 1357 bus_dma_tag_destroy(sc->chain_dmat); 1358 1359 if (sc->sense_busaddr != 0) 1360 bus_dmamap_unload(sc->sense_dmat, sc->sense_map); 1361 if (sc->sense_frames != NULL) 1362 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,sc->sense_map); 1363 if (sc->sense_dmat != NULL) 1364 bus_dma_tag_destroy(sc->sense_dmat); 1365 1366 if (sc->reply_busaddr != 0) 1367 bus_dmamap_unload(sc->reply_dmat, sc->reply_map); 1368 if (sc->reply_frames != NULL) 1369 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,sc->reply_map); 1370 if (sc->reply_dmat != NULL) 1371 bus_dma_tag_destroy(sc->reply_dmat); 1372 1373 if (sc->req_busaddr != 0) 1374 bus_dmamap_unload(sc->req_dmat, sc->req_map); 1375 if (sc->req_frames != NULL) 1376 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); 1377 if (sc->req_dmat != NULL) 1378 bus_dma_tag_destroy(sc->req_dmat); 1379 1380 if (sc->chains != NULL) 1381 kfree(sc->chains, M_MPT2); 1382 if (sc->commands != NULL) { 1383 for (i = 1; i < sc->num_reqs; i++) { 1384 cm = &sc->commands[i]; 1385 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); 1386 } 1387 kfree(sc->commands, M_MPT2); 1388 } 1389 if (sc->buffer_dmat != NULL) 1390 bus_dma_tag_destroy(sc->buffer_dmat); 1391 1392 if (sc->sysctl_tree != NULL) 1393 sysctl_ctx_free(&sc->sysctl_ctx); 1394 1395 mps_mapping_free_memory(sc); 1396 1397 /* Deregister the shutdown function */ 1398 if (sc->shutdown_eh != NULL) 1399 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 1400 1401 lockuninit(&sc->mps_lock); 1402 1403 return (0); 1404 } 1405 1406 static __inline void 1407 mps_complete_command(struct mps_command *cm) 1408 { 1409 if (cm->cm_flags & MPS_CM_FLAGS_POLLED) 1410 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; 1411 1412 if (cm->cm_complete != NULL) { 1413 mps_dprint(cm->cm_sc, MPS_TRACE, 1414 "%s cm %p calling cm_complete %p data %p reply %p\n", 1415 __func__, cm, cm->cm_complete, cm->cm_complete_data, 1416 cm->cm_reply); 1417 cm->cm_complete(cm->cm_sc, cm); 1418 } 1419 1420 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { 1421 mps_dprint(cm->cm_sc, MPS_TRACE, "%s: waking up %p\n", 1422 __func__, cm); 1423 wakeup(cm); 1424 } 1425 1426 if (cm->cm_sc->io_cmds_active != 0) { 1427 cm->cm_sc->io_cmds_active--; 1428 } else { 1429 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: io_cmds_active is " 1430 "out of sync - resynching to 0\n"); 1431 } 1432 } 1433 1434 void 1435 mps_intr(void *data) 1436 { 1437 struct mps_softc *sc; 1438 uint32_t status; 1439 1440 sc = (struct mps_softc *)data; 1441 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1442 1443 /* 1444 * Check interrupt status register to flush the bus. This is 1445 * needed for both INTx interrupts and driver-driven polling 1446 */ 1447 status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 1448 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 1449 return; 1450 1451 mps_lock(sc); 1452 mps_intr_locked(data); 1453 mps_unlock(sc); 1454 return; 1455 } 1456 1457 /* 1458 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the 1459 * chip. Hopefully this theory is correct. 1460 */ 1461 void 1462 mps_intr_msi(void *data) 1463 { 1464 struct mps_softc *sc; 1465 1466 sc = (struct mps_softc *)data; 1467 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1468 mps_lock(sc); 1469 mps_intr_locked(data); 1470 mps_unlock(sc); 1471 return; 1472 } 1473 1474 /* 1475 * The locking is overly broad and simplistic, but easy to deal with for now. 1476 */ 1477 void 1478 mps_intr_locked(void *data) 1479 { 1480 MPI2_REPLY_DESCRIPTORS_UNION *desc; 1481 struct mps_softc *sc; 1482 struct mps_command *cm = NULL; 1483 uint8_t flags; 1484 u_int pq; 1485 MPI2_DIAG_RELEASE_REPLY *rel_rep; 1486 mps_fw_diagnostic_buffer_t *pBuffer; 1487 1488 sc = (struct mps_softc *)data; 1489 1490 pq = sc->replypostindex; 1491 mps_dprint(sc, MPS_TRACE, 1492 "%s sc %p starting with replypostindex %u\n", 1493 __func__, sc, sc->replypostindex); 1494 1495 for ( ;; ) { 1496 cm = NULL; 1497 desc = &sc->post_queue[sc->replypostindex]; 1498 flags = desc->Default.ReplyFlags & 1499 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1500 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1501 || (desc->Words.High == 0xffffffff)) 1502 break; 1503 1504 /* increment the replypostindex now, so that event handlers 1505 * and cm completion handlers which decide to do a diag 1506 * reset can zero it without it getting incremented again 1507 * afterwards, and we break out of this loop on the next 1508 * iteration since the reply post queue has been cleared to 1509 * 0xFF and all descriptors look unused (which they are). 1510 */ 1511 if (++sc->replypostindex >= sc->pqdepth) 1512 sc->replypostindex = 0; 1513 1514 switch (flags) { 1515 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 1516 cm = &sc->commands[desc->SCSIIOSuccess.SMID]; 1517 cm->cm_reply = NULL; 1518 break; 1519 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 1520 { 1521 uint32_t baddr; 1522 uint8_t *reply; 1523 1524 /* 1525 * Re-compose the reply address from the address 1526 * sent back from the chip. The ReplyFrameAddress 1527 * is the lower 32 bits of the physical address of 1528 * particular reply frame. Convert that address to 1529 * host format, and then use that to provide the 1530 * offset against the virtual address base 1531 * (sc->reply_frames). 1532 */ 1533 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 1534 reply = sc->reply_frames + 1535 (baddr - ((uint32_t)sc->reply_busaddr)); 1536 /* 1537 * Make sure the reply we got back is in a valid 1538 * range. If not, go ahead and panic here, since 1539 * we'll probably panic as soon as we deference the 1540 * reply pointer anyway. 1541 */ 1542 if ((reply < sc->reply_frames) 1543 || (reply > (sc->reply_frames + 1544 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { 1545 kprintf("%s: WARNING: reply %p out of range!\n", 1546 __func__, reply); 1547 kprintf("%s: reply_frames %p, fqdepth %d, " 1548 "frame size %d\n", __func__, 1549 sc->reply_frames, sc->fqdepth, 1550 sc->facts->ReplyFrameSize * 4); 1551 kprintf("%s: baddr %#x,\n", __func__, baddr); 1552 panic("Reply address out of range"); 1553 } 1554 if (desc->AddressReply.SMID == 0) { 1555 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 1556 MPI2_FUNCTION_DIAG_BUFFER_POST) { 1557 /* 1558 * If SMID is 0 for Diag Buffer Post, 1559 * this implies that the reply is due to 1560 * a release function with a status that 1561 * the buffer has been released. Set 1562 * the buffer flags accordingly. 1563 */ 1564 rel_rep = 1565 (MPI2_DIAG_RELEASE_REPLY *)reply; 1566 if (rel_rep->IOCStatus == 1567 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 1568 { 1569 pBuffer = 1570 &sc->fw_diag_buffer_list[ 1571 rel_rep->BufferType]; 1572 pBuffer->valid_data = TRUE; 1573 pBuffer->owned_by_firmware = 1574 FALSE; 1575 pBuffer->immediate = FALSE; 1576 } 1577 } else 1578 mps_dispatch_event(sc, baddr, 1579 (MPI2_EVENT_NOTIFICATION_REPLY *) 1580 reply); 1581 } else { 1582 cm = &sc->commands[desc->AddressReply.SMID]; 1583 cm->cm_reply = reply; 1584 cm->cm_reply_data = 1585 desc->AddressReply.ReplyFrameAddress; 1586 } 1587 break; 1588 } 1589 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 1590 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 1591 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 1592 default: 1593 /* Unhandled */ 1594 device_printf(sc->mps_dev, "Unhandled reply 0x%x\n", 1595 desc->Default.ReplyFlags); 1596 cm = NULL; 1597 break; 1598 } 1599 1600 if (cm != NULL) 1601 mps_complete_command(cm); 1602 1603 desc->Words.Low = 0xffffffff; 1604 desc->Words.High = 0xffffffff; 1605 } 1606 1607 if (pq != sc->replypostindex) { 1608 mps_dprint(sc, MPS_TRACE, 1609 "%s sc %p writing postindex %d\n", 1610 __func__, sc, sc->replypostindex); 1611 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); 1612 } 1613 1614 return; 1615 } 1616 1617 static void 1618 mps_dispatch_event(struct mps_softc *sc, uintptr_t data, 1619 MPI2_EVENT_NOTIFICATION_REPLY *reply) 1620 { 1621 struct mps_event_handle *eh; 1622 int event, handled = 0; 1623 1624 event = reply->Event; 1625 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1626 if (isset(eh->mask, event)) { 1627 eh->callback(sc, data, reply); 1628 handled++; 1629 } 1630 } 1631 1632 if (handled == 0) 1633 device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event); 1634 1635 /* 1636 * This is the only place that the event/reply should be freed. 1637 * Anything wanting to hold onto the event data should have 1638 * already copied it into their own storage. 1639 */ 1640 mps_free_reply(sc, data); 1641 } 1642 1643 static void 1644 mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm) 1645 { 1646 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1647 1648 if (cm->cm_reply) 1649 mps_print_event(sc, 1650 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 1651 1652 mps_free_command(sc, cm); 1653 1654 /* next, send a port enable */ 1655 mpssas_startup(sc); 1656 } 1657 1658 /* 1659 * For both register_events and update_events, the caller supplies a bitmap 1660 * of events that it _wants_. These functions then turn that into a bitmask 1661 * suitable for the controller. 1662 */ 1663 int 1664 mps_register_events(struct mps_softc *sc, uint8_t *mask, 1665 mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle) 1666 { 1667 struct mps_event_handle *eh; 1668 int error = 0; 1669 1670 eh = kmalloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO); 1671 eh->callback = cb; 1672 eh->data = data; 1673 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 1674 if (mask != NULL) 1675 error = mps_update_events(sc, eh, mask); 1676 *handle = eh; 1677 1678 return (error); 1679 } 1680 1681 int 1682 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle, 1683 uint8_t *mask) 1684 { 1685 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 1686 MPI2_EVENT_NOTIFICATION_REPLY *reply; 1687 struct mps_command *cm; 1688 struct mps_event_handle *eh; 1689 int error, i; 1690 1691 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1692 1693 if ((mask != NULL) && (handle != NULL)) 1694 bcopy(mask, &handle->mask[0], 16); 1695 memset(sc->event_mask, 0xff, 16); 1696 1697 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1698 for (i = 0; i < 16; i++) 1699 sc->event_mask[i] &= ~eh->mask[i]; 1700 } 1701 1702 if ((cm = mps_alloc_command(sc)) == NULL) 1703 return (EBUSY); 1704 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 1705 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 1706 evtreq->MsgFlags = 0; 1707 evtreq->SASBroadcastPrimitiveMasks = 0; 1708 #ifdef MPS_DEBUG_ALL_EVENTS 1709 { 1710 u_char fullmask[16]; 1711 memset(fullmask, 0x00, 16); 1712 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 1713 } 1714 #else 1715 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 1716 #endif 1717 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1718 cm->cm_data = NULL; 1719 1720 error = mps_request_polled(sc, cm); 1721 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 1722 if ((reply == NULL) || 1723 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 1724 error = ENXIO; 1725 mps_print_event(sc, reply); 1726 mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error); 1727 1728 mps_free_command(sc, cm); 1729 return (error); 1730 } 1731 1732 static int 1733 mps_reregister_events(struct mps_softc *sc) 1734 { 1735 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 1736 struct mps_command *cm; 1737 struct mps_event_handle *eh; 1738 int error, i; 1739 1740 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1741 1742 /* first, reregister events */ 1743 1744 memset(sc->event_mask, 0xff, 16); 1745 1746 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1747 for (i = 0; i < 16; i++) 1748 sc->event_mask[i] &= ~eh->mask[i]; 1749 } 1750 1751 if ((cm = mps_alloc_command(sc)) == NULL) 1752 return (EBUSY); 1753 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 1754 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 1755 evtreq->MsgFlags = 0; 1756 evtreq->SASBroadcastPrimitiveMasks = 0; 1757 #ifdef MPS_DEBUG_ALL_EVENTS 1758 { 1759 u_char fullmask[16]; 1760 memset(fullmask, 0x00, 16); 1761 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 1762 } 1763 #else 1764 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 1765 #endif 1766 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1767 cm->cm_data = NULL; 1768 cm->cm_complete = mps_reregister_events_complete; 1769 1770 error = mps_map_command(sc, cm); 1771 1772 mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__, error); 1773 return (error); 1774 } 1775 1776 int 1777 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle) 1778 { 1779 1780 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 1781 kfree(handle, M_MPT2); 1782 return (mps_update_events(sc, NULL, NULL)); 1783 } 1784 1785 /* 1786 * Add a chain element as the next SGE for the specified command. 1787 * Reset cm_sge and cm_sgesize to indicate all the available space. 1788 */ 1789 static int 1790 mps_add_chain(struct mps_command *cm) 1791 { 1792 MPI2_SGE_CHAIN32 *sgc; 1793 struct mps_chain *chain; 1794 int space; 1795 1796 if (cm->cm_sglsize < MPS_SGC_SIZE) 1797 panic("MPS: Need SGE Error Code"); 1798 1799 chain = mps_alloc_chain(cm->cm_sc); 1800 if (chain == NULL) 1801 return (ENOBUFS); 1802 1803 space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; 1804 1805 /* 1806 * Note: a double-linked list is used to make it easier to 1807 * walk for debugging. 1808 */ 1809 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 1810 1811 sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain; 1812 sgc->Length = space; 1813 sgc->NextChainOffset = 0; 1814 sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT; 1815 sgc->Address = chain->chain_busaddr; 1816 1817 cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple; 1818 cm->cm_sglsize = space; 1819 return (0); 1820 } 1821 1822 /* 1823 * Add one scatter-gather element (chain, simple, transaction context) 1824 * to the scatter-gather list for a command. Maintain cm_sglsize and 1825 * cm_sge as the remaining size and pointer to the next SGE to fill 1826 * in, respectively. 1827 */ 1828 int 1829 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft) 1830 { 1831 MPI2_SGE_TRANSACTION_UNION *tc = sgep; 1832 MPI2_SGE_SIMPLE64 *sge = sgep; 1833 int error, type; 1834 uint32_t saved_buf_len, saved_address_low, saved_address_high; 1835 1836 type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK); 1837 1838 #ifdef INVARIANTS 1839 switch (type) { 1840 case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: { 1841 if (len != tc->DetailsLength + 4) 1842 panic("TC %p length %u or %zu?", tc, 1843 tc->DetailsLength + 4, len); 1844 } 1845 break; 1846 case MPI2_SGE_FLAGS_CHAIN_ELEMENT: 1847 /* Driver only uses 32-bit chain elements */ 1848 if (len != MPS_SGC_SIZE) 1849 panic("CHAIN %p length %u or %zu?", sgep, 1850 MPS_SGC_SIZE, len); 1851 break; 1852 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT: 1853 /* Driver only uses 64-bit SGE simple elements */ 1854 if (len != MPS_SGE64_SIZE) 1855 panic("SGE simple %p length %u or %zu?", sge, 1856 MPS_SGE64_SIZE, len); 1857 if (((le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT) & 1858 MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0) 1859 panic("SGE simple %p not marked 64-bit?", sge); 1860 1861 break; 1862 default: 1863 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags); 1864 } 1865 #endif 1866 1867 /* 1868 * case 1: 1 more segment, enough room for it 1869 * case 2: 2 more segments, enough room for both 1870 * case 3: >=2 more segments, only enough room for 1 and a chain 1871 * case 4: >=1 more segment, enough room for only a chain 1872 * case 5: >=1 more segment, no room for anything (error) 1873 */ 1874 1875 /* 1876 * There should be room for at least a chain element, or this 1877 * code is buggy. Case (5). 1878 */ 1879 if (cm->cm_sglsize < MPS_SGC_SIZE) 1880 panic("MPS: Need SGE Error Code"); 1881 1882 if (segsleft >= 2 && 1883 cm->cm_sglsize >= len + MPS_SGC_SIZE && 1884 cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) { 1885 /* 1886 * There are 2 or more segments left to add, and only 1887 * enough room for 1 and a chain. Case (3). 1888 * 1889 * Mark as last element in this chain if necessary. 1890 */ 1891 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { 1892 sge->FlagsLength |= htole32( 1893 MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT); 1894 } 1895 1896 /* 1897 * Add the item then a chain. Do the chain now, 1898 * rather than on the next iteration, to simplify 1899 * understanding the code. 1900 */ 1901 cm->cm_sglsize -= len; 1902 bcopy(sgep, cm->cm_sge, len); 1903 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 1904 return (mps_add_chain(cm)); 1905 } 1906 1907 if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) { 1908 /* 1909 * 1 or more segment, enough room for only a chain. 1910 * Hope the previous element wasn't a Simple entry 1911 * that needed to be marked with 1912 * MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4). 1913 */ 1914 if ((error = mps_add_chain(cm)) != 0) 1915 return (error); 1916 } 1917 1918 #ifdef INVARIANTS 1919 /* Case 1: 1 more segment, enough room for it. */ 1920 if (segsleft == 1 && cm->cm_sglsize < len) 1921 panic("1 seg left and no room? %u versus %zu", 1922 cm->cm_sglsize, len); 1923 1924 /* Case 2: 2 more segments, enough room for both */ 1925 if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE) 1926 panic("2 segs left and no room? %u versus %zu", 1927 cm->cm_sglsize, len); 1928 #endif 1929 1930 if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { 1931 /* 1932 * If this is a bi-directional request, need to account for that 1933 * here. Save the pre-filled sge values. These will be used 1934 * either for the 2nd SGL or for a single direction SGL. If 1935 * cm_out_len is non-zero, this is a bi-directional request, so 1936 * fill in the OUT SGL first, then the IN SGL, otherwise just 1937 * fill in the IN SGL. Note that at this time, when filling in 1938 * 2 SGL's for a bi-directional request, they both use the same 1939 * DMA buffer (same cm command). 1940 */ 1941 saved_buf_len = le32toh(sge->FlagsLength) & 0x00FFFFFF; 1942 saved_address_low = sge->Address.Low; 1943 saved_address_high = sge->Address.High; 1944 if (cm->cm_out_len) { 1945 sge->FlagsLength = htole32(cm->cm_out_len | 1946 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1947 MPI2_SGE_FLAGS_END_OF_BUFFER | 1948 MPI2_SGE_FLAGS_HOST_TO_IOC | 1949 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 1950 MPI2_SGE_FLAGS_SHIFT)); 1951 cm->cm_sglsize -= len; 1952 bcopy(sgep, cm->cm_sge, len); 1953 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge 1954 + len); 1955 } 1956 saved_buf_len |= 1957 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1958 MPI2_SGE_FLAGS_END_OF_BUFFER | 1959 MPI2_SGE_FLAGS_LAST_ELEMENT | 1960 MPI2_SGE_FLAGS_END_OF_LIST | 1961 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 1962 MPI2_SGE_FLAGS_SHIFT); 1963 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) { 1964 saved_buf_len |= 1965 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 1966 MPI2_SGE_FLAGS_SHIFT); 1967 } else { 1968 saved_buf_len |= 1969 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 1970 MPI2_SGE_FLAGS_SHIFT); 1971 } 1972 sge->FlagsLength = htole32(saved_buf_len); 1973 sge->Address.Low = saved_address_low; 1974 sge->Address.High = saved_address_high; 1975 } 1976 1977 cm->cm_sglsize -= len; 1978 bcopy(sgep, cm->cm_sge, len); 1979 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 1980 return (0); 1981 } 1982 1983 /* 1984 * Add one dma segment to the scatter-gather list for a command. 1985 */ 1986 int 1987 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags, 1988 int segsleft) 1989 { 1990 MPI2_SGE_SIMPLE64 sge; 1991 1992 /* 1993 * This driver always uses 64-bit address elements for simplicity. 1994 */ 1995 bzero(&sge, sizeof(sge)); 1996 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1997 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 1998 sge.FlagsLength = htole32(len | (flags << MPI2_SGE_FLAGS_SHIFT)); 1999 mps_from_u64(pa, &sge.Address); 2000 2001 return (mps_push_sge(cm, &sge, sizeof sge, segsleft)); 2002 } 2003 2004 static void 2005 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 2006 { 2007 struct mps_softc *sc; 2008 struct mps_command *cm; 2009 u_int i, dir, sflags; 2010 2011 cm = (struct mps_command *)arg; 2012 sc = cm->cm_sc; 2013 2014 /* 2015 * In this case, just print out a warning and let the chip tell the 2016 * user they did the wrong thing. 2017 */ 2018 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { 2019 mps_printf(sc, "%s: warning: busdma returned %d segments, " 2020 "more than the %d allowed\n", __func__, nsegs, 2021 cm->cm_max_segs); 2022 } 2023 2024 /* 2025 * Set up DMA direction flags. Bi-directional requests are also handled 2026 * here. In that case, both direction flags will be set. 2027 */ 2028 sflags = 0; 2029 if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) { 2030 /* 2031 * We have to add a special case for SMP passthrough, there 2032 * is no easy way to generically handle it. The first 2033 * S/G element is used for the command (therefore the 2034 * direction bit needs to be set). The second one is used 2035 * for the reply. We'll leave it to the caller to make 2036 * sure we only have two buffers. 2037 */ 2038 /* 2039 * Even though the busdma man page says it doesn't make 2040 * sense to have both direction flags, it does in this case. 2041 * We have one s/g element being accessed in each direction. 2042 */ 2043 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; 2044 2045 /* 2046 * Set the direction flag on the first buffer in the SMP 2047 * passthrough request. We'll clear it for the second one. 2048 */ 2049 sflags |= MPI2_SGE_FLAGS_DIRECTION | 2050 MPI2_SGE_FLAGS_END_OF_BUFFER; 2051 } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) { 2052 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 2053 dir = BUS_DMASYNC_PREWRITE; 2054 } else 2055 dir = BUS_DMASYNC_PREREAD; 2056 2057 for (i = 0; i < nsegs; i++) { 2058 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) { 2059 sflags &= ~MPI2_SGE_FLAGS_DIRECTION; 2060 } 2061 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, 2062 sflags, nsegs - i); 2063 if (error != 0) { 2064 /* Resource shortage, roll back! */ 2065 mps_dprint(sc, MPS_INFO, "out of chain frames\n"); 2066 cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED; 2067 mps_complete_command(cm); 2068 return; 2069 } 2070 } 2071 2072 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 2073 mps_enqueue_request(sc, cm); 2074 2075 return; 2076 } 2077 2078 static void 2079 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, 2080 int error) 2081 { 2082 mps_data_cb(arg, segs, nsegs, error); 2083 } 2084 2085 /* 2086 * This is the routine to enqueue commands ansynchronously. 2087 * Note that the only error path here is from bus_dmamap_load(), which can 2088 * return EINPROGRESS if it is waiting for resources. Other than this, it's 2089 * assumed that if you have a command in-hand, then you have enough credits 2090 * to use it. 2091 */ 2092 int 2093 mps_map_command(struct mps_softc *sc, struct mps_command *cm) 2094 { 2095 int error = 0; 2096 2097 if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) { 2098 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, 2099 &cm->cm_uio, mps_data_cb2, cm, 0); 2100 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { 2101 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, 2102 cm->cm_data, cm->cm_length, mps_data_cb, cm, 0); 2103 } else { 2104 /* Add a zero-length element as needed */ 2105 if (cm->cm_sge != NULL) 2106 mps_add_dmaseg(cm, 0, 0, 0, 1); 2107 mps_enqueue_request(sc, cm); 2108 } 2109 2110 return (error); 2111 } 2112 2113 /* 2114 * This is the routine to enqueue commands synchronously. An error of 2115 * EINPROGRESS from mps_map_command() is ignored since the command will 2116 * be executed and enqueued automatically. Other errors come from msleep(). 2117 */ 2118 int 2119 mps_wait_command(struct mps_softc *sc, struct mps_command *cm, int timeout) 2120 { 2121 int error; 2122 2123 KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0); 2124 2125 cm->cm_complete = NULL; 2126 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP; 2127 error = mps_map_command(sc, cm); 2128 if ((error != 0) && (error != EINPROGRESS)) 2129 return (error); 2130 error = lksleep(cm, &sc->mps_lock, 0, "mpswait", timeout*hz); 2131 if (error == EWOULDBLOCK) 2132 error = ETIMEDOUT; 2133 return (error); 2134 } 2135 2136 /* 2137 * This is the routine to enqueue a command synchonously and poll for 2138 * completion. Its use should be rare. 2139 */ 2140 int 2141 mps_request_polled(struct mps_softc *sc, struct mps_command *cm) 2142 { 2143 int error, timeout = 0; 2144 2145 error = 0; 2146 2147 cm->cm_flags |= MPS_CM_FLAGS_POLLED; 2148 cm->cm_complete = NULL; 2149 mps_map_command(sc, cm); 2150 2151 while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) { 2152 mps_intr_locked(sc); 2153 DELAY(50 * 1000); 2154 if (timeout++ > 1000) { 2155 mps_dprint(sc, MPS_FAULT, "polling failed\n"); 2156 error = ETIMEDOUT; 2157 break; 2158 } 2159 } 2160 2161 return (error); 2162 } 2163 2164 /* 2165 * The MPT driver had a verbose interface for config pages. In this driver, 2166 * reduce it to much simplier terms, similar to the Linux driver. 2167 */ 2168 int 2169 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params) 2170 { 2171 MPI2_CONFIG_REQUEST *req; 2172 struct mps_command *cm; 2173 int error; 2174 2175 if (sc->mps_flags & MPS_FLAGS_BUSY) { 2176 return (EBUSY); 2177 } 2178 2179 cm = mps_alloc_command(sc); 2180 if (cm == NULL) { 2181 return (EBUSY); 2182 } 2183 2184 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; 2185 req->Function = MPI2_FUNCTION_CONFIG; 2186 req->Action = params->action; 2187 req->SGLFlags = 0; 2188 req->ChainOffset = 0; 2189 req->PageAddress = params->page_address; 2190 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 2191 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; 2192 2193 hdr = ¶ms->hdr.Ext; 2194 req->ExtPageType = hdr->ExtPageType; 2195 req->ExtPageLength = hdr->ExtPageLength; 2196 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 2197 req->Header.PageLength = 0; /* Must be set to zero */ 2198 req->Header.PageNumber = hdr->PageNumber; 2199 req->Header.PageVersion = hdr->PageVersion; 2200 } else { 2201 MPI2_CONFIG_PAGE_HEADER *hdr; 2202 2203 hdr = ¶ms->hdr.Struct; 2204 req->Header.PageType = hdr->PageType; 2205 req->Header.PageNumber = hdr->PageNumber; 2206 req->Header.PageLength = hdr->PageLength; 2207 req->Header.PageVersion = hdr->PageVersion; 2208 } 2209 2210 cm->cm_data = params->buffer; 2211 cm->cm_length = params->length; 2212 cm->cm_sge = &req->PageBufferSGE; 2213 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); 2214 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN; 2215 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2216 2217 cm->cm_complete_data = params; 2218 if (params->callback != NULL) { 2219 cm->cm_complete = mps_config_complete; 2220 return (mps_map_command(sc, cm)); 2221 } else { 2222 error = mps_wait_command(sc, cm, 0); 2223 if (error) { 2224 mps_dprint(sc, MPS_FAULT, 2225 "Error %d reading config page\n", error); 2226 mps_free_command(sc, cm); 2227 return (error); 2228 } 2229 mps_config_complete(sc, cm); 2230 } 2231 2232 return (0); 2233 } 2234 2235 int 2236 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params) 2237 { 2238 return (EINVAL); 2239 } 2240 2241 static void 2242 mps_config_complete(struct mps_softc *sc, struct mps_command *cm) 2243 { 2244 MPI2_CONFIG_REPLY *reply; 2245 struct mps_config_params *params; 2246 2247 params = cm->cm_complete_data; 2248 2249 if (cm->cm_data != NULL) { 2250 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, 2251 BUS_DMASYNC_POSTREAD); 2252 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 2253 } 2254 2255 /* 2256 * XXX KDM need to do more error recovery? This results in the 2257 * device in question not getting probed. 2258 */ 2259 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 2260 params->status = MPI2_IOCSTATUS_BUSY; 2261 goto done; 2262 } 2263 2264 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 2265 if (reply == NULL) { 2266 params->status = MPI2_IOCSTATUS_BUSY; 2267 goto done; 2268 } 2269 params->status = reply->IOCStatus; 2270 if (params->hdr.Ext.ExtPageType != 0) { 2271 params->hdr.Ext.ExtPageType = reply->ExtPageType; 2272 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 2273 } else { 2274 params->hdr.Struct.PageType = reply->Header.PageType; 2275 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 2276 params->hdr.Struct.PageLength = reply->Header.PageLength; 2277 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 2278 } 2279 2280 done: 2281 mps_free_command(sc, cm); 2282 if (params->callback != NULL) 2283 params->callback(sc, params); 2284 2285 return; 2286 } 2287