/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.68 2009/07/02 00:43:10 delphij Exp $
 */

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025 || defined(__DragonFly__)
#ifndef	CAM_NEW_TRAN_CODE
#define	CAM_NEW_TRAN_CODE	1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

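/*
 * XXX: The personality below is the hook table through which the core
 * mpt(4) driver reaches this CAM glue: probe/attach/detach when a
 * controller comes and goes, enable/ready once the IOC is operational,
 * and event/reset from the interrupt and recovery paths.  This reading
 * is inferred from the handler names; see mpt.h for the authoritative
 * personality contract.
 */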
"mpt_cam", 181 .probe = mpt_cam_probe, 182 .attach = mpt_cam_attach, 183 .enable = mpt_cam_enable, 184 .ready = mpt_cam_ready, 185 .event = mpt_cam_event, 186 .reset = mpt_cam_ioc_reset, 187 .detach = mpt_cam_detach, 188 }; 189 190 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); 191 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); 192 193 int mpt_enable_sata_wc = -1; 194 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); 195 196 int 197 mpt_cam_probe(struct mpt_softc *mpt) 198 { 199 int role; 200 201 /* 202 * Only attach to nodes that support the initiator or target role 203 * (or want to) or have RAID physical devices that need CAM pass-thru 204 * support. 205 */ 206 if (mpt->do_cfg_role) { 207 role = mpt->cfg_role; 208 } else { 209 role = mpt->role; 210 } 211 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || 212 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { 213 return (0); 214 } 215 return (ENODEV); 216 } 217 218 int 219 mpt_cam_attach(struct mpt_softc *mpt) 220 { 221 struct cam_devq *devq; 222 mpt_handler_t handler; 223 int maxq; 224 int error; 225 226 MPT_LOCK(mpt); 227 TAILQ_INIT(&mpt->request_timeout_list); 228 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? 229 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); 230 231 handler.reply_handler = mpt_scsi_reply_handler; 232 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 233 &scsi_io_handler_id); 234 if (error != 0) { 235 MPT_UNLOCK(mpt); 236 goto cleanup; 237 } 238 239 handler.reply_handler = mpt_scsi_tmf_reply_handler; 240 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 241 &scsi_tmf_handler_id); 242 if (error != 0) { 243 MPT_UNLOCK(mpt); 244 goto cleanup; 245 } 246 247 /* 248 * If we're fibre channel and could support target mode, we register 249 * an ELS reply handler and give it resources. 250 */ 251 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 252 handler.reply_handler = mpt_fc_els_reply_handler; 253 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 254 &fc_els_handler_id); 255 if (error != 0) { 256 MPT_UNLOCK(mpt); 257 goto cleanup; 258 } 259 if (mpt_add_els_buffers(mpt) == FALSE) { 260 error = ENOMEM; 261 MPT_UNLOCK(mpt); 262 goto cleanup; 263 } 264 maxq -= mpt->els_cmds_allocated; 265 } 266 267 /* 268 * If we support target mode, we register a reply handler for it, 269 * but don't add command resources until we actually enable target 270 * mode. 271 */ 272 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 273 handler.reply_handler = mpt_scsi_tgt_reply_handler; 274 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 275 &mpt->scsi_tgt_handler_id); 276 if (error != 0) { 277 MPT_UNLOCK(mpt); 278 goto cleanup; 279 } 280 } 281 282 if (mpt->is_sas) { 283 handler.reply_handler = mpt_sata_pass_reply_handler; 284 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 285 &sata_pass_handler_id); 286 if (error != 0) { 287 MPT_UNLOCK(mpt); 288 goto cleanup; 289 } 290 } 291 292 /* 293 * We keep one request reserved for timeout TMF requests. 294 */ 295 mpt->tmf_req = mpt_get_request(mpt, FALSE); 296 if (mpt->tmf_req == NULL) { 297 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); 298 error = ENOMEM; 299 MPT_UNLOCK(mpt); 300 goto cleanup; 301 } 302 303 /* 304 * Mark the request as free even though not on the free list. 305 * There is only one TMF request allowed to be outstanding at 306 * a time and the TMF routines perform their own allocation 307 * tracking using the standard state flags. 
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

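	/*
	 * XXX: This second SIM is registered as bus 1 below; RAID member
	 * ("hidden") disks that are not exposed on the normal bus 0 remain
	 * reachable through it for pass-through commands.
	 */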
	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    (unsigned)mpt->mpt_fcport_page0.WWNN.High,
	    (unsigned)mpt->mpt_fcport_page0.WWNN.Low,
	    (unsigned)mpt->mpt_fcport_page0.WWPN.High,
	    (unsigned)mpt->mpt_fcport_page0.WWPN.Low,
	    (unsigned)mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
	MPT_UNLOCK(mpt);
	{
		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

		snprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		snprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		    "World Wide Node Name");

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		    "World Wide Port Name");

	}
	MPT_LOCK(mpt);
#endif
	return (0);
}

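/*
 * XXX: With the sysctl strings registered above, the names should show up
 * as dev.mpt.<unit>.wwnn and dev.mpt.<unit>.wwpn (read-only), assuming the
 * standard per-device sysctl tree; handy for matching a port against
 * fabric zoning.
 */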
/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

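/*
 * XXX: In the three helpers above, ExtPageLength from the extended config
 * page header is a count of 32-bit words, which is why the IO unit page
 * buffer is sized as ExtPageLength * 4 bytes before the page is fetched.
 */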
/*
 * Read SAS configuration information.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

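/*
 * XXX: The CommandFIS filled in above is presumably a SATA Register -
 * Host to Device FIS: 0x27 is the FIS type, 0x80 sets the C (command)
 * bit, 0xef is SET FEATURES, and the feature byte 0x02/0x82 enables or
 * disables the drive's volatile write cache; 0x40 in byte 7 is the
 * device register value.  This reading follows the SATA/ATA specs rather
 * than anything in the MPI headers.
 */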
"En" : "Dis", i); 872 mptsas_set_sata_wc(mpt, &phyinfo->attached, 873 mpt_enable_sata_wc); 874 } 875 } 876 877 return (0); 878 } 879 880 static int 881 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 882 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 883 { 884 if (req != NULL) { 885 886 if (reply_frame != NULL) { 887 req->IOCStatus = le16toh(reply_frame->IOCStatus); 888 } 889 req->state &= ~REQ_STATE_QUEUED; 890 req->state |= REQ_STATE_DONE; 891 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 892 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 893 wakeup(req); 894 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 895 /* 896 * Whew- we can free this request (late completion) 897 */ 898 mpt_free_request(mpt, req); 899 } 900 } 901 902 return (TRUE); 903 } 904 905 /* 906 * Read SCSI configuration information 907 */ 908 static int 909 mpt_read_config_info_spi(struct mpt_softc *mpt) 910 { 911 int rv, i; 912 913 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 914 &mpt->mpt_port_page0.Header, FALSE, 5000); 915 if (rv) { 916 return (-1); 917 } 918 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 919 mpt->mpt_port_page0.Header.PageVersion, 920 mpt->mpt_port_page0.Header.PageLength, 921 mpt->mpt_port_page0.Header.PageNumber, 922 mpt->mpt_port_page0.Header.PageType); 923 924 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 925 &mpt->mpt_port_page1.Header, FALSE, 5000); 926 if (rv) { 927 return (-1); 928 } 929 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 930 mpt->mpt_port_page1.Header.PageVersion, 931 mpt->mpt_port_page1.Header.PageLength, 932 mpt->mpt_port_page1.Header.PageNumber, 933 mpt->mpt_port_page1.Header.PageType); 934 935 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 936 &mpt->mpt_port_page2.Header, FALSE, 5000); 937 if (rv) { 938 return (-1); 939 } 940 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 941 mpt->mpt_port_page2.Header.PageVersion, 942 mpt->mpt_port_page2.Header.PageLength, 943 mpt->mpt_port_page2.Header.PageNumber, 944 mpt->mpt_port_page2.Header.PageType); 945 946 for (i = 0; i < 16; i++) { 947 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 948 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 949 if (rv) { 950 return (-1); 951 } 952 mpt_lprt(mpt, MPT_PRT_DEBUG, 953 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 954 mpt->mpt_dev_page0[i].Header.PageVersion, 955 mpt->mpt_dev_page0[i].Header.PageLength, 956 mpt->mpt_dev_page0[i].Header.PageNumber, 957 mpt->mpt_dev_page0[i].Header.PageType); 958 959 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 960 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 961 if (rv) { 962 return (-1); 963 } 964 mpt_lprt(mpt, MPT_PRT_DEBUG, 965 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 966 mpt->mpt_dev_page1[i].Header.PageVersion, 967 mpt->mpt_dev_page1[i].Header.PageLength, 968 mpt->mpt_dev_page1[i].Header.PageNumber, 969 mpt->mpt_dev_page1[i].Header.PageType); 970 } 971 972 /* 973 * At this point, we don't *have* to fail. As long as we have 974 * valid config header information, we can (barely) lurch 975 * along. 

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    (unsigned)mpt->mpt_port_page0.Capabilities,
		    (unsigned)mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    (unsigned)mpt->mpt_port_page1.Configuration,
		    (unsigned)mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    (unsigned)mpt->mpt_port_page2.PortFlags,
		    (unsigned)mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i,
		    (unsigned)mpt->mpt_dev_page0[i].NegotiatedParameters,
		    (unsigned)mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i,
		    (unsigned)mpt->mpt_dev_page1[i].RequestedParameters,
		    (unsigned)mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

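/*
 * XXX: SCSI Port Page 1's Configuration word presumably packs the host
 * adapter's SCSI ID in the low 16 bits and the matching one-hot ID bit
 * in the upper 16 bits; e.g. with mpt_ini_id == 7 the expected value is
 * ((1 << 7) << 16) | 7 == 0x00800007, which is exactly what
 * mpt_set_initial_config_spi() below computes and enforces.
 */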
/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
	int error;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n",
		    (unsigned)mpt->mpt_port_page1.Configuration,
		    (unsigned)pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		kfree(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}
	MPT_UNLOCK(mpt);

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
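/*
 * XXX: Rough shape of the SGL that gets built here (64-bit SGE flavor),
 * as read from the code below rather than from the MPI spec:
 *
 *   request frame:  [SCSI IO / TGT ASSIST hdr][SIMPLE64 ...][CHAIN64]
 *                            up to MPT_NSGL_FIRST-1 SIMPLE64s ---^
 *   chain frame(s): [SIMPLE64 ... SIMPLE64][CHAIN64 -> next chain frame]
 *                   the final SIMPLE64 carries END_OF_LIST|END_OF_BUFFER.
 *
 * If everything fits in the request frame (nseg < MPT_NSGL_FIRST) no
 * chain element is used at all.
 */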
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */
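	/*
	 * XXX: The 0xff fill below is debug-only poisoning; presumably it
	 * makes it obvious in a dumped request frame where the SGEs we
	 * actually built end and untouched frame space begins.
	 */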

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}


	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High =
			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;


	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

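	/*
	 * XXX: From here on this mirrors mpt_execute_req_a64() above, but
	 * builds 32-bit SGE_SIMPLE32/SGE_CHAIN32 elements (no Address.High
	 * words), presumably for configurations limited to 32-bit DMA
	 * addressing.
	 */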
	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}


	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);

		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;

		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
If we're the completely last element period, 1953 * set the end of list and end of buffer flags. 1954 */ 1955 while (seg < this_seg_lim) { 1956 memset(se, 0, sizeof (*se)); 1957 se->Address = htole32(dm_segs->ds_addr); 1958 1959 1960 1961 1962 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1963 tf = flags; 1964 if (seg == this_seg_lim - 1) { 1965 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1966 } 1967 if (seg == nseg - 1) { 1968 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1969 MPI_SGE_FLAGS_END_OF_BUFFER; 1970 } 1971 MPI_pSGE_SET_FLAGS(se, tf); 1972 se->FlagsLength = htole32(se->FlagsLength); 1973 se++; 1974 seg++; 1975 dm_segs++; 1976 } 1977 1978 next_chain: 1979 /* 1980 * If we have more segments to do and we've used up all of 1981 * the space in a request area, go allocate another one 1982 * and chain to that. 1983 */ 1984 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1985 request_t *nrq; 1986 1987 CAMLOCK_2_MPTLOCK(mpt); 1988 nrq = mpt_get_request(mpt, FALSE); 1989 MPTLOCK_2_CAMLOCK(mpt); 1990 1991 if (nrq == NULL) { 1992 error = ENOMEM; 1993 goto bad; 1994 } 1995 1996 /* 1997 * Append the new request area on the tail of our list. 1998 */ 1999 if ((trq = req->chain) == NULL) { 2000 req->chain = nrq; 2001 } else { 2002 while (trq->chain != NULL) { 2003 trq = trq->chain; 2004 } 2005 trq->chain = nrq; 2006 } 2007 trq = nrq; 2008 mpt_off = trq->req_vbuf; 2009 if (mpt->verbose >= MPT_PRT_DEBUG) { 2010 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 2011 } 2012 nxt_off = 0; 2013 } 2014 } 2015 out: 2016 2017 /* 2018 * Last time we need to check if this CCB needs to be aborted. 2019 */ 2020 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2021 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2022 request_t *cmd_req = 2023 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2024 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2025 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2026 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2027 } 2028 mpt_prt(mpt, 2029 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2030 ccb->ccb_h.status & CAM_STATUS_MASK); 2031 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2032 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2033 } 2034 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2035 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2036 xpt_done(ccb); 2037 CAMLOCK_2_MPTLOCK(mpt); 2038 mpt_free_request(mpt, req); 2039 MPTLOCK_2_CAMLOCK(mpt); 2040 return; 2041 } 2042 2043 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2044 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2045 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 2046 mpt_timeout, ccb); 2047 } 2048 if (mpt->verbose > MPT_PRT_DEBUG) { 2049 int nc = 0; 2050 mpt_print_request(req->req_vbuf); 2051 for (trq = req->chain; trq; trq = trq->chain) { 2052 kprintf(" Additional Chain Area %d\n", nc++); 2053 mpt_dump_sgl(trq->req_vbuf, 0); 2054 } 2055 } 2056 2057 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2058 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2059 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2060 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2061 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2062 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2063 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2064 } else { 2065 tgt->state = TGT_STATE_MOVING_DATA; 2066 } 2067 #else 2068 tgt->state = TGT_STATE_MOVING_DATA; 2069 #endif 2070 } 2071 CAMLOCK_2_MPTLOCK(mpt); 2072 mpt_send_cmd(mpt, req); 2073 MPTLOCK_2_CAMLOCK(mpt); 2074 } 2075 2076 static void 2077 mpt_start(struct cam_sim *sim, union ccb *ccb) 2078 { 2079 
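	/*
	 * Grab a request, build the MPI SCSI I/O (or RAID passthrough)
	 * message for this CCB, and hand its data buffer to bus_dma; the
	 * dma callback (mpt_execute_req or mpt_execute_req_a64) finishes
	 * the SGL and sends the command to the IOC.
	 */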
request_t *req; 2080 struct mpt_softc *mpt; 2081 MSG_SCSI_IO_REQUEST *mpt_req; 2082 struct ccb_scsiio *csio = &ccb->csio; 2083 struct ccb_hdr *ccbh = &ccb->ccb_h; 2084 bus_dmamap_callback_t *cb; 2085 target_id_t tgt; 2086 int raid_passthru; 2087 2088 /* Get the pointer for the physical adapter */ 2089 mpt = ccb->ccb_h.ccb_mpt_ptr; 2090 raid_passthru = (sim == mpt->phydisk_sim); 2091 2092 CAMLOCK_2_MPTLOCK(mpt); 2093 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2094 if (mpt->outofbeer == 0) { 2095 mpt->outofbeer = 1; 2096 xpt_freeze_simq(mpt->sim, 1); 2097 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2098 } 2099 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2100 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2101 MPTLOCK_2_CAMLOCK(mpt); 2102 xpt_done(ccb); 2103 return; 2104 } 2105 #ifdef INVARIANTS 2106 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2107 #endif 2108 MPTLOCK_2_CAMLOCK(mpt); 2109 2110 if (sizeof (bus_addr_t) > 4) { 2111 cb = mpt_execute_req_a64; 2112 } else { 2113 cb = mpt_execute_req; 2114 } 2115 2116 /* 2117 * Link the ccb and the request structure so we can find 2118 * the other knowing either the request or the ccb 2119 */ 2120 req->ccb = ccb; 2121 ccb->ccb_h.ccb_req_ptr = req; 2122 2123 /* Now we build the command for the IOC */ 2124 mpt_req = req->req_vbuf; 2125 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2126 2127 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2128 if (raid_passthru) { 2129 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2130 CAMLOCK_2_MPTLOCK(mpt); 2131 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2132 MPTLOCK_2_CAMLOCK(mpt); 2133 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2134 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2135 xpt_done(ccb); 2136 return; 2137 } 2138 MPTLOCK_2_CAMLOCK(mpt); 2139 mpt_req->Bus = 0; /* we never set bus here */ 2140 } else { 2141 tgt = ccb->ccb_h.target_id; 2142 mpt_req->Bus = 0; /* XXX */ 2143 2144 } 2145 mpt_req->SenseBufferLength = 2146 (csio->sense_len < MPT_SENSE_SIZE) ? 2147 csio->sense_len : MPT_SENSE_SIZE; 2148 2149 /* 2150 * We use the message context to find the request structure when we 2151 * get the command completion interrupt from the IOC.
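 * (req->index is OR'd with scsi_io_handler_id below; the interrupt
 * reply path extracts that handler index from the reply's MsgContext
 * and dispatches to mpt_scsi_reply_handler.)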
2152 */ 2153 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2154 2155 /* Which physical device to do the I/O on */ 2156 mpt_req->TargetID = tgt; 2157 2158 /* We assume a single level LUN type */ 2159 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 2160 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 2161 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 2162 } else { 2163 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 2164 } 2165 2166 /* Set the direction of the transfer */ 2167 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2168 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2169 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2170 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2171 } else { 2172 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2173 } 2174 2175 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2176 switch(ccb->csio.tag_action) { 2177 case MSG_HEAD_OF_Q_TAG: 2178 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2179 break; 2180 case MSG_ACA_TASK: 2181 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2182 break; 2183 case MSG_ORDERED_Q_TAG: 2184 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2185 break; 2186 case MSG_SIMPLE_Q_TAG: 2187 default: 2188 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2189 break; 2190 } 2191 } else { 2192 if (mpt->is_fc || mpt->is_sas) { 2193 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2194 } else { 2195 /* XXX No such thing for a target doing packetized. */ 2196 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2197 } 2198 } 2199 2200 if (mpt->is_spi) { 2201 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2202 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2203 } 2204 } 2205 mpt_req->Control = htole32(mpt_req->Control); 2206 2207 /* Copy the scsi command block into place */ 2208 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2209 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2210 } else { 2211 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2212 } 2213 2214 mpt_req->CDBLength = csio->cdb_len; 2215 mpt_req->DataLength = htole32(csio->dxfer_len); 2216 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2217 2218 /* 2219 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2220 */ 2221 if (mpt->verbose == MPT_PRT_DEBUG) { 2222 U32 df; 2223 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2224 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2225 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2226 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2227 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2228 mpt_prtc(mpt, "(%s %u byte%s ", 2229 (df == MPI_SCSIIO_CONTROL_READ)? 2230 "read" : "write", csio->dxfer_len, 2231 (csio->dxfer_len == 1)? ")" : "s)"); 2232 } 2233 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 2234 ccb->ccb_h.target_lun, req, req->serno); 2235 } 2236 2237 /* 2238 * If we have any data to send with this command map it into bus space. 2239 */ 2240 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2241 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 2242 /* 2243 * We've been given a pointer to a single buffer. 2244 */ 2245 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 2246 /* 2247 * Virtual address that needs to translated into 2248 * one or more physical address ranges. 
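 * bus_dmamap_load() performs the translation and invokes the
 * callback with the resulting segment list; EINPROGRESS just means
 * the callback will run later, so the SIM queue is frozen to
 * preserve ordering until it does.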
2249 */ 2250 int error; 2251 error = bus_dmamap_load(mpt->buffer_dmat, 2252 req->dmap, csio->data_ptr, csio->dxfer_len, 2253 cb, req, 0); 2254 if (error == EINPROGRESS) { 2255 /* 2256 * So as to maintain ordering, 2257 * freeze the controller queue 2258 * until our mapping is 2259 * returned. 2260 */ 2261 xpt_freeze_simq(mpt->sim, 1); 2262 ccbh->status |= CAM_RELEASE_SIMQ; 2263 } 2264 } else { 2265 /* 2266 * We have been given a pointer to single 2267 * physical buffer. 2268 */ 2269 struct bus_dma_segment seg; 2270 seg.ds_addr = 2271 (bus_addr_t)(vm_offset_t)csio->data_ptr; 2272 seg.ds_len = csio->dxfer_len; 2273 (*cb)(req, &seg, 1, 0); 2274 } 2275 } else { 2276 /* 2277 * We have been given a list of addresses. 2278 * This case could be easily supported but they are not 2279 * currently generated by the CAM subsystem so there 2280 * is no point in wasting the time right now. 2281 */ 2282 struct bus_dma_segment *segs; 2283 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 2284 (*cb)(req, NULL, 0, EFAULT); 2285 } else { 2286 /* Just use the segments provided */ 2287 segs = (struct bus_dma_segment *)csio->data_ptr; 2288 (*cb)(req, segs, csio->sglist_cnt, 0); 2289 } 2290 } 2291 } else { 2292 (*cb)(req, NULL, 0, 0); 2293 } 2294 } 2295 2296 static int 2297 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2298 int sleep_ok) 2299 { 2300 int error; 2301 uint16_t status; 2302 uint8_t response; 2303 2304 error = mpt_scsi_send_tmf(mpt, 2305 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2306 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2307 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2308 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2309 0, /* XXX How do I get the channel ID? */ 2310 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2311 lun != CAM_LUN_WILDCARD ? lun : 0, 2312 0, sleep_ok); 2313 2314 if (error != 0) { 2315 /* 2316 * mpt_scsi_send_tmf hard resets on failure, so no 2317 * need to do so here. 2318 */ 2319 mpt_prt(mpt, 2320 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2321 return (EIO); 2322 } 2323 2324 /* Wait for bus reset to be processed by the IOC. */ 2325 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2326 REQ_STATE_DONE, sleep_ok, 5000); 2327 2328 status = le16toh(mpt->tmf_req->IOCStatus); 2329 response = mpt->tmf_req->ResponseCode; 2330 mpt->tmf_req->state = REQ_STATE_FREE; 2331 2332 if (error) { 2333 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2334 "Resetting controller.\n"); 2335 mpt_reset(mpt, TRUE); 2336 return (ETIMEDOUT); 2337 } 2338 2339 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2340 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2341 "Resetting controller.\n", status); 2342 mpt_reset(mpt, TRUE); 2343 return (EIO); 2344 } 2345 2346 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2347 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2348 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 2349 "Resetting controller.\n", response); 2350 mpt_reset(mpt, TRUE); 2351 return (EIO); 2352 } 2353 return (0); 2354 } 2355 2356 static int 2357 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2358 { 2359 int r = 0; 2360 request_t *req; 2361 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2362 2363 req = mpt_get_request(mpt, FALSE); 2364 if (req == NULL) { 2365 return (ENOMEM); 2366 } 2367 fc = req->req_vbuf; 2368 memset(fc, 0, sizeof(*fc)); 2369 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2370 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2371 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2372 mpt_send_cmd(mpt, req); 2373 if (dowait) { 2374 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2375 REQ_STATE_DONE, FALSE, 60 * 1000); 2376 if (r == 0) { 2377 mpt_free_request(mpt, req); 2378 } 2379 } 2380 return (r); 2381 } 2382 2383 static int 2384 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2385 MSG_EVENT_NOTIFY_REPLY *msg) 2386 { 2387 uint32_t data0, data1; 2388 2389 data0 = le32toh(msg->Data[0]); 2390 data1 = le32toh(msg->Data[1]); 2391 switch(msg->Event & 0xFF) { 2392 case MPI_EVENT_UNIT_ATTENTION: 2393 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2394 (data0 >> 8) & 0xff, data0 & 0xff); 2395 break; 2396 2397 case MPI_EVENT_IOC_BUS_RESET: 2398 /* We generated a bus reset */ 2399 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2400 (data0 >> 8) & 0xff); 2401 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2402 break; 2403 2404 case MPI_EVENT_EXT_BUS_RESET: 2405 /* Someone else generated a bus reset */ 2406 mpt_prt(mpt, "External Bus Reset Detected\n"); 2407 /* 2408 * These replies don't return EventData like the MPI 2409 * spec says they do 2410 */ 2411 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2412 break; 2413 2414 case MPI_EVENT_RESCAN: 2415 #if __FreeBSD_version >= 600000 2416 { 2417 union ccb *ccb; 2418 uint32_t pathid; 2419 /* 2420 * In general this means a device has been added to the loop. 2421 */ 2422 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2423 if (mpt->ready == 0) { 2424 break; 2425 } 2426 if (mpt->phydisk_sim) { 2427 pathid = cam_sim_path(mpt->phydisk_sim); 2428 } else { 2429 pathid = cam_sim_path(mpt->sim); 2430 } 2431 MPTLOCK_2_CAMLOCK(mpt); 2432 /* 2433 * Allocate a CCB, create a wildcard path for this bus, 2434 * and schedule a rescan. 2435 */ 2436 ccb = xpt_alloc_ccb_nowait(); 2437 if (ccb == NULL) { 2438 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2439 CAMLOCK_2_MPTLOCK(mpt); 2440 break; 2441 } 2442 2443 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2444 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2445 CAMLOCK_2_MPTLOCK(mpt); 2446 mpt_prt(mpt, "unable to create path for rescan\n"); 2447 xpt_free_ccb(ccb); 2448 break; 2449 } 2450 xpt_rescan(ccb); 2451 CAMLOCK_2_MPTLOCK(mpt); 2452 break; 2453 } 2454 #else 2455 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2456 break; 2457 #endif 2458 case MPI_EVENT_LINK_STATUS_CHANGE: 2459 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2460 (data1 >> 8) & 0xff, 2461 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2462 break; 2463 2464 case MPI_EVENT_LOOP_STATE_CHANGE: 2465 switch ((data0 >> 16) & 0xff) { 2466 case 0x01: 2467 mpt_prt(mpt, 2468 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2469 "(Loop Initialization)\n", 2470 (data1 >> 8) & 0xff, 2471 (data0 >> 8) & 0xff, 2472 (data0 ) & 0xff); 2473 switch ((data0 >> 8) & 0xff) { 2474 case 0xF7: 2475 if ((data0 & 0xff) == 0xF7) { 2476 mpt_prt(mpt, "Device needs AL_PA\n"); 2477 } else { 2478 mpt_prt(mpt, "Device %02x doesn't like " 2479 "FC performance\n", 2480 data0 & 0xFF); 2481 } 2482 break; 2483 case 0xF8: 2484 if ((data0 & 0xff) == 0xF7) { 2485 mpt_prt(mpt, "Device had loop failure " 2486 "at its receiver prior to acquiring" 2487 " AL_PA\n"); 2488 } else { 2489 mpt_prt(mpt, "Device %02x detected loop" 2490 " failure at its receiver\n", 2491 data0 & 0xFF); 2492 } 2493 break; 2494 default: 2495 mpt_prt(mpt, "Device %02x requests that device " 2496 "%02x reset itself\n", 2497 data0 & 0xFF, 2498 (data0 >> 8) & 0xFF); 2499 break; 2500 } 2501 break; 2502 case 0x02: 2503 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2504 "LPE(%02x,%02x) (Loop Port Enable)\n", 2505 (data1 >> 8) & 0xff, /* Port */ 2506 (data0 >> 8) & 0xff, /* Character 3 */ 2507 (data0 ) & 0xff /* Character 4 */); 2508 break; 2509 case 0x03: 2510 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2511 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2512 (data1 >> 8) & 0xff, /* Port */ 2513 (data0 >> 8) & 0xff, /* Character 3 */ 2514 (data0 ) & 0xff /* Character 4 */); 2515 break; 2516 default: 2517 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2518 "FC event (%02x %02x %02x)\n", 2519 (data1 >> 8) & 0xff, /* Port */ 2520 (data0 >> 16) & 0xff, /* Event */ 2521 (data0 >> 8) & 0xff, /* Character 3 */ 2522 (data0 ) & 0xff /* Character 4 */); 2523 } 2524 break; 2525 2526 case MPI_EVENT_LOGOUT: 2527 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2528 (data1 >> 8) & 0xff, data0); 2529 break; 2530 case MPI_EVENT_QUEUE_FULL: 2531 { 2532 struct cam_sim *sim; 2533 struct cam_path *tmppath; 2534 struct ccb_relsim crs; 2535 PTR_EVENT_DATA_QUEUE_FULL pqf; 2536 lun_id_t lun_id; 2537 2538 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2539 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2540 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2541 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2542 if (mpt->phydisk_sim) { 2543 sim = mpt->phydisk_sim; 2544 } else { 2545 sim = mpt->sim; 2546 } 2547 MPTLOCK_2_CAMLOCK(mpt); 2548 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2549 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2550 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2551 mpt_prt(mpt, "unable to create a path to send " 2552 "XPT_REL_SIMQ"); 2553 CAMLOCK_2_MPTLOCK(mpt); 2554 break; 2555 } 2556 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2557 crs.ccb_h.func_code = XPT_REL_SIMQ; 2558 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2559 crs.openings = pqf->CurrentDepth - 1; 2560 xpt_action((union ccb *)&crs); 2561 if (crs.ccb_h.status != CAM_REQ_CMP) { 2562 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2563 } 2564 xpt_free_path(tmppath); 2565 } 2566 CAMLOCK_2_MPTLOCK(mpt); 2567 break; 2568 } 2569 case MPI_EVENT_EVENT_CHANGE: 2570 case MPI_EVENT_INTEGRATED_RAID: 2571 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2572 case MPI_EVENT_SAS_SES: 2573 break; 2574 default: 2575 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2576 (unsigned)msg->Event & 0xFF); 2577 return (0); 2578 } 2579 return (1); 2580 } 2581 2582 /* 2583 * Reply path for all SCSI I/O requests, called from our 2584 * interrupt 
handler by extracting our handler index from 2585 * the MsgContext field of the reply from the IOC. 2586 * 2587 * This routine is optimized for the common case of a 2588 * completion without error. All exception handling is 2589 * offloaded to non-inlined helper routines to minimize 2590 * cache footprint. 2591 */ 2592 static int 2593 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2594 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2595 { 2596 MSG_SCSI_IO_REQUEST *scsi_req; 2597 union ccb *ccb; 2598 2599 if (req->state == REQ_STATE_FREE) { 2600 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2601 return (TRUE); 2602 } 2603 2604 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2605 ccb = req->ccb; 2606 if (ccb == NULL) { 2607 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2608 req, req->serno); 2609 return (TRUE); 2610 } 2611 2612 mpt_req_untimeout(req, mpt_timeout, ccb); 2613 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2614 2615 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2616 bus_dmasync_op_t op; 2617 2618 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2619 op = BUS_DMASYNC_POSTREAD; 2620 else 2621 op = BUS_DMASYNC_POSTWRITE; 2622 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2623 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2624 } 2625 2626 if (reply_frame == NULL) { 2627 /* 2628 * Context only reply, completion without error status. 2629 */ 2630 ccb->csio.resid = 0; 2631 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2632 ccb->csio.scsi_status = SCSI_STATUS_OK; 2633 } else { 2634 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2635 } 2636 2637 if (mpt->outofbeer) { 2638 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2639 mpt->outofbeer = 0; 2640 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2641 } 2642 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2643 struct scsi_inquiry_data *iq = 2644 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2645 if (scsi_req->Function == 2646 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2647 /* 2648 * Fake out the device type so that only the 2649 * pass-thru device will attach. 2650 */ 2651 iq->device &= ~0x1F; 2652 iq->device |= T_NODEVICE; 2653 } 2654 } 2655 if (mpt->verbose == MPT_PRT_DEBUG) { 2656 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2657 req, req->serno); 2658 } 2659 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2660 MPTLOCK_2_CAMLOCK(mpt); 2661 xpt_done(ccb); 2662 CAMLOCK_2_MPTLOCK(mpt); 2663 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2664 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2665 } else { 2666 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2667 req, req->serno); 2668 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2669 } 2670 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2671 ("CCB req needed wakeup")); 2672 #ifdef INVARIANTS 2673 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2674 #endif 2675 mpt_free_request(mpt, req); 2676 return (TRUE); 2677 } 2678 2679 static int 2680 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2681 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2682 { 2683 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2684 2685 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2686 #ifdef INVARIANTS 2687 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2688 #endif 2689 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2690 /* Record IOC Status and Response Code of TMF for any waiters. 
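 * (mpt_bus_reset(), for example, reads these fields from mpt->tmf_req
 * once mpt_wait_req() returns.)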
*/ 2691 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2692 req->ResponseCode = tmf_reply->ResponseCode; 2693 2694 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2695 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2696 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2697 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2698 req->state |= REQ_STATE_DONE; 2699 wakeup(req); 2700 } else { 2701 mpt->tmf_req->state = REQ_STATE_FREE; 2702 } 2703 return (TRUE); 2704 } 2705 2706 /* 2707 * XXX: Move to definitions file 2708 */ 2709 #define ELS 0x22 2710 #define FC4LS 0x32 2711 #define ABTS 0x81 2712 #define BA_ACC 0x84 2713 2714 #define LS_RJT 0x01 2715 #define LS_ACC 0x02 2716 #define PLOGI 0x03 2717 #define LOGO 0x05 2718 #define SRR 0x14 2719 #define PRLI 0x20 2720 #define PRLO 0x21 2721 #define ADISC 0x52 2722 #define RSCN 0x61 2723 2724 static void 2725 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2726 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2727 { 2728 uint32_t fl; 2729 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2730 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2731 2732 /* 2733 * We are going to reuse the ELS request to send this response back. 2734 */ 2735 rsp = &tmp; 2736 memset(rsp, 0, sizeof(*rsp)); 2737 2738 #ifdef USE_IMMEDIATE_LINK_DATA 2739 /* 2740 * Apparently the IMMEDIATE stuff doesn't seem to work. 2741 */ 2742 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2743 #endif 2744 rsp->RspLength = length; 2745 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2746 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2747 2748 /* 2749 * Copy over information from the original reply frame to 2750 * it's correct place in the response. 2751 */ 2752 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2753 2754 /* 2755 * And now copy back the temporary area to the original frame. 2756 */ 2757 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2758 rsp = req->req_vbuf; 2759 2760 #ifdef USE_IMMEDIATE_LINK_DATA 2761 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2762 #else 2763 { 2764 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2765 bus_addr_t paddr = req->req_pbuf; 2766 paddr += MPT_RQSL(mpt); 2767 2768 fl = 2769 MPI_SGE_FLAGS_HOST_TO_IOC | 2770 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2771 MPI_SGE_FLAGS_LAST_ELEMENT | 2772 MPI_SGE_FLAGS_END_OF_LIST | 2773 MPI_SGE_FLAGS_END_OF_BUFFER; 2774 fl <<= MPI_SGE_FLAGS_SHIFT; 2775 fl |= (length); 2776 se->FlagsLength = htole32(fl); 2777 se->Address = htole32((uint32_t) paddr); 2778 } 2779 #endif 2780 2781 /* 2782 * Send it on... 
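 * (The response payload was built in place just past the request
 * frame proper, at req_pbuf + MPT_RQSL(mpt), which is where the
 * non-immediate SGE above points.)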
2783 */ 2784 mpt_send_cmd(mpt, req); 2785 } 2786 2787 static int 2788 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2789 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2790 { 2791 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2792 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2793 U8 rctl; 2794 U8 type; 2795 U8 cmd; 2796 U16 status = le16toh(reply_frame->IOCStatus); 2797 U32 *elsbuf; 2798 int ioindex; 2799 int do_refresh = TRUE; 2800 2801 #ifdef INVARIANTS 2802 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2803 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2804 req, req->serno, rp->Function)); 2805 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2806 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2807 } else { 2808 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2809 } 2810 #endif 2811 mpt_lprt(mpt, MPT_PRT_DEBUG, 2812 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2813 req, req->serno, reply_frame, reply_frame->Function); 2814 2815 if (status != MPI_IOCSTATUS_SUCCESS) { 2816 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2817 status, reply_frame->Function); 2818 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2819 /* 2820 * XXX: to get around shutdown issue 2821 */ 2822 mpt->disabled = 1; 2823 return (TRUE); 2824 } 2825 return (TRUE); 2826 } 2827 2828 /* 2829 * If the function of a link service response, we recycle the 2830 * response to be a refresh for a new link service request. 2831 * 2832 * The request pointer is bogus in this case and we have to fetch 2833 * it based upon the TransactionContext. 2834 */ 2835 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2836 /* Freddie Uncle Charlie Katie */ 2837 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2838 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2839 if (mpt->els_cmd_ptrs[ioindex] == req) { 2840 break; 2841 } 2842 2843 KASSERT(ioindex < mpt->els_cmds_allocated, 2844 ("can't find my mommie!")); 2845 2846 /* remove from active list as we're going to re-post it */ 2847 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2848 req->state &= ~REQ_STATE_QUEUED; 2849 req->state |= REQ_STATE_DONE; 2850 mpt_fc_post_els(mpt, req, ioindex); 2851 return (TRUE); 2852 } 2853 2854 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2855 /* remove from active list as we're done */ 2856 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2857 req->state &= ~REQ_STATE_QUEUED; 2858 req->state |= REQ_STATE_DONE; 2859 if (req->state & REQ_STATE_TIMEDOUT) { 2860 mpt_lprt(mpt, MPT_PRT_DEBUG, 2861 "Sync Primitive Send Completed After Timeout\n"); 2862 mpt_free_request(mpt, req); 2863 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2864 mpt_lprt(mpt, MPT_PRT_DEBUG, 2865 "Async Primitive Send Complete\n"); 2866 mpt_free_request(mpt, req); 2867 } else { 2868 mpt_lprt(mpt, MPT_PRT_DEBUG, 2869 "Sync Primitive Send Complete- Waking Waiter\n"); 2870 wakeup(req); 2871 } 2872 return (TRUE); 2873 } 2874 2875 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2876 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2877 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2878 rp->MsgLength, rp->MsgFlags); 2879 return (TRUE); 2880 } 2881 2882 if (rp->MsgLength <= 5) { 2883 /* 2884 * This is just a ack of an original ELS buffer post 2885 */ 2886 mpt_lprt(mpt, MPT_PRT_DEBUG, 2887 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2888 return (TRUE); 2889 } 2890 2891 2892 rctl = (le32toh(rp->Rctl_Did) & 
MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2893 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2894 2895 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2896 cmd = be32toh(elsbuf[0]) >> 24; 2897 2898 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2899 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2900 return (TRUE); 2901 } 2902 2903 ioindex = le32toh(rp->TransactionContext); 2904 req = mpt->els_cmd_ptrs[ioindex]; 2905 2906 if (rctl == ELS && type == 1) { 2907 switch (cmd) { 2908 case PRLI: 2909 /* 2910 * Send back a PRLI ACC 2911 */ 2912 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2913 le32toh(rp->Wwn.PortNameHigh), 2914 le32toh(rp->Wwn.PortNameLow)); 2915 elsbuf[0] = htobe32(0x02100014); 2916 elsbuf[1] |= htobe32(0x00000100); 2917 elsbuf[4] = htobe32(0x00000002); 2918 if (mpt->role & MPT_ROLE_TARGET) 2919 elsbuf[4] |= htobe32(0x00000010); 2920 if (mpt->role & MPT_ROLE_INITIATOR) 2921 elsbuf[4] |= htobe32(0x00000020); 2922 /* remove from active list as we're done */ 2923 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2924 req->state &= ~REQ_STATE_QUEUED; 2925 req->state |= REQ_STATE_DONE; 2926 mpt_fc_els_send_response(mpt, req, rp, 20); 2927 do_refresh = FALSE; 2928 break; 2929 case PRLO: 2930 memset(elsbuf, 0, 5 * (sizeof (U32))); 2931 elsbuf[0] = htobe32(0x02100014); 2932 elsbuf[1] = htobe32(0x08000100); 2933 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2934 le32toh(rp->Wwn.PortNameHigh), 2935 le32toh(rp->Wwn.PortNameLow)); 2936 /* remove from active list as we're done */ 2937 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2938 req->state &= ~REQ_STATE_QUEUED; 2939 req->state |= REQ_STATE_DONE; 2940 mpt_fc_els_send_response(mpt, req, rp, 20); 2941 do_refresh = FALSE; 2942 break; 2943 default: 2944 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2945 break; 2946 } 2947 } else if (rctl == ABTS && type == 0) { 2948 uint16_t rx_id = le16toh(rp->Rxid); 2949 uint16_t ox_id = le16toh(rp->Oxid); 2950 request_t *tgt_req = NULL; 2951 2952 mpt_prt(mpt, 2953 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2954 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2955 le32toh(rp->Wwn.PortNameLow)); 2956 if (rx_id >= mpt->mpt_max_tgtcmds) { 2957 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2958 } else if (mpt->tgt_cmd_ptrs == NULL) { 2959 mpt_prt(mpt, "No TGT CMD PTRS\n"); 2960 } else { 2961 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 2962 } 2963 if (tgt_req) { 2964 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 2965 union ccb *ccb = tgt->ccb; 2966 uint32_t ct_id; 2967 2968 /* 2969 * Check to make sure we have the correct command. 2970 * The reply descriptor in the target state should 2971 * contain an IoIndex that should match the 2972 * RX_ID. 2973 * 2974 * It'd be nice to have OX_ID to crosscheck with 2975 * as well.
2976 */ 2977 ct_id = GET_IO_INDEX(tgt->reply_desc); 2978 2979 if (ct_id != rx_id) { 2980 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2981 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 2982 rx_id, ct_id); 2983 goto skip; 2984 } 2985 2986 ccb = tgt->ccb; 2987 if (ccb) { 2988 mpt_prt(mpt, 2989 "CCB (%p): lun %u flags %x status %x\n", 2990 ccb, ccb->ccb_h.target_lun, 2991 ccb->ccb_h.flags, ccb->ccb_h.status); 2992 } 2993 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 2994 "%x nxfers %x\n", tgt->state, 2995 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 2996 tgt->nxfers); 2997 skip: 2998 if (mpt_abort_target_cmd(mpt, tgt_req)) { 2999 mpt_prt(mpt, "unable to start TargetAbort\n"); 3000 } 3001 } else { 3002 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 3003 } 3004 memset(elsbuf, 0, 5 * (sizeof (U32))); 3005 elsbuf[0] = htobe32(0); 3006 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3007 elsbuf[2] = htobe32(0x000ffff); 3008 /* 3009 * Dork with the reply frame so that the reponse to it 3010 * will be correct. 3011 */ 3012 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3013 /* remove from active list as we're done */ 3014 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3015 req->state &= ~REQ_STATE_QUEUED; 3016 req->state |= REQ_STATE_DONE; 3017 mpt_fc_els_send_response(mpt, req, rp, 12); 3018 do_refresh = FALSE; 3019 } else { 3020 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3021 } 3022 if (do_refresh == TRUE) { 3023 /* remove from active list as we're done */ 3024 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3025 req->state &= ~REQ_STATE_QUEUED; 3026 req->state |= REQ_STATE_DONE; 3027 mpt_fc_post_els(mpt, req, ioindex); 3028 } 3029 return (TRUE); 3030 } 3031 3032 /* 3033 * Clean up all SCSI Initiator personality state in response 3034 * to a controller reset. 3035 */ 3036 static void 3037 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3038 { 3039 /* 3040 * The pending list is already run down by 3041 * the generic handler. Perform the same 3042 * operation on the timed out request list. 3043 */ 3044 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3045 MPI_IOCSTATUS_INVALID_STATE); 3046 3047 /* 3048 * XXX: We need to repost ELS and Target Command Buffers? 3049 */ 3050 3051 /* 3052 * Inform the XPT that a bus reset has occurred. 3053 */ 3054 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3055 } 3056 3057 /* 3058 * Parse additional completion information in the reply 3059 * frame for SCSI I/O requests. 
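 * The residual is computed from TransferCount, valid autosense data
 * is copied into the CCB, and the IOCStatus and SCSIState fields are
 * mapped to a CAM status.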
3060 */ 3061 static int 3062 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3063 MSG_DEFAULT_REPLY *reply_frame) 3064 { 3065 union ccb *ccb; 3066 MSG_SCSI_IO_REPLY *scsi_io_reply; 3067 u_int ioc_status; 3068 u_int sstate; 3069 3070 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3071 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3072 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3073 ("MPT SCSI I/O Handler called with incorrect reply type")); 3074 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3075 ("MPT SCSI I/O Handler called with continuation reply")); 3076 3077 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3078 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3079 ioc_status &= MPI_IOCSTATUS_MASK; 3080 sstate = scsi_io_reply->SCSIState; 3081 3082 ccb = req->ccb; 3083 ccb->csio.resid = 3084 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3085 3086 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3087 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3088 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3089 ccb->csio.sense_resid = 3090 ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount); 3091 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3092 min(ccb->csio.sense_len, 3093 le32toh(scsi_io_reply->SenseCount))); 3094 } 3095 3096 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3097 /* 3098 * Tag messages rejected, but non-tagged retry 3099 * was successful. 3100 XXXX 3101 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3102 */ 3103 } 3104 3105 switch(ioc_status) { 3106 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3107 /* 3108 * XXX 3109 * Linux driver indicates that a zero 3110 * transfer length with this error code 3111 * indicates a CRC error. 3112 * 3113 * No need to swap the bytes for checking 3114 * against zero. 3115 */ 3116 if (scsi_io_reply->TransferCount == 0) { 3117 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3118 break; 3119 } 3120 /* FALLTHROUGH */ 3121 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3122 case MPI_IOCSTATUS_SUCCESS: 3123 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3124 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3125 /* 3126 * Status was never returned for this transaction. 3127 */ 3128 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3129 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3130 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3131 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3132 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3133 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3134 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3135 3136 /* XXX Handle SPI-Packet and FCP-2 reponse info. */ 3137 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3138 } else 3139 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3140 break; 3141 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3142 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3143 break; 3144 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3145 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3146 break; 3147 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3148 /* 3149 * Since selection timeouts and "device really not 3150 * there" are grouped into this error code, report 3151 * selection timeout. Selection timeouts are 3152 * typically retried before giving up on the device 3153 * whereas "device not there" errors are considered 3154 * unretryable. 
3155 */ 3156 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3157 break; 3158 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3159 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3160 break; 3161 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3162 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3163 break; 3164 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3165 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3166 break; 3167 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3168 ccb->ccb_h.status = CAM_UA_TERMIO; 3169 break; 3170 case MPI_IOCSTATUS_INVALID_STATE: 3171 /* 3172 * The IOC has been reset. Emulate a bus reset. 3173 */ 3174 /* FALLTHROUGH */ 3175 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3176 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3177 break; 3178 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3179 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3180 /* 3181 * Don't clobber any timeout status that has 3182 * already been set for this transaction. We 3183 * want the SCSI layer to be able to differentiate 3184 * between the command we aborted due to timeout 3185 * and any innocent bystanders. 3186 */ 3187 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3188 break; 3189 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3190 break; 3191 3192 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3193 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3194 break; 3195 case MPI_IOCSTATUS_BUSY: 3196 mpt_set_ccb_status(ccb, CAM_BUSY); 3197 break; 3198 case MPI_IOCSTATUS_INVALID_FUNCTION: 3199 case MPI_IOCSTATUS_INVALID_SGL: 3200 case MPI_IOCSTATUS_INTERNAL_ERROR: 3201 case MPI_IOCSTATUS_INVALID_FIELD: 3202 default: 3203 /* XXX 3204 * Some of the above may need to kick 3205 * of a recovery action!!!! 3206 */ 3207 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3208 break; 3209 } 3210 3211 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3212 mpt_freeze_ccb(ccb); 3213 } 3214 3215 return (TRUE); 3216 } 3217 3218 static void 3219 mpt_action(struct cam_sim *sim, union ccb *ccb) 3220 { 3221 struct mpt_softc *mpt; 3222 struct ccb_trans_settings *cts; 3223 target_id_t tgt; 3224 lun_id_t lun; 3225 int raid_passthru; 3226 3227 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3228 3229 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3230 raid_passthru = (sim == mpt->phydisk_sim); 3231 MPT_LOCK_ASSERT(mpt); 3232 3233 tgt = ccb->ccb_h.target_id; 3234 lun = ccb->ccb_h.target_lun; 3235 if (raid_passthru && 3236 ccb->ccb_h.func_code != XPT_PATH_INQ && 3237 ccb->ccb_h.func_code != XPT_RESET_BUS && 3238 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3239 CAMLOCK_2_MPTLOCK(mpt); 3240 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3241 MPTLOCK_2_CAMLOCK(mpt); 3242 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3243 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3244 xpt_done(ccb); 3245 return; 3246 } 3247 MPTLOCK_2_CAMLOCK(mpt); 3248 } 3249 ccb->ccb_h.ccb_mpt_ptr = mpt; 3250 3251 switch (ccb->ccb_h.func_code) { 3252 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3253 /* 3254 * Do a couple of preliminary checks... 
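 * (A CDB passed as a physical-address pointer is rejected, as is any
 * CDB longer than the CDB field of the MPI SCSI_IO request.)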
3255 */ 3256 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3257 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3258 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3259 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3260 break; 3261 } 3262 } 3263 /* Max supported CDB length is 16 bytes */ 3264 /* XXX Unless we implement the new 32byte message type */ 3265 if (ccb->csio.cdb_len > 3266 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3267 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3268 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3269 break; 3270 } 3271 #ifdef MPT_TEST_MULTIPATH 3272 if (mpt->failure_id == ccb->ccb_h.target_id) { 3273 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3274 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3275 break; 3276 } 3277 #endif 3278 ccb->csio.scsi_status = SCSI_STATUS_OK; 3279 mpt_start(sim, ccb); 3280 return; 3281 3282 case XPT_RESET_BUS: 3283 if (raid_passthru) { 3284 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3285 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3286 break; 3287 } 3288 case XPT_RESET_DEV: 3289 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3290 if (bootverbose) { 3291 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3292 } 3293 } else { 3294 xpt_print(ccb->ccb_h.path, "reset device\n"); 3295 } 3296 CAMLOCK_2_MPTLOCK(mpt); 3297 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3298 MPTLOCK_2_CAMLOCK(mpt); 3299 3300 /* 3301 * mpt_bus_reset is always successful in that it 3302 * will fall back to a hard reset should a bus 3303 * reset attempt fail. 3304 */ 3305 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3306 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3307 break; 3308 3309 case XPT_ABORT: 3310 { 3311 union ccb *accb = ccb->cab.abort_ccb; 3312 CAMLOCK_2_MPTLOCK(mpt); 3313 switch (accb->ccb_h.func_code) { 3314 case XPT_ACCEPT_TARGET_IO: 3315 case XPT_IMMED_NOTIFY: 3316 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3317 break; 3318 case XPT_CONT_TARGET_IO: 3319 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3320 ccb->ccb_h.status = CAM_UA_ABORT; 3321 break; 3322 case XPT_SCSI_IO: 3323 ccb->ccb_h.status = CAM_UA_ABORT; 3324 break; 3325 default: 3326 ccb->ccb_h.status = CAM_REQ_INVALID; 3327 break; 3328 } 3329 MPTLOCK_2_CAMLOCK(mpt); 3330 break; 3331 } 3332 3333 #ifdef CAM_NEW_TRAN_CODE 3334 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3335 #else 3336 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) 3337 #endif 3338 #define DP_DISC_ENABLE 0x1 3339 #define DP_DISC_DISABL 0x2 3340 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3341 3342 #define DP_TQING_ENABLE 0x4 3343 #define DP_TQING_DISABL 0x8 3344 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3345 3346 #define DP_WIDE 0x10 3347 #define DP_NARROW 0x20 3348 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3349 3350 #define DP_SYNC 0x40 3351 3352 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3353 { 3354 #ifdef CAM_NEW_TRAN_CODE 3355 struct ccb_trans_settings_scsi *scsi; 3356 struct ccb_trans_settings_spi *spi; 3357 #endif 3358 uint8_t dval; 3359 u_int period; 3360 u_int offset; 3361 int i, j; 3362 3363 cts = &ccb->cts; 3364 3365 if (mpt->is_fc || mpt->is_sas) { 3366 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3367 break; 3368 } 3369 3370 #ifdef CAM_NEW_TRAN_CODE 3371 scsi = &cts->proto_specific.scsi; 3372 spi = &cts->xport_specific.spi; 3373 3374 /* 3375 * We can be called just to valid transport and proto versions 3376 */ 3377 if (scsi->valid == 0 && spi->valid == 0) { 3378 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3379 break; 3380 } 3381 #endif 3382 3383 /* 3384 * Skip attempting settings on RAID volume disks. 
3385 * Other devices on the bus get the normal treatment. 3386 */ 3387 if (mpt->phydisk_sim && raid_passthru == 0 && 3388 mpt_is_raid_volume(mpt, tgt) != 0) { 3389 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3390 "no transfer settings for RAID vols\n"); 3391 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3392 break; 3393 } 3394 3395 i = mpt->mpt_port_page2.PortSettings & 3396 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3397 j = mpt->mpt_port_page2.PortFlags & 3398 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3399 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3400 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3401 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3402 "honoring BIOS transfer negotiations\n"); 3403 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3404 break; 3405 } 3406 3407 dval = 0; 3408 period = 0; 3409 offset = 0; 3410 3411 #ifndef CAM_NEW_TRAN_CODE 3412 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3413 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3414 DP_DISC_ENABLE : DP_DISC_DISABL; 3415 } 3416 3417 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3418 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3419 DP_TQING_ENABLE : DP_TQING_DISABL; 3420 } 3421 3422 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3423 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3424 } 3425 3426 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3427 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3428 dval |= DP_SYNC; 3429 period = cts->sync_period; 3430 offset = cts->sync_offset; 3431 } 3432 #else 3433 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3434 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3435 DP_DISC_ENABLE : DP_DISC_DISABL; 3436 } 3437 3438 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3439 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3440 DP_TQING_ENABLE : DP_TQING_DISABL; 3441 } 3442 3443 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3444 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 
3445 DP_WIDE : DP_NARROW; 3446 } 3447 3448 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3449 dval |= DP_SYNC; 3450 offset = spi->sync_offset; 3451 } else { 3452 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3453 &mpt->mpt_dev_page1[tgt]; 3454 offset = ptr->RequestedParameters; 3455 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3456 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3457 } 3458 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3459 dval |= DP_SYNC; 3460 period = spi->sync_period; 3461 } else { 3462 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3463 &mpt->mpt_dev_page1[tgt]; 3464 period = ptr->RequestedParameters; 3465 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3466 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3467 } 3468 #endif 3469 CAMLOCK_2_MPTLOCK(mpt); 3470 if (dval & DP_DISC_ENABLE) { 3471 mpt->mpt_disc_enable |= (1 << tgt); 3472 } else if (dval & DP_DISC_DISABL) { 3473 mpt->mpt_disc_enable &= ~(1 << tgt); 3474 } 3475 if (dval & DP_TQING_ENABLE) { 3476 mpt->mpt_tag_enable |= (1 << tgt); 3477 } else if (dval & DP_TQING_DISABL) { 3478 mpt->mpt_tag_enable &= ~(1 << tgt); 3479 } 3480 if (dval & DP_WIDTH) { 3481 mpt_setwidth(mpt, tgt, 1); 3482 } 3483 if (dval & DP_SYNC) { 3484 mpt_setsync(mpt, tgt, period, offset); 3485 } 3486 if (dval == 0) { 3487 MPTLOCK_2_CAMLOCK(mpt); 3488 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3489 break; 3490 } 3491 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3492 "set [%d]: 0x%x period 0x%x offset %d\n", 3493 tgt, dval, period, offset); 3494 if (mpt_update_spi_config(mpt, tgt)) { 3495 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3496 } else { 3497 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3498 } 3499 MPTLOCK_2_CAMLOCK(mpt); 3500 break; 3501 } 3502 case XPT_GET_TRAN_SETTINGS: 3503 { 3504 #ifdef CAM_NEW_TRAN_CODE 3505 struct ccb_trans_settings_scsi *scsi; 3506 cts = &ccb->cts; 3507 cts->protocol = PROTO_SCSI; 3508 if (mpt->is_fc) { 3509 struct ccb_trans_settings_fc *fc = 3510 &cts->xport_specific.fc; 3511 cts->protocol_version = SCSI_REV_SPC; 3512 cts->transport = XPORT_FC; 3513 cts->transport_version = 0; 3514 fc->valid = CTS_FC_VALID_SPEED; 3515 fc->bitrate = 100000; 3516 } else if (mpt->is_sas) { 3517 struct ccb_trans_settings_sas *sas = 3518 &cts->xport_specific.sas; 3519 cts->protocol_version = SCSI_REV_SPC2; 3520 cts->transport = XPORT_SAS; 3521 cts->transport_version = 0; 3522 sas->valid = CTS_SAS_VALID_SPEED; 3523 sas->bitrate = 300000; 3524 } else { 3525 cts->protocol_version = SCSI_REV_2; 3526 cts->transport = XPORT_SPI; 3527 cts->transport_version = 2; 3528 if (mpt_get_spi_settings(mpt, cts) != 0) { 3529 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3530 break; 3531 } 3532 } 3533 scsi = &cts->proto_specific.scsi; 3534 scsi->valid = CTS_SCSI_VALID_TQ; 3535 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3536 #else 3537 cts = &ccb->cts; 3538 if (mpt->is_fc) { 3539 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3540 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3541 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3542 } else if (mpt->is_sas) { 3543 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3544 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3545 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3546 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3547 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3548 break; 3549 } 3550 #endif 3551 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3552 break; 3553 } 3554 case XPT_CALC_GEOMETRY: 3555 { 3556 struct ccb_calc_geometry *ccg; 3557 3558 ccg = &ccb->ccg; 3559 if (ccg->block_size == 0) { 3560 ccb->ccb_h.status &= 
~CAM_SIM_QUEUED; 3561 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3562 break; 3563 } 3564 mpt_calc_geometry(ccg, /*extended*/1); 3565 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 3566 break; 3567 } 3568 case XPT_PATH_INQ: /* Path routing inquiry */ 3569 { 3570 struct ccb_pathinq *cpi = &ccb->cpi; 3571 3572 cpi->version_num = 1; 3573 cpi->target_sprt = 0; 3574 cpi->hba_eng_cnt = 0; 3575 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3576 /* 3577 * FC cards report MAX_DEVICES of 512, but 3578 * the MSG_SCSI_IO_REQUEST target id field 3579 * is only 8 bits. Until we fix the driver 3580 * to support 'channels' for bus overflow, 3581 * just limit it. 3582 */ 3583 if (cpi->max_target > 255) { 3584 cpi->max_target = 255; 3585 } 3586 3587 /* 3588 * VMware ESX reports > 16 devices and then dies when we probe. 3589 */ 3590 if (mpt->is_spi && cpi->max_target > 15) { 3591 cpi->max_target = 15; 3592 } 3593 if (mpt->is_spi) 3594 cpi->max_lun = 7; 3595 else 3596 cpi->max_lun = MPT_MAX_LUNS; 3597 cpi->initiator_id = mpt->mpt_ini_id; 3598 cpi->bus_id = cam_sim_bus(sim); 3599 3600 /* 3601 * The base speed is the speed of the underlying connection. 3602 */ 3603 #ifdef CAM_NEW_TRAN_CODE 3604 cpi->protocol = PROTO_SCSI; 3605 if (mpt->is_fc) { 3606 cpi->hba_misc = PIM_NOBUSRESET; 3607 cpi->base_transfer_speed = 100000; 3608 cpi->hba_inquiry = PI_TAG_ABLE; 3609 cpi->transport = XPORT_FC; 3610 cpi->transport_version = 0; 3611 cpi->protocol_version = SCSI_REV_SPC; 3612 } else if (mpt->is_sas) { 3613 cpi->hba_misc = PIM_NOBUSRESET; 3614 cpi->base_transfer_speed = 300000; 3615 cpi->hba_inquiry = PI_TAG_ABLE; 3616 cpi->transport = XPORT_SAS; 3617 cpi->transport_version = 0; 3618 cpi->protocol_version = SCSI_REV_SPC2; 3619 } else { 3620 cpi->hba_misc = PIM_SEQSCAN; 3621 cpi->base_transfer_speed = 3300; 3622 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3623 cpi->transport = XPORT_SPI; 3624 cpi->transport_version = 2; 3625 cpi->protocol_version = SCSI_REV_2; 3626 } 3627 #else 3628 if (mpt->is_fc) { 3629 cpi->hba_misc = PIM_NOBUSRESET; 3630 cpi->base_transfer_speed = 100000; 3631 cpi->hba_inquiry = PI_TAG_ABLE; 3632 } else if (mpt->is_sas) { 3633 cpi->hba_misc = PIM_NOBUSRESET; 3634 cpi->base_transfer_speed = 300000; 3635 cpi->hba_inquiry = PI_TAG_ABLE; 3636 } else { 3637 cpi->hba_misc = PIM_SEQSCAN; 3638 cpi->base_transfer_speed = 3300; 3639 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3640 } 3641 #endif 3642 3643 /* 3644 * We give our fake RAID passhtru bus a width that is MaxVolumes 3645 * wide and restrict it to one lun. 
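 * (The initiator ID below is set one past the last physical disk,
 * presumably so that it can never collide with a real target.)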
3646 */ 3647 if (raid_passthru) { 3648 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3649 cpi->initiator_id = cpi->max_target + 1; 3650 cpi->max_lun = 0; 3651 } 3652 3653 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3654 cpi->hba_misc |= PIM_NOINITIATOR; 3655 } 3656 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3657 cpi->target_sprt = 3658 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3659 } else { 3660 cpi->target_sprt = 0; 3661 } 3662 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3663 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3664 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3665 cpi->unit_number = cam_sim_unit(sim); 3666 cpi->ccb_h.status = CAM_REQ_CMP; 3667 break; 3668 } 3669 case XPT_EN_LUN: /* Enable LUN as a target */ 3670 { 3671 int result; 3672 3673 CAMLOCK_2_MPTLOCK(mpt); 3674 if (ccb->cel.enable) 3675 result = mpt_enable_lun(mpt, 3676 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3677 else 3678 result = mpt_disable_lun(mpt, 3679 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3680 MPTLOCK_2_CAMLOCK(mpt); 3681 if (result == 0) { 3682 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3683 } else { 3684 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3685 } 3686 break; 3687 } 3688 case XPT_NOTIFY_ACK: /* recycle notify ack */ 3689 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 3690 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3691 { 3692 tgt_resource_t *trtp; 3693 lun_id_t lun = ccb->ccb_h.target_lun; 3694 ccb->ccb_h.sim_priv.entries[0].field = 0; 3695 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3696 ccb->ccb_h.flags = 0; 3697 3698 if (lun == CAM_LUN_WILDCARD) { 3699 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3700 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3701 break; 3702 } 3703 trtp = &mpt->trt_wildcard; 3704 } else if (lun >= MPT_MAX_LUNS) { 3705 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3706 break; 3707 } else { 3708 trtp = &mpt->trt[lun]; 3709 } 3710 CAMLOCK_2_MPTLOCK(mpt); 3711 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3712 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3713 "Put FREE ATIO %p lun %d\n", ccb, lun); 3714 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3715 sim_links.stqe); 3716 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 3717 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3718 "Put FREE INOT lun %d\n", lun); 3719 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3720 sim_links.stqe); 3721 } else { 3722 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3723 } 3724 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3725 MPTLOCK_2_CAMLOCK(mpt); 3726 return; 3727 } 3728 case XPT_CONT_TARGET_IO: 3729 CAMLOCK_2_MPTLOCK(mpt); 3730 mpt_target_start_io(mpt, ccb); 3731 MPTLOCK_2_CAMLOCK(mpt); 3732 return; 3733 3734 default: 3735 ccb->ccb_h.status = CAM_REQ_INVALID; 3736 break; 3737 } 3738 xpt_done(ccb); 3739 } 3740 3741 static int 3742 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3743 { 3744 #ifdef CAM_NEW_TRAN_CODE 3745 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3746 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3747 #endif 3748 target_id_t tgt; 3749 uint32_t dval, pval, oval; 3750 int rv; 3751 3752 if (IS_CURRENT_SETTINGS(cts) == 0) { 3753 tgt = cts->ccb_h.target_id; 3754 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3755 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3756 return (-1); 3757 } 3758 } else { 3759 tgt = cts->ccb_h.target_id; 3760 } 3761 3762 /* 3763 * We aren't looking at Port Page 2 BIOS settings here- 3764 * sometimes these have been known to be bogus 
XXX. 3765 * 3766 * For user settings, we pick the max from port page 0 3767 * 3768 * For current settings we read the current settings out from 3769 * device page 0 for that target. 3770 */ 3771 if (IS_CURRENT_SETTINGS(cts)) { 3772 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3773 dval = 0; 3774 3775 CAMLOCK_2_MPTLOCK(mpt); 3776 tmp = mpt->mpt_dev_page0[tgt]; 3777 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3778 sizeof(tmp), FALSE, 5000); 3779 if (rv) { 3780 MPTLOCK_2_CAMLOCK(mpt); 3781 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3782 return (rv); 3783 } 3784 mpt2host_config_page_scsi_device_0(&tmp); 3785 3786 MPTLOCK_2_CAMLOCK(mpt); 3787 mpt_lprt(mpt, MPT_PRT_DEBUG, 3788 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", 3789 tgt, 3790 (unsigned)tmp.NegotiatedParameters, 3791 (unsigned)tmp.Information); 3792 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3793 DP_WIDE : DP_NARROW; 3794 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3795 DP_DISC_ENABLE : DP_DISC_DISABL; 3796 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3797 DP_TQING_ENABLE : DP_TQING_DISABL; 3798 oval = tmp.NegotiatedParameters; 3799 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3800 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3801 pval = tmp.NegotiatedParameters; 3802 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3803 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3804 mpt->mpt_dev_page0[tgt] = tmp; 3805 } else { 3806 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3807 oval = mpt->mpt_port_page0.Capabilities; 3808 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3809 pval = mpt->mpt_port_page0.Capabilities; 3810 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3811 } 3812 3813 #ifndef CAM_NEW_TRAN_CODE 3814 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3815 cts->valid = 0; 3816 cts->sync_period = pval; 3817 cts->sync_offset = oval; 3818 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3819 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3820 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3821 if (dval & DP_WIDE) { 3822 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3823 } else { 3824 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3825 } 3826 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3827 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3828 if (dval & DP_DISC_ENABLE) { 3829 cts->flags |= CCB_TRANS_DISC_ENB; 3830 } 3831 if (dval & DP_TQING_ENABLE) { 3832 cts->flags |= CCB_TRANS_TAG_ENB; 3833 } 3834 } 3835 #else 3836 spi->valid = 0; 3837 scsi->valid = 0; 3838 spi->flags = 0; 3839 scsi->flags = 0; 3840 spi->sync_offset = oval; 3841 spi->sync_period = pval; 3842 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3843 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3844 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3845 if (dval & DP_WIDE) { 3846 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3847 } else { 3848 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3849 } 3850 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3851 scsi->valid = CTS_SCSI_VALID_TQ; 3852 if (dval & DP_TQING_ENABLE) { 3853 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3854 } 3855 spi->valid |= CTS_SPI_VALID_DISC; 3856 if (dval & DP_DISC_ENABLE) { 3857 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3858 } 3859 } 3860 #endif 3861 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3862 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3863 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); 3864 return (0); 3865 } 3866 3867 static void 3868 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3869 { 3870 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3871 3872 ptr = &mpt->mpt_dev_page1[tgt]; 3873 if (onoff) { 3874 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3875 } else { 3876 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3877 } 3878 } 3879 3880 static void 3881 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3882 { 3883 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3884 3885 ptr = &mpt->mpt_dev_page1[tgt]; 3886 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3887 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3888 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3889 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3890 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3891 if (period == 0) { 3892 return; 3893 } 3894 ptr->RequestedParameters |= 3895 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3896 ptr->RequestedParameters |= 3897 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3898 if (period < 0xa) { 3899 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3900 } 3901 if (period < 0x9) { 3902 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3903 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3904 } 3905 } 3906 3907 static int 3908 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3909 { 3910 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3911 int rv; 3912 3913 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3914 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3915 tgt, 3916 (unsigned)mpt->mpt_dev_page1[tgt].RequestedParameters); 3917 tmp = mpt->mpt_dev_page1[tgt]; 3918 host2mpt_config_page_scsi_device_1(&tmp); 3919 rv = mpt_write_cur_cfg_page(mpt, tgt, 3920 &tmp.Header, sizeof(tmp), FALSE, 5000); 3921 if (rv) { 3922 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3923 return (-1); 3924 } 3925 return (0); 3926 } 3927 3928 static void 3929 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3930 { 3931 #if __FreeBSD_version >= 500000 3932 cam_calc_geometry(ccg, extended); 3933 #else 3934 uint32_t size_mb; 3935 uint32_t secs_per_cylinder; 3936 3937 if (ccg->block_size == 0) { 3938 ccg->ccb_h.status = CAM_REQ_INVALID; 3939 return; 3940 } 3941 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3942 if (size_mb > 1024 && extended) { 3943 ccg->heads = 255; 3944 ccg->secs_per_track = 63; 3945 } else { 3946 ccg->heads = 64; 3947 ccg->secs_per_track = 32; 3948 } 3949 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3950 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3951 ccg->ccb_h.status = CAM_REQ_CMP; 3952 #endif 3953 } 3954 3955 /****************************** Timeout Recovery ******************************/ 3956 static int 3957 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3958 { 3959 int error; 3960 3961 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3962 &mpt->recovery_thread, /*flags*/0, 3963 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3964 return (error); 3965 } 3966 3967 static void 3968 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3969 { 3970 if (mpt->recovery_thread == NULL) { 3971 return; 3972 } 3973 mpt->shutdwn_recovery = 1; 3974 wakeup(mpt); 3975 /* 3976 * Sleep on a slightly different location 3977 * for this interlock just for added safety. 
3978 */ 3979 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3980 } 3981 3982 static void 3983 mpt_recovery_thread(void *arg) 3984 { 3985 struct mpt_softc *mpt; 3986 3987 mpt = (struct mpt_softc *)arg; 3988 3989 get_mplock(); 3990 MPT_LOCK(mpt); 3991 3992 for (;;) { 3993 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3994 if (mpt->shutdwn_recovery == 0) { 3995 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3996 } 3997 } 3998 if (mpt->shutdwn_recovery != 0) { 3999 break; 4000 } 4001 mpt_recover_commands(mpt); 4002 } 4003 mpt->recovery_thread = NULL; 4004 wakeup(&mpt->recovery_thread); 4005 MPT_UNLOCK(mpt); 4006 rel_mplock(); 4007 } 4008 4009 static int 4010 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 4011 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 4012 { 4013 MSG_SCSI_TASK_MGMT *tmf_req; 4014 int error; 4015 4016 /* 4017 * Wait for any current TMF request to complete. 4018 * We're only allowed to issue one TMF at a time. 4019 */ 4020 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 4021 sleep_ok, MPT_TMF_MAX_TIMEOUT); 4022 if (error != 0) { 4023 mpt_reset(mpt, TRUE); 4024 return (ETIMEDOUT); 4025 } 4026 4027 mpt_assign_serno(mpt, mpt->tmf_req); 4028 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 4029 4030 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 4031 memset(tmf_req, 0, sizeof(*tmf_req)); 4032 tmf_req->TargetID = target; 4033 tmf_req->Bus = channel; 4034 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 4035 tmf_req->TaskType = type; 4036 tmf_req->MsgFlags = flags; 4037 tmf_req->MsgContext = 4038 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 4039 if (lun > MPT_MAX_LUNS) { 4040 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4041 tmf_req->LUN[1] = lun & 0xff; 4042 } else { 4043 tmf_req->LUN[1] = lun; 4044 } 4045 tmf_req->TaskMsgContext = abort_ctx; 4046 4047 mpt_lprt(mpt, MPT_PRT_DEBUG, 4048 "Issuing TMF %p:%u with MsgContext of 0x%x\n", 4049 mpt->tmf_req, 4050 (unsigned)mpt->tmf_req->serno, 4051 (unsigned)tmf_req->MsgContext); 4052 if (mpt->verbose > MPT_PRT_DEBUG) { 4053 mpt_print_request(tmf_req); 4054 } 4055 4056 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 4057 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 4058 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 4059 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 4060 if (error != MPT_OK) { 4061 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 4062 mpt->tmf_req->state = REQ_STATE_FREE; 4063 mpt_reset(mpt, TRUE); 4064 } 4065 return (error); 4066 } 4067 4068 /* 4069 * When a command times out, it is placed on the requeust_timeout_list 4070 * and we wake our recovery thread. The MPT-Fusion architecture supports 4071 * only a single TMF operation at a time, so we serially abort/bdr, etc, 4072 * the timedout transactions. The next TMF is issued either by the 4073 * completion handler of the current TMF waking our recovery thread, 4074 * or the TMF timeout handler causing a hard reset sequence. 4075 */ 4076 static void 4077 mpt_recover_commands(struct mpt_softc *mpt) 4078 { 4079 request_t *req; 4080 union ccb *ccb; 4081 int error; 4082 4083 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4084 /* 4085 * No work to do- leave. 4086 */ 4087 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4088 return; 4089 } 4090 4091 /* 4092 * Flush any commands whose completion coincides with their timeout. 
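	 * (mpt_intr() is the driver's interrupt/reply handler; running it
	 * here drains any replies that have already been posted, so requests
	 * that really did complete are pulled off the timeout list before we
	 * start issuing aborts for the rest.)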
4093 */ 4094 mpt_intr(mpt); 4095 4096 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4097 /* 4098 * The timedout commands have already 4099 * completed. This typically means 4100 * that either the timeout value was on 4101 * the hairy edge of what the device 4102 * requires or - more likely - interrupts 4103 * are not happening. 4104 */ 4105 mpt_prt(mpt, "Timedout requests already complete. " 4106 "Interrupts may not be functioning.\n"); 4107 mpt_enable_ints(mpt); 4108 return; 4109 } 4110 4111 /* 4112 * We have no visibility into the current state of the 4113 * controller, so attempt to abort the commands in the 4114 * order they timed-out. For initiator commands, we 4115 * depend on the reply handler pulling requests off 4116 * the timeout list. 4117 */ 4118 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4119 uint16_t status; 4120 uint8_t response; 4121 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4122 4123 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4124 req, req->serno, hdrp->Function); 4125 ccb = req->ccb; 4126 if (ccb == NULL) { 4127 mpt_prt(mpt, "null ccb in timed out request. " 4128 "Resetting Controller.\n"); 4129 mpt_reset(mpt, TRUE); 4130 continue; 4131 } 4132 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4133 4134 /* 4135 * Check to see if this is not an initiator command and 4136 * deal with it differently if it is. 4137 */ 4138 switch (hdrp->Function) { 4139 case MPI_FUNCTION_SCSI_IO_REQUEST: 4140 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4141 break; 4142 default: 4143 /* 4144 * XXX: FIX ME: need to abort target assists... 4145 */ 4146 mpt_prt(mpt, "just putting it back on the pend q\n"); 4147 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4148 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4149 links); 4150 continue; 4151 } 4152 4153 error = mpt_scsi_send_tmf(mpt, 4154 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4155 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4156 htole32(req->index | scsi_io_handler_id), TRUE); 4157 4158 if (error != 0) { 4159 /* 4160 * mpt_scsi_send_tmf hard resets on failure, so no 4161 * need to do so here. Our queue should be emptied 4162 * by the hard reset. 4163 */ 4164 continue; 4165 } 4166 4167 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4168 REQ_STATE_DONE, TRUE, 500); 4169 4170 status = le16toh(mpt->tmf_req->IOCStatus); 4171 response = mpt->tmf_req->ResponseCode; 4172 mpt->tmf_req->state = REQ_STATE_FREE; 4173 4174 if (error != 0) { 4175 /* 4176 * If we've errored out,, reset the controller. 4177 */ 4178 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4179 "Resetting controller\n"); 4180 mpt_reset(mpt, TRUE); 4181 continue; 4182 } 4183 4184 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4185 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4186 "Resetting controller.\n", status); 4187 mpt_reset(mpt, TRUE); 4188 continue; 4189 } 4190 4191 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4192 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4193 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 4194 "Resetting controller.\n", response); 4195 mpt_reset(mpt, TRUE); 4196 continue; 4197 } 4198 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4199 } 4200 } 4201 4202 /************************ Target Mode Support ****************************/ 4203 static void 4204 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4205 { 4206 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4207 PTR_SGE_TRANSACTION32 tep; 4208 PTR_SGE_SIMPLE32 se; 4209 bus_addr_t paddr; 4210 uint32_t fl; 4211 4212 paddr = req->req_pbuf; 4213 paddr += MPT_RQSL(mpt); 4214 4215 fc = req->req_vbuf; 4216 memset(fc, 0, MPT_REQUEST_AREA); 4217 fc->BufferCount = 1; 4218 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4219 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4220 4221 /* 4222 * Okay, set up ELS buffer pointers. ELS buffer pointers 4223 * consist of a TE SGL element (with details length of zero) 4224 * followe by a SIMPLE SGL element which holds the address 4225 * of the buffer. 4226 */ 4227 4228 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4229 4230 tep->ContextSize = 4; 4231 tep->Flags = 0; 4232 tep->TransactionContext[0] = htole32(ioindex); 4233 4234 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4235 fl = 4236 MPI_SGE_FLAGS_HOST_TO_IOC | 4237 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4238 MPI_SGE_FLAGS_LAST_ELEMENT | 4239 MPI_SGE_FLAGS_END_OF_LIST | 4240 MPI_SGE_FLAGS_END_OF_BUFFER; 4241 fl <<= MPI_SGE_FLAGS_SHIFT; 4242 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4243 se->FlagsLength = htole32(fl); 4244 se->Address = htole32((uint32_t) paddr); 4245 mpt_lprt(mpt, MPT_PRT_DEBUG, 4246 "add ELS index %d ioindex %d for %p:%u\n", 4247 req->index, ioindex, req, req->serno); 4248 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4249 ("mpt_fc_post_els: request not locked")); 4250 mpt_send_cmd(mpt, req); 4251 } 4252 4253 static void 4254 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4255 { 4256 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4257 PTR_CMD_BUFFER_DESCRIPTOR cb; 4258 bus_addr_t paddr; 4259 4260 paddr = req->req_pbuf; 4261 paddr += MPT_RQSL(mpt); 4262 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4263 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4264 4265 fc = req->req_vbuf; 4266 fc->BufferCount = 1; 4267 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4268 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4269 4270 cb = &fc->Buffer[0]; 4271 cb->IoIndex = htole16(ioindex); 4272 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4273 4274 mpt_check_doorbell(mpt); 4275 mpt_send_cmd(mpt, req); 4276 } 4277 4278 static int 4279 mpt_add_els_buffers(struct mpt_softc *mpt) 4280 { 4281 int i; 4282 4283 if (mpt->is_fc == 0) { 4284 return (TRUE); 4285 } 4286 4287 if (mpt->els_cmds_allocated) { 4288 return (TRUE); 4289 } 4290 4291 mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *), 4292 M_DEVBUF, M_NOWAIT | M_ZERO); 4293 4294 if (mpt->els_cmd_ptrs == NULL) { 4295 return (FALSE); 4296 } 4297 4298 /* 4299 * Feed the chip some ELS buffer resources 4300 */ 4301 for (i = 0; i < MPT_MAX_ELS; i++) { 4302 request_t *req = mpt_get_request(mpt, FALSE); 4303 if (req == NULL) { 4304 break; 4305 } 4306 req->state |= REQ_STATE_LOCKED; 4307 mpt->els_cmd_ptrs[i] = req; 4308 mpt_fc_post_els(mpt, req, i); 4309 } 4310 4311 if (i == 0) { 4312 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4313 kfree(mpt->els_cmd_ptrs, M_DEVBUF); 4314 mpt->els_cmd_ptrs = NULL; 4315 return (FALSE); 4316 } 4317 if (i != MPT_MAX_ELS) { 4318 mpt_lprt(mpt, MPT_PRT_INFO, 4319 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4320 } 4321 mpt->els_cmds_allocated = i; 4322 return(TRUE); 4323 } 4324 4325 static int 4326 mpt_add_target_commands(struct mpt_softc *mpt) 4327 { 4328 int i, max; 4329 4330 if (mpt->tgt_cmd_ptrs) { 4331 return (TRUE); 4332 } 4333 4334 max = MPT_MAX_REQUESTS(mpt) >> 1; 4335 if (max > mpt->mpt_max_tgtcmds) { 4336 max = mpt->mpt_max_tgtcmds; 4337 } 4338 mpt->tgt_cmd_ptrs = 4339 kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4340 if (mpt->tgt_cmd_ptrs == NULL) { 4341 mpt_prt(mpt, 4342 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4343 return (FALSE); 4344 } 4345 4346 for (i = 0; i < max; i++) { 4347 request_t *req; 4348 4349 req = mpt_get_request(mpt, FALSE); 4350 if (req == NULL) { 4351 break; 4352 } 4353 req->state |= REQ_STATE_LOCKED; 4354 mpt->tgt_cmd_ptrs[i] = req; 4355 mpt_post_target_command(mpt, req, i); 4356 } 4357 4358 4359 if (i == 0) { 4360 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4361 kfree(mpt->tgt_cmd_ptrs, M_DEVBUF); 4362 mpt->tgt_cmd_ptrs = NULL; 4363 return (FALSE); 4364 } 4365 4366 mpt->tgt_cmds_allocated = i; 4367 4368 if (i < max) { 4369 mpt_lprt(mpt, MPT_PRT_INFO, 4370 "added %d of %d target bufs\n", i, max); 4371 } 4372 return (i); 4373 } 4374 4375 static int 4376 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4377 { 4378 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4379 mpt->twildcard = 1; 4380 } else if (lun >= MPT_MAX_LUNS) { 4381 return (EINVAL); 4382 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4383 return (EINVAL); 4384 } 4385 if (mpt->tenabled == 0) { 4386 if (mpt->is_fc) { 4387 (void) mpt_fc_reset_link(mpt, 0); 4388 } 4389 mpt->tenabled = 1; 4390 } 4391 if (lun == CAM_LUN_WILDCARD) { 4392 mpt->trt_wildcard.enabled = 1; 4393 } else { 4394 mpt->trt[lun].enabled = 1; 4395 } 4396 return (0); 4397 } 4398 4399 static int 4400 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4401 { 4402 int i; 4403 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4404 mpt->twildcard = 0; 4405 } else if (lun >= MPT_MAX_LUNS) { 4406 return (EINVAL); 4407 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4408 return (EINVAL); 4409 } 4410 if (lun == CAM_LUN_WILDCARD) { 4411 mpt->trt_wildcard.enabled = 0; 4412 } else { 4413 mpt->trt[lun].enabled = 0; 4414 } 4415 for (i = 0; i < MPT_MAX_LUNS; i++) { 4416 if (mpt->trt[lun].enabled) { 4417 break; 4418 } 4419 } 4420 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4421 if (mpt->is_fc) { 4422 (void) mpt_fc_reset_link(mpt, 0); 4423 } 4424 mpt->tenabled = 0; 4425 } 4426 return (0); 4427 } 4428 4429 /* 4430 * Called with MPT lock held 4431 */ 4432 static void 4433 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4434 { 4435 struct ccb_scsiio *csio = &ccb->csio; 4436 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4437 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4438 4439 switch (tgt->state) { 4440 case TGT_STATE_IN_CAM: 4441 break; 4442 case TGT_STATE_MOVING_DATA: 4443 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4444 xpt_freeze_simq(mpt->sim, 1); 4445 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4446 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4447 MPTLOCK_2_CAMLOCK(mpt); 4448 xpt_done(ccb); 4449 CAMLOCK_2_MPTLOCK(mpt); 4450 return; 4451 default: 4452 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4453 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4454 mpt_tgt_dump_req_state(mpt, cmd_req); 4455 mpt_set_ccb_status(ccb, 
CAM_REQ_CMP_ERR); 4456 MPTLOCK_2_CAMLOCK(mpt); 4457 xpt_done(ccb); 4458 CAMLOCK_2_MPTLOCK(mpt); 4459 return; 4460 } 4461 4462 if (csio->dxfer_len) { 4463 bus_dmamap_callback_t *cb; 4464 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4465 request_t *req; 4466 4467 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4468 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4469 4470 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4471 if (mpt->outofbeer == 0) { 4472 mpt->outofbeer = 1; 4473 xpt_freeze_simq(mpt->sim, 1); 4474 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4475 } 4476 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4477 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4478 MPTLOCK_2_CAMLOCK(mpt); 4479 xpt_done(ccb); 4480 CAMLOCK_2_MPTLOCK(mpt); 4481 return; 4482 } 4483 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4484 if (sizeof (bus_addr_t) > 4) { 4485 cb = mpt_execute_req_a64; 4486 } else { 4487 cb = mpt_execute_req; 4488 } 4489 4490 req->ccb = ccb; 4491 ccb->ccb_h.ccb_req_ptr = req; 4492 4493 /* 4494 * Record the currently active ccb and the 4495 * request for it in our target state area. 4496 */ 4497 tgt->ccb = ccb; 4498 tgt->req = req; 4499 4500 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4501 ta = req->req_vbuf; 4502 4503 if (mpt->is_sas) { 4504 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4505 cmd_req->req_vbuf; 4506 ta->QueueTag = ssp->InitiatorTag; 4507 } else if (mpt->is_spi) { 4508 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4509 cmd_req->req_vbuf; 4510 ta->QueueTag = sp->Tag; 4511 } 4512 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4513 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4514 ta->ReplyWord = htole32(tgt->reply_desc); 4515 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4516 ta->LUN[0] = 4517 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4518 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4519 } else { 4520 ta->LUN[1] = csio->ccb_h.target_lun; 4521 } 4522 4523 ta->RelativeOffset = tgt->bytes_xfered; 4524 ta->DataLength = ccb->csio.dxfer_len; 4525 if (ta->DataLength > tgt->resid) { 4526 ta->DataLength = tgt->resid; 4527 } 4528 4529 /* 4530 * XXX Should be done after data transfer completes? 4531 */ 4532 tgt->resid -= csio->dxfer_len; 4533 tgt->bytes_xfered += csio->dxfer_len; 4534 4535 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4536 ta->TargetAssistFlags |= 4537 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4538 } 4539 4540 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4541 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4542 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4543 ta->TargetAssistFlags |= 4544 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4545 } 4546 #endif 4547 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4548 4549 mpt_lprt(mpt, MPT_PRT_DEBUG, 4550 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4551 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4552 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4553 4554 MPTLOCK_2_CAMLOCK(mpt); 4555 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4556 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4557 int error; 4558 error = bus_dmamap_load(mpt->buffer_dmat, 4559 req->dmap, csio->data_ptr, csio->dxfer_len, 4560 cb, req, 0); 4561 if (error == EINPROGRESS) { 4562 xpt_freeze_simq(mpt->sim, 1); 4563 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4564 } 4565 } else { 4566 /* 4567 * We have been given a pointer to single 4568 * physical buffer. 
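				 * (No bus_dmamap_load() is needed here: the
				 * address is wrapped in one synthesized
				 * bus_dma_segment and fed straight to the
				 * same callback the mapped case would use.)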
4569 */ 4570 struct bus_dma_segment seg; 4571 seg.ds_addr = (bus_addr_t) 4572 (vm_offset_t)csio->data_ptr; 4573 seg.ds_len = csio->dxfer_len; 4574 (*cb)(req, &seg, 1, 0); 4575 } 4576 } else { 4577 /* 4578 * We have been given a list of addresses. 4579 * This case could be easily supported but they are not 4580 * currently generated by the CAM subsystem so there 4581 * is no point in wasting the time right now. 4582 */ 4583 struct bus_dma_segment *sgs; 4584 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4585 (*cb)(req, NULL, 0, EFAULT); 4586 } else { 4587 /* Just use the segments provided */ 4588 sgs = (struct bus_dma_segment *)csio->data_ptr; 4589 (*cb)(req, sgs, csio->sglist_cnt, 0); 4590 } 4591 } 4592 CAMLOCK_2_MPTLOCK(mpt); 4593 } else { 4594 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4595 4596 /* 4597 * XXX: I don't know why this seems to happen, but 4598 * XXX: completing the CCB seems to make things happy. 4599 * XXX: This seems to happen if the initiator requests 4600 * XXX: enough data that we have to do multiple CTIOs. 4601 */ 4602 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4603 mpt_lprt(mpt, MPT_PRT_DEBUG, 4604 "Meaningless STATUS CCB (%p): flags %x status %x " 4605 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4606 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4607 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4608 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4609 MPTLOCK_2_CAMLOCK(mpt); 4610 xpt_done(ccb); 4611 CAMLOCK_2_MPTLOCK(mpt); 4612 return; 4613 } 4614 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4615 sp = sense; 4616 memcpy(sp, &csio->sense_data, 4617 min(csio->sense_len, MPT_SENSE_SIZE)); 4618 } 4619 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4620 } 4621 } 4622 4623 static void 4624 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4625 uint32_t lun, int send, uint8_t *data, size_t length) 4626 { 4627 mpt_tgt_state_t *tgt; 4628 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4629 SGE_SIMPLE32 *se; 4630 uint32_t flags; 4631 uint8_t *dptr; 4632 bus_addr_t pptr; 4633 request_t *req; 4634 4635 /* 4636 * We enter with resid set to the data load for the command. 
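	 * This path emulates a response locally (e.g. the INQUIRY or
	 * REQUEST SENSE data synthesized for luns without an upstream
	 * listener): the payload is copied into the back half of this
	 * request's buffer and a single-SGE TARGET_ASSIST is built around
	 * it, with status sent once that assist completes.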
4637 */ 4638 tgt = MPT_TGT_STATE(mpt, cmd_req); 4639 if (length == 0 || tgt->resid == 0) { 4640 tgt->resid = 0; 4641 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4642 return; 4643 } 4644 4645 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4646 mpt_prt(mpt, "out of resources- dropping local response\n"); 4647 return; 4648 } 4649 tgt->is_local = 1; 4650 4651 4652 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4653 ta = req->req_vbuf; 4654 4655 if (mpt->is_sas) { 4656 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4657 ta->QueueTag = ssp->InitiatorTag; 4658 } else if (mpt->is_spi) { 4659 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4660 ta->QueueTag = sp->Tag; 4661 } 4662 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4663 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4664 ta->ReplyWord = htole32(tgt->reply_desc); 4665 if (lun > MPT_MAX_LUNS) { 4666 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4667 ta->LUN[1] = lun & 0xff; 4668 } else { 4669 ta->LUN[1] = lun; 4670 } 4671 ta->RelativeOffset = 0; 4672 ta->DataLength = length; 4673 4674 dptr = req->req_vbuf; 4675 dptr += MPT_RQSL(mpt); 4676 pptr = req->req_pbuf; 4677 pptr += MPT_RQSL(mpt); 4678 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4679 4680 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4681 memset(se, 0,sizeof (*se)); 4682 4683 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4684 if (send) { 4685 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4686 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4687 } 4688 se->Address = pptr; 4689 MPI_pSGE_SET_LENGTH(se, length); 4690 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4691 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4692 MPI_pSGE_SET_FLAGS(se, flags); 4693 4694 tgt->ccb = NULL; 4695 tgt->req = req; 4696 tgt->resid -= length; 4697 tgt->bytes_xfered = length; 4698 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4699 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4700 #else 4701 tgt->state = TGT_STATE_MOVING_DATA; 4702 #endif 4703 mpt_send_cmd(mpt, req); 4704 } 4705 4706 /* 4707 * Abort queued up CCBs 4708 */ 4709 static cam_status 4710 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4711 { 4712 struct mpt_hdr_stailq *lp; 4713 struct ccb_hdr *srch; 4714 int found = 0; 4715 union ccb *accb = ccb->cab.abort_ccb; 4716 tgt_resource_t *trtp; 4717 4718 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4719 4720 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4721 trtp = &mpt->trt_wildcard; 4722 } else { 4723 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4724 } 4725 4726 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4727 lp = &trtp->atios; 4728 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4729 lp = &trtp->inots; 4730 } else { 4731 return (CAM_REQ_INVALID); 4732 } 4733 4734 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4735 if (srch == &accb->ccb_h) { 4736 found = 1; 4737 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4738 break; 4739 } 4740 } 4741 if (found) { 4742 accb->ccb_h.status = CAM_REQ_ABORTED; 4743 xpt_done(accb); 4744 return (CAM_REQ_CMP); 4745 } 4746 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4747 return (CAM_PATH_INVALID); 4748 } 4749 4750 /* 4751 * Ask the MPT to abort the current target command 4752 */ 4753 static int 4754 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4755 { 4756 int error; 4757 request_t *req; 4758 PTR_MSG_TARGET_MODE_ABORT abtp; 4759 4760 req = mpt_get_request(mpt, FALSE); 4761 if (req == NULL) { 4762 return (-1); 4763 } 4764 abtp = req->req_vbuf; 4765 memset(abtp, 0, sizeof (*abtp)); 
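	/*
	 * The command to abort is named by the ReplyWord (IO index) of its
	 * original command buffer rather than by tag.  Below, FC and SAS
	 * controllers take the abort through the normal request queue,
	 * while SPI parts are handed it via the doorbell handshake.
	 */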
4766 4767 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4768 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4769 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4770 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4771 error = 0; 4772 if (mpt->is_fc || mpt->is_sas) { 4773 mpt_send_cmd(mpt, req); 4774 } else { 4775 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4776 } 4777 return (error); 4778 } 4779 4780 /* 4781 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4782 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4783 * FC929 to set bogus FC_RSP fields (nonzero residuals 4784 * but w/o RESID fields set). This causes QLogic initiators 4785 * to think maybe that a frame was lost. 4786 * 4787 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4788 * we use allocated requests to do TARGET_ASSIST and we 4789 * need to know when to release them. 4790 */ 4791 4792 static void 4793 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4794 uint8_t status, uint8_t const *sense_data) 4795 { 4796 uint8_t *cmd_vbuf; 4797 mpt_tgt_state_t *tgt; 4798 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4799 request_t *req; 4800 bus_addr_t paddr; 4801 int resplen = 0; 4802 uint32_t fl; 4803 4804 cmd_vbuf = cmd_req->req_vbuf; 4805 cmd_vbuf += MPT_RQSL(mpt); 4806 tgt = MPT_TGT_STATE(mpt, cmd_req); 4807 4808 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4809 if (mpt->outofbeer == 0) { 4810 mpt->outofbeer = 1; 4811 xpt_freeze_simq(mpt->sim, 1); 4812 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4813 } 4814 if (ccb) { 4815 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4816 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4817 MPTLOCK_2_CAMLOCK(mpt); 4818 xpt_done(ccb); 4819 CAMLOCK_2_MPTLOCK(mpt); 4820 } else { 4821 mpt_prt(mpt, 4822 "could not allocate status request- dropping\n"); 4823 } 4824 return; 4825 } 4826 req->ccb = ccb; 4827 if (ccb) { 4828 ccb->ccb_h.ccb_mpt_ptr = mpt; 4829 ccb->ccb_h.ccb_req_ptr = req; 4830 } 4831 4832 /* 4833 * Record the currently active ccb, if any, and the 4834 * request for it in our target state area. 4835 */ 4836 tgt->ccb = ccb; 4837 tgt->req = req; 4838 tgt->state = TGT_STATE_SENDING_STATUS; 4839 4840 tp = req->req_vbuf; 4841 paddr = req->req_pbuf; 4842 paddr += MPT_RQSL(mpt); 4843 4844 memset(tp, 0, sizeof (*tp)); 4845 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4846 if (mpt->is_fc) { 4847 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4848 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4849 uint8_t *sts_vbuf; 4850 uint32_t *rsp; 4851 4852 sts_vbuf = req->req_vbuf; 4853 sts_vbuf += MPT_RQSL(mpt); 4854 rsp = (uint32_t *) sts_vbuf; 4855 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4856 4857 /* 4858 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4859 * It has to be big-endian in memory and is organized 4860 * in 32 bit words, which are much easier to deal with 4861 * as words which are swizzled as needed. 4862 * 4863 * All we're filling here is the FC_RSP payload. 4864 * We may just have the chip synthesize it if 4865 * we have no residual and an OK status. 4866 * 4867 */ 4868 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4869 4870 rsp[2] = status; 4871 if (tgt->resid) { 4872 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4873 rsp[3] = htobe32(tgt->resid); 4874 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4875 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4876 #endif 4877 } 4878 if (status == SCSI_STATUS_CHECK_COND) { 4879 int i; 4880 4881 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
 */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
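	 * Roughly: ABORT TASK SET and TERMINATE TASK both become
	 * MSG_ABORT_TAG, CLEAR TASK SET, TARGET RESET and CLEAR ACA map to
	 * their direct message equivalents, and anything else falls back
	 * to MSG_NOOP.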
4971 */ 4972 switch (fc) { 4973 case MPT_ABORT_TASK_SET: 4974 inot->message_args[0] = MSG_ABORT_TAG; 4975 break; 4976 case MPT_CLEAR_TASK_SET: 4977 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4978 break; 4979 case MPT_TARGET_RESET: 4980 inot->message_args[0] = MSG_TARGET_RESET; 4981 break; 4982 case MPT_CLEAR_ACA: 4983 inot->message_args[0] = MSG_CLEAR_ACA; 4984 break; 4985 case MPT_TERMINATE_TASK: 4986 inot->message_args[0] = MSG_ABORT_TAG; 4987 break; 4988 default: 4989 inot->message_args[0] = MSG_NOOP; 4990 break; 4991 } 4992 tgt->ccb = (union ccb *) inot; 4993 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4994 MPTLOCK_2_CAMLOCK(mpt); 4995 xpt_done((union ccb *)inot); 4996 CAMLOCK_2_MPTLOCK(mpt); 4997 } 4998 4999 static void 5000 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 5001 { 5002 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 5003 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 5004 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 5005 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 5006 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 5007 '0', '0', '0', '1' 5008 }; 5009 struct ccb_accept_tio *atiop; 5010 lun_id_t lun; 5011 int tag_action = 0; 5012 mpt_tgt_state_t *tgt; 5013 tgt_resource_t *trtp = NULL; 5014 U8 *lunptr; 5015 U8 *vbuf; 5016 U16 itag; 5017 U16 ioindex; 5018 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5019 uint8_t *cdbp; 5020 5021 /* 5022 * First, DMA sync the received command- 5023 * which is in the *request* * phys area. 5024 * 5025 * XXX: We could optimize this for a range 5026 */ 5027 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 5028 BUS_DMASYNC_POSTREAD); 5029 5030 /* 5031 * Stash info for the current command where we can get at it later. 5032 */ 5033 vbuf = req->req_vbuf; 5034 vbuf += MPT_RQSL(mpt); 5035 5036 /* 5037 * Get our state pointer set up. 5038 */ 5039 tgt = MPT_TGT_STATE(mpt, req); 5040 if (tgt->state != TGT_STATE_LOADED) { 5041 mpt_tgt_dump_req_state(mpt, req); 5042 panic("bad target state in mpt_scsi_tgt_atio"); 5043 } 5044 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5045 tgt->state = TGT_STATE_IN_CAM; 5046 tgt->reply_desc = reply_desc; 5047 ioindex = GET_IO_INDEX(reply_desc); 5048 if (mpt->verbose >= MPT_PRT_DEBUG) { 5049 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5050 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5051 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5052 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5053 } 5054 if (mpt->is_fc) { 5055 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5056 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5057 if (fc->FcpCntl[2]) { 5058 /* 5059 * Task Management Request 5060 */ 5061 switch (fc->FcpCntl[2]) { 5062 case 0x2: 5063 fct = MPT_ABORT_TASK_SET; 5064 break; 5065 case 0x4: 5066 fct = MPT_CLEAR_TASK_SET; 5067 break; 5068 case 0x20: 5069 fct = MPT_TARGET_RESET; 5070 break; 5071 case 0x40: 5072 fct = MPT_CLEAR_ACA; 5073 break; 5074 case 0x80: 5075 fct = MPT_TERMINATE_TASK; 5076 break; 5077 default: 5078 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5079 fc->FcpCntl[2]); 5080 mpt_scsi_tgt_status(mpt, 0, req, 5081 SCSI_STATUS_OK, 0); 5082 return; 5083 } 5084 } else { 5085 switch (fc->FcpCntl[1]) { 5086 case 0: 5087 tag_action = MSG_SIMPLE_Q_TAG; 5088 break; 5089 case 1: 5090 tag_action = MSG_HEAD_OF_Q_TAG; 5091 break; 5092 case 2: 5093 tag_action = MSG_ORDERED_Q_TAG; 5094 break; 5095 default: 5096 /* 5097 * Bah. 
Ignore Untagged Queing and ACA 5098 */ 5099 tag_action = MSG_SIMPLE_Q_TAG; 5100 break; 5101 } 5102 } 5103 tgt->resid = be32toh(fc->FcpDl); 5104 cdbp = fc->FcpCdb; 5105 lunptr = fc->FcpLun; 5106 itag = be16toh(fc->OptionalOxid); 5107 } else if (mpt->is_sas) { 5108 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5109 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5110 cdbp = ssp->CDB; 5111 lunptr = ssp->LogicalUnitNumber; 5112 itag = ssp->InitiatorTag; 5113 } else { 5114 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5115 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5116 cdbp = sp->CDB; 5117 lunptr = sp->LogicalUnitNumber; 5118 itag = sp->Tag; 5119 } 5120 5121 /* 5122 * Generate a simple lun 5123 */ 5124 switch (lunptr[0] & 0xc0) { 5125 case 0x40: 5126 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5127 break; 5128 case 0: 5129 lun = lunptr[1]; 5130 break; 5131 default: 5132 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 5133 lun = 0xffff; 5134 break; 5135 } 5136 5137 /* 5138 * Deal with non-enabled or bad luns here. 5139 */ 5140 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5141 mpt->trt[lun].enabled == 0) { 5142 if (mpt->twildcard) { 5143 trtp = &mpt->trt_wildcard; 5144 } else if (fct == MPT_NIL_TMT_VALUE) { 5145 /* 5146 * In this case, we haven't got an upstream listener 5147 * for either a specific lun or wildcard luns. We 5148 * have to make some sensible response. For regular 5149 * inquiry, just return some NOT HERE inquiry data. 5150 * For VPD inquiry, report illegal field in cdb. 5151 * For REQUEST SENSE, just return NO SENSE data. 5152 * REPORT LUNS gets illegal command. 5153 * All other commands get 'no such device'. 5154 */ 5155 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5156 size_t len; 5157 5158 memset(buf, 0, MPT_SENSE_SIZE); 5159 cond = SCSI_STATUS_CHECK_COND; 5160 buf[0] = 0xf0; 5161 buf[2] = 0x5; 5162 buf[7] = 0x8; 5163 sp = buf; 5164 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5165 5166 switch (cdbp[0]) { 5167 case INQUIRY: 5168 { 5169 if (cdbp[1] != 0) { 5170 buf[12] = 0x26; 5171 buf[13] = 0x01; 5172 break; 5173 } 5174 len = min(tgt->resid, cdbp[4]); 5175 len = min(len, sizeof (null_iqd)); 5176 mpt_lprt(mpt, MPT_PRT_DEBUG, 5177 "local inquiry %ld bytes\n", (long) len); 5178 mpt_scsi_tgt_local(mpt, req, lun, 1, 5179 null_iqd, len); 5180 return; 5181 } 5182 case REQUEST_SENSE: 5183 { 5184 buf[2] = 0x0; 5185 len = min(tgt->resid, cdbp[4]); 5186 len = min(len, sizeof (buf)); 5187 mpt_lprt(mpt, MPT_PRT_DEBUG, 5188 "local reqsense %ld bytes\n", (long) len); 5189 mpt_scsi_tgt_local(mpt, req, lun, 1, 5190 buf, len); 5191 return; 5192 } 5193 case REPORT_LUNS: 5194 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5195 buf[12] = 0x26; 5196 return; 5197 default: 5198 mpt_lprt(mpt, MPT_PRT_DEBUG, 5199 "CMD 0x%x to unmanaged lun %u\n", 5200 cdbp[0], lun); 5201 buf[12] = 0x25; 5202 break; 5203 } 5204 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5205 return; 5206 } 5207 /* otherwise, leave trtp NULL */ 5208 } else { 5209 trtp = &mpt->trt[lun]; 5210 } 5211 5212 /* 5213 * Deal with any task management 5214 */ 5215 if (fct != MPT_NIL_TMT_VALUE) { 5216 if (trtp == NULL) { 5217 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5218 fct); 5219 mpt_scsi_tgt_status(mpt, 0, req, 5220 SCSI_STATUS_OK, 0); 5221 } else { 5222 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5223 GET_INITIATOR_INDEX(reply_desc)); 5224 } 5225 return; 5226 } 5227 5228 5229 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5230 if (atiop == NULL) { 5231 mpt_lprt(mpt, MPT_PRT_WARN, 5232 "no ATIOs for lun %u- 
sending back %s\n", lun, 5233 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5234 mpt_scsi_tgt_status(mpt, NULL, req, 5235 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5236 NULL); 5237 return; 5238 } 5239 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5240 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5241 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5242 atiop->ccb_h.ccb_mpt_ptr = mpt; 5243 atiop->ccb_h.status = CAM_CDB_RECVD; 5244 atiop->ccb_h.target_lun = lun; 5245 atiop->sense_len = 0; 5246 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5247 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5248 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5249 5250 /* 5251 * The tag we construct here allows us to find the 5252 * original request that the command came in with. 5253 * 5254 * This way we don't have to depend on anything but the 5255 * tag to find things when CCBs show back up from CAM. 5256 */ 5257 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5258 tgt->tag_id = atiop->tag_id; 5259 if (tag_action) { 5260 atiop->tag_action = tag_action; 5261 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5262 } 5263 if (mpt->verbose >= MPT_PRT_DEBUG) { 5264 int i; 5265 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5266 atiop->ccb_h.target_lun); 5267 for (i = 0; i < atiop->cdb_len; i++) { 5268 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5269 (i == (atiop->cdb_len - 1))? '>' : ' '); 5270 } 5271 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5272 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5273 } 5274 5275 MPTLOCK_2_CAMLOCK(mpt); 5276 xpt_done((union ccb *)atiop); 5277 CAMLOCK_2_MPTLOCK(mpt); 5278 } 5279 5280 static void 5281 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5282 { 5283 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5284 5285 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5286 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5287 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5288 tgt->tag_id, tgt->state); 5289 } 5290 5291 static void 5292 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5293 { 5294 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5295 req->index, req->index, req->state); 5296 mpt_tgt_dump_tgt_state(mpt, req); 5297 } 5298 5299 static int 5300 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5301 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5302 { 5303 int dbg; 5304 union ccb *ccb; 5305 U16 status; 5306 5307 if (reply_frame == NULL) { 5308 /* 5309 * Figure out what the state of the command is. 5310 */ 5311 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5312 5313 #ifdef INVARIANTS 5314 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5315 if (tgt->req) { 5316 mpt_req_not_spcl(mpt, tgt->req, 5317 "turbo scsi_tgt_reply associated req", __LINE__); 5318 } 5319 #endif 5320 switch(tgt->state) { 5321 case TGT_STATE_LOADED: 5322 /* 5323 * This is a new command starting. 
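			 * (A LOADED buffer means the IOC has just dropped a
			 * new command into this posted buffer, so hand it to
			 * mpt_scsi_tgt_atio() to build an ATIO for CAM, or to
			 * deal with task management and locally emulated
			 * commands.)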
5324 */ 5325 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5326 break; 5327 case TGT_STATE_MOVING_DATA: 5328 { 5329 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5330 5331 ccb = tgt->ccb; 5332 if (tgt->req == NULL) { 5333 panic("mpt: turbo target reply with null " 5334 "associated request moving data"); 5335 /* NOTREACHED */ 5336 } 5337 if (ccb == NULL) { 5338 if (tgt->is_local == 0) { 5339 panic("mpt: turbo target reply with " 5340 "null associated ccb moving data"); 5341 /* NOTREACHED */ 5342 } 5343 mpt_lprt(mpt, MPT_PRT_DEBUG, 5344 "TARGET_ASSIST local done\n"); 5345 TAILQ_REMOVE(&mpt->request_pending_list, 5346 tgt->req, links); 5347 mpt_free_request(mpt, tgt->req); 5348 tgt->req = NULL; 5349 mpt_scsi_tgt_status(mpt, NULL, req, 5350 0, NULL); 5351 return (TRUE); 5352 } 5353 tgt->ccb = NULL; 5354 tgt->nxfers++; 5355 mpt_req_untimeout(req, mpt_timeout, ccb); 5356 mpt_lprt(mpt, MPT_PRT_DEBUG, 5357 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5358 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5359 /* 5360 * Free the Target Assist Request 5361 */ 5362 KASSERT(tgt->req->ccb == ccb, 5363 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5364 tgt->req->serno, tgt->req->ccb)); 5365 TAILQ_REMOVE(&mpt->request_pending_list, 5366 tgt->req, links); 5367 mpt_free_request(mpt, tgt->req); 5368 tgt->req = NULL; 5369 5370 /* 5371 * Do we need to send status now? That is, are 5372 * we done with all our data transfers? 5373 */ 5374 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5375 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5376 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5377 KASSERT(ccb->ccb_h.status, 5378 ("zero ccb sts at %d\n", __LINE__)); 5379 tgt->state = TGT_STATE_IN_CAM; 5380 if (mpt->outofbeer) { 5381 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5382 mpt->outofbeer = 0; 5383 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5384 } 5385 MPTLOCK_2_CAMLOCK(mpt); 5386 xpt_done(ccb); 5387 CAMLOCK_2_MPTLOCK(mpt); 5388 break; 5389 } 5390 /* 5391 * Otherwise, send status (and sense) 5392 */ 5393 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5394 sp = sense; 5395 memcpy(sp, &ccb->csio.sense_data, 5396 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5397 } 5398 mpt_scsi_tgt_status(mpt, ccb, req, 5399 ccb->csio.scsi_status, sp); 5400 break; 5401 } 5402 case TGT_STATE_SENDING_STATUS: 5403 case TGT_STATE_MOVING_DATA_AND_STATUS: 5404 { 5405 int ioindex; 5406 ccb = tgt->ccb; 5407 5408 if (tgt->req == NULL) { 5409 panic("mpt: turbo target reply with null " 5410 "associated request sending status"); 5411 /* NOTREACHED */ 5412 } 5413 5414 if (ccb) { 5415 tgt->ccb = NULL; 5416 if (tgt->state == 5417 TGT_STATE_MOVING_DATA_AND_STATUS) { 5418 tgt->nxfers++; 5419 } 5420 mpt_req_untimeout(req, mpt_timeout, ccb); 5421 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5422 ccb->ccb_h.status |= CAM_SENT_SENSE; 5423 } 5424 mpt_lprt(mpt, MPT_PRT_DEBUG, 5425 "TARGET_STATUS tag %x sts %x flgs %x req " 5426 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5427 ccb->ccb_h.flags, tgt->req); 5428 /* 5429 * Free the Target Send Status Request 5430 */ 5431 KASSERT(tgt->req->ccb == ccb, 5432 ("tgt->req %p:%u tgt->req->ccb %p", 5433 tgt->req, tgt->req->serno, tgt->req->ccb)); 5434 /* 5435 * Notify CAM that we're done 5436 */ 5437 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5438 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5439 KASSERT(ccb->ccb_h.status, 5440 ("ZERO ccb sts at %d\n", __LINE__)); 5441 tgt->ccb = NULL; 5442 } else { 5443 mpt_lprt(mpt, MPT_PRT_DEBUG, 5444 "TARGET_STATUS non-CAM for req %p:%u\n", 5445 tgt->req, tgt->req->serno); 5446 } 5447 TAILQ_REMOVE(&mpt->request_pending_list, 
5448 tgt->req, links); 5449 mpt_free_request(mpt, tgt->req); 5450 tgt->req = NULL; 5451 5452 /* 5453 * And re-post the Command Buffer. 5454 * This will reset the state. 5455 */ 5456 ioindex = GET_IO_INDEX(reply_desc); 5457 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5458 tgt->is_local = 0; 5459 mpt_post_target_command(mpt, req, ioindex); 5460 5461 /* 5462 * And post a done for anyone who cares 5463 */ 5464 if (ccb) { 5465 if (mpt->outofbeer) { 5466 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5467 mpt->outofbeer = 0; 5468 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5469 } 5470 MPTLOCK_2_CAMLOCK(mpt); 5471 xpt_done(ccb); 5472 CAMLOCK_2_MPTLOCK(mpt); 5473 } 5474 break; 5475 } 5476 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5477 tgt->state = TGT_STATE_LOADED; 5478 break; 5479 default: 5480 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5481 "Reply Function\n", tgt->state); 5482 } 5483 return (TRUE); 5484 } 5485 5486 status = le16toh(reply_frame->IOCStatus); 5487 if (status != MPI_IOCSTATUS_SUCCESS) { 5488 dbg = MPT_PRT_ERROR; 5489 } else { 5490 dbg = MPT_PRT_DEBUG1; 5491 } 5492 5493 mpt_lprt(mpt, dbg, 5494 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5495 req, req->serno, reply_frame, reply_frame->Function, status); 5496 5497 switch (reply_frame->Function) { 5498 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5499 { 5500 mpt_tgt_state_t *tgt; 5501 #ifdef INVARIANTS 5502 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5503 #endif 5504 if (status != MPI_IOCSTATUS_SUCCESS) { 5505 /* 5506 * XXX What to do? 5507 */ 5508 break; 5509 } 5510 tgt = MPT_TGT_STATE(mpt, req); 5511 KASSERT(tgt->state == TGT_STATE_LOADING, 5512 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5513 mpt_assign_serno(mpt, req); 5514 tgt->state = TGT_STATE_LOADED; 5515 break; 5516 } 5517 case MPI_FUNCTION_TARGET_ASSIST: 5518 #ifdef INVARIANTS 5519 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5520 #endif 5521 mpt_prt(mpt, "target assist completion\n"); 5522 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5523 mpt_free_request(mpt, req); 5524 break; 5525 case MPI_FUNCTION_TARGET_STATUS_SEND: 5526 #ifdef INVARIANTS 5527 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5528 #endif 5529 mpt_prt(mpt, "status send completion\n"); 5530 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5531 mpt_free_request(mpt, req); 5532 break; 5533 case MPI_FUNCTION_TARGET_MODE_ABORT: 5534 { 5535 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5536 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5537 PTR_MSG_TARGET_MODE_ABORT abtp = 5538 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5539 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5540 #ifdef INVARIANTS 5541 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5542 #endif 5543 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5544 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5545 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5546 mpt_free_request(mpt, req); 5547 break; 5548 } 5549 default: 5550 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5551 "0x%x\n", reply_frame->Function); 5552 break; 5553 } 5554 return (TRUE); 5555 } 5556
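
/*
 * Rough sketch of the target mode buffer states used above, as driven by
 * the code in this file:
 *
 *	TGT_STATE_LOADING	command buffer has been posted to the IOC
 *	TGT_STATE_LOADED	buffer post completed; waiting for a command
 *	TGT_STATE_IN_CAM	an ATIO for the command has been handed to CAM
 *	TGT_STATE_SETTING_UP_FOR_DATA, TGT_STATE_MOVING_DATA
 *				a TARGET_ASSIST is being built or in flight
 *	TGT_STATE_MOVING_DATA_AND_STATUS
 *				local emulation path with auto good status
 *	TGT_STATE_SENDING_STATUS
 *				TARGET_STATUS_SEND in flight; its completion
 *				reposts the command buffer
 */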