1 /*- 2 * FreeBSD/CAM specific routines for LSI '909 FC adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * Support from LSI-Logic has also gone a great deal toward making this a 62 * workable subsystem and is gratefully acknowledged. 63 */ 64 /*- 65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 
66 * Copyright (c) 2005, WHEEL Sp. z o.o. 67 * Copyright (c) 2004, 2005 Justin T. Gibbs 68 * All rights reserved. 69 * 70 * Redistribution and use in source and binary forms, with or without 71 * modification, are permitted provided that the following conditions are 72 * met: 73 * 1. Redistributions of source code must retain the above copyright 74 * notice, this list of conditions and the following disclaimer. 75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 76 * substantially similar to the "NO WARRANTY" disclaimer below 77 * ("Disclaimer") and any redistribution must be conditioned upon including 78 * a substantially similar Disclaimer requirement for further binary 79 * redistribution. 80 * 3. Neither the names of the above listed copyright holders nor the names 81 * of any contributors may be used to endorse or promote products derived 82 * from this software without specific prior written permission. 83 * 84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 95 * 96 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $ 97 */ 98 99 #include <dev/disk/mpt/mpt.h> 100 #include <dev/disk/mpt/mpt_cam.h> 101 #include <dev/disk/mpt/mpt_raid.h> 102 103 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
 */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
    .name = "mpt_cam",
    .probe = mpt_cam_probe,
    .attach = mpt_cam_attach,
    .enable = mpt_cam_enable,
    .ready = mpt_cam_ready,
    .event = mpt_cam_event,
    .reset = mpt_cam_ioc_reset,
    .detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
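/*
 * Note: the mpt core walks the table of registered personalities and
 * calls these hooks (probe, attach, enable, ready, event, reset,
 * detach) at the matching points in the IOC life cycle; hooks left
 * NULL are filled in by the core with stub defaults.  As an
 * illustration only, a hypothetical minimal personality would be
 * declared the same way:
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name = "mpt_foo",
 *		.probe = mpt_foo_probe,
 *		.attach = mpt_foo_attach,
 *	};
 *	DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_ANY);
 *
 * (mpt_foo_* are made-up names; the RAID support in mpt_raid.c is a
 * real example of a second personality layered on top of this one.)
 */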
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
    int role;

    /*
     * Only attach to nodes that support the initiator or target role
     * (or want to) or have RAID physical devices that need CAM pass-thru
     * support.
     */
    if (mpt->do_cfg_role) {
        role = mpt->cfg_role;
    } else {
        role = mpt->role;
    }
    if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
        (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
        return (0);
    }
    return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
    struct cam_devq *devq;
    mpt_handler_t handler;
    int maxq;
    int error;

    MPT_LOCK(mpt);
    TAILQ_INIT(&mpt->request_timeout_list);
    maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
        mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_io_handler_id);
    if (error != 0) {
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_tmf_handler_id);
    if (error != 0) {
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    /*
     * If we're fibre channel and could support target mode, we register
     * an ELS reply handler and give it resources.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_fc_els_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &fc_els_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
        if (mpt_add_els_buffers(mpt) == FALSE) {
            error = ENOMEM;
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
        maxq -= mpt->els_cmds_allocated;
    }

    /*
     * If we support target mode, we register a reply handler for it,
     * but don't add command resources until we actually enable target
     * mode.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_scsi_tgt_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &mpt->scsi_tgt_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
    }

    if (mpt->is_sas) {
        handler.reply_handler = mpt_sata_pass_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &sata_pass_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
    }

    /*
     * We keep one request reserved for timeout TMF requests.
     */
    mpt->tmf_req = mpt_get_request(mpt, FALSE);
    if (mpt->tmf_req == NULL) {
        mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    /*
     * Mark the request as free even though not on the free list.
     * There is only one TMF request allowed to be outstanding at
     * a time and the TMF routines perform their own allocation
     * tracking using the standard state flags.
299 */ 300 mpt->tmf_req->state = REQ_STATE_FREE; 301 maxq--; 302 303 /* 304 * The rest of this is CAM foo, for which we need to drop our lock 305 */ 306 MPT_UNLOCK(mpt); 307 308 if (mpt_spawn_recovery_thread(mpt) != 0) { 309 mpt_prt(mpt, "Unable to spawn recovery thread!\n"); 310 error = ENOMEM; 311 goto cleanup; 312 } 313 314 /* 315 * Create the device queue for our SIM(s). 316 */ 317 devq = cam_simq_alloc(maxq); 318 if (devq == NULL) { 319 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); 320 error = ENOMEM; 321 goto cleanup; 322 } 323 324 /* 325 * Construct our SIM entry. 326 */ 327 mpt->sim = 328 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 329 if (mpt->sim == NULL) { 330 mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); 331 cam_devq_release(devq); 332 error = ENOMEM; 333 goto cleanup; 334 } 335 336 /* 337 * Register exactly this bus. 338 */ 339 MPT_LOCK(mpt); 340 if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) { 341 mpt_prt(mpt, "Bus registration Failed!\n"); 342 error = ENOMEM; 343 MPT_UNLOCK(mpt); 344 goto cleanup; 345 } 346 347 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), 348 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 349 mpt_prt(mpt, "Unable to allocate Path!\n"); 350 error = ENOMEM; 351 MPT_UNLOCK(mpt); 352 goto cleanup; 353 } 354 MPT_UNLOCK(mpt); 355 356 /* 357 * Only register a second bus for RAID physical 358 * devices if the controller supports RAID. 359 */ 360 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { 361 return (0); 362 } 363 364 /* 365 * Create a "bus" to export all hidden disks to CAM. 366 */ 367 mpt->phydisk_sim = 368 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 369 if (mpt->phydisk_sim == NULL) { 370 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); 371 error = ENOMEM; 372 goto cleanup; 373 } 374 375 /* 376 * Register this bus. 
377 */ 378 MPT_LOCK(mpt); 379 if (xpt_bus_register(mpt->phydisk_sim, 1) != 380 CAM_SUCCESS) { 381 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 382 error = ENOMEM; 383 MPT_UNLOCK(mpt); 384 goto cleanup; 385 } 386 387 if (xpt_create_path(&mpt->phydisk_path, NULL, 388 cam_sim_path(mpt->phydisk_sim), 389 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 390 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 391 error = ENOMEM; 392 MPT_UNLOCK(mpt); 393 goto cleanup; 394 } 395 MPT_UNLOCK(mpt); 396 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 397 return (0); 398 399 cleanup: 400 mpt_cam_detach(mpt); 401 return (error); 402 } 403 404 /* 405 * Read FC configuration information 406 */ 407 static int 408 mpt_read_config_info_fc(struct mpt_softc *mpt) 409 { 410 struct sysctl_ctx_list *ctx; 411 struct sysctl_oid *tree; 412 char *topology = NULL; 413 int rv; 414 415 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 416 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 417 if (rv) { 418 return (-1); 419 } 420 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 421 mpt->mpt_fcport_page0.Header.PageVersion, 422 mpt->mpt_fcport_page0.Header.PageLength, 423 mpt->mpt_fcport_page0.Header.PageNumber, 424 mpt->mpt_fcport_page0.Header.PageType); 425 426 427 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 428 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 429 if (rv) { 430 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 431 return (-1); 432 } 433 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); 434 435 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; 436 437 switch (mpt->mpt_fcport_page0.Flags & 438 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 439 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 440 mpt->mpt_fcport_speed = 0; 441 topology = "<NO LOOP>"; 442 break; 443 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 444 topology = "N-Port"; 445 break; 446 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 447 topology = "NL-Port"; 448 break; 449 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 450 topology = "F-Port"; 451 break; 452 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 453 topology = "FL-Port"; 454 break; 455 default: 456 mpt->mpt_fcport_speed = 0; 457 topology = "?"; 458 break; 459 } 460 461 mpt_lprt(mpt, MPT_PRT_INFO, 462 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " 463 "Speed %u-Gbit\n", topology, 464 mpt->mpt_fcport_page0.WWNN.High, 465 mpt->mpt_fcport_page0.WWNN.Low, 466 mpt->mpt_fcport_page0.WWPN.High, 467 mpt->mpt_fcport_page0.WWPN.Low, 468 mpt->mpt_fcport_speed); 469 MPT_UNLOCK(mpt); 470 ctx = device_get_sysctl_ctx(mpt->dev); 471 tree = device_get_sysctl_tree(mpt->dev); 472 473 ksnprintf(mpt->scinfo.fc.wwnn, 474 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", 475 mpt->mpt_fcport_page0.WWNN.High, 476 mpt->mpt_fcport_page0.WWNN.Low); 477 478 ksnprintf(mpt->scinfo.fc.wwpn, 479 sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", 480 mpt->mpt_fcport_page0.WWPN.High, 481 mpt->mpt_fcport_page0.WWPN.Low); 482 483 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 484 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, 485 "World Wide Node Name"); 486 487 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 488 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, 489 "World Wide Port Name"); 490 491 MPT_LOCK(mpt); 492 return (0); 493 } 494 495 /* 496 * Set FC configuration information. 
497 */ 498 static int 499 mpt_set_initial_config_fc(struct mpt_softc *mpt) 500 { 501 CONFIG_PAGE_FC_PORT_1 fc; 502 U32 fl; 503 int r, doit = 0; 504 int role; 505 506 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, 507 &fc.Header, FALSE, 5000); 508 if (r) { 509 mpt_prt(mpt, "failed to read FC page 1 header\n"); 510 return (mpt_fc_reset_link(mpt, 1)); 511 } 512 513 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, 514 &fc.Header, sizeof (fc), FALSE, 5000); 515 if (r) { 516 mpt_prt(mpt, "failed to read FC page 1\n"); 517 return (mpt_fc_reset_link(mpt, 1)); 518 } 519 mpt2host_config_page_fc_port_1(&fc); 520 521 /* 522 * Check our flags to make sure we support the role we want. 523 */ 524 doit = 0; 525 role = 0; 526 fl = fc.Flags; 527 528 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { 529 role |= MPT_ROLE_INITIATOR; 530 } 531 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 532 role |= MPT_ROLE_TARGET; 533 } 534 535 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; 536 537 if (mpt->do_cfg_role == 0) { 538 role = mpt->cfg_role; 539 } else { 540 mpt->do_cfg_role = 0; 541 } 542 543 if (role != mpt->cfg_role) { 544 if (mpt->cfg_role & MPT_ROLE_INITIATOR) { 545 if ((role & MPT_ROLE_INITIATOR) == 0) { 546 mpt_prt(mpt, "adding initiator role\n"); 547 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; 548 doit++; 549 } else { 550 mpt_prt(mpt, "keeping initiator role\n"); 551 } 552 } else if (role & MPT_ROLE_INITIATOR) { 553 mpt_prt(mpt, "removing initiator role\n"); 554 doit++; 555 } 556 if (mpt->cfg_role & MPT_ROLE_TARGET) { 557 if ((role & MPT_ROLE_TARGET) == 0) { 558 mpt_prt(mpt, "adding target role\n"); 559 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; 560 doit++; 561 } else { 562 mpt_prt(mpt, "keeping target role\n"); 563 } 564 } else if (role & MPT_ROLE_TARGET) { 565 mpt_prt(mpt, "removing target role\n"); 566 doit++; 567 } 568 mpt->role = mpt->cfg_role; 569 } 570 571 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 572 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { 573 mpt_prt(mpt, "adding OXID option\n"); 574 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; 575 doit++; 576 } 577 } 578 579 if (doit) { 580 fc.Flags = fl; 581 host2mpt_config_page_fc_port_1(&fc); 582 r = mpt_write_cfg_page(mpt, 583 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, 584 sizeof(fc), FALSE, 5000); 585 if (r != 0) { 586 mpt_prt(mpt, "failed to update NVRAM with changes\n"); 587 return (0); 588 } 589 mpt_prt(mpt, "NOTE: NVRAM changes will not take " 590 "effect until next reboot or IOC reset\n"); 591 } 592 return (0); 593 } 594 595 static int 596 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) 597 { 598 ConfigExtendedPageHeader_t hdr; 599 struct mptsas_phyinfo *phyinfo; 600 SasIOUnitPage0_t *buffer; 601 int error, len, i; 602 603 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 604 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 605 &hdr, 0, 10000); 606 if (error) 607 goto out; 608 if (hdr.ExtPageLength == 0) { 609 error = ENXIO; 610 goto out; 611 } 612 613 len = hdr.ExtPageLength * 4; 614 buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); 615 if (buffer == NULL) { 616 error = ENOMEM; 617 goto out; 618 } 619 620 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 621 0, &hdr, buffer, len, 0, 10000); 622 if (error) { 623 kfree(buffer, M_DEVBUF); 624 goto out; 625 } 626 627 portinfo->num_phys = buffer->NumPhys; 628 portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) * 629 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); 630 if 
(portinfo->phy_info == NULL) { 631 kfree(buffer, M_DEVBUF); 632 error = ENOMEM; 633 goto out; 634 } 635 636 for (i = 0; i < portinfo->num_phys; i++) { 637 phyinfo = &portinfo->phy_info[i]; 638 phyinfo->phy_num = i; 639 phyinfo->port_id = buffer->PhyData[i].Port; 640 phyinfo->negotiated_link_rate = 641 buffer->PhyData[i].NegotiatedLinkRate; 642 phyinfo->handle = 643 le16toh(buffer->PhyData[i].ControllerDevHandle); 644 } 645 646 kfree(buffer, M_DEVBUF); 647 out: 648 return (error); 649 } 650 651 static int 652 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, 653 uint32_t form, uint32_t form_specific) 654 { 655 ConfigExtendedPageHeader_t hdr; 656 SasPhyPage0_t *buffer; 657 int error; 658 659 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, 660 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 661 0, 10000); 662 if (error) 663 goto out; 664 if (hdr.ExtPageLength == 0) { 665 error = ENXIO; 666 goto out; 667 } 668 669 buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 670 if (buffer == NULL) { 671 error = ENOMEM; 672 goto out; 673 } 674 675 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 676 form + form_specific, &hdr, buffer, 677 sizeof(SasPhyPage0_t), 0, 10000); 678 if (error) { 679 kfree(buffer, M_DEVBUF); 680 goto out; 681 } 682 683 phy_info->hw_link_rate = buffer->HwLinkRate; 684 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; 685 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); 686 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); 687 688 kfree(buffer, M_DEVBUF); 689 out: 690 return (error); 691 } 692 693 static int 694 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, 695 uint32_t form, uint32_t form_specific) 696 { 697 ConfigExtendedPageHeader_t hdr; 698 SasDevicePage0_t *buffer; 699 uint64_t sas_address; 700 int error = 0; 701 702 bzero(device_info, sizeof(*device_info)); 703 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, 704 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 705 &hdr, 0, 10000); 706 if (error) 707 goto out; 708 if (hdr.ExtPageLength == 0) { 709 error = ENXIO; 710 goto out; 711 } 712 713 buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 714 if (buffer == NULL) { 715 error = ENOMEM; 716 goto out; 717 } 718 719 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 720 form + form_specific, &hdr, buffer, 721 sizeof(SasDevicePage0_t), 0, 10000); 722 if (error) { 723 kfree(buffer, M_DEVBUF); 724 goto out; 725 } 726 727 device_info->dev_handle = le16toh(buffer->DevHandle); 728 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); 729 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); 730 device_info->slot = le16toh(buffer->Slot); 731 device_info->phy_num = buffer->PhyNum; 732 device_info->physical_port = buffer->PhysicalPort; 733 device_info->target_id = buffer->TargetID; 734 device_info->bus = buffer->Bus; 735 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); 736 device_info->sas_address = le64toh(sas_address); 737 device_info->device_info = le32toh(buffer->DeviceInfo); 738 739 kfree(buffer, M_DEVBUF); 740 out: 741 return (error); 742 } 743 744 /* 745 * Read SAS configuration information. Nothing to do yet. 
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
    struct mptsas_portinfo *portinfo;
    struct mptsas_phyinfo *phyinfo;
    int error, i;

    portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo == NULL)
        return (ENOMEM);

    error = mptsas_sas_io_unit_pg0(mpt, portinfo);
    if (error) {
        kfree(portinfo, M_DEVBUF);
        return (0);
    }

    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        error = mptsas_sas_phy_pg0(mpt, phyinfo,
            (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
            MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
        if (error)
            break;
        error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
            (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
            MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
            phyinfo->handle);
        if (error)
            break;
        phyinfo->identify.phy_num = phyinfo->phy_num = i;
        if (phyinfo->attached.dev_handle)
            error = mptsas_sas_device_pg0(mpt,
                &phyinfo->attached,
                (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
                MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
                phyinfo->attached.dev_handle);
        if (error)
            break;
    }
    mpt->sas_portinfo = portinfo;
    return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
    SataPassthroughRequest_t *pass;
    request_t *req;
    int error, status;

    req = mpt_get_request(mpt, 0);
    if (req == NULL)
        return;

    pass = req->req_vbuf;
    bzero(pass, sizeof(SataPassthroughRequest_t));
    pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
    pass->TargetID = devinfo->target_id;
    pass->Bus = devinfo->bus;
    pass->PassthroughFlags = 0;
    pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
    pass->DataLength = 0;
    pass->MsgContext = htole32(req->index | sata_pass_handler_id);
    /*
     * Register Host-to-Device FIS (type 0x27, command bit set)
     * carrying an ATA SET FEATURES (0xEF) command; subcommand 0x02
     * enables the drive write cache, 0x82 disables it.
     */
    pass->CommandFIS[0] = 0x27;
    pass->CommandFIS[1] = 0x80;
    pass->CommandFIS[2] = 0xef;
    pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
    pass->CommandFIS[7] = 0x40;
    pass->CommandFIS[15] = 0x08;

    mpt_check_doorbell(mpt);
    mpt_send_cmd(mpt, req);
    error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
        10 * 1000);
    if (error) {
        mpt_free_request(mpt, req);
        kprintf("error %d sending passthrough\n", error);
        return;
    }

    status = le16toh(req->IOCStatus);
    if (status != MPI_IOCSTATUS_SUCCESS) {
        mpt_free_request(mpt, req);
        kprintf("IOCSTATUS %d\n", status);
        return;
    }

    mpt_free_request(mpt, req);
}

/*
 * Set initial SAS configuration.  Currently this only (optionally)
 * enables or disables the write cache of attached SATA devices, as
 * selected by the hw.mpt.enable_sata_wc tunable.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
    struct mptsas_phyinfo *phyinfo;
    int i;

    if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
        for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
            phyinfo = &mpt->sas_portinfo->phy_info[i];
            if (phyinfo->attached.dev_handle == 0)
                continue;
            if ((phyinfo->attached.device_info &
                MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
                continue;
            if (bootverbose)
                device_printf(mpt->dev,
                    "%sabling SATA WC on phy %d\n",
                    (mpt_enable_sata_wc) ?
"En" : "Dis", i); 860 mptsas_set_sata_wc(mpt, &phyinfo->attached, 861 mpt_enable_sata_wc); 862 } 863 } 864 865 return (0); 866 } 867 868 static int 869 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 870 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 871 { 872 873 if (req != NULL) { 874 if (reply_frame != NULL) { 875 req->IOCStatus = le16toh(reply_frame->IOCStatus); 876 } 877 req->state &= ~REQ_STATE_QUEUED; 878 req->state |= REQ_STATE_DONE; 879 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 880 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 881 wakeup(req); 882 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 883 /* 884 * Whew- we can free this request (late completion) 885 */ 886 mpt_free_request(mpt, req); 887 } 888 } 889 890 return (TRUE); 891 } 892 893 /* 894 * Read SCSI configuration information 895 */ 896 static int 897 mpt_read_config_info_spi(struct mpt_softc *mpt) 898 { 899 int rv, i; 900 901 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 902 &mpt->mpt_port_page0.Header, FALSE, 5000); 903 if (rv) { 904 return (-1); 905 } 906 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 907 mpt->mpt_port_page0.Header.PageVersion, 908 mpt->mpt_port_page0.Header.PageLength, 909 mpt->mpt_port_page0.Header.PageNumber, 910 mpt->mpt_port_page0.Header.PageType); 911 912 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 913 &mpt->mpt_port_page1.Header, FALSE, 5000); 914 if (rv) { 915 return (-1); 916 } 917 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 918 mpt->mpt_port_page1.Header.PageVersion, 919 mpt->mpt_port_page1.Header.PageLength, 920 mpt->mpt_port_page1.Header.PageNumber, 921 mpt->mpt_port_page1.Header.PageType); 922 923 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 924 &mpt->mpt_port_page2.Header, FALSE, 5000); 925 if (rv) { 926 return (-1); 927 } 928 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 929 mpt->mpt_port_page2.Header.PageVersion, 930 mpt->mpt_port_page2.Header.PageLength, 931 mpt->mpt_port_page2.Header.PageNumber, 932 mpt->mpt_port_page2.Header.PageType); 933 934 for (i = 0; i < 16; i++) { 935 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 936 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 937 if (rv) { 938 return (-1); 939 } 940 mpt_lprt(mpt, MPT_PRT_DEBUG, 941 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 942 mpt->mpt_dev_page0[i].Header.PageVersion, 943 mpt->mpt_dev_page0[i].Header.PageLength, 944 mpt->mpt_dev_page0[i].Header.PageNumber, 945 mpt->mpt_dev_page0[i].Header.PageType); 946 947 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 948 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 949 if (rv) { 950 return (-1); 951 } 952 mpt_lprt(mpt, MPT_PRT_DEBUG, 953 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 954 mpt->mpt_dev_page1[i].Header.PageVersion, 955 mpt->mpt_dev_page1[i].Header.PageLength, 956 mpt->mpt_dev_page1[i].Header.PageNumber, 957 mpt->mpt_dev_page1[i].Header.PageType); 958 } 959 960 /* 961 * At this point, we don't *have* to fail. As long as we have 962 * valid config header information, we can (barely) lurch 963 * along. 
964 */ 965 966 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, 967 sizeof(mpt->mpt_port_page0), FALSE, 5000); 968 if (rv) { 969 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 970 } else { 971 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); 972 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 973 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 974 mpt->mpt_port_page0.Capabilities, 975 mpt->mpt_port_page0.PhysicalInterface); 976 } 977 978 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, 979 sizeof(mpt->mpt_port_page1), FALSE, 5000); 980 if (rv) { 981 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 982 } else { 983 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); 984 mpt_lprt(mpt, MPT_PRT_DEBUG, 985 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 986 mpt->mpt_port_page1.Configuration, 987 mpt->mpt_port_page1.OnBusTimerValue); 988 } 989 990 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, 991 sizeof(mpt->mpt_port_page2), FALSE, 5000); 992 if (rv) { 993 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 994 } else { 995 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 996 "Port Page 2: Flags %x Settings %x\n", 997 mpt->mpt_port_page2.PortFlags, 998 mpt->mpt_port_page2.PortSettings); 999 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); 1000 for (i = 0; i < 16; i++) { 1001 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1002 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1003 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1004 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1005 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1006 } 1007 } 1008 1009 for (i = 0; i < 16; i++) { 1010 rv = mpt_read_cur_cfg_page(mpt, i, 1011 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), 1012 FALSE, 5000); 1013 if (rv) { 1014 mpt_prt(mpt, 1015 "cannot read SPI Target %d Device Page 0\n", i); 1016 continue; 1017 } 1018 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); 1019 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1020 "target %d page 0: Negotiated Params %x Information %x\n", 1021 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1022 mpt->mpt_dev_page0[i].Information); 1023 1024 rv = mpt_read_cur_cfg_page(mpt, i, 1025 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), 1026 FALSE, 5000); 1027 if (rv) { 1028 mpt_prt(mpt, 1029 "cannot read SPI Target %d Device Page 1\n", i); 1030 continue; 1031 } 1032 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); 1033 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1034 "target %d page 1: Requested Params %x Configuration %x\n", 1035 i, mpt->mpt_dev_page1[i].RequestedParameters, 1036 mpt->mpt_dev_page1[i].Configuration); 1037 } 1038 return (0); 1039 } 1040 1041 /* 1042 * Validate SPI configuration information. 1043 * 1044 * In particular, validate SPI Port Page 1. 
1045 */ 1046 static int 1047 mpt_set_initial_config_spi(struct mpt_softc *mpt) 1048 { 1049 int error, i, pp1val; 1050 1051 mpt->mpt_disc_enable = 0xff; 1052 mpt->mpt_tag_enable = 0; 1053 1054 pp1val = ((1 << mpt->mpt_ini_id) << 1055 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; 1056 if (mpt->mpt_port_page1.Configuration != pp1val) { 1057 CONFIG_PAGE_SCSI_PORT_1 tmp; 1058 1059 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " 1060 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); 1061 tmp = mpt->mpt_port_page1; 1062 tmp.Configuration = pp1val; 1063 host2mpt_config_page_scsi_port_1(&tmp); 1064 error = mpt_write_cur_cfg_page(mpt, 0, 1065 &tmp.Header, sizeof(tmp), FALSE, 5000); 1066 if (error) { 1067 return (-1); 1068 } 1069 error = mpt_read_cur_cfg_page(mpt, 0, 1070 &tmp.Header, sizeof(tmp), FALSE, 5000); 1071 if (error) { 1072 return (-1); 1073 } 1074 mpt2host_config_page_scsi_port_1(&tmp); 1075 if (tmp.Configuration != pp1val) { 1076 mpt_prt(mpt, 1077 "failed to reset SPI Port Page 1 Config value\n"); 1078 return (-1); 1079 } 1080 mpt->mpt_port_page1 = tmp; 1081 } 1082 1083 /* 1084 * The purpose of this exercise is to get 1085 * all targets back to async/narrow. 1086 * 1087 * We skip this step if the BIOS has already negotiated 1088 * speeds with the targets. 1089 */ 1090 i = mpt->mpt_port_page2.PortSettings & 1091 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 1092 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { 1093 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1094 "honoring BIOS transfer negotiations\n"); 1095 } else { 1096 for (i = 0; i < 16; i++) { 1097 mpt->mpt_dev_page1[i].RequestedParameters = 0; 1098 mpt->mpt_dev_page1[i].Configuration = 0; 1099 (void) mpt_update_spi_config(mpt, i); 1100 } 1101 } 1102 return (0); 1103 } 1104 1105 static int 1106 mpt_cam_enable(struct mpt_softc *mpt) 1107 { 1108 int error; 1109 1110 MPT_LOCK(mpt); 1111 1112 error = EIO; 1113 if (mpt->is_fc) { 1114 if (mpt_read_config_info_fc(mpt)) { 1115 goto out; 1116 } 1117 if (mpt_set_initial_config_fc(mpt)) { 1118 goto out; 1119 } 1120 } else if (mpt->is_sas) { 1121 if (mpt_read_config_info_sas(mpt)) { 1122 goto out; 1123 } 1124 if (mpt_set_initial_config_sas(mpt)) { 1125 goto out; 1126 } 1127 } else if (mpt->is_spi) { 1128 if (mpt_read_config_info_spi(mpt)) { 1129 goto out; 1130 } 1131 if (mpt_set_initial_config_spi(mpt)) { 1132 goto out; 1133 } 1134 } 1135 error = 0; 1136 1137 out: 1138 MPT_UNLOCK(mpt); 1139 return (error); 1140 } 1141 1142 static void 1143 mpt_cam_ready(struct mpt_softc *mpt) 1144 { 1145 1146 /* 1147 * If we're in target mode, hang out resources now 1148 * so we don't cause the world to hang talking to us. 
 */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
        /*
         * Try to add some target command resources
         */
        MPT_LOCK(mpt);
        if (mpt_add_target_commands(mpt) == FALSE) {
            mpt_prt(mpt, "failed to add target commands\n");
        }
        MPT_UNLOCK(mpt);
    }
    mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
    mpt_handler_t handler;

    MPT_LOCK(mpt);
    mpt->ready = 0;
    mpt_terminate_recovery_thread(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_io_handler_id);
    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_tmf_handler_id);
    handler.reply_handler = mpt_fc_els_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        fc_els_handler_id);
    handler.reply_handler = mpt_scsi_tgt_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        mpt->scsi_tgt_handler_id);
    handler.reply_handler = mpt_sata_pass_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        sata_pass_handler_id);

    if (mpt->tmf_req != NULL) {
        mpt->tmf_req->state = REQ_STATE_ALLOCATED;
        mpt_free_request(mpt, mpt->tmf_req);
        mpt->tmf_req = NULL;
    }
    if (mpt->sas_portinfo != NULL) {
        kfree(mpt->sas_portinfo, M_DEVBUF);
        mpt->sas_portinfo = NULL;
    }

    if (mpt->sim != NULL) {
        xpt_free_path(mpt->path);
        xpt_bus_deregister(cam_sim_path(mpt->sim));
        cam_sim_free(mpt->sim);
        mpt->sim = NULL;
    }

    if (mpt->phydisk_sim != NULL) {
        xpt_free_path(mpt->phydisk_path);
        xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
        cam_sim_free(mpt->phydisk_sim);
        mpt->phydisk_sim = NULL;
    }
    MPT_UNLOCK(mpt);
}

/*
 * Poll for completed requests.  This is used when interrupts are not
 * available, e.g. while dumping core onto the swap device after a
 * system crash.
 */
static void
mpt_poll(struct cam_sim *sim)
{
    struct mpt_softc *mpt;

    mpt = (struct mpt_softc *)cam_sim_softc(sim);
    mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
    union ccb *ccb;
    struct mpt_softc *mpt;
    request_t *req;

    ccb = (union ccb *)arg;
    mpt = ccb->ccb_h.ccb_mpt_ptr;

    MPT_LOCK(mpt);
    req = ccb->ccb_h.ccb_req_ptr;
    mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
        req->serno, ccb, req->ccb);
    /* XXX: WHAT ARE WE TRYING TO DO HERE? */
    if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
        req->state |= REQ_STATE_TIMEDOUT;
        mpt_wakeup_recovery_thread(mpt);
    }
    MPT_UNLOCK(mpt);
}

/*
 * Callback routine from bus_dmamap_load() or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments, builds the SGL for the SCSI I/O
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
1258 */ 1259 static void 1260 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1261 { 1262 request_t *req, *trq; 1263 char *mpt_off; 1264 union ccb *ccb; 1265 struct mpt_softc *mpt; 1266 bus_addr_t chain_list_addr; 1267 int first_lim, seg, this_seg_lim; 1268 uint32_t addr, cur_off, flags, nxt_off, tf; 1269 void *sglp = NULL; 1270 MSG_REQUEST_HEADER *hdrp; 1271 SGE_SIMPLE64 *se; 1272 SGE_CHAIN64 *ce; 1273 int istgt = 0; 1274 1275 req = (request_t *)arg; 1276 ccb = req->ccb; 1277 1278 mpt = ccb->ccb_h.ccb_mpt_ptr; 1279 req = ccb->ccb_h.ccb_req_ptr; 1280 1281 hdrp = req->req_vbuf; 1282 mpt_off = req->req_vbuf; 1283 1284 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1285 error = EFBIG; 1286 } 1287 1288 if (error == 0) { 1289 switch (hdrp->Function) { 1290 case MPI_FUNCTION_SCSI_IO_REQUEST: 1291 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1292 istgt = 0; 1293 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1294 break; 1295 case MPI_FUNCTION_TARGET_ASSIST: 1296 istgt = 1; 1297 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1298 break; 1299 default: 1300 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 1301 hdrp->Function); 1302 error = EINVAL; 1303 break; 1304 } 1305 } 1306 1307 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1308 error = EFBIG; 1309 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1310 nseg, mpt->max_seg_cnt); 1311 } 1312 1313 bad: 1314 if (error != 0) { 1315 if (error != EFBIG && error != ENOMEM) { 1316 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 1317 } 1318 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1319 cam_status status; 1320 mpt_freeze_ccb(ccb); 1321 if (error == EFBIG) { 1322 status = CAM_REQ_TOO_BIG; 1323 } else if (error == ENOMEM) { 1324 if (mpt->outofbeer == 0) { 1325 mpt->outofbeer = 1; 1326 xpt_freeze_simq(mpt->sim, 1); 1327 mpt_lprt(mpt, MPT_PRT_DEBUG, 1328 "FREEZEQ\n"); 1329 } 1330 status = CAM_REQUEUE_REQ; 1331 } else { 1332 status = CAM_REQ_CMP_ERR; 1333 } 1334 mpt_set_ccb_status(ccb, status); 1335 } 1336 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1337 request_t *cmd_req = 1338 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1339 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1340 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1341 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1342 } 1343 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1344 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1345 xpt_done(ccb); 1346 mpt_free_request(mpt, req); 1347 return; 1348 } 1349 1350 /* 1351 * No data to transfer? 1352 * Just make a single simple SGL with zero length. 
1353 */ 1354 1355 if (mpt->verbose >= MPT_PRT_DEBUG) { 1356 int tidx = ((char *)sglp) - mpt_off; 1357 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1358 } 1359 1360 if (nseg == 0) { 1361 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1362 MPI_pSGE_SET_FLAGS(se1, 1363 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1364 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1365 se1->FlagsLength = htole32(se1->FlagsLength); 1366 goto out; 1367 } 1368 1369 1370 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1371 if (istgt == 0) { 1372 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1373 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1374 } 1375 } else { 1376 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1377 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1378 } 1379 } 1380 1381 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1382 bus_dmasync_op_t op; 1383 if (istgt == 0) { 1384 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1385 op = BUS_DMASYNC_PREREAD; 1386 } else { 1387 op = BUS_DMASYNC_PREWRITE; 1388 } 1389 } else { 1390 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1391 op = BUS_DMASYNC_PREWRITE; 1392 } else { 1393 op = BUS_DMASYNC_PREREAD; 1394 } 1395 } 1396 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1397 } 1398 1399 /* 1400 * Okay, fill in what we can at the end of the command frame. 1401 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1402 * the command frame. 1403 * 1404 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1405 * SIMPLE64 pointers and start doing CHAIN64 entries after 1406 * that. 1407 */ 1408 1409 if (nseg < MPT_NSGL_FIRST(mpt)) { 1410 first_lim = nseg; 1411 } else { 1412 /* 1413 * Leave room for CHAIN element 1414 */ 1415 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1416 } 1417 1418 se = (SGE_SIMPLE64 *) sglp; 1419 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1420 tf = flags; 1421 memset(se, 0, sizeof (*se)); 1422 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1423 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1424 if (sizeof(bus_addr_t) > 4) { 1425 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1426 /* SAS1078 36GB limitation WAR */ 1427 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + 1428 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { 1429 addr |= (1U << 31); 1430 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1431 } 1432 se->Address.High = htole32(addr); 1433 } 1434 if (seg == first_lim - 1) { 1435 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1436 } 1437 if (seg == nseg - 1) { 1438 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1439 MPI_SGE_FLAGS_END_OF_BUFFER; 1440 } 1441 MPI_pSGE_SET_FLAGS(se, tf); 1442 se->FlagsLength = htole32(se->FlagsLength); 1443 } 1444 1445 if (seg == nseg) { 1446 goto out; 1447 } 1448 1449 /* 1450 * Tell the IOC where to find the first chain element. 1451 */ 1452 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1453 nxt_off = MPT_RQSL(mpt); 1454 trq = req; 1455 1456 /* 1457 * Make up the rest of the data segments out of a chain element 1458 * (contained in the current request frame) which points to 1459 * SIMPLE64 elements in the next request frame, possibly ending 1460 * with *another* chain element (if there's more). 1461 */ 1462 while (seg < nseg) { 1463 /* 1464 * Point to the chain descriptor. Note that the chain 1465 * descriptor is at the end of the *previous* list (whether 1466 * chain or simple). 
1467 */ 1468 ce = (SGE_CHAIN64 *) se; 1469 1470 /* 1471 * Before we change our current pointer, make sure we won't 1472 * overflow the request area with this frame. Note that we 1473 * test against 'greater than' here as it's okay in this case 1474 * to have next offset be just outside the request area. 1475 */ 1476 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1477 nxt_off = MPT_REQUEST_AREA; 1478 goto next_chain; 1479 } 1480 1481 /* 1482 * Set our SGE element pointer to the beginning of the chain 1483 * list and update our next chain list offset. 1484 */ 1485 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1486 cur_off = nxt_off; 1487 nxt_off += MPT_RQSL(mpt); 1488 1489 /* 1490 * Now initialize the chain descriptor. 1491 */ 1492 memset(ce, 0, sizeof (*ce)); 1493 1494 /* 1495 * Get the physical address of the chain list. 1496 */ 1497 chain_list_addr = trq->req_pbuf; 1498 chain_list_addr += cur_off; 1499 if (sizeof (bus_addr_t) > 4) { 1500 ce->Address.High = 1501 htole32(((uint64_t)chain_list_addr) >> 32); 1502 } 1503 ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 1504 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1505 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1506 1507 /* 1508 * If we have more than a frame's worth of segments left, 1509 * set up the chain list to have the last element be another 1510 * chain descriptor. 1511 */ 1512 if ((nseg - seg) > MPT_NSGL(mpt)) { 1513 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1514 /* 1515 * The length of the chain is the length in bytes of the 1516 * number of segments plus the next chain element. 1517 * 1518 * The next chain descriptor offset is the length, 1519 * in words, of the number of segments. 1520 */ 1521 ce->Length = (this_seg_lim - seg) * 1522 sizeof (SGE_SIMPLE64); 1523 ce->NextChainOffset = ce->Length >> 2; 1524 ce->Length += sizeof (SGE_CHAIN64); 1525 } else { 1526 this_seg_lim = nseg; 1527 ce->Length = (this_seg_lim - seg) * 1528 sizeof (SGE_SIMPLE64); 1529 } 1530 ce->Length = htole16(ce->Length); 1531 1532 /* 1533 * Fill in the chain list SGE elements with our segment data. 1534 * 1535 * If we're the last element in this chain list, set the last 1536 * element flag. If we're the completely last element period, 1537 * set the end of list and end of buffer flags. 1538 */ 1539 while (seg < this_seg_lim) { 1540 tf = flags; 1541 memset(se, 0, sizeof (*se)); 1542 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1543 se->Address.Low = htole32(dm_segs->ds_addr & 1544 0xffffffff); 1545 if (sizeof (bus_addr_t) > 4) { 1546 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1547 /* SAS1078 36GB limitation WAR */ 1548 if (mpt->is_1078 && 1549 (((uint64_t)dm_segs->ds_addr + 1550 MPI_SGE_LENGTH(se->FlagsLength)) >> 1551 32) == 9) { 1552 addr |= (1U << 31); 1553 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1554 } 1555 se->Address.High = htole32(addr); 1556 } 1557 if (seg == this_seg_lim - 1) { 1558 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1559 } 1560 if (seg == nseg - 1) { 1561 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1562 MPI_SGE_FLAGS_END_OF_BUFFER; 1563 } 1564 MPI_pSGE_SET_FLAGS(se, tf); 1565 se->FlagsLength = htole32(se->FlagsLength); 1566 se++; 1567 seg++; 1568 dm_segs++; 1569 } 1570 1571 next_chain: 1572 /* 1573 * If we have more segments to do and we've used up all of 1574 * the space in a request area, go allocate another one 1575 * and chain to that. 
1576 */ 1577 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1578 request_t *nrq; 1579 1580 nrq = mpt_get_request(mpt, FALSE); 1581 1582 if (nrq == NULL) { 1583 error = ENOMEM; 1584 goto bad; 1585 } 1586 1587 /* 1588 * Append the new request area on the tail of our list. 1589 */ 1590 if ((trq = req->chain) == NULL) { 1591 req->chain = nrq; 1592 } else { 1593 while (trq->chain != NULL) { 1594 trq = trq->chain; 1595 } 1596 trq->chain = nrq; 1597 } 1598 trq = nrq; 1599 mpt_off = trq->req_vbuf; 1600 if (mpt->verbose >= MPT_PRT_DEBUG) { 1601 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1602 } 1603 nxt_off = 0; 1604 } 1605 } 1606 out: 1607 1608 /* 1609 * Last time we need to check if this CCB needs to be aborted. 1610 */ 1611 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1612 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1613 request_t *cmd_req = 1614 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1615 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1616 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1617 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1618 } 1619 mpt_prt(mpt, 1620 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1621 ccb->ccb_h.status & CAM_STATUS_MASK); 1622 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1623 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1624 } 1625 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1626 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1627 xpt_done(ccb); 1628 mpt_free_request(mpt, req); 1629 return; 1630 } 1631 1632 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1633 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1634 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 1635 mpt_timeout, ccb); 1636 } 1637 if (mpt->verbose > MPT_PRT_DEBUG) { 1638 int nc = 0; 1639 mpt_print_request(req->req_vbuf); 1640 for (trq = req->chain; trq; trq = trq->chain) { 1641 kprintf(" Additional Chain Area %d\n", nc++); 1642 mpt_dump_sgl(trq->req_vbuf, 0); 1643 } 1644 } 1645 1646 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1647 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1648 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1649 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1650 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1651 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1652 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1653 } else { 1654 tgt->state = TGT_STATE_MOVING_DATA; 1655 } 1656 #else 1657 tgt->state = TGT_STATE_MOVING_DATA; 1658 #endif 1659 } 1660 mpt_send_cmd(mpt, req); 1661 } 1662 1663 static void 1664 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1665 { 1666 request_t *req, *trq; 1667 char *mpt_off; 1668 union ccb *ccb; 1669 struct mpt_softc *mpt; 1670 int seg, first_lim; 1671 uint32_t flags, nxt_off; 1672 void *sglp = NULL; 1673 MSG_REQUEST_HEADER *hdrp; 1674 SGE_SIMPLE32 *se; 1675 SGE_CHAIN32 *ce; 1676 int istgt = 0; 1677 1678 req = (request_t *)arg; 1679 ccb = req->ccb; 1680 1681 mpt = ccb->ccb_h.ccb_mpt_ptr; 1682 req = ccb->ccb_h.ccb_req_ptr; 1683 1684 hdrp = req->req_vbuf; 1685 mpt_off = req->req_vbuf; 1686 1687 1688 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1689 error = EFBIG; 1690 } 1691 1692 if (error == 0) { 1693 switch (hdrp->Function) { 1694 case MPI_FUNCTION_SCSI_IO_REQUEST: 1695 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1696 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1697 break; 1698 case MPI_FUNCTION_TARGET_ASSIST: 1699 istgt = 1; 1700 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1701 break; 1702 default: 1703 mpt_prt(mpt, "bad fct 0x%x in 
mpt_execute_req\n", 1704 hdrp->Function); 1705 error = EINVAL; 1706 break; 1707 } 1708 } 1709 1710 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1711 error = EFBIG; 1712 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1713 nseg, mpt->max_seg_cnt); 1714 } 1715 1716 bad: 1717 if (error != 0) { 1718 if (error != EFBIG && error != ENOMEM) { 1719 mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 1720 } 1721 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1722 cam_status status; 1723 mpt_freeze_ccb(ccb); 1724 if (error == EFBIG) { 1725 status = CAM_REQ_TOO_BIG; 1726 } else if (error == ENOMEM) { 1727 if (mpt->outofbeer == 0) { 1728 mpt->outofbeer = 1; 1729 xpt_freeze_simq(mpt->sim, 1); 1730 mpt_lprt(mpt, MPT_PRT_DEBUG, 1731 "FREEZEQ\n"); 1732 } 1733 status = CAM_REQUEUE_REQ; 1734 } else { 1735 status = CAM_REQ_CMP_ERR; 1736 } 1737 mpt_set_ccb_status(ccb, status); 1738 } 1739 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1740 request_t *cmd_req = 1741 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1742 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1743 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1744 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1745 } 1746 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1747 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1748 xpt_done(ccb); 1749 mpt_free_request(mpt, req); 1750 return; 1751 } 1752 1753 /* 1754 * No data to transfer? 1755 * Just make a single simple SGL with zero length. 1756 */ 1757 1758 if (mpt->verbose >= MPT_PRT_DEBUG) { 1759 int tidx = ((char *)sglp) - mpt_off; 1760 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1761 } 1762 1763 if (nseg == 0) { 1764 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1765 MPI_pSGE_SET_FLAGS(se1, 1766 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1767 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1768 se1->FlagsLength = htole32(se1->FlagsLength); 1769 goto out; 1770 } 1771 1772 1773 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1774 if (istgt == 0) { 1775 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1776 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1777 } 1778 } else { 1779 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1780 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1781 } 1782 } 1783 1784 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1785 bus_dmasync_op_t op; 1786 if (istgt) { 1787 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1788 op = BUS_DMASYNC_PREREAD; 1789 } else { 1790 op = BUS_DMASYNC_PREWRITE; 1791 } 1792 } else { 1793 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1794 op = BUS_DMASYNC_PREWRITE; 1795 } else { 1796 op = BUS_DMASYNC_PREREAD; 1797 } 1798 } 1799 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1800 } 1801 1802 /* 1803 * Okay, fill in what we can at the end of the command frame. 1804 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1805 * the command frame. 1806 * 1807 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1808 * SIMPLE32 pointers and start doing CHAIN32 entries after 1809 * that. 
1810 */ 1811 1812 if (nseg < MPT_NSGL_FIRST(mpt)) { 1813 first_lim = nseg; 1814 } else { 1815 /* 1816 * Leave room for CHAIN element 1817 */ 1818 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1819 } 1820 1821 se = (SGE_SIMPLE32 *) sglp; 1822 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1823 uint32_t tf; 1824 1825 memset(se, 0,sizeof (*se)); 1826 se->Address = htole32(dm_segs->ds_addr); 1827 1828 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1829 tf = flags; 1830 if (seg == first_lim - 1) { 1831 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1832 } 1833 if (seg == nseg - 1) { 1834 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1835 MPI_SGE_FLAGS_END_OF_BUFFER; 1836 } 1837 MPI_pSGE_SET_FLAGS(se, tf); 1838 se->FlagsLength = htole32(se->FlagsLength); 1839 } 1840 1841 if (seg == nseg) { 1842 goto out; 1843 } 1844 1845 /* 1846 * Tell the IOC where to find the first chain element. 1847 */ 1848 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1849 nxt_off = MPT_RQSL(mpt); 1850 trq = req; 1851 1852 /* 1853 * Make up the rest of the data segments out of a chain element 1854 * (contained in the current request frame) which points to 1855 * SIMPLE32 elements in the next request frame, possibly ending 1856 * with *another* chain element (if there's more). 1857 */ 1858 while (seg < nseg) { 1859 int this_seg_lim; 1860 uint32_t tf, cur_off; 1861 bus_addr_t chain_list_addr; 1862 1863 /* 1864 * Point to the chain descriptor. Note that the chain 1865 * descriptor is at the end of the *previous* list (whether 1866 * chain or simple). 1867 */ 1868 ce = (SGE_CHAIN32 *) se; 1869 1870 /* 1871 * Before we change our current pointer, make sure we won't 1872 * overflow the request area with this frame. Note that we 1873 * test against 'greater than' here as it's okay in this case 1874 * to have next offset be just outside the request area. 1875 */ 1876 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1877 nxt_off = MPT_REQUEST_AREA; 1878 goto next_chain; 1879 } 1880 1881 /* 1882 * Set our SGE element pointer to the beginning of the chain 1883 * list and update our next chain list offset. 1884 */ 1885 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1886 cur_off = nxt_off; 1887 nxt_off += MPT_RQSL(mpt); 1888 1889 /* 1890 * Now initialize the chain descriptor. 1891 */ 1892 memset(ce, 0, sizeof (*ce)); 1893 1894 /* 1895 * Get the physical address of the chain list. 1896 */ 1897 chain_list_addr = trq->req_pbuf; 1898 chain_list_addr += cur_off; 1899 1900 1901 1902 ce->Address = htole32(chain_list_addr); 1903 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1904 1905 1906 /* 1907 * If we have more than a frame's worth of segments left, 1908 * set up the chain list to have the last element be another 1909 * chain descriptor. 1910 */ 1911 if ((nseg - seg) > MPT_NSGL(mpt)) { 1912 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1913 /* 1914 * The length of the chain is the length in bytes of the 1915 * number of segments plus the next chain element. 1916 * 1917 * The next chain descriptor offset is the length, 1918 * in words, of the number of segments. 1919 */ 1920 ce->Length = (this_seg_lim - seg) * 1921 sizeof (SGE_SIMPLE32); 1922 ce->NextChainOffset = ce->Length >> 2; 1923 ce->Length += sizeof (SGE_CHAIN32); 1924 } else { 1925 this_seg_lim = nseg; 1926 ce->Length = (this_seg_lim - seg) * 1927 sizeof (SGE_SIMPLE32); 1928 } 1929 ce->Length = htole16(ce->Length); 1930 1931 /* 1932 * Fill in the chain list SGE elements with our segment data. 1933 * 1934 * If we're the last element in this chain list, set the last 1935 * element flag. 
If we're the completely last element period, 1936 * set the end of list and end of buffer flags. 1937 */ 1938 while (seg < this_seg_lim) { 1939 memset(se, 0, sizeof (*se)); 1940 se->Address = htole32(dm_segs->ds_addr); 1941 1942 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1943 tf = flags; 1944 if (seg == this_seg_lim - 1) { 1945 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1946 } 1947 if (seg == nseg - 1) { 1948 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1949 MPI_SGE_FLAGS_END_OF_BUFFER; 1950 } 1951 MPI_pSGE_SET_FLAGS(se, tf); 1952 se->FlagsLength = htole32(se->FlagsLength); 1953 se++; 1954 seg++; 1955 dm_segs++; 1956 } 1957 1958 next_chain: 1959 /* 1960 * If we have more segments to do and we've used up all of 1961 * the space in a request area, go allocate another one 1962 * and chain to that. 1963 */ 1964 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1965 request_t *nrq; 1966 1967 nrq = mpt_get_request(mpt, FALSE); 1968 1969 if (nrq == NULL) { 1970 error = ENOMEM; 1971 goto bad; 1972 } 1973 1974 /* 1975 * Append the new request area on the tail of our list. 1976 */ 1977 if ((trq = req->chain) == NULL) { 1978 req->chain = nrq; 1979 } else { 1980 while (trq->chain != NULL) { 1981 trq = trq->chain; 1982 } 1983 trq->chain = nrq; 1984 } 1985 trq = nrq; 1986 mpt_off = trq->req_vbuf; 1987 if (mpt->verbose >= MPT_PRT_DEBUG) { 1988 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1989 } 1990 nxt_off = 0; 1991 } 1992 } 1993 out: 1994 1995 /* 1996 * Last time we need to check if this CCB needs to be aborted. 1997 */ 1998 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1999 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2000 request_t *cmd_req = 2001 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2002 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2003 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2004 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2005 } 2006 mpt_prt(mpt, 2007 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2008 ccb->ccb_h.status & CAM_STATUS_MASK); 2009 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2010 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2011 } 2012 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2013 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2014 xpt_done(ccb); 2015 mpt_free_request(mpt, req); 2016 return; 2017 } 2018 2019 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2020 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2021 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 2022 mpt_timeout, ccb); 2023 } 2024 if (mpt->verbose > MPT_PRT_DEBUG) { 2025 int nc = 0; 2026 mpt_print_request(req->req_vbuf); 2027 for (trq = req->chain; trq; trq = trq->chain) { 2028 kprintf(" Additional Chain Area %d\n", nc++); 2029 mpt_dump_sgl(trq->req_vbuf, 0); 2030 } 2031 } 2032 2033 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2034 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2035 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2036 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2037 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2038 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2039 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2040 } else { 2041 tgt->state = TGT_STATE_MOVING_DATA; 2042 } 2043 #else 2044 tgt->state = TGT_STATE_MOVING_DATA; 2045 #endif 2046 } 2047 mpt_send_cmd(mpt, req); 2048 } 2049 2050 static void 2051 mpt_start(struct cam_sim *sim, union ccb *ccb) 2052 { 2053 request_t *req; 2054 struct mpt_softc *mpt; 2055 MSG_SCSI_IO_REQUEST *mpt_req; 2056 struct ccb_scsiio *csio = &ccb->csio; 2057 struct ccb_hdr *ccbh = &ccb->ccb_h; 2058 bus_dmamap_callback_t 
*cb; 2059 target_id_t tgt; 2060 int raid_passthru; 2061 2062 /* Get the pointer for the physical addapter */ 2063 mpt = ccb->ccb_h.ccb_mpt_ptr; 2064 raid_passthru = (sim == mpt->phydisk_sim); 2065 2066 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2067 if (mpt->outofbeer == 0) { 2068 mpt->outofbeer = 1; 2069 xpt_freeze_simq(mpt->sim, 1); 2070 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2071 } 2072 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2073 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2074 xpt_done(ccb); 2075 return; 2076 } 2077 #ifdef INVARIANTS 2078 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2079 #endif 2080 2081 if (sizeof (bus_addr_t) > 4) { 2082 cb = mpt_execute_req_a64; 2083 } else { 2084 cb = mpt_execute_req; 2085 } 2086 2087 /* 2088 * Link the ccb and the request structure so we can find 2089 * the other knowing either the request or the ccb 2090 */ 2091 req->ccb = ccb; 2092 ccb->ccb_h.ccb_req_ptr = req; 2093 2094 /* Now we build the command for the IOC */ 2095 mpt_req = req->req_vbuf; 2096 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2097 2098 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2099 if (raid_passthru) { 2100 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2101 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2102 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2103 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2104 xpt_done(ccb); 2105 return; 2106 } 2107 mpt_req->Bus = 0; /* we never set bus here */ 2108 } else { 2109 tgt = ccb->ccb_h.target_id; 2110 mpt_req->Bus = 0; /* XXX */ 2111 2112 } 2113 mpt_req->SenseBufferLength = 2114 (csio->sense_len < MPT_SENSE_SIZE) ? 2115 csio->sense_len : MPT_SENSE_SIZE; 2116 2117 /* 2118 * We use the message context to find the request structure when we 2119 * Get the command completion interrupt from the IOC. 2120 */ 2121 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2122 2123 /* Which physical device to do the I/O on */ 2124 mpt_req->TargetID = tgt; 2125 2126 /* We assume a single level LUN type */ 2127 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 2128 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 2129 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 2130 } else { 2131 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 2132 } 2133 2134 /* Set the direction of the transfer */ 2135 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2136 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2137 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2138 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2139 } else { 2140 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2141 } 2142 2143 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2144 switch(ccb->csio.tag_action) { 2145 case MSG_HEAD_OF_Q_TAG: 2146 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2147 break; 2148 case MSG_ACA_TASK: 2149 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2150 break; 2151 case MSG_ORDERED_Q_TAG: 2152 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2153 break; 2154 case MSG_SIMPLE_Q_TAG: 2155 default: 2156 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2157 break; 2158 } 2159 } else { 2160 if (mpt->is_fc || mpt->is_sas) { 2161 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2162 } else { 2163 /* XXX No such thing for a target doing packetized. 
*/ 2164 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2165 } 2166 } 2167 2168 if (mpt->is_spi) { 2169 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2170 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2171 } 2172 } 2173 mpt_req->Control = htole32(mpt_req->Control); 2174 2175 /* Copy the scsi command block into place */ 2176 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2177 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2178 } else { 2179 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2180 } 2181 2182 mpt_req->CDBLength = csio->cdb_len; 2183 mpt_req->DataLength = htole32(csio->dxfer_len); 2184 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2185 2186 /* 2187 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2188 */ 2189 if (mpt->verbose == MPT_PRT_DEBUG) { 2190 U32 df; 2191 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2192 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2193 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2194 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2195 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2196 mpt_prtc(mpt, "(%s %u byte%s ", 2197 (df == MPI_SCSIIO_CONTROL_READ)? 2198 "read" : "write", csio->dxfer_len, 2199 (csio->dxfer_len == 1)? ")" : "s)"); 2200 } 2201 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 2202 ccb->ccb_h.target_lun, req, req->serno); 2203 } 2204 2205 /* 2206 * If we have any data to send with this command map it into bus space. 2207 */ 2208 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2209 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 2210 /* 2211 * We've been given a pointer to a single buffer. 2212 */ 2213 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 2214 /* 2215 * Virtual address that needs to translated into 2216 * one or more physical address ranges. 2217 */ 2218 int error; 2219 crit_enter(); 2220 error = bus_dmamap_load(mpt->buffer_dmat, 2221 req->dmap, csio->data_ptr, csio->dxfer_len, 2222 cb, req, 0); 2223 crit_exit(); 2224 if (error == EINPROGRESS) { 2225 /* 2226 * So as to maintain ordering, 2227 * freeze the controller queue 2228 * until our mapping is 2229 * returned. 2230 */ 2231 xpt_freeze_simq(mpt->sim, 1); 2232 ccbh->status |= CAM_RELEASE_SIMQ; 2233 } 2234 } else { 2235 /* 2236 * We have been given a pointer to single 2237 * physical buffer. 2238 */ 2239 struct bus_dma_segment seg; 2240 seg.ds_addr = 2241 (bus_addr_t)(vm_offset_t)csio->data_ptr; 2242 seg.ds_len = csio->dxfer_len; 2243 (*cb)(req, &seg, 1, 0); 2244 } 2245 } else { 2246 /* 2247 * We have been given a list of addresses. 2248 * This case could be easily supported but they are not 2249 * currently generated by the CAM subsystem so there 2250 * is no point in wasting the time right now. 2251 */ 2252 struct bus_dma_segment *segs; 2253 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 2254 (*cb)(req, NULL, 0, EFAULT); 2255 } else { 2256 /* Just use the segments provided */ 2257 segs = (struct bus_dma_segment *)csio->data_ptr; 2258 (*cb)(req, segs, csio->sglist_cnt, 0); 2259 } 2260 } 2261 } else { 2262 (*cb)(req, NULL, 0, 0); 2263 } 2264 } 2265 2266 static int 2267 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2268 int sleep_ok) 2269 { 2270 int error; 2271 uint16_t status; 2272 uint8_t response; 2273 2274 error = mpt_scsi_send_tmf(mpt, 2275 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2276 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2277 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2278 mpt->is_fc ? 
MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2279 0, /* XXX How do I get the channel ID? */ 2280 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2281 lun != CAM_LUN_WILDCARD ? lun : 0, 2282 0, sleep_ok); 2283 2284 if (error != 0) { 2285 /* 2286 * mpt_scsi_send_tmf hard resets on failure, so no 2287 * need to do so here. 2288 */ 2289 mpt_prt(mpt, 2290 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2291 return (EIO); 2292 } 2293 2294 /* Wait for bus reset to be processed by the IOC. */ 2295 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2296 REQ_STATE_DONE, sleep_ok, 5000); 2297 2298 status = le16toh(mpt->tmf_req->IOCStatus); 2299 response = mpt->tmf_req->ResponseCode; 2300 mpt->tmf_req->state = REQ_STATE_FREE; 2301 2302 if (error) { 2303 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2304 "Resetting controller.\n"); 2305 mpt_reset(mpt, TRUE); 2306 return (ETIMEDOUT); 2307 } 2308 2309 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2310 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2311 "Resetting controller.\n", status); 2312 mpt_reset(mpt, TRUE); 2313 return (EIO); 2314 } 2315 2316 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2317 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2318 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " 2319 "Resetting controller.\n", response); 2320 mpt_reset(mpt, TRUE); 2321 return (EIO); 2322 } 2323 return (0); 2324 } 2325 2326 static int 2327 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2328 { 2329 int r = 0; 2330 request_t *req; 2331 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2332 2333 req = mpt_get_request(mpt, FALSE); 2334 if (req == NULL) { 2335 return (ENOMEM); 2336 } 2337 fc = req->req_vbuf; 2338 memset(fc, 0, sizeof(*fc)); 2339 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2340 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2341 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2342 mpt_send_cmd(mpt, req); 2343 if (dowait) { 2344 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2345 REQ_STATE_DONE, FALSE, 60 * 1000); 2346 if (r == 0) { 2347 mpt_free_request(mpt, req); 2348 } 2349 } 2350 return (r); 2351 } 2352 2353 static void 2354 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb) 2355 { 2356 xpt_free_path(ccb->ccb_h.path); 2357 kfree(ccb, M_TEMP); 2358 } 2359 2360 static int 2361 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2362 MSG_EVENT_NOTIFY_REPLY *msg) 2363 { 2364 uint32_t data0, data1; 2365 2366 data0 = le32toh(msg->Data[0]); 2367 data1 = le32toh(msg->Data[1]); 2368 switch(msg->Event & 0xFF) { 2369 case MPI_EVENT_UNIT_ATTENTION: 2370 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2371 (data0 >> 8) & 0xff, data0 & 0xff); 2372 break; 2373 2374 case MPI_EVENT_IOC_BUS_RESET: 2375 /* We generated a bus reset */ 2376 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2377 (data0 >> 8) & 0xff); 2378 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2379 break; 2380 2381 case MPI_EVENT_EXT_BUS_RESET: 2382 /* Someone else generated a bus reset */ 2383 mpt_prt(mpt, "External Bus Reset Detected\n"); 2384 /* 2385 * These replies don't return EventData like the MPI 2386 * spec says they do 2387 */ 2388 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2389 break; 2390 2391 case MPI_EVENT_RESCAN: 2392 { 2393 union ccb *ccb; 2394 uint32_t pathid; 2395 /* 2396 * In general this means a device has been added to the loop. 
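 *
 * The code below allocates a CCB, builds a wildcard path on the
 * appropriate SIM (the physical disk SIM if we have one, else the
 * regular SIM) and queues an XPT_SCAN_BUS; mpt_cam_rescan_callback
 * frees the path and the CCB when the scan completes.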
2397 */ 2398 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2399 if (mpt->ready == 0) { 2400 break; 2401 } 2402 if (mpt->phydisk_sim) { 2403 pathid = cam_sim_path(mpt->phydisk_sim); 2404 } else { 2405 pathid = cam_sim_path(mpt->sim); 2406 } 2407 /* 2408 * Allocate a CCB, create a wildcard path for this bus, 2409 * and schedule a rescan. 2410 */ 2411 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO); 2412 2413 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2414 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2415 mpt_prt(mpt, "unable to create path for rescan\n"); 2416 kfree(ccb, M_TEMP); 2417 break; 2418 } 2419 2420 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/); 2421 ccb->ccb_h.func_code = XPT_SCAN_BUS; 2422 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback; 2423 ccb->crcn.flags = CAM_FLAG_NONE; 2424 xpt_action(ccb); 2425 2426 /* scan is now in progress */ 2427 2428 break; 2429 } 2430 case MPI_EVENT_LINK_STATUS_CHANGE: 2431 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2432 (data1 >> 8) & 0xff, 2433 ((data0 & 0xff) == 0)? "Failed" : "Active"); 2434 break; 2435 2436 case MPI_EVENT_LOOP_STATE_CHANGE: 2437 switch ((data0 >> 16) & 0xff) { 2438 case 0x01: 2439 mpt_prt(mpt, 2440 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2441 "(Loop Initialization)\n", 2442 (data1 >> 8) & 0xff, 2443 (data0 >> 8) & 0xff, 2444 (data0 ) & 0xff); 2445 switch ((data0 >> 8) & 0xff) { 2446 case 0xF7: 2447 if ((data0 & 0xff) == 0xF7) { 2448 mpt_prt(mpt, "Device needs AL_PA\n"); 2449 } else { 2450 mpt_prt(mpt, "Device %02x doesn't like " 2451 "FC performance\n", 2452 data0 & 0xFF); 2453 } 2454 break; 2455 case 0xF8: 2456 if ((data0 & 0xff) == 0xF7) { 2457 mpt_prt(mpt, "Device had loop failure " 2458 "at its receiver prior to acquiring" 2459 " AL_PA\n"); 2460 } else { 2461 mpt_prt(mpt, "Device %02x detected loop" 2462 " failure at its receiver\n", 2463 data0 & 0xFF); 2464 } 2465 break; 2466 default: 2467 mpt_prt(mpt, "Device %02x requests that device " 2468 "%02x reset itself\n", 2469 data0 & 0xFF, 2470 (data0 >> 8) & 0xFF); 2471 break; 2472 } 2473 break; 2474 case 0x02: 2475 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2476 "LPE(%02x,%02x) (Loop Port Enable)\n", 2477 (data1 >> 8) & 0xff, /* Port */ 2478 (data0 >> 8) & 0xff, /* Character 3 */ 2479 (data0 ) & 0xff /* Character 4 */); 2480 break; 2481 case 0x03: 2482 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2483 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2484 (data1 >> 8) & 0xff, /* Port */ 2485 (data0 >> 8) & 0xff, /* Character 3 */ 2486 (data0 ) & 0xff /* Character 4 */); 2487 break; 2488 default: 2489 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2490 "FC event (%02x %02x %02x)\n", 2491 (data1 >> 8) & 0xff, /* Port */ 2492 (data0 >> 16) & 0xff, /* Event */ 2493 (data0 >> 8) & 0xff, /* Character 3 */ 2494 (data0 ) & 0xff /* Character 4 */); 2495 } 2496 break; 2497 2498 case MPI_EVENT_LOGOUT: 2499 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2500 (data1 >> 8) & 0xff, data0); 2501 break; 2502 case MPI_EVENT_QUEUE_FULL: 2503 { 2504 struct cam_sim *sim; 2505 struct cam_path *tmppath; 2506 struct ccb_relsim crs; 2507 PTR_EVENT_DATA_QUEUE_FULL pqf; 2508 lun_id_t lun_id; 2509 2510 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2511 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2512 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2513 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2514 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2515 pqf->TargetID) != 0) { 2516 sim = mpt->phydisk_sim; 2517 } else { 2518 
sim = mpt->sim; 2519 } 2520 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2521 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2522 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2523 mpt_prt(mpt, "unable to create a path to send " 2524 "XPT_REL_SIMQ"); 2525 break; 2526 } 2527 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2528 crs.ccb_h.func_code = XPT_REL_SIMQ; 2529 crs.ccb_h.flags = CAM_DEV_QFREEZE; 2530 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2531 crs.openings = pqf->CurrentDepth - 1; 2532 xpt_action((union ccb *)&crs); 2533 if (crs.ccb_h.status != CAM_REQ_CMP) { 2534 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2535 } 2536 xpt_free_path(tmppath); 2537 } 2538 break; 2539 } 2540 case MPI_EVENT_IR_RESYNC_UPDATE: 2541 mpt_prt(mpt, "IR resync update %d completed\n", 2542 (data0 >> 16) & 0xff); 2543 break; 2544 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2545 { 2546 union ccb *ccb; 2547 struct cam_sim *sim; 2548 struct cam_path *tmppath; 2549 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; 2550 2551 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; 2552 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2553 psdsc->TargetID) != 0) 2554 sim = mpt->phydisk_sim; 2555 else 2556 sim = mpt->sim; 2557 switch(psdsc->ReasonCode) { 2558 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 2559 ccb = kmalloc(sizeof(union ccb), M_TEMP, 2560 M_WAITOK | M_ZERO); 2561 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2562 cam_sim_path(sim), psdsc->TargetID, 2563 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2564 mpt_prt(mpt, 2565 "unable to create path for rescan\n"); 2566 kfree(ccb, M_TEMP); 2567 break; 2568 } 2569 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 2570 5/*priority (low)*/); 2571 ccb->ccb_h.func_code = XPT_SCAN_BUS; 2572 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback; 2573 ccb->crcn.flags = CAM_FLAG_NONE; 2574 xpt_action(ccb); 2575 break; 2576 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 2577 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2578 psdsc->TargetID, CAM_LUN_WILDCARD) != 2579 CAM_REQ_CMP) { 2580 mpt_prt(mpt, 2581 "unable to create path for async event"); 2582 break; 2583 } 2584 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2585 xpt_free_path(tmppath); 2586 break; 2587 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: 2588 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: 2589 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 2590 break; 2591 default: 2592 mpt_lprt(mpt, MPT_PRT_WARN, 2593 "SAS device status change: Bus: 0x%02x TargetID: " 2594 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, 2595 psdsc->TargetID, psdsc->ReasonCode); 2596 break; 2597 } 2598 break; 2599 } 2600 case MPI_EVENT_SAS_DISCOVERY_ERROR: 2601 { 2602 PTR_EVENT_DATA_DISCOVERY_ERROR pde; 2603 2604 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; 2605 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); 2606 mpt_lprt(mpt, MPT_PRT_WARN, 2607 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", 2608 pde->Port, pde->DiscoveryStatus); 2609 break; 2610 } 2611 case MPI_EVENT_EVENT_CHANGE: 2612 case MPI_EVENT_INTEGRATED_RAID: 2613 case MPI_EVENT_IR2: 2614 case MPI_EVENT_LOG_ENTRY_ADDED: 2615 case MPI_EVENT_SAS_DISCOVERY: 2616 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2617 case MPI_EVENT_SAS_SES: 2618 break; 2619 default: 2620 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2621 msg->Event & 0xFF); 2622 return (0); 2623 } 2624 return (1); 2625 } 2626 2627 /* 2628 * Reply path for all SCSI I/O requests, called from our 2629 * interrupt handler by extracting our handler index from 2630 * the MsgContext field of the reply from the IOC. 
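 *
 * The MsgContext was built in mpt_start() as
 * htole32(req->index | scsi_io_handler_id), so the handler id part
 * routes the reply to this function and the index part recovers
 * the originating request.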
2631 * 2632 * This routine is optimized for the common case of a 2633 * completion without error. All exception handling is 2634 * offloaded to non-inlined helper routines to minimize 2635 * cache footprint. 2636 */ 2637 static int 2638 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2639 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2640 { 2641 MSG_SCSI_IO_REQUEST *scsi_req; 2642 union ccb *ccb; 2643 2644 if (req->state == REQ_STATE_FREE) { 2645 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2646 return (TRUE); 2647 } 2648 2649 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2650 ccb = req->ccb; 2651 if (ccb == NULL) { 2652 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2653 req, req->serno); 2654 return (TRUE); 2655 } 2656 2657 mpt_req_untimeout(req, mpt_timeout, ccb); 2658 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2659 2660 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2661 bus_dmasync_op_t op; 2662 2663 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2664 op = BUS_DMASYNC_POSTREAD; 2665 else 2666 op = BUS_DMASYNC_POSTWRITE; 2667 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2668 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2669 } 2670 2671 if (reply_frame == NULL) { 2672 /* 2673 * Context only reply, completion without error status. 2674 */ 2675 ccb->csio.resid = 0; 2676 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2677 ccb->csio.scsi_status = SCSI_STATUS_OK; 2678 } else { 2679 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2680 } 2681 2682 if (mpt->outofbeer) { 2683 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2684 mpt->outofbeer = 0; 2685 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2686 } 2687 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2688 struct scsi_inquiry_data *iq = 2689 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2690 if (scsi_req->Function == 2691 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2692 /* 2693 * Fake out the device type so that only the 2694 * pass-thru device will attach. 2695 */ 2696 iq->device &= ~0x1F; 2697 iq->device |= T_NODEVICE; 2698 } 2699 } 2700 if (mpt->verbose == MPT_PRT_DEBUG) { 2701 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2702 req, req->serno); 2703 } 2704 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2705 xpt_done(ccb); 2706 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2707 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2708 } else { 2709 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2710 req, req->serno); 2711 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2712 } 2713 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2714 ("CCB req needed wakeup")); 2715 #ifdef INVARIANTS 2716 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2717 #endif 2718 mpt_free_request(mpt, req); 2719 return (TRUE); 2720 } 2721 2722 static int 2723 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2724 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2725 { 2726 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2727 2728 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2729 #ifdef INVARIANTS 2730 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2731 #endif 2732 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2733 /* Record IOC Status and Response Code of TMF for any waiters. 
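 * mpt_bus_reset() and other TMF issuers read these back out of
 * mpt->tmf_req once mpt_wait_req() reports the TMF as done.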
*/ 2734 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2735 req->ResponseCode = tmf_reply->ResponseCode; 2736 2737 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2738 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2739 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2740 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2741 req->state |= REQ_STATE_DONE; 2742 wakeup(req); 2743 } else { 2744 mpt->tmf_req->state = REQ_STATE_FREE; 2745 } 2746 return (TRUE); 2747 } 2748 2749 /* 2750 * XXX: Move to definitions file 2751 */ 2752 #define ELS 0x22 2753 #define FC4LS 0x32 2754 #define ABTS 0x81 2755 #define BA_ACC 0x84 2756 2757 #define LS_RJT 0x01 2758 #define LS_ACC 0x02 2759 #define PLOGI 0x03 2760 #define LOGO 0x05 2761 #define SRR 0x14 2762 #define PRLI 0x20 2763 #define PRLO 0x21 2764 #define ADISC 0x52 2765 #define RSCN 0x61 2766 2767 static void 2768 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2769 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2770 { 2771 uint32_t fl; 2772 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2773 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2774 2775 /* 2776 * We are going to reuse the ELS request to send this response back. 2777 */ 2778 rsp = &tmp; 2779 memset(rsp, 0, sizeof(*rsp)); 2780 2781 #ifdef USE_IMMEDIATE_LINK_DATA 2782 /* 2783 * Apparently the IMMEDIATE stuff doesn't seem to work. 2784 */ 2785 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2786 #endif 2787 rsp->RspLength = length; 2788 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2789 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2790 2791 /* 2792 * Copy over information from the original reply frame to 2793 * it's correct place in the response. 2794 */ 2795 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2796 2797 /* 2798 * And now copy back the temporary area to the original frame. 2799 */ 2800 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2801 rsp = req->req_vbuf; 2802 2803 #ifdef USE_IMMEDIATE_LINK_DATA 2804 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2805 #else 2806 { 2807 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2808 bus_addr_t paddr = req->req_pbuf; 2809 paddr += MPT_RQSL(mpt); 2810 2811 fl = 2812 MPI_SGE_FLAGS_HOST_TO_IOC | 2813 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2814 MPI_SGE_FLAGS_LAST_ELEMENT | 2815 MPI_SGE_FLAGS_END_OF_LIST | 2816 MPI_SGE_FLAGS_END_OF_BUFFER; 2817 fl <<= MPI_SGE_FLAGS_SHIFT; 2818 fl |= (length); 2819 se->FlagsLength = htole32(fl); 2820 se->Address = htole32((uint32_t) paddr); 2821 } 2822 #endif 2823 2824 /* 2825 * Send it on... 
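 * At this point rsp aliases req->req_vbuf again and its single SGE
 * describes the response payload placed MPT_RQSL(mpt) bytes into the
 * same request buffer; FlagsLength was built above as
 * (flags << MPI_SGE_FLAGS_SHIFT) | length.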
2826 */ 2827 mpt_send_cmd(mpt, req); 2828 } 2829 2830 static int 2831 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2832 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2833 { 2834 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2835 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2836 U8 rctl; 2837 U8 type; 2838 U8 cmd; 2839 U16 status = le16toh(reply_frame->IOCStatus); 2840 U32 *elsbuf; 2841 int ioindex; 2842 int do_refresh = TRUE; 2843 2844 #ifdef INVARIANTS 2845 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2846 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2847 req, req->serno, rp->Function)); 2848 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2849 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2850 } else { 2851 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2852 } 2853 #endif 2854 mpt_lprt(mpt, MPT_PRT_DEBUG, 2855 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2856 req, req->serno, reply_frame, reply_frame->Function); 2857 2858 if (status != MPI_IOCSTATUS_SUCCESS) { 2859 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2860 status, reply_frame->Function); 2861 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2862 /* 2863 * XXX: to get around shutdown issue 2864 */ 2865 mpt->disabled = 1; 2866 return (TRUE); 2867 } 2868 return (TRUE); 2869 } 2870 2871 /* 2872 * If the function of a link service response, we recycle the 2873 * response to be a refresh for a new link service request. 2874 * 2875 * The request pointer is bogus in this case and we have to fetch 2876 * it based upon the TransactionContext. 2877 */ 2878 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2879 /* Freddie Uncle Charlie Katie */ 2880 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2881 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2882 if (mpt->els_cmd_ptrs[ioindex] == req) { 2883 break; 2884 } 2885 2886 KASSERT(ioindex < mpt->els_cmds_allocated, 2887 ("can't find my mommie!")); 2888 2889 /* remove from active list as we're going to re-post it */ 2890 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2891 req->state &= ~REQ_STATE_QUEUED; 2892 req->state |= REQ_STATE_DONE; 2893 mpt_fc_post_els(mpt, req, ioindex); 2894 return (TRUE); 2895 } 2896 2897 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2898 /* remove from active list as we're done */ 2899 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2900 req->state &= ~REQ_STATE_QUEUED; 2901 req->state |= REQ_STATE_DONE; 2902 if (req->state & REQ_STATE_TIMEDOUT) { 2903 mpt_lprt(mpt, MPT_PRT_DEBUG, 2904 "Sync Primitive Send Completed After Timeout\n"); 2905 mpt_free_request(mpt, req); 2906 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2907 mpt_lprt(mpt, MPT_PRT_DEBUG, 2908 "Async Primitive Send Complete\n"); 2909 mpt_free_request(mpt, req); 2910 } else { 2911 mpt_lprt(mpt, MPT_PRT_DEBUG, 2912 "Sync Primitive Send Complete- Waking Waiter\n"); 2913 wakeup(req); 2914 } 2915 return (TRUE); 2916 } 2917 2918 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2919 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2920 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2921 rp->MsgLength, rp->MsgFlags); 2922 return (TRUE); 2923 } 2924 2925 if (rp->MsgLength <= 5) { 2926 /* 2927 * This is just a ack of an original ELS buffer post 2928 */ 2929 mpt_lprt(mpt, MPT_PRT_DEBUG, 2930 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2931 return (TRUE); 2932 } 2933 2934 2935 rctl = (le32toh(rp->Rctl_Did) & 
MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2936 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2937 2938 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2939 cmd = be32toh(elsbuf[0]) >> 24; 2940 2941 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2942 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2943 return (TRUE); 2944 } 2945 2946 ioindex = le32toh(rp->TransactionContext); 2947 req = mpt->els_cmd_ptrs[ioindex]; 2948 2949 if (rctl == ELS && type == 1) { 2950 switch (cmd) { 2951 case PRLI: 2952 /* 2953 * Send back a PRLI ACC 2954 */ 2955 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2956 le32toh(rp->Wwn.PortNameHigh), 2957 le32toh(rp->Wwn.PortNameLow)); 2958 elsbuf[0] = htobe32(0x02100014); 2959 elsbuf[1] |= htobe32(0x00000100); 2960 elsbuf[4] = htobe32(0x00000002); 2961 if (mpt->role & MPT_ROLE_TARGET) 2962 elsbuf[4] |= htobe32(0x00000010); 2963 if (mpt->role & MPT_ROLE_INITIATOR) 2964 elsbuf[4] |= htobe32(0x00000020); 2965 /* remove from active list as we're done */ 2966 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2967 req->state &= ~REQ_STATE_QUEUED; 2968 req->state |= REQ_STATE_DONE; 2969 mpt_fc_els_send_response(mpt, req, rp, 20); 2970 do_refresh = FALSE; 2971 break; 2972 case PRLO: 2973 memset(elsbuf, 0, 5 * (sizeof (U32))); 2974 elsbuf[0] = htobe32(0x02100014); 2975 elsbuf[1] = htobe32(0x08000100); 2976 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2977 le32toh(rp->Wwn.PortNameHigh), 2978 le32toh(rp->Wwn.PortNameLow)); 2979 /* remove from active list as we're done */ 2980 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2981 req->state &= ~REQ_STATE_QUEUED; 2982 req->state |= REQ_STATE_DONE; 2983 mpt_fc_els_send_response(mpt, req, rp, 20); 2984 do_refresh = FALSE; 2985 break; 2986 default: 2987 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2988 break; 2989 } 2990 } else if (rctl == ABTS && type == 0) { 2991 uint16_t rx_id = le16toh(rp->Rxid); 2992 uint16_t ox_id = le16toh(rp->Oxid); 2993 request_t *tgt_req = NULL; 2994 2995 mpt_prt(mpt, 2996 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2997 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2998 le32toh(rp->Wwn.PortNameLow)); 2999 if (rx_id >= mpt->mpt_max_tgtcmds) { 3000 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 3001 } else if (mpt->tgt_cmd_ptrs == NULL) { 3002 mpt_prt(mpt, "No TGT CMD PTRS\n"); 3003 } else { 3004 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 3005 } 3006 if (tgt_req) { 3007 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 3008 union ccb *ccb; 3009 uint32_t ct_id; 3010 3011 /* 3012 * Check to make sure we have the correct command 3013 * The reply descriptor in the target state should 3014 * should contain an IoIndex that should match the 3015 * RX_ID. 3016 * 3017 * It'd be nice to have OX_ID to crosscheck with 3018 * as well. 
3019 */ 3020 ct_id = GET_IO_INDEX(tgt->reply_desc); 3021 3022 if (ct_id != rx_id) { 3023 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 3024 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 3025 rx_id, ct_id); 3026 goto skip; 3027 } 3028 3029 ccb = tgt->ccb; 3030 if (ccb) { 3031 mpt_prt(mpt, 3032 "CCB (%p): lun %u flags %x status %x\n", 3033 ccb, ccb->ccb_h.target_lun, 3034 ccb->ccb_h.flags, ccb->ccb_h.status); 3035 } 3036 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 3037 "%x nxfers %x\n", tgt->state, 3038 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 3039 tgt->nxfers); 3040 skip: 3041 if (mpt_abort_target_cmd(mpt, tgt_req)) { 3042 mpt_prt(mpt, "unable to start TargetAbort\n"); 3043 } 3044 } else { 3045 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 3046 } 3047 memset(elsbuf, 0, 5 * (sizeof (U32))); 3048 elsbuf[0] = htobe32(0); 3049 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3050 elsbuf[2] = htobe32(0x000ffff); 3051 /* 3052 * Dork with the reply frame so that the response to it 3053 * will be correct. 3054 */ 3055 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3056 /* remove from active list as we're done */ 3057 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3058 req->state &= ~REQ_STATE_QUEUED; 3059 req->state |= REQ_STATE_DONE; 3060 mpt_fc_els_send_response(mpt, req, rp, 12); 3061 do_refresh = FALSE; 3062 } else { 3063 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3064 } 3065 if (do_refresh == TRUE) { 3066 /* remove from active list as we're done */ 3067 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3068 req->state &= ~REQ_STATE_QUEUED; 3069 req->state |= REQ_STATE_DONE; 3070 mpt_fc_post_els(mpt, req, ioindex); 3071 } 3072 return (TRUE); 3073 } 3074 3075 /* 3076 * Clean up all SCSI Initiator personality state in response 3077 * to a controller reset. 3078 */ 3079 static void 3080 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3081 { 3082 3083 /* 3084 * The pending list is already run down by 3085 * the generic handler. Perform the same 3086 * operation on the timed out request list. 3087 */ 3088 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3089 MPI_IOCSTATUS_INVALID_STATE); 3090 3091 /* 3092 * XXX: We need to repost ELS and Target Command Buffers? 3093 */ 3094 3095 /* 3096 * Inform the XPT that a bus reset has occurred. 3097 */ 3098 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3099 } 3100 3101 /* 3102 * Parse additional completion information in the reply 3103 * frame for SCSI I/O requests. 
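 *
 * This covers the residual (dxfer_len - TransferCount), copying back
 * autosense data when the IOC marks it valid, and mapping the
 * IOCStatus value onto a CAM status code.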
3104 */ 3105 static int 3106 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3107 MSG_DEFAULT_REPLY *reply_frame) 3108 { 3109 union ccb *ccb; 3110 MSG_SCSI_IO_REPLY *scsi_io_reply; 3111 u_int ioc_status; 3112 u_int sstate; 3113 3114 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3115 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3116 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3117 ("MPT SCSI I/O Handler called with incorrect reply type")); 3118 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3119 ("MPT SCSI I/O Handler called with continuation reply")); 3120 3121 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3122 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3123 ioc_status &= MPI_IOCSTATUS_MASK; 3124 sstate = scsi_io_reply->SCSIState; 3125 3126 ccb = req->ccb; 3127 ccb->csio.resid = 3128 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3129 3130 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3131 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3132 uint32_t sense_returned; 3133 3134 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3135 3136 sense_returned = le32toh(scsi_io_reply->SenseCount); 3137 if (sense_returned < ccb->csio.sense_len) 3138 ccb->csio.sense_resid = ccb->csio.sense_len - 3139 sense_returned; 3140 else 3141 ccb->csio.sense_resid = 0; 3142 3143 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3144 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3145 min(ccb->csio.sense_len, sense_returned)); 3146 } 3147 3148 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3149 /* 3150 * Tag messages rejected, but non-tagged retry 3151 * was successful. 3152 XXXX 3153 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3154 */ 3155 } 3156 3157 switch(ioc_status) { 3158 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3159 /* 3160 * XXX 3161 * Linux driver indicates that a zero 3162 * transfer length with this error code 3163 * indicates a CRC error. 3164 * 3165 * No need to swap the bytes for checking 3166 * against zero. 3167 */ 3168 if (scsi_io_reply->TransferCount == 0) { 3169 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3170 break; 3171 } 3172 /* FALLTHROUGH */ 3173 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3174 case MPI_IOCSTATUS_SUCCESS: 3175 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3176 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3177 /* 3178 * Status was never returned for this transaction. 3179 */ 3180 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3181 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3182 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3183 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3184 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3185 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3186 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3187 3188 /* XXX Handle SPI-Packet and FCP-2 response info. */ 3189 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3190 } else 3191 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3192 break; 3193 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3194 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3195 break; 3196 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3197 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3198 break; 3199 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3200 /* 3201 * Since selection timeouts and "device really not 3202 * there" are grouped into this error code, report 3203 * selection timeout. 
Selection timeouts are 3204 * typically retried before giving up on the device 3205 * whereas "device not there" errors are considered 3206 * unretryable. 3207 */ 3208 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3209 break; 3210 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3211 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3212 break; 3213 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3214 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3215 break; 3216 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3217 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3218 break; 3219 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3220 ccb->ccb_h.status = CAM_UA_TERMIO; 3221 break; 3222 case MPI_IOCSTATUS_INVALID_STATE: 3223 /* 3224 * The IOC has been reset. Emulate a bus reset. 3225 */ 3226 /* FALLTHROUGH */ 3227 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3228 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3229 break; 3230 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3231 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3232 /* 3233 * Don't clobber any timeout status that has 3234 * already been set for this transaction. We 3235 * want the SCSI layer to be able to differentiate 3236 * between the command we aborted due to timeout 3237 * and any innocent bystanders. 3238 */ 3239 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3240 break; 3241 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3242 break; 3243 3244 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3245 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3246 break; 3247 case MPI_IOCSTATUS_BUSY: 3248 mpt_set_ccb_status(ccb, CAM_BUSY); 3249 break; 3250 case MPI_IOCSTATUS_INVALID_FUNCTION: 3251 case MPI_IOCSTATUS_INVALID_SGL: 3252 case MPI_IOCSTATUS_INTERNAL_ERROR: 3253 case MPI_IOCSTATUS_INVALID_FIELD: 3254 default: 3255 /* XXX 3256 * Some of the above may need to kick 3257 * of a recovery action!!!! 3258 */ 3259 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3260 break; 3261 } 3262 3263 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3264 mpt_freeze_ccb(ccb); 3265 } 3266 3267 return (TRUE); 3268 } 3269 3270 static void 3271 mpt_action(struct cam_sim *sim, union ccb *ccb) 3272 { 3273 struct mpt_softc *mpt; 3274 struct ccb_trans_settings *cts; 3275 target_id_t tgt; 3276 lun_id_t lun; 3277 int raid_passthru; 3278 3279 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3280 3281 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3282 raid_passthru = (sim == mpt->phydisk_sim); 3283 MPT_LOCK_ASSERT(mpt); 3284 3285 tgt = ccb->ccb_h.target_id; 3286 lun = ccb->ccb_h.target_lun; 3287 if (raid_passthru && 3288 ccb->ccb_h.func_code != XPT_PATH_INQ && 3289 ccb->ccb_h.func_code != XPT_RESET_BUS && 3290 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3291 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3292 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3293 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3294 xpt_done(ccb); 3295 return; 3296 } 3297 } 3298 ccb->ccb_h.ccb_mpt_ptr = mpt; 3299 3300 switch (ccb->ccb_h.func_code) { 3301 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3302 /* 3303 * Do a couple of preliminary checks... 
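 *
 * A CDB passed by physical address (CAM_CDB_PHYS) is rejected, as is
 * any CDB longer than the 16 byte CDB field of MSG_SCSI_IO_REQUEST.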
3304 */ 3305 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3306 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3307 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3308 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3309 break; 3310 } 3311 } 3312 /* Max supported CDB length is 16 bytes */ 3313 /* XXX Unless we implement the new 32byte message type */ 3314 if (ccb->csio.cdb_len > 3315 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3316 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3317 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3318 break; 3319 } 3320 #ifdef MPT_TEST_MULTIPATH 3321 if (mpt->failure_id == ccb->ccb_h.target_id) { 3322 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3323 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3324 break; 3325 } 3326 #endif 3327 ccb->csio.scsi_status = SCSI_STATUS_OK; 3328 mpt_start(sim, ccb); 3329 return; 3330 3331 case XPT_RESET_BUS: 3332 if (raid_passthru) { 3333 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3334 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3335 break; 3336 } 3337 case XPT_RESET_DEV: 3338 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3339 if (bootverbose) { 3340 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3341 } 3342 } else { 3343 xpt_print(ccb->ccb_h.path, "reset device\n"); 3344 } 3345 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3346 3347 /* 3348 * mpt_bus_reset is always successful in that it 3349 * will fall back to a hard reset should a bus 3350 * reset attempt fail. 3351 */ 3352 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3353 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3354 break; 3355 3356 case XPT_ABORT: 3357 { 3358 union ccb *accb = ccb->cab.abort_ccb; 3359 switch (accb->ccb_h.func_code) { 3360 case XPT_ACCEPT_TARGET_IO: 3361 case XPT_IMMED_NOTIFY: 3362 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3363 break; 3364 case XPT_CONT_TARGET_IO: 3365 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3366 ccb->ccb_h.status = CAM_UA_ABORT; 3367 break; 3368 case XPT_SCSI_IO: 3369 ccb->ccb_h.status = CAM_UA_ABORT; 3370 break; 3371 default: 3372 ccb->ccb_h.status = CAM_REQ_INVALID; 3373 break; 3374 } 3375 break; 3376 } 3377 3378 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3379 #define DP_DISC_ENABLE 0x1 3380 #define DP_DISC_DISABL 0x2 3381 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3382 3383 #define DP_TQING_ENABLE 0x4 3384 #define DP_TQING_DISABL 0x8 3385 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3386 3387 #define DP_WIDE 0x10 3388 #define DP_NARROW 0x20 3389 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3390 3391 #define DP_SYNC 0x40 3392 3393 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3394 { 3395 struct ccb_trans_settings_scsi *scsi; 3396 struct ccb_trans_settings_spi *spi; 3397 uint8_t dval; 3398 u_int period; 3399 u_int offset; 3400 int i, j; 3401 3402 cts = &ccb->cts; 3403 3404 if (mpt->is_fc || mpt->is_sas) { 3405 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3406 break; 3407 } 3408 3409 scsi = &cts->proto_specific.scsi; 3410 spi = &cts->xport_specific.spi; 3411 3412 /* 3413 * We can be called just to valid transport and proto versions 3414 */ 3415 if (scsi->valid == 0 && spi->valid == 0) { 3416 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3417 break; 3418 } 3419 3420 /* 3421 * Skip attempting settings on RAID volume disks. 3422 * Other devices on the bus get the normal treatment. 
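 * Such requests are simply completed with CAM_REQ_CMP below; the
 * member disks can still be negotiated with individually through
 * the RAID passthru SIM.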
3423 */ 3424 if (mpt->phydisk_sim && raid_passthru == 0 && 3425 mpt_is_raid_volume(mpt, tgt) != 0) { 3426 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3427 "no transfer settings for RAID vols\n"); 3428 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3429 break; 3430 } 3431 3432 i = mpt->mpt_port_page2.PortSettings & 3433 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3434 j = mpt->mpt_port_page2.PortFlags & 3435 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3436 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3437 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3438 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3439 "honoring BIOS transfer negotiations\n"); 3440 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3441 break; 3442 } 3443 3444 dval = 0; 3445 period = 0; 3446 offset = 0; 3447 3448 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3449 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3450 DP_DISC_ENABLE : DP_DISC_DISABL; 3451 } 3452 3453 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3454 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3455 DP_TQING_ENABLE : DP_TQING_DISABL; 3456 } 3457 3458 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3459 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 3460 DP_WIDE : DP_NARROW; 3461 } 3462 3463 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3464 dval |= DP_SYNC; 3465 offset = spi->sync_offset; 3466 } else { 3467 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3468 &mpt->mpt_dev_page1[tgt]; 3469 offset = ptr->RequestedParameters; 3470 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3471 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3472 } 3473 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3474 dval |= DP_SYNC; 3475 period = spi->sync_period; 3476 } else { 3477 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3478 &mpt->mpt_dev_page1[tgt]; 3479 period = ptr->RequestedParameters; 3480 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3481 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3482 } 3483 if (dval & DP_DISC_ENABLE) { 3484 mpt->mpt_disc_enable |= (1 << tgt); 3485 } else if (dval & DP_DISC_DISABL) { 3486 mpt->mpt_disc_enable &= ~(1 << tgt); 3487 } 3488 if (dval & DP_TQING_ENABLE) { 3489 mpt->mpt_tag_enable |= (1 << tgt); 3490 } else if (dval & DP_TQING_DISABL) { 3491 mpt->mpt_tag_enable &= ~(1 << tgt); 3492 } 3493 if (dval & DP_WIDTH) { 3494 mpt_setwidth(mpt, tgt, 1); 3495 } 3496 if (dval & DP_SYNC) { 3497 mpt_setsync(mpt, tgt, period, offset); 3498 } 3499 if (dval == 0) { 3500 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3501 break; 3502 } 3503 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3504 "set [%d]: 0x%x period 0x%x offset %d\n", 3505 tgt, dval, period, offset); 3506 if (mpt_update_spi_config(mpt, tgt)) { 3507 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3508 } else { 3509 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3510 } 3511 break; 3512 } 3513 case XPT_GET_TRAN_SETTINGS: 3514 { 3515 struct ccb_trans_settings_scsi *scsi; 3516 cts = &ccb->cts; 3517 cts->protocol = PROTO_SCSI; 3518 if (mpt->is_fc) { 3519 struct ccb_trans_settings_fc *fc = 3520 &cts->xport_specific.fc; 3521 cts->protocol_version = SCSI_REV_SPC; 3522 cts->transport = XPORT_FC; 3523 cts->transport_version = 0; 3524 fc->valid = CTS_FC_VALID_SPEED; 3525 fc->bitrate = 100000; 3526 } else if (mpt->is_sas) { 3527 struct ccb_trans_settings_sas *sas = 3528 &cts->xport_specific.sas; 3529 cts->protocol_version = SCSI_REV_SPC2; 3530 cts->transport = XPORT_SAS; 3531 cts->transport_version = 0; 3532 sas->valid = CTS_SAS_VALID_SPEED; 3533 sas->bitrate = 300000; 3534 } else { 3535 cts->protocol_version = SCSI_REV_2; 3536 cts->transport = 
XPORT_SPI; 3537 cts->transport_version = 2; 3538 if (mpt_get_spi_settings(mpt, cts) != 0) { 3539 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3540 break; 3541 } 3542 } 3543 scsi = &cts->proto_specific.scsi; 3544 scsi->valid = CTS_SCSI_VALID_TQ; 3545 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3546 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3547 break; 3548 } 3549 case XPT_CALC_GEOMETRY: 3550 { 3551 struct ccb_calc_geometry *ccg; 3552 3553 ccg = &ccb->ccg; 3554 if (ccg->block_size == 0) { 3555 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3556 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3557 break; 3558 } 3559 cam_calc_geometry(ccg, /*extended*/1); 3560 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 3561 break; 3562 } 3563 case XPT_PATH_INQ: /* Path routing inquiry */ 3564 { 3565 struct ccb_pathinq *cpi = &ccb->cpi; 3566 3567 cpi->version_num = 1; 3568 cpi->target_sprt = 0; 3569 cpi->hba_eng_cnt = 0; 3570 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3571 #if 0 /* XXX swildner */ 3572 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; 3573 #endif 3574 /* 3575 * FC cards report MAX_DEVICES of 512, but 3576 * the MSG_SCSI_IO_REQUEST target id field 3577 * is only 8 bits. Until we fix the driver 3578 * to support 'channels' for bus overflow, 3579 * just limit it. 3580 */ 3581 if (cpi->max_target > 255) { 3582 cpi->max_target = 255; 3583 } 3584 3585 /* 3586 * VMware ESX reports > 16 devices and then dies when we probe. 3587 */ 3588 if (mpt->is_spi && cpi->max_target > 15) { 3589 cpi->max_target = 15; 3590 } 3591 if (mpt->is_spi) 3592 cpi->max_lun = 7; 3593 else 3594 cpi->max_lun = MPT_MAX_LUNS; 3595 cpi->initiator_id = mpt->mpt_ini_id; 3596 cpi->bus_id = cam_sim_bus(sim); 3597 3598 /* 3599 * The base speed is the speed of the underlying connection. 3600 */ 3601 cpi->protocol = PROTO_SCSI; 3602 if (mpt->is_fc) { 3603 cpi->hba_misc = PIM_NOBUSRESET; 3604 cpi->base_transfer_speed = 100000; 3605 cpi->hba_inquiry = PI_TAG_ABLE; 3606 cpi->transport = XPORT_FC; 3607 cpi->transport_version = 0; 3608 cpi->protocol_version = SCSI_REV_SPC; 3609 } else if (mpt->is_sas) { 3610 cpi->hba_misc = PIM_NOBUSRESET; 3611 cpi->base_transfer_speed = 300000; 3612 cpi->hba_inquiry = PI_TAG_ABLE; 3613 cpi->transport = XPORT_SAS; 3614 cpi->transport_version = 0; 3615 cpi->protocol_version = SCSI_REV_SPC2; 3616 } else { 3617 cpi->hba_misc = PIM_SEQSCAN; 3618 cpi->base_transfer_speed = 3300; 3619 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3620 cpi->transport = XPORT_SPI; 3621 cpi->transport_version = 2; 3622 cpi->protocol_version = SCSI_REV_2; 3623 } 3624 3625 /* 3626 * We give our fake RAID passhtru bus a width that is MaxVolumes 3627 * wide and restrict it to one lun. 
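 *
 * Below, max_target is clamped to ioc_page2->MaxPhysDisks - 1, the
 * initiator id is placed just past that range, and max_lun is
 * forced to zero.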
3628 */ 3629 if (raid_passthru) { 3630 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3631 cpi->initiator_id = cpi->max_target + 1; 3632 cpi->max_lun = 0; 3633 } 3634 3635 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3636 cpi->hba_misc |= PIM_NOINITIATOR; 3637 } 3638 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3639 cpi->target_sprt = 3640 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3641 } else { 3642 cpi->target_sprt = 0; 3643 } 3644 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3645 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3646 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3647 cpi->unit_number = cam_sim_unit(sim); 3648 cpi->ccb_h.status = CAM_REQ_CMP; 3649 break; 3650 } 3651 case XPT_EN_LUN: /* Enable LUN as a target */ 3652 { 3653 int result; 3654 3655 if (ccb->cel.enable) 3656 result = mpt_enable_lun(mpt, 3657 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3658 else 3659 result = mpt_disable_lun(mpt, 3660 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3661 if (result == 0) { 3662 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3663 } else { 3664 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3665 } 3666 break; 3667 } 3668 case XPT_NOTIFY_ACK: /* recycle notify ack */ 3669 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 3670 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3671 { 3672 tgt_resource_t *trtp; 3673 lun_id_t lun = ccb->ccb_h.target_lun; 3674 ccb->ccb_h.sim_priv.entries[0].field = 0; 3675 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3676 ccb->ccb_h.flags = 0; 3677 3678 if (lun == CAM_LUN_WILDCARD) { 3679 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3680 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3681 break; 3682 } 3683 trtp = &mpt->trt_wildcard; 3684 } else if (lun >= MPT_MAX_LUNS) { 3685 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3686 break; 3687 } else { 3688 trtp = &mpt->trt[lun]; 3689 } 3690 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3691 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3692 "Put FREE ATIO %p lun %d\n", ccb, lun); 3693 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3694 sim_links.stqe); 3695 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 3696 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3697 "Put FREE INOT lun %d\n", lun); 3698 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3699 sim_links.stqe); 3700 } else { 3701 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3702 } 3703 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3704 return; 3705 } 3706 case XPT_CONT_TARGET_IO: 3707 mpt_target_start_io(mpt, ccb); 3708 return; 3709 3710 default: 3711 ccb->ccb_h.status = CAM_REQ_INVALID; 3712 break; 3713 } 3714 xpt_done(ccb); 3715 } 3716 3717 static int 3718 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3719 { 3720 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3721 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3722 target_id_t tgt; 3723 uint32_t dval, pval, oval; 3724 int rv; 3725 3726 if (IS_CURRENT_SETTINGS(cts) == 0) { 3727 tgt = cts->ccb_h.target_id; 3728 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3729 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3730 return (-1); 3731 } 3732 } else { 3733 tgt = cts->ccb_h.target_id; 3734 } 3735 3736 /* 3737 * We aren't looking at Port Page 2 BIOS settings here- 3738 * sometimes these have been known to be bogus XXX. 3739 * 3740 * For user settings, we pick the max from port page 0 3741 * 3742 * For current settings we read the current settings out from 3743 * device page 0 for that target. 
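 *
 * The freshly read device page 0 is cached back into
 * mpt_dev_page0[tgt]; the disconnect and tag queueing bits come from
 * the driver's own mpt_disc_enable/mpt_tag_enable masks rather than
 * from the page itself.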
 */
	if (IS_CURRENT_SETTINGS(cts)) {
		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
		dval = 0;

		tmp = mpt->mpt_dev_page0[tgt];
		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
		    sizeof(tmp), FALSE, 5000);
		if (rv) {
			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
			return (rv);
		}
		mpt2host_config_page_scsi_device_0(&tmp);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
		    tmp.NegotiatedParameters, tmp.Information);
		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
		    DP_WIDE : DP_NARROW;
		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
		    DP_DISC_ENABLE : DP_DISC_DISABL;
		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
		    DP_TQING_ENABLE : DP_TQING_DISABL;
		oval = tmp.NegotiatedParameters;
		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
		pval = tmp.NegotiatedParameters;
		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
		mpt->mpt_dev_page0[tgt] = tmp;
	} else {
		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
		oval = mpt->mpt_port_page0.Capabilities;
		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
		pval = mpt->mpt_port_page0.Capabilities;
		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
	}

	spi->valid = 0;
	scsi->valid = 0;
	spi->flags = 0;
	scsi->flags = 0;
	spi->sync_offset = oval;
	spi->sync_period = pval;
	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
	if (dval & DP_WIDE) {
		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		if (dval & DP_TQING_ENABLE) {
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->valid |= CTS_SPI_VALID_DISC;
		if (dval & DP_DISC_ENABLE) {
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		}
	}
	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
	    IS_CURRENT_SETTINGS(cts) ?
"ACTIVE" : "NVRAM ", dval, pval, oval); 3809 return (0); 3810 } 3811 3812 static void 3813 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3814 { 3815 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3816 3817 ptr = &mpt->mpt_dev_page1[tgt]; 3818 if (onoff) { 3819 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3820 } else { 3821 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3822 } 3823 } 3824 3825 static void 3826 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3827 { 3828 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3829 3830 ptr = &mpt->mpt_dev_page1[tgt]; 3831 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3832 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3833 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3834 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3835 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3836 if (period == 0) { 3837 return; 3838 } 3839 ptr->RequestedParameters |= 3840 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3841 ptr->RequestedParameters |= 3842 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3843 if (period < 0xa) { 3844 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3845 } 3846 if (period < 0x9) { 3847 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3848 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3849 } 3850 } 3851 3852 static int 3853 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3854 { 3855 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3856 int rv; 3857 3858 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3859 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3860 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3861 tmp = mpt->mpt_dev_page1[tgt]; 3862 host2mpt_config_page_scsi_device_1(&tmp); 3863 rv = mpt_write_cur_cfg_page(mpt, tgt, 3864 &tmp.Header, sizeof(tmp), FALSE, 5000); 3865 if (rv) { 3866 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3867 return (-1); 3868 } 3869 return (0); 3870 } 3871 3872 /****************************** Timeout Recovery ******************************/ 3873 static int 3874 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3875 { 3876 int error; 3877 3878 error = kthread_create(mpt_recovery_thread, mpt, 3879 &mpt->recovery_thread, "mpt_recovery%d", mpt->unit); 3880 return (error); 3881 } 3882 3883 static void 3884 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3885 { 3886 3887 if (mpt->recovery_thread == NULL) { 3888 return; 3889 } 3890 mpt->shutdwn_recovery = 1; 3891 wakeup(mpt); 3892 /* 3893 * Sleep on a slightly different location 3894 * for this interlock just for added safety. 3895 */ 3896 mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0); 3897 } 3898 3899 static void 3900 mpt_recovery_thread(void *arg) 3901 { 3902 struct mpt_softc *mpt; 3903 3904 mpt = (struct mpt_softc *)arg; 3905 MPT_LOCK(mpt); 3906 for (;;) { 3907 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3908 if (mpt->shutdwn_recovery == 0) { 3909 mpt_sleep(mpt, mpt, 0, "idle", 0); 3910 } 3911 } 3912 if (mpt->shutdwn_recovery != 0) { 3913 break; 3914 } 3915 mpt_recover_commands(mpt); 3916 } 3917 mpt->recovery_thread = NULL; 3918 wakeup(&mpt->recovery_thread); 3919 MPT_UNLOCK(mpt); 3920 kthread_exit(); 3921 } 3922 3923 static int 3924 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 3925 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 3926 { 3927 MSG_SCSI_TASK_MGMT *tmf_req; 3928 int error; 3929 3930 /* 3931 * Wait for any current TMF request to complete. 
3932 * We're only allowed to issue one TMF at a time. 3933 */ 3934 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 3935 sleep_ok, MPT_TMF_MAX_TIMEOUT); 3936 if (error != 0) { 3937 mpt_reset(mpt, TRUE); 3938 return (ETIMEDOUT); 3939 } 3940 3941 mpt_assign_serno(mpt, mpt->tmf_req); 3942 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 3943 3944 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 3945 memset(tmf_req, 0, sizeof(*tmf_req)); 3946 tmf_req->TargetID = target; 3947 tmf_req->Bus = channel; 3948 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 3949 tmf_req->TaskType = type; 3950 tmf_req->MsgFlags = flags; 3951 tmf_req->MsgContext = 3952 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 3953 if (lun > MPT_MAX_LUNS) { 3954 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 3955 tmf_req->LUN[1] = lun & 0xff; 3956 } else { 3957 tmf_req->LUN[1] = lun; 3958 } 3959 tmf_req->TaskMsgContext = abort_ctx; 3960 3961 mpt_lprt(mpt, MPT_PRT_DEBUG, 3962 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 3963 mpt->tmf_req->serno, tmf_req->MsgContext); 3964 if (mpt->verbose > MPT_PRT_DEBUG) { 3965 mpt_print_request(tmf_req); 3966 } 3967 3968 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 3969 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 3970 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 3971 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 3972 if (error != MPT_OK) { 3973 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 3974 mpt->tmf_req->state = REQ_STATE_FREE; 3975 mpt_reset(mpt, TRUE); 3976 } 3977 return (error); 3978 } 3979 3980 /* 3981 * When a command times out, it is placed on the request_timeout_list 3982 * and we wake our recovery thread. The MPT-Fusion architecture supports 3983 * only a single TMF operation at a time, so we serially abort/bdr, etc., 3984 * the timedout transactions. The next TMF is issued either by the 3985 * completion handler of the current TMF waking our recovery thread, 3986 * or the TMF timeout handler causing a hard reset sequence. 3987 */ 3988 static void 3989 mpt_recover_commands(struct mpt_softc *mpt) 3990 { 3991 request_t *req; 3992 union ccb *ccb; 3993 int error; 3994 3995 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3996 /* 3997 * No work to do- leave. 3998 */ 3999 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4000 return; 4001 } 4002 4003 /* 4004 * Flush any commands whose completion coincides with their timeout. 4005 */ 4006 mpt_intr(mpt); 4007 4008 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4009 /* 4010 * The timedout commands have already 4011 * completed. This typically means 4012 * that either the timeout value was on 4013 * the hairy edge of what the device 4014 * requires or - more likely - interrupts 4015 * are not happening. 4016 */ 4017 mpt_prt(mpt, "Timedout requests already complete. " 4018 "Interrupts may not be functioning.\n"); 4019 mpt_enable_ints(mpt); 4020 return; 4021 } 4022 4023 /* 4024 * We have no visibility into the current state of the 4025 * controller, so attempt to abort the commands in the 4026 * order they timed-out. For initiator commands, we 4027 * depend on the reply handler pulling requests off 4028 * the timeout list.
4029 */ 4030 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4031 uint16_t status; 4032 uint8_t response; 4033 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4034 4035 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4036 req, req->serno, hdrp->Function); 4037 ccb = req->ccb; 4038 if (ccb == NULL) { 4039 mpt_prt(mpt, "null ccb in timed out request. " 4040 "Resetting Controller.\n"); 4041 mpt_reset(mpt, TRUE); 4042 continue; 4043 } 4044 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4045 4046 /* 4047 * If this is not an initiator command, deal with it 4048 * differently; only initiator I/O is aborted via TMF here. 4049 */ 4050 switch (hdrp->Function) { 4051 case MPI_FUNCTION_SCSI_IO_REQUEST: 4052 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4053 break; 4054 default: 4055 /* 4056 * XXX: FIX ME: need to abort target assists... 4057 */ 4058 mpt_prt(mpt, "just putting it back on the pend q\n"); 4059 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4060 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4061 links); 4062 continue; 4063 } 4064 4065 error = mpt_scsi_send_tmf(mpt, 4066 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4067 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4068 htole32(req->index | scsi_io_handler_id), TRUE); 4069 4070 if (error != 0) { 4071 /* 4072 * mpt_scsi_send_tmf hard resets on failure, so no 4073 * need to do so here. Our queue should be emptied 4074 * by the hard reset. 4075 */ 4076 continue; 4077 } 4078 4079 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4080 REQ_STATE_DONE, TRUE, 500); 4081 4082 status = le16toh(mpt->tmf_req->IOCStatus); 4083 response = mpt->tmf_req->ResponseCode; 4084 mpt->tmf_req->state = REQ_STATE_FREE; 4085 4086 if (error != 0) { 4087 /* 4088 * If we've errored out, reset the controller. 4089 */ 4090 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4091 "Resetting controller\n"); 4092 mpt_reset(mpt, TRUE); 4093 continue; 4094 } 4095 4096 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4097 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4098 "Resetting controller.\n", status); 4099 mpt_reset(mpt, TRUE); 4100 continue; 4101 } 4102 4103 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4104 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4105 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " 4106 "Resetting controller.\n", response); 4107 mpt_reset(mpt, TRUE); 4108 continue; 4109 } 4110 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4111 } 4112 } 4113 4114 /************************ Target Mode Support ****************************/ 4115 static void 4116 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4117 { 4118 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4119 PTR_SGE_TRANSACTION32 tep; 4120 PTR_SGE_SIMPLE32 se; 4121 bus_addr_t paddr; 4122 uint32_t fl; 4123 4124 paddr = req->req_pbuf; 4125 paddr += MPT_RQSL(mpt); 4126 4127 fc = req->req_vbuf; 4128 memset(fc, 0, MPT_REQUEST_AREA); 4129 fc->BufferCount = 1; 4130 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4131 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4132 4133 /* 4134 * Okay, set up ELS buffer pointers. ELS buffer pointers 4135 * consist of a TE SGL element (with details length of zero) 4136 * followed by a SIMPLE SGL element which holds the address 4137 * of the buffer.
4138 */ 4139 4140 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4141 4142 tep->ContextSize = 4; 4143 tep->Flags = 0; 4144 tep->TransactionContext[0] = htole32(ioindex); 4145 4146 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4147 fl = 4148 MPI_SGE_FLAGS_HOST_TO_IOC | 4149 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4150 MPI_SGE_FLAGS_LAST_ELEMENT | 4151 MPI_SGE_FLAGS_END_OF_LIST | 4152 MPI_SGE_FLAGS_END_OF_BUFFER; 4153 fl <<= MPI_SGE_FLAGS_SHIFT; 4154 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4155 se->FlagsLength = htole32(fl); 4156 se->Address = htole32((uint32_t) paddr); 4157 mpt_lprt(mpt, MPT_PRT_DEBUG, 4158 "add ELS index %d ioindex %d for %p:%u\n", 4159 req->index, ioindex, req, req->serno); 4160 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4161 ("mpt_fc_post_els: request not locked")); 4162 mpt_send_cmd(mpt, req); 4163 } 4164 4165 static void 4166 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4167 { 4168 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4169 PTR_CMD_BUFFER_DESCRIPTOR cb; 4170 bus_addr_t paddr; 4171 4172 paddr = req->req_pbuf; 4173 paddr += MPT_RQSL(mpt); 4174 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4175 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4176 4177 fc = req->req_vbuf; 4178 fc->BufferCount = 1; 4179 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4180 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4181 4182 cb = &fc->Buffer[0]; 4183 cb->IoIndex = htole16(ioindex); 4184 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4185 4186 mpt_check_doorbell(mpt); 4187 mpt_send_cmd(mpt, req); 4188 } 4189 4190 static int 4191 mpt_add_els_buffers(struct mpt_softc *mpt) 4192 { 4193 int i; 4194 4195 if (mpt->is_fc == 0) { 4196 return (TRUE); 4197 } 4198 4199 if (mpt->els_cmds_allocated) { 4200 return (TRUE); 4201 } 4202 4203 mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *), 4204 M_DEVBUF, M_NOWAIT | M_ZERO); 4205 4206 if (mpt->els_cmd_ptrs == NULL) { 4207 return (FALSE); 4208 } 4209 4210 /* 4211 * Feed the chip some ELS buffer resources 4212 */ 4213 for (i = 0; i < MPT_MAX_ELS; i++) { 4214 request_t *req = mpt_get_request(mpt, FALSE); 4215 if (req == NULL) { 4216 break; 4217 } 4218 req->state |= REQ_STATE_LOCKED; 4219 mpt->els_cmd_ptrs[i] = req; 4220 mpt_fc_post_els(mpt, req, i); 4221 } 4222 4223 if (i == 0) { 4224 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4225 kfree(mpt->els_cmd_ptrs, M_DEVBUF); 4226 mpt->els_cmd_ptrs = NULL; 4227 return (FALSE); 4228 } 4229 if (i != MPT_MAX_ELS) { 4230 mpt_lprt(mpt, MPT_PRT_INFO, 4231 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4232 } 4233 mpt->els_cmds_allocated = i; 4234 return(TRUE); 4235 } 4236 4237 static int 4238 mpt_add_target_commands(struct mpt_softc *mpt) 4239 { 4240 int i, max; 4241 4242 if (mpt->tgt_cmd_ptrs) { 4243 return (TRUE); 4244 } 4245 4246 max = MPT_MAX_REQUESTS(mpt) >> 1; 4247 if (max > mpt->mpt_max_tgtcmds) { 4248 max = mpt->mpt_max_tgtcmds; 4249 } 4250 mpt->tgt_cmd_ptrs = 4251 kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4252 if (mpt->tgt_cmd_ptrs == NULL) { 4253 mpt_prt(mpt, 4254 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4255 return (FALSE); 4256 } 4257 4258 for (i = 0; i < max; i++) { 4259 request_t *req; 4260 4261 req = mpt_get_request(mpt, FALSE); 4262 if (req == NULL) { 4263 break; 4264 } 4265 req->state |= REQ_STATE_LOCKED; 4266 mpt->tgt_cmd_ptrs[i] = req; 4267 mpt_post_target_command(mpt, req, i); 4268 } 4269 4270 4271 if (i == 0) { 4272 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target 
bufs\n"); 4273 kfree(mpt->tgt_cmd_ptrs, M_DEVBUF); 4274 mpt->tgt_cmd_ptrs = NULL; 4275 return (FALSE); 4276 } 4277 4278 mpt->tgt_cmds_allocated = i; 4279 4280 if (i < max) { 4281 mpt_lprt(mpt, MPT_PRT_INFO, 4282 "added %d of %d target bufs\n", i, max); 4283 } 4284 return (i); 4285 } 4286 4287 static int 4288 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4289 { 4290 4291 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4292 mpt->twildcard = 1; 4293 } else if (lun >= MPT_MAX_LUNS) { 4294 return (EINVAL); 4295 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4296 return (EINVAL); 4297 } 4298 if (mpt->tenabled == 0) { 4299 if (mpt->is_fc) { 4300 (void) mpt_fc_reset_link(mpt, 0); 4301 } 4302 mpt->tenabled = 1; 4303 } 4304 if (lun == CAM_LUN_WILDCARD) { 4305 mpt->trt_wildcard.enabled = 1; 4306 } else { 4307 mpt->trt[lun].enabled = 1; 4308 } 4309 return (0); 4310 } 4311 4312 static int 4313 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4314 { 4315 int i; 4316 4317 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4318 mpt->twildcard = 0; 4319 } else if (lun >= MPT_MAX_LUNS) { 4320 return (EINVAL); 4321 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4322 return (EINVAL); 4323 } 4324 if (lun == CAM_LUN_WILDCARD) { 4325 mpt->trt_wildcard.enabled = 0; 4326 } else { 4327 mpt->trt[lun].enabled = 0; 4328 } 4329 for (i = 0; i < MPT_MAX_LUNS; i++) { 4330 if (mpt->trt[i].enabled) { 4331 break; 4332 } 4333 } 4334 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4335 if (mpt->is_fc) { 4336 (void) mpt_fc_reset_link(mpt, 0); 4337 } 4338 mpt->tenabled = 0; 4339 } 4340 return (0); 4341 } 4342 4343 /* 4344 * Called with MPT lock held 4345 */ 4346 static void 4347 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4348 { 4349 struct ccb_scsiio *csio = &ccb->csio; 4350 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4351 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4352 4353 switch (tgt->state) { 4354 case TGT_STATE_IN_CAM: 4355 break; 4356 case TGT_STATE_MOVING_DATA: 4357 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4358 xpt_freeze_simq(mpt->sim, 1); 4359 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4360 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4361 xpt_done(ccb); 4362 return; 4363 default: 4364 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4365 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4366 mpt_tgt_dump_req_state(mpt, cmd_req); 4367 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 4368 xpt_done(ccb); 4369 return; 4370 } 4371 4372 if (csio->dxfer_len) { 4373 bus_dmamap_callback_t *cb; 4374 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4375 request_t *req; 4376 4377 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4378 ("dxfer_len %u but direction is NONE", csio->dxfer_len)); 4379 4380 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4381 if (mpt->outofbeer == 0) { 4382 mpt->outofbeer = 1; 4383 xpt_freeze_simq(mpt->sim, 1); 4384 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4385 } 4386 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4387 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4388 xpt_done(ccb); 4389 return; 4390 } 4391 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4392 if (sizeof (bus_addr_t) > 4) { 4393 cb = mpt_execute_req_a64; 4394 } else { 4395 cb = mpt_execute_req; 4396 } 4397 4398 req->ccb = ccb; 4399 ccb->ccb_h.ccb_req_ptr = req; 4400 4401 /* 4402 * Record the currently active ccb and the 4403 * request for it in our target state area.
4404 */ 4405 tgt->ccb = ccb; 4406 tgt->req = req; 4407 4408 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4409 ta = req->req_vbuf; 4410 4411 if (mpt->is_sas) { 4412 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4413 cmd_req->req_vbuf; 4414 ta->QueueTag = ssp->InitiatorTag; 4415 } else if (mpt->is_spi) { 4416 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4417 cmd_req->req_vbuf; 4418 ta->QueueTag = sp->Tag; 4419 } 4420 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4421 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4422 ta->ReplyWord = htole32(tgt->reply_desc); 4423 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4424 ta->LUN[0] = 4425 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4426 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4427 } else { 4428 ta->LUN[1] = csio->ccb_h.target_lun; 4429 } 4430 4431 ta->RelativeOffset = tgt->bytes_xfered; 4432 ta->DataLength = ccb->csio.dxfer_len; 4433 if (ta->DataLength > tgt->resid) { 4434 ta->DataLength = tgt->resid; 4435 } 4436 4437 /* 4438 * XXX Should be done after data transfer completes? 4439 */ 4440 tgt->resid -= csio->dxfer_len; 4441 tgt->bytes_xfered += csio->dxfer_len; 4442 4443 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4444 ta->TargetAssistFlags |= 4445 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4446 } 4447 4448 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4449 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4450 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4451 ta->TargetAssistFlags |= 4452 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4453 } 4454 #endif 4455 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4456 4457 mpt_lprt(mpt, MPT_PRT_DEBUG, 4458 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4459 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4460 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4461 4462 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4463 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4464 int error; 4465 crit_enter(); 4466 error = bus_dmamap_load(mpt->buffer_dmat, 4467 req->dmap, csio->data_ptr, csio->dxfer_len, 4468 cb, req, 0); 4469 crit_exit(); 4470 if (error == EINPROGRESS) { 4471 xpt_freeze_simq(mpt->sim, 1); 4472 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4473 } 4474 } else { 4475 /* 4476 * We have been given a pointer to single 4477 * physical buffer. 4478 */ 4479 struct bus_dma_segment seg; 4480 seg.ds_addr = (bus_addr_t) 4481 (vm_offset_t)csio->data_ptr; 4482 seg.ds_len = csio->dxfer_len; 4483 (*cb)(req, &seg, 1, 0); 4484 } 4485 } else { 4486 /* 4487 * We have been given a list of addresses. 4488 * This case could be easily supported but they are not 4489 * currently generated by the CAM subsystem so there 4490 * is no point in wasting the time right now. 4491 */ 4492 struct bus_dma_segment *sgs; 4493 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4494 (*cb)(req, NULL, 0, EFAULT); 4495 } else { 4496 /* Just use the segments provided */ 4497 sgs = (struct bus_dma_segment *)csio->data_ptr; 4498 (*cb)(req, sgs, csio->sglist_cnt, 0); 4499 } 4500 } 4501 } else { 4502 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4503 4504 /* 4505 * XXX: I don't know why this seems to happen, but 4506 * XXX: completing the CCB seems to make things happy. 4507 * XXX: This seems to happen if the initiator requests 4508 * XXX: enough data that we have to do multiple CTIOs. 
4509 */ 4510 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4511 mpt_lprt(mpt, MPT_PRT_DEBUG, 4512 "Meaningless STATUS CCB (%p): flags %x status %x " 4513 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4514 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4515 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4516 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4517 xpt_done(ccb); 4518 return; 4519 } 4520 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4521 sp = sense; 4522 memcpy(sp, &csio->sense_data, 4523 min(csio->sense_len, MPT_SENSE_SIZE)); 4524 } 4525 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4526 } 4527 } 4528 4529 static void 4530 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4531 uint32_t lun, int send, uint8_t *data, size_t length) 4532 { 4533 mpt_tgt_state_t *tgt; 4534 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4535 SGE_SIMPLE32 *se; 4536 uint32_t flags; 4537 uint8_t *dptr; 4538 bus_addr_t pptr; 4539 request_t *req; 4540 4541 /* 4542 * We enter with resid set to the data load for the command. 4543 */ 4544 tgt = MPT_TGT_STATE(mpt, cmd_req); 4545 if (length == 0 || tgt->resid == 0) { 4546 tgt->resid = 0; 4547 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4548 return; 4549 } 4550 4551 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4552 mpt_prt(mpt, "out of resources- dropping local response\n"); 4553 return; 4554 } 4555 tgt->is_local = 1; 4556 4557 4558 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4559 ta = req->req_vbuf; 4560 4561 if (mpt->is_sas) { 4562 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4563 ta->QueueTag = ssp->InitiatorTag; 4564 } else if (mpt->is_spi) { 4565 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4566 ta->QueueTag = sp->Tag; 4567 } 4568 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4569 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4570 ta->ReplyWord = htole32(tgt->reply_desc); 4571 if (lun > MPT_MAX_LUNS) { 4572 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4573 ta->LUN[1] = lun & 0xff; 4574 } else { 4575 ta->LUN[1] = lun; 4576 } 4577 ta->RelativeOffset = 0; 4578 ta->DataLength = length; 4579 4580 dptr = req->req_vbuf; 4581 dptr += MPT_RQSL(mpt); 4582 pptr = req->req_pbuf; 4583 pptr += MPT_RQSL(mpt); 4584 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4585 4586 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4587 memset(se, 0,sizeof (*se)); 4588 4589 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4590 if (send) { 4591 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4592 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4593 } 4594 se->Address = pptr; 4595 MPI_pSGE_SET_LENGTH(se, length); 4596 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4597 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4598 MPI_pSGE_SET_FLAGS(se, flags); 4599 4600 tgt->ccb = NULL; 4601 tgt->req = req; 4602 tgt->resid -= length; 4603 tgt->bytes_xfered = length; 4604 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4605 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4606 #else 4607 tgt->state = TGT_STATE_MOVING_DATA; 4608 #endif 4609 mpt_send_cmd(mpt, req); 4610 } 4611 4612 /* 4613 * Abort queued up CCBs 4614 */ 4615 static cam_status 4616 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4617 { 4618 struct mpt_hdr_stailq *lp; 4619 struct ccb_hdr *srch; 4620 int found = 0; 4621 union ccb *accb = ccb->cab.abort_ccb; 4622 tgt_resource_t *trtp; 4623 4624 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4625 4626 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4627 trtp = &mpt->trt_wildcard; 4628 } else { 4629 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4630 } 
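	/*
	 * Note on the search below: a CCB we can abort here is one of the
	 * ATIO or INOT resources previously queued on this lun's (or the
	 * wildcard's) free list. If the CCB is found on the appropriate
	 * list it is unlinked, marked CAM_REQ_ABORTED and completed back
	 * to CAM; otherwise CAM_PATH_INVALID is returned to the caller.
	 */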
4631 4632 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4633 lp = &trtp->atios; 4634 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4635 lp = &trtp->inots; 4636 } else { 4637 return (CAM_REQ_INVALID); 4638 } 4639 4640 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4641 if (srch == &accb->ccb_h) { 4642 found = 1; 4643 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4644 break; 4645 } 4646 } 4647 if (found) { 4648 accb->ccb_h.status = CAM_REQ_ABORTED; 4649 xpt_done(accb); 4650 return (CAM_REQ_CMP); 4651 } 4652 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4653 return (CAM_PATH_INVALID); 4654 } 4655 4656 /* 4657 * Ask the MPT to abort the current target command 4658 */ 4659 static int 4660 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4661 { 4662 int error; 4663 request_t *req; 4664 PTR_MSG_TARGET_MODE_ABORT abtp; 4665 4666 req = mpt_get_request(mpt, FALSE); 4667 if (req == NULL) { 4668 return (-1); 4669 } 4670 abtp = req->req_vbuf; 4671 memset(abtp, 0, sizeof (*abtp)); 4672 4673 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4674 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4675 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4676 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4677 error = 0; 4678 if (mpt->is_fc || mpt->is_sas) { 4679 mpt_send_cmd(mpt, req); 4680 } else { 4681 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4682 } 4683 return (error); 4684 } 4685 4686 /* 4687 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4688 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4689 * FC929 to set bogus FC_RSP fields (nonzero residuals 4690 * but w/o RESID fields set). This causes QLogic initiators 4691 * to think maybe that a frame was lost. 4692 * 4693 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4694 * we use allocated requests to do TARGET_ASSIST and we 4695 * need to know when to release them. 4696 */ 4697 4698 static void 4699 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4700 uint8_t status, uint8_t const *sense_data) 4701 { 4702 uint8_t *cmd_vbuf; 4703 mpt_tgt_state_t *tgt; 4704 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4705 request_t *req; 4706 bus_addr_t paddr; 4707 int resplen = 0; 4708 uint32_t fl; 4709 4710 cmd_vbuf = cmd_req->req_vbuf; 4711 cmd_vbuf += MPT_RQSL(mpt); 4712 tgt = MPT_TGT_STATE(mpt, cmd_req); 4713 4714 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4715 if (mpt->outofbeer == 0) { 4716 mpt->outofbeer = 1; 4717 xpt_freeze_simq(mpt->sim, 1); 4718 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4719 } 4720 if (ccb) { 4721 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4722 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4723 xpt_done(ccb); 4724 } else { 4725 mpt_prt(mpt, 4726 "could not allocate status request- dropping\n"); 4727 } 4728 return; 4729 } 4730 req->ccb = ccb; 4731 if (ccb) { 4732 ccb->ccb_h.ccb_mpt_ptr = mpt; 4733 ccb->ccb_h.ccb_req_ptr = req; 4734 } 4735 4736 /* 4737 * Record the currently active ccb, if any, and the 4738 * request for it in our target state area. 
4739 */ 4740 tgt->ccb = ccb; 4741 tgt->req = req; 4742 tgt->state = TGT_STATE_SENDING_STATUS; 4743 4744 tp = req->req_vbuf; 4745 paddr = req->req_pbuf; 4746 paddr += MPT_RQSL(mpt); 4747 4748 memset(tp, 0, sizeof (*tp)); 4749 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4750 if (mpt->is_fc) { 4751 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4752 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4753 uint8_t *sts_vbuf; 4754 uint32_t *rsp; 4755 4756 sts_vbuf = req->req_vbuf; 4757 sts_vbuf += MPT_RQSL(mpt); 4758 rsp = (uint32_t *) sts_vbuf; 4759 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4760 4761 /* 4762 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4763 * It has to be big-endian in memory and is organized 4764 * in 32 bit words, which are much easier to deal with 4765 * as words which are swizzled as needed. 4766 * 4767 * All we're filling here is the FC_RSP payload. 4768 * We may just have the chip synthesize it if 4769 * we have no residual and an OK status. 4770 * 4771 */ 4772 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4773 4774 rsp[2] = status; 4775 if (tgt->resid) { 4776 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4777 rsp[3] = htobe32(tgt->resid); 4778 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4779 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4780 #endif 4781 } 4782 if (status == SCSI_STATUS_CHECK_COND) { 4783 int i; 4784 4785 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! */ 4786 rsp[4] = htobe32(MPT_SENSE_SIZE); 4787 if (sense_data) { 4788 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4789 } else { 4790 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4791 "TION but no sense data?\n"); 4792 memset(&rsp[8], 0, MPT_SENSE_SIZE); 4793 } 4794 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4795 rsp[i] = htobe32(rsp[i]); 4796 } 4797 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4798 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4799 #endif 4800 } 4801 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4802 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4803 #endif 4804 rsp[2] = htobe32(rsp[2]); 4805 } else if (mpt->is_sas) { 4806 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4807 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4808 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4809 } else { 4810 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4811 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4812 tp->StatusCode = status; 4813 tp->QueueTag = htole16(sp->Tag); 4814 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4815 } 4816 4817 tp->ReplyWord = htole32(tgt->reply_desc); 4818 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4819 4820 #ifdef WE_CAN_USE_AUTO_REPOST 4821 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4822 #endif 4823 if (status == SCSI_STATUS_OK && resplen == 0) { 4824 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4825 } else { 4826 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4827 fl = 4828 MPI_SGE_FLAGS_HOST_TO_IOC | 4829 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4830 MPI_SGE_FLAGS_LAST_ELEMENT | 4831 MPI_SGE_FLAGS_END_OF_LIST | 4832 MPI_SGE_FLAGS_END_OF_BUFFER; 4833 fl <<= MPI_SGE_FLAGS_SHIFT; 4834 fl |= resplen; 4835 tp->StatusDataSGE.FlagsLength = htole32(fl); 4836 } 4837 4838 mpt_lprt(mpt, MPT_PRT_DEBUG, 4839 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4840 ccb, sense_data?"h" : "hout", ccb?
ccb->csio.tag_id : -1, req, 4841 req->serno, tgt->resid); 4842 if (ccb) { 4843 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4844 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 4845 } 4846 mpt_send_cmd(mpt, req); 4847 } 4848 4849 static void 4850 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4851 tgt_resource_t *trtp, int init_id) 4852 { 4853 struct ccb_immed_notify *inot; 4854 mpt_tgt_state_t *tgt; 4855 4856 tgt = MPT_TGT_STATE(mpt, req); 4857 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 4858 if (inot == NULL) { 4859 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); 4860 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4861 return; 4862 } 4863 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4864 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4865 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4866 4867 memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 4868 inot->sense_len = 0; 4869 memset(inot->message_args, 0, sizeof (inot->message_args)); 4870 inot->initiator_id = init_id; /* XXX */ 4871 4872 /* 4873 * This is a somewhat grotesque attempt to map from task management 4874 * to old style SCSI messages. God help us all. 4875 */ 4876 switch (fc) { 4877 case MPT_ABORT_TASK_SET: 4878 inot->message_args[0] = MSG_ABORT_TAG; 4879 break; 4880 case MPT_CLEAR_TASK_SET: 4881 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4882 break; 4883 case MPT_TARGET_RESET: 4884 inot->message_args[0] = MSG_TARGET_RESET; 4885 break; 4886 case MPT_CLEAR_ACA: 4887 inot->message_args[0] = MSG_CLEAR_ACA; 4888 break; 4889 case MPT_TERMINATE_TASK: 4890 inot->message_args[0] = MSG_ABORT_TAG; 4891 break; 4892 default: 4893 inot->message_args[0] = MSG_NOOP; 4894 break; 4895 } 4896 tgt->ccb = (union ccb *) inot; 4897 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4898 xpt_done((union ccb *)inot); 4899 } 4900 4901 static void 4902 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4903 { 4904 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4905 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4906 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4907 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4908 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4909 '0', '0', '0', '1' 4910 }; 4911 struct ccb_accept_tio *atiop; 4912 lun_id_t lun; 4913 int tag_action = 0; 4914 mpt_tgt_state_t *tgt; 4915 tgt_resource_t *trtp = NULL; 4916 U8 *lunptr; 4917 U8 *vbuf; 4918 U16 itag; 4919 U16 ioindex; 4920 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 4921 uint8_t *cdbp; 4922 4923 /* 4924 * Stash info for the current command where we can get at it later. 4925 */ 4926 vbuf = req->req_vbuf; 4927 vbuf += MPT_RQSL(mpt); 4928 4929 /* 4930 * Get our state pointer set up. 
4931 */ 4932 tgt = MPT_TGT_STATE(mpt, req); 4933 if (tgt->state != TGT_STATE_LOADED) { 4934 mpt_tgt_dump_req_state(mpt, req); 4935 panic("bad target state in mpt_scsi_tgt_atio"); 4936 } 4937 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 4938 tgt->state = TGT_STATE_IN_CAM; 4939 tgt->reply_desc = reply_desc; 4940 ioindex = GET_IO_INDEX(reply_desc); 4941 if (mpt->verbose >= MPT_PRT_DEBUG) { 4942 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 4943 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 4944 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 4945 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 4946 } 4947 if (mpt->is_fc) { 4948 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 4949 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 4950 if (fc->FcpCntl[2]) { 4951 /* 4952 * Task Management Request 4953 */ 4954 switch (fc->FcpCntl[2]) { 4955 case 0x2: 4956 fct = MPT_ABORT_TASK_SET; 4957 break; 4958 case 0x4: 4959 fct = MPT_CLEAR_TASK_SET; 4960 break; 4961 case 0x20: 4962 fct = MPT_TARGET_RESET; 4963 break; 4964 case 0x40: 4965 fct = MPT_CLEAR_ACA; 4966 break; 4967 case 0x80: 4968 fct = MPT_TERMINATE_TASK; 4969 break; 4970 default: 4971 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 4972 fc->FcpCntl[2]); 4973 mpt_scsi_tgt_status(mpt, 0, req, 4974 SCSI_STATUS_OK, 0); 4975 return; 4976 } 4977 } else { 4978 switch (fc->FcpCntl[1]) { 4979 case 0: 4980 tag_action = MSG_SIMPLE_Q_TAG; 4981 break; 4982 case 1: 4983 tag_action = MSG_HEAD_OF_Q_TAG; 4984 break; 4985 case 2: 4986 tag_action = MSG_ORDERED_Q_TAG; 4987 break; 4988 default: 4989 /* 4990 * Bah. Ignore Untagged Queing and ACA 4991 */ 4992 tag_action = MSG_SIMPLE_Q_TAG; 4993 break; 4994 } 4995 } 4996 tgt->resid = be32toh(fc->FcpDl); 4997 cdbp = fc->FcpCdb; 4998 lunptr = fc->FcpLun; 4999 itag = be16toh(fc->OptionalOxid); 5000 } else if (mpt->is_sas) { 5001 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5002 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5003 cdbp = ssp->CDB; 5004 lunptr = ssp->LogicalUnitNumber; 5005 itag = ssp->InitiatorTag; 5006 } else { 5007 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5008 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5009 cdbp = sp->CDB; 5010 lunptr = sp->LogicalUnitNumber; 5011 itag = sp->Tag; 5012 } 5013 5014 /* 5015 * Generate a simple lun 5016 */ 5017 switch (lunptr[0] & 0xc0) { 5018 case 0x40: 5019 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5020 break; 5021 case 0: 5022 lun = lunptr[1]; 5023 break; 5024 default: 5025 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 5026 lun = 0xffff; 5027 break; 5028 } 5029 5030 /* 5031 * Deal with non-enabled or bad luns here. 5032 */ 5033 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5034 mpt->trt[lun].enabled == 0) { 5035 if (mpt->twildcard) { 5036 trtp = &mpt->trt_wildcard; 5037 } else if (fct == MPT_NIL_TMT_VALUE) { 5038 /* 5039 * In this case, we haven't got an upstream listener 5040 * for either a specific lun or wildcard luns. We 5041 * have to make some sensible response. For regular 5042 * inquiry, just return some NOT HERE inquiry data. 5043 * For VPD inquiry, report illegal field in cdb. 5044 * For REQUEST SENSE, just return NO SENSE data. 5045 * REPORT LUNS gets illegal command. 5046 * All other commands get 'no such device'. 
5047 */ 5048 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5049 size_t len; 5050 5051 memset(buf, 0, MPT_SENSE_SIZE); 5052 cond = SCSI_STATUS_CHECK_COND; 5053 buf[0] = 0xf0; 5054 buf[2] = 0x5; 5055 buf[7] = 0x8; 5056 sp = buf; 5057 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5058 5059 switch (cdbp[0]) { 5060 case INQUIRY: 5061 { 5062 if (cdbp[1] != 0) { 5063 buf[12] = 0x26; 5064 buf[13] = 0x01; 5065 break; 5066 } 5067 len = min(tgt->resid, cdbp[4]); 5068 len = min(len, sizeof (null_iqd)); 5069 mpt_lprt(mpt, MPT_PRT_DEBUG, 5070 "local inquiry %ld bytes\n", (long) len); 5071 mpt_scsi_tgt_local(mpt, req, lun, 1, 5072 null_iqd, len); 5073 return; 5074 } 5075 case REQUEST_SENSE: 5076 { 5077 buf[2] = 0x0; 5078 len = min(tgt->resid, cdbp[4]); 5079 len = min(len, sizeof (buf)); 5080 mpt_lprt(mpt, MPT_PRT_DEBUG, 5081 "local reqsense %ld bytes\n", (long) len); 5082 mpt_scsi_tgt_local(mpt, req, lun, 1, 5083 buf, len); 5084 return; 5085 } 5086 case REPORT_LUNS: 5087 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5088 buf[12] = 0x26; 5089 return; 5090 default: 5091 mpt_lprt(mpt, MPT_PRT_DEBUG, 5092 "CMD 0x%x to unmanaged lun %u\n", 5093 cdbp[0], lun); 5094 buf[12] = 0x25; 5095 break; 5096 } 5097 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5098 return; 5099 } 5100 /* otherwise, leave trtp NULL */ 5101 } else { 5102 trtp = &mpt->trt[lun]; 5103 } 5104 5105 /* 5106 * Deal with any task management 5107 */ 5108 if (fct != MPT_NIL_TMT_VALUE) { 5109 if (trtp == NULL) { 5110 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5111 fct); 5112 mpt_scsi_tgt_status(mpt, 0, req, 5113 SCSI_STATUS_OK, 0); 5114 } else { 5115 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5116 GET_INITIATOR_INDEX(reply_desc)); 5117 } 5118 return; 5119 } 5120 5121 5122 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5123 if (atiop == NULL) { 5124 mpt_lprt(mpt, MPT_PRT_WARN, 5125 "no ATIOs for lun %u- sending back %s\n", lun, 5126 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5127 mpt_scsi_tgt_status(mpt, NULL, req, 5128 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5129 NULL); 5130 return; 5131 } 5132 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5133 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5134 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5135 atiop->ccb_h.ccb_mpt_ptr = mpt; 5136 atiop->ccb_h.status = CAM_CDB_RECVD; 5137 atiop->ccb_h.target_lun = lun; 5138 atiop->sense_len = 0; 5139 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5140 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5141 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5142 5143 /* 5144 * The tag we construct here allows us to find the 5145 * original request that the command came in with. 5146 * 5147 * This way we don't have to depend on anything but the 5148 * tag to find things when CCBs show back up from CAM. 5149 */ 5150 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5151 tgt->tag_id = atiop->tag_id; 5152 if (tag_action) { 5153 atiop->tag_action = tag_action; 5154 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5155 } 5156 if (mpt->verbose >= MPT_PRT_DEBUG) { 5157 int i; 5158 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5159 atiop->ccb_h.target_lun); 5160 for (i = 0; i < atiop->cdb_len; i++) { 5161 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5162 (i == (atiop->cdb_len - 1))? 
'>' : ' '); 5163 } 5164 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5165 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5166 } 5167 5168 xpt_done((union ccb *)atiop); 5169 } 5170 5171 static void 5172 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5173 { 5174 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5175 5176 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5177 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5178 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5179 tgt->tag_id, tgt->state); 5180 } 5181 5182 static void 5183 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5184 { 5185 5186 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5187 req->index, req->index, req->state); 5188 mpt_tgt_dump_tgt_state(mpt, req); 5189 } 5190 5191 static int 5192 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5193 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5194 { 5195 int dbg; 5196 union ccb *ccb; 5197 U16 status; 5198 5199 if (reply_frame == NULL) { 5200 /* 5201 * Figure out what the state of the command is. 5202 */ 5203 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5204 5205 #ifdef INVARIANTS 5206 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5207 if (tgt->req) { 5208 mpt_req_not_spcl(mpt, tgt->req, 5209 "turbo scsi_tgt_reply associated req", __LINE__); 5210 } 5211 #endif 5212 switch(tgt->state) { 5213 case TGT_STATE_LOADED: 5214 /* 5215 * This is a new command starting. 5216 */ 5217 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5218 break; 5219 case TGT_STATE_MOVING_DATA: 5220 { 5221 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5222 5223 ccb = tgt->ccb; 5224 if (tgt->req == NULL) { 5225 panic("mpt: turbo target reply with null " 5226 "associated request moving data"); 5227 /* NOTREACHED */ 5228 } 5229 if (ccb == NULL) { 5230 if (tgt->is_local == 0) { 5231 panic("mpt: turbo target reply with " 5232 "null associated ccb moving data"); 5233 /* NOTREACHED */ 5234 } 5235 mpt_lprt(mpt, MPT_PRT_DEBUG, 5236 "TARGET_ASSIST local done\n"); 5237 TAILQ_REMOVE(&mpt->request_pending_list, 5238 tgt->req, links); 5239 mpt_free_request(mpt, tgt->req); 5240 tgt->req = NULL; 5241 mpt_scsi_tgt_status(mpt, NULL, req, 5242 0, NULL); 5243 return (TRUE); 5244 } 5245 tgt->ccb = NULL; 5246 tgt->nxfers++; 5247 mpt_req_untimeout(req, mpt_timeout, ccb); 5248 mpt_lprt(mpt, MPT_PRT_DEBUG, 5249 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5250 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5251 /* 5252 * Free the Target Assist Request 5253 */ 5254 KASSERT(tgt->req->ccb == ccb, 5255 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5256 tgt->req->serno, tgt->req->ccb)); 5257 TAILQ_REMOVE(&mpt->request_pending_list, 5258 tgt->req, links); 5259 mpt_free_request(mpt, tgt->req); 5260 tgt->req = NULL; 5261 5262 /* 5263 * Do we need to send status now? That is, are 5264 * we done with all our data transfers? 
5265 */ 5266 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5267 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5268 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5269 KASSERT(ccb->ccb_h.status, 5270 ("zero ccb sts at %d", __LINE__)); 5271 tgt->state = TGT_STATE_IN_CAM; 5272 if (mpt->outofbeer) { 5273 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5274 mpt->outofbeer = 0; 5275 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5276 } 5277 xpt_done(ccb); 5278 break; 5279 } 5280 /* 5281 * Otherwise, send status (and sense) 5282 */ 5283 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5284 sp = sense; 5285 memcpy(sp, &ccb->csio.sense_data, 5286 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5287 } 5288 mpt_scsi_tgt_status(mpt, ccb, req, 5289 ccb->csio.scsi_status, sp); 5290 break; 5291 } 5292 case TGT_STATE_SENDING_STATUS: 5293 case TGT_STATE_MOVING_DATA_AND_STATUS: 5294 { 5295 int ioindex; 5296 ccb = tgt->ccb; 5297 5298 if (tgt->req == NULL) { 5299 panic("mpt: turbo target reply with null " 5300 "associated request sending status"); 5301 /* NOTREACHED */ 5302 } 5303 5304 if (ccb) { 5305 tgt->ccb = NULL; 5306 if (tgt->state == 5307 TGT_STATE_MOVING_DATA_AND_STATUS) { 5308 tgt->nxfers++; 5309 } 5310 mpt_req_untimeout(req, mpt_timeout, ccb); 5311 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5312 ccb->ccb_h.status |= CAM_SENT_SENSE; 5313 } 5314 mpt_lprt(mpt, MPT_PRT_DEBUG, 5315 "TARGET_STATUS tag %x sts %x flgs %x req " 5316 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5317 ccb->ccb_h.flags, tgt->req); 5318 /* 5319 * Free the Target Send Status Request 5320 */ 5321 KASSERT(tgt->req->ccb == ccb, 5322 ("tgt->req %p:%u tgt->req->ccb %p", 5323 tgt->req, tgt->req->serno, tgt->req->ccb)); 5324 /* 5325 * Notify CAM that we're done 5326 */ 5327 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5328 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5329 KASSERT(ccb->ccb_h.status, 5330 ("ZERO ccb sts at %d", __LINE__)); 5331 tgt->ccb = NULL; 5332 } else { 5333 mpt_lprt(mpt, MPT_PRT_DEBUG, 5334 "TARGET_STATUS non-CAM for req %p:%u\n", 5335 tgt->req, tgt->req->serno); 5336 } 5337 TAILQ_REMOVE(&mpt->request_pending_list, 5338 tgt->req, links); 5339 mpt_free_request(mpt, tgt->req); 5340 tgt->req = NULL; 5341 5342 /* 5343 * And re-post the Command Buffer. 5344 * This will reset the state. 
5345 */ 5346 ioindex = GET_IO_INDEX(reply_desc); 5347 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5348 tgt->is_local = 0; 5349 mpt_post_target_command(mpt, req, ioindex); 5350 5351 /* 5352 * And post a done for anyone who cares 5353 */ 5354 if (ccb) { 5355 if (mpt->outofbeer) { 5356 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5357 mpt->outofbeer = 0; 5358 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5359 } 5360 xpt_done(ccb); 5361 } 5362 break; 5363 } 5364 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5365 tgt->state = TGT_STATE_LOADED; 5366 break; 5367 default: 5368 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5369 "Reply Function\n", tgt->state); 5370 } 5371 return (TRUE); 5372 } 5373 5374 status = le16toh(reply_frame->IOCStatus); 5375 if (status != MPI_IOCSTATUS_SUCCESS) { 5376 dbg = MPT_PRT_ERROR; 5377 } else { 5378 dbg = MPT_PRT_DEBUG1; 5379 } 5380 5381 mpt_lprt(mpt, dbg, 5382 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5383 req, req->serno, reply_frame, reply_frame->Function, status); 5384 5385 switch (reply_frame->Function) { 5386 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5387 { 5388 mpt_tgt_state_t *tgt; 5389 #ifdef INVARIANTS 5390 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5391 #endif 5392 if (status != MPI_IOCSTATUS_SUCCESS) { 5393 /* 5394 * XXX What to do? 5395 */ 5396 break; 5397 } 5398 tgt = MPT_TGT_STATE(mpt, req); 5399 KASSERT(tgt->state == TGT_STATE_LOADING, 5400 ("bad state 0x%x on reply to buffer post", tgt->state)); 5401 mpt_assign_serno(mpt, req); 5402 tgt->state = TGT_STATE_LOADED; 5403 break; 5404 } 5405 case MPI_FUNCTION_TARGET_ASSIST: 5406 #ifdef INVARIANTS 5407 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5408 #endif 5409 mpt_prt(mpt, "target assist completion\n"); 5410 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5411 mpt_free_request(mpt, req); 5412 break; 5413 case MPI_FUNCTION_TARGET_STATUS_SEND: 5414 #ifdef INVARIANTS 5415 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5416 #endif 5417 mpt_prt(mpt, "status send completion\n"); 5418 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5419 mpt_free_request(mpt, req); 5420 break; 5421 case MPI_FUNCTION_TARGET_MODE_ABORT: 5422 { 5423 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5424 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5425 PTR_MSG_TARGET_MODE_ABORT abtp = 5426 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5427 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5428 #ifdef INVARIANTS 5429 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5430 #endif 5431 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5432 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5433 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5434 mpt_free_request(mpt, req); 5435 break; 5436 } 5437 default: 5438 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5439 "0x%x\n", reply_frame->Function); 5440 break; 5441 } 5442 return (TRUE); 5443 } 5444